author     Daniel Baumann <daniel.baumann@progress-linux.org>  2019-09-03 10:23:38 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2019-09-03 10:23:38 +0000
commit     574098461cd45be12a497afbdac6f93c58978387 (patch)
tree       9eb60a5930b7c20d42f7fde1e234cae3968ed3d9
parent     Adding upstream version 1.16.1. (diff)
download   netdata-574098461cd45be12a497afbdac6f93c58978387.tar.xz
           netdata-574098461cd45be12a497afbdac6f93c58978387.zip

Adding upstream version 1.17.0. (upstream/1.17.0)

Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
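The snapshot archives named above can be fetched for offline review. A minimal sketch, assuming this page is served by a cgit instance; CGIT_BASE below is a placeholder for that instance's base URL, not a real address:

    # Download and unpack the snapshot tarball listed in the header.
    # CGIT_BASE is a placeholder; substitute the real base URL of this repository.
    curl -LO "$CGIT_BASE/snapshot/netdata-574098461cd45be12a497afbdac6f93c58978387.tar.xz"
    tar -xJf netdata-574098461cd45be12a497afbdac6f93c58978387.tar.xz
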
-rw-r--r--  .codacy.yml | 15
-rw-r--r--  .codeclimate.yml | 99
-rw-r--r--  .gitattributes | 2
-rw-r--r--  .github/CODEOWNERS | 84
-rw-r--r--  .github/ISSUE_TEMPLATE.md | 15
-rw-r--r--  .github/ISSUE_TEMPLATE/bug_report.md | 34
-rw-r--r--  .github/ISSUE_TEMPLATE/feature_request.md | 16
-rw-r--r--  .github/ISSUE_TEMPLATE/question.md | 26
-rw-r--r--  .github/PULL_REQUEST_TEMPLATE.md | 19
-rw-r--r--  .github/stale.yml | 18
-rw-r--r--  .gitignore | 4
-rw-r--r--  .lgtm.yml | 24
-rw-r--r--  .remarkrc.js | 121
-rw-r--r--  .travis.yml | 638
-rw-r--r--  .travis/README.md | 143
-rwxr-xr-x  .travis/check_changelog_last_modification.sh | 21
-rwxr-xr-x  .travis/create_artifacts.sh | 59
-rwxr-xr-x  .travis/draft_release.sh | 65
-rw-r--r--  .travis/gcs-credentials.json.enc | bin 2320 -> 0 bytes
-rwxr-xr-x  .travis/generate_changelog_and_tag_release.sh | 67
-rwxr-xr-x  .travis/generate_changelog_for_nightlies.sh | 70
-rwxr-xr-x  .travis/generate_changelog_for_release.sh | 36
-rwxr-xr-x  .travis/labeler.sh | 82
-rwxr-xr-x  .travis/nightlies.sh | 49
-rw-r--r--  .travis/package_management/build.sh | 31
-rwxr-xr-x  .travis/package_management/build_package_in_container.sh | 82
-rwxr-xr-x  .travis/package_management/common.py | 162
-rwxr-xr-x  .travis/package_management/configure_deb_lxc_environment.py | 95
-rwxr-xr-x  .travis/package_management/configure_rpm_lxc_environment.py | 102
-rwxr-xr-x  .travis/package_management/create_lxc_for_build.sh | 101
-rw-r--r--  .travis/package_management/functions.sh | 33
-rwxr-xr-x  .travis/package_management/package_cloud_wrapper.sh | 48
-rwxr-xr-x  .travis/package_management/prepare_packages.sh | 63
-rwxr-xr-x  .travis/package_management/trigger_deb_lxc_build.py | 73
-rwxr-xr-x  .travis/package_management/trigger_rpm_lxc_build.py | 55
-rwxr-xr-x  .travis/package_management/yank_stale_rpm.sh | 35
-rwxr-xr-x  .travis/run_install_with_dist_file.sh | 48
-rwxr-xr-x  .travis/tagger.sh | 77
-rw-r--r--  CHANGELOG.md | 813
-rw-r--r--  CMakeLists.txt | 828
-rw-r--r--  CODE_OF_CONDUCT.md | 31
-rw-r--r--  CONTRIBUTING.md | 71
-rw-r--r--  CONTRIBUTORS.md | 162
-rw-r--r--  DOCUMENTATION.md | 53
-rw-r--r--  HISTORICAL_CHANGELOG.md | 655
-rw-r--r--  Makefile.am | 12
-rw-r--r--  Makefile.in | 3601
-rw-r--r--  README.md | 489
-rw-r--r--  REDISTRIBUTED.md | 231
-rw-r--r--  SECURITY.md | 43
-rw-r--r--  aclocal.m4 | 1474
-rw-r--r--  backends/Makefile.am | 1
-rw-r--r--  backends/Makefile.in | 710
-rw-r--r--  backends/README.md | 230
-rw-r--r--  backends/WALKTHROUGH.md | 90
-rw-r--r--  backends/aws_kinesis/Makefile.in | 571
-rw-r--r--  backends/aws_kinesis/README.md | 15
-rw-r--r--  backends/aws_kinesis/aws_kinesis.c | 15
-rw-r--r--  backends/backends.c | 185
-rw-r--r--  backends/backends.h | 35
-rw-r--r--  backends/graphite/Makefile.in | 507
-rw-r--r--  backends/json/Makefile.in | 507
-rw-r--r--  backends/mongodb/Makefile.am | 12
-rw-r--r--  backends/mongodb/Makefile.in | 571
-rw-r--r--  backends/mongodb/README.md | 32
-rw-r--r--  backends/mongodb/mongodb.c | 189
-rw-r--r--  backends/mongodb/mongodb.conf | 12
-rw-r--r--  backends/mongodb/mongodb.h | 16
-rw-r--r--  backends/opentsdb/Makefile.in | 507
-rw-r--r--  backends/opentsdb/README.md | 25
-rw-r--r--  backends/prometheus/Makefile.in | 698
-rw-r--r--  backends/prometheus/README.md | 134
-rw-r--r--  backends/prometheus/remote_write/Makefile.in | 521
-rw-r--r--  backends/prometheus/remote_write/README.md | 4
-rwxr-xr-x  build/build.sh | 20
-rw-r--r--  collectors/Makefile.in | 717
-rw-r--r--  collectors/README.md | 87
-rw-r--r--  collectors/all.h | 4
-rw-r--r--  collectors/apps.plugin/Makefile.in | 571
-rw-r--r--  collectors/apps.plugin/README.md | 172
-rw-r--r--  collectors/apps.plugin/apps_plugin.c | 110
-rw-r--r--  collectors/cgroups.plugin/Makefile.in | 613
-rw-r--r--  collectors/cgroups.plugin/README.md | 95
-rw-r--r--  collectors/cgroups.plugin/cgroup-name.sh | 218
-rw-r--r--  collectors/charts.d.plugin/Makefile.in | 1003
-rw-r--r--  collectors/charts.d.plugin/README.md | 76
-rw-r--r--  collectors/charts.d.plugin/ap/README.md | 46
-rw-r--r--  collectors/charts.d.plugin/apache/README.md | 26
-rw-r--r--  collectors/charts.d.plugin/apcupsd/README.md | 4
-rw-r--r--  collectors/charts.d.plugin/charts.d.plugin | 698
-rw-r--r--  collectors/charts.d.plugin/cpu_apps/README.md | 2
-rw-r--r--  collectors/charts.d.plugin/cpufreq/README.md | 2
-rw-r--r--  collectors/charts.d.plugin/example/README.md | 3
-rw-r--r--  collectors/charts.d.plugin/exim/README.md | 2
-rw-r--r--  collectors/charts.d.plugin/hddtemp/README.md | 6
-rw-r--r--  collectors/charts.d.plugin/libreswan/README.md | 14
-rw-r--r--  collectors/charts.d.plugin/load_average/README.md | 2
-rw-r--r--  collectors/charts.d.plugin/mem_apps/README.md | 2
-rw-r--r--  collectors/charts.d.plugin/mysql/README.md | 101
-rw-r--r--  collectors/charts.d.plugin/nginx/README.md | 2
-rw-r--r--  collectors/charts.d.plugin/nut/README.md | 49
-rw-r--r--  collectors/charts.d.plugin/opensips/README.md | 4
-rw-r--r--  collectors/charts.d.plugin/phpfpm/README.md | 2
-rw-r--r--  collectors/charts.d.plugin/postfix/README.md | 8
-rw-r--r--  collectors/charts.d.plugin/sensors/README.md | 23
-rw-r--r--  collectors/charts.d.plugin/squid/README.md | 48
-rw-r--r--  collectors/charts.d.plugin/tomcat/README.md | 2
-rw-r--r--  collectors/checks.plugin/Makefile.in | 514
-rw-r--r--  collectors/checks.plugin/README.md | 2
-rw-r--r--  collectors/cups.plugin/Makefile.in | 514
-rw-r--r--  collectors/cups.plugin/README.md | 58
-rw-r--r--  collectors/diskspace.plugin/Makefile.in | 514
-rw-r--r--  collectors/diskspace.plugin/README.md | 11
-rw-r--r--  collectors/fping.plugin/Makefile.in | 641
-rw-r--r--  collectors/fping.plugin/README.md | 15
-rw-r--r--  collectors/fping.plugin/fping.plugin | 200
-rw-r--r--  collectors/freebsd.plugin/Makefile.in | 514
-rw-r--r--  collectors/freebsd.plugin/README.md | 4
-rw-r--r--  collectors/freeipmi.plugin/Makefile.in | 514
-rw-r--r--  collectors/freeipmi.plugin/README.md | 37
-rw-r--r--  collectors/idlejitter.plugin/Makefile.in | 514
-rw-r--r--  collectors/idlejitter.plugin/README.md | 8
-rw-r--r--  collectors/ioping.plugin/Makefile.in | 641
-rw-r--r--  collectors/ioping.plugin/README.md | 15
-rw-r--r--  collectors/ioping.plugin/ioping.plugin | 212
-rw-r--r--  collectors/macos.plugin/Makefile.in | 514
-rw-r--r--  collectors/macos.plugin/README.md | 4
-rw-r--r--  collectors/nfacct.plugin/Makefile.in | 514
-rw-r--r--  collectors/nfacct.plugin/README.md | 24
-rw-r--r--  collectors/nfacct.plugin/plugin_nfacct.c | 22
-rw-r--r--  collectors/node.d.plugin/Makefile.in | 855
-rw-r--r--  collectors/node.d.plugin/README.md | 66
-rw-r--r--  collectors/node.d.plugin/fronius/README.md | 55
-rw-r--r--  collectors/node.d.plugin/named/README.md | 686
-rw-r--r--  collectors/node.d.plugin/node.d.plugin | 303
-rw-r--r--  collectors/node.d.plugin/sma_webbox/README.md | 57
-rw-r--r--  collectors/node.d.plugin/snmp/README.md | 67
-rw-r--r--  collectors/node.d.plugin/stiebeleltron/README.md | 102
-rw-r--r--  collectors/perf.plugin/Makefile.in | 514
-rw-r--r--  collectors/perf.plugin/README.md | 5
-rw-r--r--  collectors/plugins.d/Makefile.in | 697
-rw-r--r--  collectors/plugins.d/README.md | 269
-rw-r--r--  collectors/plugins.d/plugins_d.c | 2
-rw-r--r--  collectors/proc.plugin/Makefile.in | 514
-rw-r--r--  collectors/proc.plugin/README.md | 322
-rw-r--r--  collectors/proc.plugin/plugin_proc.c | 1
-rw-r--r--  collectors/proc.plugin/plugin_proc.h | 1
-rw-r--r--  collectors/proc.plugin/proc_uptime.c | 71
-rw-r--r--  collectors/proc.plugin/sys_block_zram.c | 288
-rw-r--r--  collectors/python.d.plugin/Makefile.in | 2063
-rw-r--r--  collectors/python.d.plugin/README.md | 89
-rw-r--r--  collectors/python.d.plugin/adaptec_raid/README.md | 32
-rw-r--r--  collectors/python.d.plugin/apache/README.md | 53
-rw-r--r--  collectors/python.d.plugin/beanstalk/README.md | 166
-rw-r--r--  collectors/python.d.plugin/bind_rndc/README.md | 87
-rw-r--r--  collectors/python.d.plugin/boinc/README.md | 4
-rw-r--r--  collectors/python.d.plugin/ceph/README.md | 24
-rw-r--r--  collectors/python.d.plugin/chrony/README.md | 23
-rw-r--r--  collectors/python.d.plugin/couchdb/README.md | 22
-rw-r--r--  collectors/python.d.plugin/dns_query_time/README.md | 5
-rw-r--r--  collectors/python.d.plugin/dnsdist/README.md | 80
-rw-r--r--  collectors/python.d.plugin/dockerd/README.md | 24
-rw-r--r--  collectors/python.d.plugin/dovecot/README.md | 76
-rw-r--r--  collectors/python.d.plugin/elasticsearch/README.md | 90
-rw-r--r--  collectors/python.d.plugin/energid/README.md | 3
-rw-r--r--  collectors/python.d.plugin/example/README.md | 2
-rw-r--r--  collectors/python.d.plugin/exim/README.md | 7
-rw-r--r--  collectors/python.d.plugin/fail2ban/README.md | 8
-rw-r--r--  collectors/python.d.plugin/freeradius/README.md | 86
-rw-r--r--  collectors/python.d.plugin/go_expvar/README.md | 120
-rw-r--r--  collectors/python.d.plugin/haproxy/README.md | 32
-rw-r--r--  collectors/python.d.plugin/hddtemp/README.md | 4
-rw-r--r--  collectors/python.d.plugin/httpcheck/README.md | 34
-rw-r--r--  collectors/python.d.plugin/icecast/README.md | 12
-rw-r--r--  collectors/python.d.plugin/ipfs/README.md | 17
-rw-r--r--  collectors/python.d.plugin/isc_dhcpd/README.md | 23
-rw-r--r--  collectors/python.d.plugin/litespeed/README.md | 57
-rw-r--r--  collectors/python.d.plugin/logind/README.md | 57
-rw-r--r--  collectors/python.d.plugin/megacli/README.md | 32
-rw-r--r--  collectors/python.d.plugin/memcached/README.md | 85
-rw-r--r--  collectors/python.d.plugin/mongodb/README.md | 265
-rw-r--r--  collectors/python.d.plugin/mongodb/mongodb.chart.py | 70
-rw-r--r--  collectors/python.d.plugin/mongodb/mongodb.conf | 10
-rw-r--r--  collectors/python.d.plugin/monit/README.md | 29
-rw-r--r--  collectors/python.d.plugin/mysql/README.md | 496
-rw-r--r--  collectors/python.d.plugin/mysql/mysql.chart.py | 165
-rw-r--r--  collectors/python.d.plugin/nginx/README.md | 35
-rw-r--r--  collectors/python.d.plugin/nginx_plus/README.md | 167
-rw-r--r--  collectors/python.d.plugin/nsd/README.md | 90
-rw-r--r--  collectors/python.d.plugin/ntpd/README.md | 59
-rw-r--r--  collectors/python.d.plugin/nvidia_smi/README.md | 38
-rw-r--r--  collectors/python.d.plugin/openldap/README.md | 58
-rw-r--r--  collectors/python.d.plugin/oracledb/README.md | 73
-rw-r--r--  collectors/python.d.plugin/ovpn_status_log/README.md | 24
-rw-r--r--  collectors/python.d.plugin/phpfpm/README.md | 30
-rw-r--r--  collectors/python.d.plugin/portcheck/README.md | 29
-rw-r--r--  collectors/python.d.plugin/postfix/README.md | 12
-rw-r--r--  collectors/python.d.plugin/postgres/README.md | 72
-rw-r--r--  collectors/python.d.plugin/powerdns/README.md | 129
-rw-r--r--  collectors/python.d.plugin/proxysql/README.md | 103
-rw-r--r--  collectors/python.d.plugin/puppet/README.md | 48
-rw-r--r--  collectors/python.d.plugin/python.d.plugin | 733
-rw-r--r--  collectors/python.d.plugin/rabbitmq/README.md | 66
-rw-r--r--  collectors/python.d.plugin/redis/README.md | 38
-rw-r--r--  collectors/python.d.plugin/rethinkdbs/README.md | 27
-rw-r--r--  collectors/python.d.plugin/retroshare/README.md | 2
-rw-r--r--  collectors/python.d.plugin/riakkv/README.md | 128
-rw-r--r--  collectors/python.d.plugin/samba/README.md | 101
-rw-r--r--  collectors/python.d.plugin/sensors/README.md | 4
-rw-r--r--  collectors/python.d.plugin/smartd_log/README.md | 52
-rw-r--r--  collectors/python.d.plugin/spigotmc/README.md | 4
-rw-r--r--  collectors/python.d.plugin/spigotmc/spigotmc.chart.py | 19
-rw-r--r--  collectors/python.d.plugin/spigotmc/spigotmc.conf | 2
-rw-r--r--  collectors/python.d.plugin/springboot/README.md | 47
-rw-r--r--  collectors/python.d.plugin/squid/README.md | 36
-rw-r--r--  collectors/python.d.plugin/tomcat/README.md | 26
-rw-r--r--  collectors/python.d.plugin/tor/README.md | 16
-rw-r--r--  collectors/python.d.plugin/traefik/README.md | 53
-rw-r--r--  collectors/python.d.plugin/unbound/README.md | 100
-rw-r--r--  collectors/python.d.plugin/uwsgi/README.md | 28
-rw-r--r--  collectors/python.d.plugin/varnish/README.md | 98
-rw-r--r--  collectors/python.d.plugin/w1sensor/README.md | 4
-rw-r--r--  collectors/python.d.plugin/web_log/README.md | 98
-rw-r--r--  collectors/statsd.plugin/Makefile.in | 605
-rw-r--r--  collectors/statsd.plugin/README.md | 203
-rw-r--r--  collectors/tc.plugin/Makefile.in | 612
-rw-r--r--  collectors/tc.plugin/README.md | 59
-rw-r--r--  collectors/tc.plugin/tc-qos-helper.sh | 297
-rw-r--r--  collectors/xenstat.plugin/Makefile.in | 514
-rw-r--r--  collectors/xenstat.plugin/README.md | 20
-rwxr-xr-x  compile | 347
-rwxr-xr-x  config.guess | 1480
-rw-r--r--  config.h.in | 358
-rwxr-xr-x  config.sub | 1801
-rwxr-xr-x  configure | 12468
-rw-r--r--  configure.ac | 53
-rw-r--r--  contrib/README.md | 49
-rw-r--r--  contrib/debian/changelog | 1
-rw-r--r--  contrib/debian/control.trusty | 56
-rwxr-xr-x  contrib/debian/install_go.sh | 14
-rwxr-xr-x  contrib/debian/rules | 6
-rw-r--r--  contrib/sles11/README.md | 15
-rwxr-xr-x  coverity-install.sh | 36
-rwxr-xr-x  coverity-scan.sh | 181
-rw-r--r--  daemon/Makefile.in | 614
-rw-r--r--  daemon/README.md | 190
-rw-r--r--  daemon/anonymous-statistics.sh | 89
-rw-r--r--  daemon/config/README.md | 139
-rwxr-xr-x  daemon/system-info.sh | 7
-rw-r--r--  daemon/unit_test.c | 354
-rw-r--r--  database/Makefile.in | 698
-rw-r--r--  database/README.md | 107
-rw-r--r--  database/engine/Makefile.in | 514
-rw-r--r--  database/engine/README.md | 42
-rw-r--r--  database/engine/journalfile.c | 9
-rw-r--r--  database/engine/pagecache.c | 284
-rw-r--r--  database/engine/pagecache.h | 33
-rw-r--r--  database/engine/rrdengine.c | 3
-rw-r--r--  database/engine/rrdengineapi.c | 298
-rw-r--r--  database/engine/rrdengineapi.h | 11
-rw-r--r--  database/engine/rrdenginelib.h | 2
-rw-r--r--  database/rrd.h | 7
-rw-r--r--  database/rrddim.c | 3
-rw-r--r--  database/rrdvar.c | 28
-rwxr-xr-x  depcomp | 791
-rw-r--r--  diagrams/Makefile.in | 532
-rw-r--r--  diagrams/data_structures/README.md | 26
-rw-r--r--  docs/Add-more-charts-to-netdata.md | 420
-rw-r--r--  docs/Charts.md | 2
-rw-r--r--  docs/Demo-Sites.md | 28
-rw-r--r--  docs/Donations-netdata-has-received.md | 24
-rw-r--r--  docs/GettingStarted.md | 48
-rw-r--r--  docs/Performance.md | 52
-rw-r--r--  docs/README.md | 4
-rw-r--r--  docs/Running-behind-apache.md | 61
-rw-r--r--  docs/Running-behind-caddy.md | 6
-rw-r--r--  docs/Running-behind-haproxy.md | 6
-rw-r--r--  docs/Running-behind-lighttpd.md | 8
-rw-r--r--  docs/Running-behind-nginx.md | 39
-rw-r--r--  docs/Third-Party-Plugins.md | 2
-rw-r--r--  docs/a-github-star-is-important.md | 2
-rw-r--r--  docs/anonymous-statistics.md | 59
-rw-r--r--  docs/configuration-guide.md | 61
-rw-r--r--  docs/contributing/contributing-documentation.md | 137
-rw-r--r--  docs/contributing/style-guide.md | 317
-rwxr-xr-x  docs/generator/buildyaml.sh | 21
-rwxr-xr-x  docs/generator/checklinks.sh | 2
-rw-r--r--  docs/generator/custom/css/netdata.css | 25
-rw-r--r--  docs/high-performance-netdata.md | 41
-rw-r--r--  docs/netdata-cloud/README.md | 44
-rw-r--r--  docs/netdata-cloud/nodes-view.md | 206
-rw-r--r--  docs/netdata-cloud/signing-in.md | 155
-rw-r--r--  docs/netdata-for-IoT.md | 6
-rw-r--r--  docs/netdata-security.md | 60
-rw-r--r--  docs/privacy-policy.md | 41
-rw-r--r--  docs/terms-of-use.md | 8
-rw-r--r--  docs/what-is-netdata.md | 385
-rw-r--r--  docs/why-netdata/1s-granularity.md | 21
-rw-r--r--  docs/why-netdata/README.md | 18
-rw-r--r--  docs/why-netdata/immediate-results.md | 24
-rw-r--r--  docs/why-netdata/meaningful-presentation.md | 26
-rw-r--r--  docs/why-netdata/unlimited-metrics.md | 12
-rw-r--r--  health/Makefile.am | 1
-rw-r--r--  health/Makefile.in | 861
-rw-r--r--  health/README.md | 234
-rw-r--r--  health/health.c | 670
-rw-r--r--  health/health.d/vsphere.conf | 157
-rw-r--r--  health/health.h | 2
-rw-r--r--  health/health_json.c | 37
-rw-r--r--  health/notifications/Makefile.in | 821
-rw-r--r--  health/notifications/README.md | 34
-rw-r--r--  health/notifications/alarm-notify.sh | 2298
-rw-r--r--  health/notifications/alerta/README.md | 14
-rw-r--r--  health/notifications/awssns/README.md | 34
-rw-r--r--  health/notifications/custom/README.md | 76
-rw-r--r--  health/notifications/discord/README.md | 7
-rw-r--r--  health/notifications/email/README.md | 9
-rw-r--r--  health/notifications/flock/README.md | 13
-rw-r--r--  health/notifications/irc/README.md | 20
-rw-r--r--  health/notifications/kavenegar/README.md | 12
-rw-r--r--  health/notifications/messagebird/README.md | 17
-rw-r--r--  health/notifications/pagerduty/README.md | 10
-rw-r--r--  health/notifications/prowl/Makefile.inc | 12
-rw-r--r--  health/notifications/prowl/README.md | 22
-rw-r--r--  health/notifications/pushbullet/README.md | 15
-rw-r--r--  health/notifications/pushover/README.md | 8
-rw-r--r--  health/notifications/rocketchat/README.md | 13
-rw-r--r--  health/notifications/slack/README.md | 18
-rw-r--r--  health/notifications/smstools3/README.md | 8
-rw-r--r--  health/notifications/syslog/README.md | 12
-rw-r--r--  health/notifications/telegram/README.md | 10
-rw-r--r--  health/notifications/twilio/README.md | 14
-rw-r--r--  health/notifications/web/README.md | 4
-rwxr-xr-x  install-sh | 508
-rw-r--r--  libnetdata/Makefile.in | 716
-rw-r--r--  libnetdata/README.md | 7
-rw-r--r--  libnetdata/adaptive_resortable_list/Makefile.in | 514
-rw-r--r--  libnetdata/adaptive_resortable_list/README.md | 47
-rw-r--r--  libnetdata/avl/Makefile.in | 514
-rw-r--r--  libnetdata/avl/README.md | 2
-rw-r--r--  libnetdata/buffer/Makefile.in | 514
-rw-r--r--  libnetdata/buffer/README.md | 2
-rw-r--r--  libnetdata/clocks/Makefile.in | 514
-rw-r--r--  libnetdata/clocks/README.md | 3
-rw-r--r--  libnetdata/clocks/clocks.c | 65
-rw-r--r--  libnetdata/clocks/clocks.h | 2
-rw-r--r--  libnetdata/config/Makefile.in | 514
-rw-r--r--  libnetdata/config/README.md | 19
-rw-r--r--  libnetdata/dictionary/Makefile.in | 514
-rw-r--r--  libnetdata/dictionary/README.md | 3
-rw-r--r--  libnetdata/eval/Makefile.in | 514
-rw-r--r--  libnetdata/eval/README.md | 3
-rw-r--r--  libnetdata/health/Makefile.in | 513
-rw-r--r--  libnetdata/json/Makefile.in | 514
-rw-r--r--  libnetdata/json/README.md | 4
-rw-r--r--  libnetdata/locks/Makefile.in | 514
-rw-r--r--  libnetdata/locks/README.md | 3
-rw-r--r--  libnetdata/log/Makefile.in | 514
-rw-r--r--  libnetdata/log/README.md | 3
-rw-r--r--  libnetdata/popen/Makefile.in | 514
-rw-r--r--  libnetdata/popen/README.md | 3
-rw-r--r--  libnetdata/procfile/Makefile.in | 514
-rw-r--r--  libnetdata/procfile/README.md | 49
-rw-r--r--  libnetdata/simple_pattern/Makefile.in | 514
-rw-r--r--  libnetdata/simple_pattern/README.md | 11
-rw-r--r--  libnetdata/socket/Makefile.in | 514
-rw-r--r--  libnetdata/socket/README.md | 3
-rw-r--r--  libnetdata/statistical/Makefile.in | 514
-rw-r--r--  libnetdata/statistical/README.md | 3
-rw-r--r--  libnetdata/storage_number/Makefile.in | 514
-rw-r--r--  libnetdata/storage_number/README.md | 4
-rw-r--r--  libnetdata/threads/Makefile.in | 514
-rw-r--r--  libnetdata/threads/README.md | 3
-rw-r--r--  libnetdata/url/Makefile.in | 514
-rw-r--r--  libnetdata/url/README.md | 3
-rwxr-xr-x  missing | 215
-rwxr-xr-x  netdata-installer.sh | 13
-rw-r--r--  netdata.spec | 511
-rw-r--r--  netdata.spec.in | 15
-rw-r--r--  package.json | 2
-rw-r--r--  packaging/DISTRIBUTIONS.md | 37
-rw-r--r--  packaging/docker/Dockerfile | 92
-rw-r--r--  packaging/docker/README.md | 229
-rwxr-xr-x  packaging/docker/build-test.sh | 74
-rwxr-xr-x  packaging/docker/build.sh | 72
-rwxr-xr-x  packaging/docker/check_login.sh | 41
-rwxr-xr-x  packaging/docker/publish.sh | 117
-rwxr-xr-x  packaging/docker/run.sh | 25
-rw-r--r--  packaging/go.d.checksums | 32
-rw-r--r--  packaging/go.d.version | 1
-rw-r--r--  packaging/installer/README.md | 241
-rw-r--r--  packaging/installer/UNINSTALL.md | 23
-rw-r--r--  packaging/installer/UPDATE.md | 24
-rw-r--r--  packaging/installer/functions.sh | 9
-rwxr-xr-x  packaging/installer/kickstart-static64.sh | 8
-rwxr-xr-x  packaging/installer/kickstart.sh | 6
-rw-r--r--  packaging/maintainers/README.md | 75
-rw-r--r--  packaging/makeself/README.md | 48
-rwxr-xr-x  packaging/makeself/build-x86_64-static.sh | 42
-rwxr-xr-x  packaging/makeself/build.sh | 61
-rwxr-xr-x  packaging/makeself/functions.sh | 62
-rwxr-xr-x  packaging/makeself/install-alpine-packages.sh | 48
-rwxr-xr-x  packaging/makeself/install-or-update.sh | 234
-rwxr-xr-x  packaging/makeself/jobs/10-prepare-destination.install.sh | 16
-rwxr-xr-x  packaging/makeself/jobs/50-bash-4.4.18.install.sh | 54
-rwxr-xr-x  packaging/makeself/jobs/50-curl-7.60.0.install.sh | 34
-rwxr-xr-x  packaging/makeself/jobs/50-fping-4.2.install.sh | 29
-rwxr-xr-x  packaging/makeself/jobs/50-ioping-1.1.install.sh | 18
-rwxr-xr-x  packaging/makeself/jobs/70-netdata-git.install.sh | 30
-rwxr-xr-x  packaging/makeself/jobs/99-makeself.install.sh | 99
-rwxr-xr-x  packaging/makeself/makeself-header.sh | 554
-rw-r--r--  packaging/makeself/makeself-help-header.txt | 44
-rw-r--r--  packaging/makeself/makeself-license.txt | 44
-rw-r--r--  packaging/makeself/makeself.lsm | 16
-rwxr-xr-x  packaging/makeself/makeself.sh | 621
-rwxr-xr-x  packaging/makeself/post-installer.sh | 11
-rwxr-xr-x  packaging/makeself/run-all-jobs.sh | 42
-rwxr-xr-x  packaging/manual_nightly_deployment.sh | 127
-rw-r--r--  packaging/version | 2
-rw-r--r--  registry/Makefile.in | 514
-rw-r--r--  registry/README.md | 88
-rw-r--r--  streaming/Makefile.in | 571
-rw-r--r--  streaming/README.md | 207
-rw-r--r--  system/Makefile.in | 638
-rw-r--r--  system/edit-config | 101
-rw-r--r--  tests/Makefile.am | 3
-rw-r--r--  tests/Makefile.in | 631
-rw-r--r--  tests/README.md | 56
-rw-r--r--  tests/acls/acl.sh | 119
-rw-r--r--  tests/acls/netdata.cfg | 20
-rw-r--r--  tests/acls/netdata.ssl.cfg | 24
-rw-r--r--  tests/backends/prometheus-avg-oldunits.txt | 148
-rw-r--r--  tests/backends/prometheus-avg.txt | 148
-rw-r--r--  tests/backends/prometheus-raw.txt | 156
-rwxr-xr-x  tests/backends/prometheus.bats | 31
-rw-r--r--  tests/health_mgmtapi/README.md | 12
-rw-r--r--  tests/health_mgmtapi/expected_list/ALARM_CPU_IOWAIT-list.json | 1
-rw-r--r--  tests/health_mgmtapi/expected_list/ALARM_CPU_USAGE-list.json | 1
-rw-r--r--  tests/health_mgmtapi/expected_list/CONTEXT_SYSTEM_CPU-list.json | 1
-rw-r--r--  tests/health_mgmtapi/expected_list/DISABLE-list.json | 1
-rw-r--r--  tests/health_mgmtapi/expected_list/DISABLE_ALL-list.json | 1
-rw-r--r--  tests/health_mgmtapi/expected_list/DISABLE_ALL_ERROR-list.json | 1
-rw-r--r--  tests/health_mgmtapi/expected_list/DISABLE_SYSTEM_LOAD-list.json | 1
-rw-r--r--  tests/health_mgmtapi/expected_list/FAMILIES_LOAD-list.json | 1
-rw-r--r--  tests/health_mgmtapi/expected_list/HOSTS-list.json | 1
-rw-r--r--  tests/health_mgmtapi/expected_list/RESET-list.json | 1
-rw-r--r--  tests/health_mgmtapi/expected_list/SILENCE-list.json | 1
-rw-r--r--  tests/health_mgmtapi/expected_list/SILENCE_2-list.json | 1
-rw-r--r--  tests/health_mgmtapi/expected_list/SILENCE_3-list.json | 1
-rw-r--r--  tests/health_mgmtapi/expected_list/SILENCE_ALARM_CPU_USAGE-list.json | 1
-rw-r--r--  tests/health_mgmtapi/expected_list/SILENCE_ALARM_CPU_USAGE_LOAD_TRIGGER-list.json | 1
-rw-r--r--  tests/health_mgmtapi/expected_list/SILENCE_ALL-list.json | 1
-rw-r--r--  tests/health_mgmtapi/health-cmdapi-test.sh | 226
-rwxr-xr-x  tests/installer/checksums.sh | 51
-rwxr-xr-x  tests/installer/slack.sh | 65
-rw-r--r--  tests/k6/data.js | 67
-rwxr-xr-x  tests/lifecycle.bats | 61
-rw-r--r--  tests/profile/Makefile | 53
-rw-r--r--  tests/profile/benchmark-dictionary.c | 130
-rw-r--r--  tests/profile/benchmark-line-parsing.c | 707
-rw-r--r--  tests/profile/benchmark-procfile-parser.c | 329
-rw-r--r--  tests/profile/benchmark-registry.c | 227
-rw-r--r--  tests/profile/benchmark-value-pairs.c | 623
-rw-r--r--  tests/profile/statsd-stress.c | 151
-rw-r--r--  tests/profile/test-eval.c | 299
-rwxr-xr-x  tests/updater_checks.bats | 70
-rwxr-xr-x  tests/updater_checks.sh | 78
-rw-r--r--  tests/urls/request.sh | 301
-rw-r--r--  tests/urls/request.sh.in | 10
-rw-r--r--  web/Makefile.in | 702
-rw-r--r--  web/README.md | 10
-rw-r--r--  web/api/Makefile.in | 760
-rw-r--r--  web/api/README.md | 6
-rw-r--r--  web/api/badges/Makefile.in | 514
-rw-r--r--  web/api/badges/README.md | 189
-rw-r--r--  web/api/exporters/Makefile.in | 699
-rw-r--r--  web/api/exporters/README.md | 2
-rw-r--r--  web/api/exporters/prometheus/Makefile.in | 514
-rw-r--r--  web/api/exporters/prometheus/README.md | 4
-rw-r--r--  web/api/exporters/shell/Makefile.in | 514
-rw-r--r--  web/api/exporters/shell/README.md | 10
-rw-r--r--  web/api/formatters/Makefile.in | 701
-rw-r--r--  web/api/formatters/README.md | 35
-rw-r--r--  web/api/formatters/csv/Makefile.in | 514
-rw-r--r--  web/api/formatters/csv/README.md | 88
-rw-r--r--  web/api/formatters/json/Makefile.in | 514
-rw-r--r--  web/api/formatters/json/README.md | 37
-rw-r--r--  web/api/formatters/json_wrapper.c | 13
-rw-r--r--  web/api/formatters/rrdset2json.c | 5
-rw-r--r--  web/api/formatters/ssv/Makefile.in | 514
-rw-r--r--  web/api/formatters/ssv/README.md | 26
-rw-r--r--  web/api/formatters/value/Makefile.in | 514
-rw-r--r--  web/api/formatters/value/README.md | 12
-rw-r--r--  web/api/health/Makefile.in | 514
-rw-r--r--  web/api/health/README.md | 66
-rw-r--r--  web/api/netdata-swagger.json | 96
-rw-r--r--  web/api/netdata-swagger.yaml | 61
-rw-r--r--  web/api/queries/Makefile.in | 706
-rw-r--r--  web/api/queries/README.md | 137
-rw-r--r--  web/api/queries/average/Makefile.in | 514
-rw-r--r--  web/api/queries/average/README.md | 10
-rw-r--r--  web/api/queries/average/average.c | 9
-rw-r--r--  web/api/queries/des/Makefile.in | 514
-rw-r--r--  web/api/queries/des/README.md | 14
-rw-r--r--  web/api/queries/incremental_sum/Makefile.in | 514
-rw-r--r--  web/api/queries/incremental_sum/README.md | 12
-rw-r--r--  web/api/queries/max/Makefile.in | 514
-rw-r--r--  web/api/queries/max/README.md | 10
-rw-r--r--  web/api/queries/median/Makefile.in | 514
-rw-r--r--  web/api/queries/median/README.md | 12
-rw-r--r--  web/api/queries/min/Makefile.in | 514
-rw-r--r--  web/api/queries/min/README.md | 10
-rw-r--r--  web/api/queries/query.c | 876
-rw-r--r--  web/api/queries/rrdr.h | 12
-rw-r--r--  web/api/queries/ses/Makefile.in | 514
-rw-r--r--  web/api/queries/ses/README.md | 14
-rw-r--r--  web/api/queries/stddev/Makefile.in | 514
-rw-r--r--  web/api/queries/stddev/README.md | 23
-rw-r--r--  web/api/queries/sum/Makefile.in | 514
-rw-r--r--  web/api/queries/sum/README.md | 12
-rw-r--r--  web/api/web_api_v1.c | 46
-rw-r--r--  web/api/web_api_v1.h | 1
-rw-r--r--  web/gui/Makefile.in | 914
-rw-r--r--  web/gui/README.md | 32
-rw-r--r--  web/gui/browserconfig.xml | 2
-rw-r--r--  web/gui/confluence/README.md | 50
-rw-r--r--  web/gui/custom/README.md | 93
-rw-r--r--  web/gui/dashboard.css | 18
-rw-r--r--  web/gui/dashboard.js | 37
-rw-r--r--  web/gui/dashboard.slate.css | 18
-rw-r--r--  web/gui/dashboard_info.js | 143
-rw-r--r--  web/gui/images/favicon-128.png | bin 2436 -> 0 bytes
-rw-r--r--  web/gui/images/favicon-196x196.png | bin 10025 -> 0 bytes
-rw-r--r--  web/gui/images/ms-icon-310x150.png | bin 3632 -> 0 bytes
-rw-r--r--  web/gui/images/ms-icon-36x36.png | bin 536 -> 0 bytes
-rw-r--r--  web/gui/images/packaging-beta-tag.svg | 42
-rw-r--r--  web/gui/images/seo-performance-128.png | bin 1828 -> 0 bytes
-rw-r--r--  web/gui/main.js | 14
-rw-r--r--  web/gui/manifest.json | 41
-rw-r--r--  web/gui/src/dashboard.js/charting/_c3.js | 114
-rw-r--r--  web/gui/src/dashboard.js/charting/_morris.js | 81
-rw-r--r--  web/gui/src/dashboard.js/charting/_raphael.js | 48
-rw-r--r--  web/gui/src/dashboard.js/charting/dygraph.js | 37
-rw-r--r--  web/gui/version.txt | 1
-rw-r--r--  web/server/Makefile.in | 698
-rw-r--r--  web/server/README.md | 134
-rw-r--r--  web/server/static/Makefile.in | 697
-rw-r--r--  web/server/static/README.md | 2
-rw-r--r--  web/server/web_client.c | 10
549 files changed, 91898 insertions, 19018 deletions
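The file list above is git's diffstat for this commit. Given the commit hash from the header, it can be reproduced in a local clone of the repository:

    # Print the per-file change counts and totals shown above;
    # --summary additionally lists created, deleted and mode-changed files.
    git show --stat --summary 574098461cd45be12a497afbdac6f93c58978387
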
diff --git a/.codacy.yml b/.codacy.yml
deleted file mode 100644
index fb16c241..00000000
--- a/.codacy.yml
+++ /dev/null
@@ -1,15 +0,0 @@
----
-exclude_paths:
- - collectors/python.d.plugin/python_modules/pyyaml2/**
- - collectors/python.d.plugin/python_modules/pyyaml3/**
- - collectors/python.d.plugin/python_modules/urllib3/**
- - collectors/python.d.plugin/python_modules/third_party/**
- - collectors/node.d.plugin/node_modules/**
- - contrib/**
- - packaging/makeself/**
- - web/gui/css/**
- - web/gui/lib/**
- - web/gui/old/**
- - web/gui/src/**
- - web/gui/main.js
- - tests/**
diff --git a/.codeclimate.yml b/.codeclimate.yml
deleted file mode 100644
index 8a11c84a..00000000
--- a/.codeclimate.yml
+++ /dev/null
@@ -1,99 +0,0 @@
-version: "2"
-checks:
- argument-count:
- enabled: false
- config:
- threshold: 10
- complex-logic:
- enabled: false
- config:
- threshold: 10
- file-lines:
- enabled: false
- config:
- threshold: 5000
- method-complexity:
- enabled: false
- config:
- threshold: 20
- method-count:
- enabled: false
- config:
- threshold: 50
- method-lines:
- enabled: false
- config:
- threshold: 250
- nested-control-flow:
- enabled: false
- config:
- threshold: 4
- return-statements:
- enabled: false
- config:
- threshold: 4
- similar-code:
- enabled: false
- identical-code:
- enabled: false
-plugins:
- csslint:
- enabled: true
- duplication:
- enabled: false
- config:
- languages:
- - javascript:
- mass_threshold: 100
- - python:
- python_version: 3
- mass_threshold: 100
- checks:
- Similar code:
- enabled: false
- Identical code:
- enabled: false
- eslint:
- enabled: true
- checks:
- max-statements:
- enabled: false
- complexity:
- enabled: false
- no-eval:
- enabled: false
- no-extend-native:
- enabled: false
- no-void:
- enabled: false
- no-alert:
- enabled: false
- no-undef-init:
- enabled: false
- fixme:
- enabled: false
- phpmd:
- enabled: true
- radon:
- enabled: true
- checks:
- Complexity:
- enabled: false
-exclude_patterns:
- - ".gitignore"
- - ".githooks/"
- - "tests/"
- - "m4/"
- - "web/css/"
- - "web/lib/"
- - "web/fonts/"
- - "web/old/"
- - "collectors/python.d.plugin/python_modules/pyyaml2/"
- - "collectors/python.d.plugin/python_modules/pyyaml3/"
- - "collectors/python.d.plugin/python_modules/urllib3/"
- - "collectors/node.d.plugin/node_modules/lib/"
- - "collectors/node.d.plugin/node_modules/asn1-ber.js"
- - "collectors/node.d.plugin/node_modules/extend.js"
- - "collectors/node.d.plugin/node_modules/pixl-xml.js"
- - "collectors/node.d.plugin/node_modules/net-snmp.js"
-
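The deleted .codeclimate.yml disabled most maintainability checks, enabled the csslint, eslint, phpmd and radon engines, and excluded vendored code from analysis. For context, a config like this can be checked locally with the Code Climate CLI; a sketch, assuming the CLI is installed (it runs the engines in Docker):

    # Check the .codeclimate.yml syntax, then run the configured engines
    # against the working tree.
    codeclimate validate-config
    codeclimate analyze
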
diff --git a/.gitattributes b/.gitattributes
deleted file mode 100644
index 45ec5156..00000000
--- a/.gitattributes
+++ /dev/null
@@ -1,2 +0,0 @@
-*.c diff=cpp
-*.h diff=cpp
diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index 2b4e0d16..35070e49 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -2,60 +2,60 @@
# This way we prevent modifications which will be overwritten by automation.
# Global (default) code owner
-* @ktsaou @cakrit
+* @ktsaou @cakrit @cosmix
# Ownership by directory structure
-.travis/ @paulkatsoulakis @cakrit
-.github/ @paulkatsoulakis @cakrit
+.travis/ @cakrit @cosmix
+.github/ @cakrit @cosmix
backends/ @thiagoftsm @vlvkobal
backends/graphite/ @thiagoftsm @vlvkobal
backends/json/ @thiagoftsm @vlvkobal
backends/opentsdb/ @thiagoftsm @vlvkobal
-backends/prometheus/ @vlvkobal @paulkatsoulakis @thiagoftsm
-build/ @paulkatsoulakis @cakrit
-collectors/ @vlvkobal @mfundul @cakrit
-collectors/charts.d.plugin/ @paulkatsoulakis @cakrit
-collectors/freebsd.plugin/ @vlvkobal @thiagoftsm @cakrit
-collectors/macos.plugin/ @vlvkobal @thiagoftsm @cakrit
-collectors/node.d.plugin/ @VLegakis @cakrit
-collectors/node.d.plugin/fronius/ @ccremer @cakrit
-collectors/node.d.plugin/snmp/ @VLegakis @cakrit
-collectors/node.d.plugin/stiebeleltron/ @ccremer @cakrit
+backends/prometheus/ @vlvkobal @thiagoftsm
+build/ @cakrit @cosmix
+collectors/ @vlvkobal @mfundul @cakrit @cosmix
+collectors/charts.d.plugin/ @cakrit @cosmix
+collectors/freebsd.plugin/ @vlvkobal @thiagoftsm @cakrit @cosmix
+collectors/macos.plugin/ @vlvkobal @thiagoftsm @cakrit @cosmix
+collectors/node.d.plugin/ @VLegakis @cakrit @cosmix
+collectors/node.d.plugin/fronius/ @ccremer @cakrit @cosmix
+collectors/node.d.plugin/snmp/ @VLegakis @cakrit @cosmix
+collectors/node.d.plugin/stiebeleltron/ @ccremer @cakrit @cosmix
collectors/python.d.plugin/ @ilyam8
-collectors/cups.plugin/ @simonnagl @vlvkobal @thiagoftsm @cakrit
-daemon/ @thiagoftsm @mfundul @cakrit
-database/ @mfundul @thiagoftsm @cakrit
-docs/ @cakrit @joelhans
-health/ @thiagoftsm @vlvkobal @cakrit
-health/health.d/ @thiagoftsm @vlvkobal @cakrit
-health/notifications/ @Ferroin @thiagoftsm @cakrit
-libnetdata/ @thiagofsm @mfundul @cakrit
-packaging/ @paulkatsoulakis @cakrit
-packaging/installer/ @paulkatsoulakis @cakrit
-packaging/makeself/ @paulkatsoulakis @cakrit
-registry/ @VLegakis @cakrit
-streaming/ @thiagoftsm @vlvkobal @cakrit
-web/ @thiagoftsm @mfundul @vlvkobal @cakrit
-web/gui/ @VLegakis @cakrit
+collectors/cups.plugin/ @simonnagl @vlvkobal @thiagoftsm @cakrit @cosmix
+daemon/ @thiagoftsm @mfundul @cakrit @cosmix
+database/ @mfundul @thiagoftsm @cakrit @cosmix
+docs/ @cakrit @cosmix @joelhans
+health/ @thiagoftsm @vlvkobal @cakrit @cosmix
+health/health.d/ @thiagoftsm @vlvkobal @cakrit @cosmix
+health/notifications/ @Ferroin @thiagoftsm @cakrit @cosmix
+libnetdata/ @thiagofsm @mfundul @cakrit @cosmix
+packaging/ @cakrit @cosmix
+packaging/installer/ @cakrit @cosmix
+packaging/makeself/ @cakrit @cosmix
+registry/ @VLegakis @cakrit @cosmix
+streaming/ @thiagoftsm @vlvkobal @cakrit @cosmix
+web/ @thiagoftsm @mfundul @vlvkobal @cakrit @cosmix
+web/gui/ @VLegakis @cakrit @cosmix
# Ownership by filetype (overwrites ownership by directory)
-*.md @cakrit @joelhans
-*.am @paulkatsoulakis
+*.md @cakrit @cosmix @joelhans
+*.am @cakrit @cosmix
# Ownership of specific files
-.gitignore @paulkatsoulakis @cakrit
-.travis.yml @paulkatsoulakis
-.lgtm.yml @paulkatsoulakis
-.eslintrc @paulkatsoulakis
-.eslintignore @paulkatsoulakis
-.csslintrc @paulkatsoulakis
-.codeclimate.yml @paulkatsoulakis
-.codacy.yml @paulkatsoulakis
-netdata.spec.in @paulkatsoulakis
-netdata-installer.sh @paulkatsoulakis @cakrit
-netlify.toml @cakrit
+.gitignore @cakrit @cosmix
+.travis.yml @cakrit @cosmix
+.lgtm.yml @cakrit @cosmix
+.eslintrc @cakrit @cosmix
+.eslintignore @cakrit @cosmix
+.csslintrc @cakrit @cosmix
+.codeclimate.yml @cakrit @cosmix
+.codacy.yml @cakrit @cosmix
+netdata.spec.in @cakrit @cosmix
+netdata-installer.sh @cakrit @cosmix
+netlify.toml @cakrit @cosmix
package.json @VLegakis
packaging/version @netdatabot
-LICENSE.md @cakrit @joelhans
+LICENSE.md @cakrit @cosmix @joelhans
CHANGELOG.md @netdatabot
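As the comments in the file note, CODEOWNERS entries lower in the file override earlier ones, so the filetype and per-file rules take precedence over the directory rules. A change of this size (swapping @paulkatsoulakis for @cosmix across most entries) is easiest to audit by listing the distinct handles; a sketch using only standard tools, run from a checkout:

    # List every distinct owner handle referenced in CODEOWNERS.
    grep -oE '@[A-Za-z0-9_-]+' .github/CODEOWNERS | sort -u
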
diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md
deleted file mode 100644
index bd939bab..00000000
--- a/.github/ISSUE_TEMPLATE.md
+++ /dev/null
@@ -1,15 +0,0 @@
----
-about: General issue template
-labels: "needs triage", "no changelog"
----
-
-<!---
-This is a generic issue template. We usually prefer contributors to use one
-of the 3 other specific issue templates (bug report, feature request, question)
-so that our automation can classify the issue and you can get a response faster.
-However, if your issue doesn't fall into one of those 3 categories,
-use this generic template.
---->
-
-#### Summary
-
diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md
deleted file mode 100644
index d378d451..00000000
--- a/.github/ISSUE_TEMPLATE/bug_report.md
+++ /dev/null
@@ -1,34 +0,0 @@
----
-name: Bug report
-about: Create a bug report to help us improve
-labels: bug, needs triage
----
-
-<!---
-When creating a bug report please:
-- Verify first that your issue is not already reported on GitHub
-- Test if the latest release and master branch are affected too.
-- Provide a clear and concise description of what the bug is in the "Bug report
- summary" section.
-- Try to provide as much information about your environment (OS distribution,
- running in a container, etc.) as possible to allow us to reproduce this bug faster.
-- Write which component is affected. We group our components the same way our
- code is structured, so basically:
- component name = dir in the top level directory of the repository
-- Describe how you found this bug and how we can reproduce it. Preferably with
- a minimal test-case scenario. You can paste gist.github.com links for larger
- files
-- Provide a clear and concise description of what you expected to happen.
--->
-
-##### Bug report summary
-
-##### OS / Environment
-
-##### Netdata version (output of `netdata -V`)
-
-##### Component Name
-
-##### Steps To Reproduce
-
-##### Expected behavior
diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md
deleted file mode 100644
index 4d210259..00000000
--- a/.github/ISSUE_TEMPLATE/feature_request.md
+++ /dev/null
@@ -1,16 +0,0 @@
----
-name: Feature request
-about: Suggest an idea for our project
-labels: feature request, needs triage
----
-
-<!---
-When creating a feature request please:
-- Verify first that your issue is not already reported on GitHub
-- Explain the new feature briefly in the "Feature idea summary" section
-- Provide a clear and concise description of what you expect to happen.
---->
-
-##### Feature idea summary
-
-##### Expected behavior
diff --git a/.github/ISSUE_TEMPLATE/question.md b/.github/ISSUE_TEMPLATE/question.md
deleted file mode 100644
index c5cd71e6..00000000
--- a/.github/ISSUE_TEMPLATE/question.md
+++ /dev/null
@@ -1,26 +0,0 @@
----
-name: Question
-about: You just want to ask a question? Go on.
-labels: question, no changelog
----
-
-<!---
-When asking a new question please:
-- Verify first that your question wasn't asked before on GitHub.
- HINT: Use label "question" when searching for such issues.
-- Briefly explain the problem you are having
-- Try to provide as much information about your environment (OS distribution,
- running in a container, etc.) as possible to allow us to reproduce this bug faster.
-- Write which component is affected. We group our components the same way our
- code is structured, so basically:
- component name = dir in the top level directory of the repository
-- Provide a clear and concise description of what you expected to happen.
--->
-
-##### Question summary
-
-##### OS / Environment
-
-##### Component Name
-
-##### Expected results
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
deleted file mode 100644
index b4932f9c..00000000
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ /dev/null
@@ -1,19 +0,0 @@
-<!--
-Describe the change in the Summary section, including rationale and design decisions.
-Include "Fixes #nnn" if you are fixing an existing issue.
-
-In "Component Name" section write which component is changed in this PR. This
-will help us review your PR quicker.
-
-If you have more information you want to add, write it in the "Additional
-Information" section. This is usually used to help others understand your
-motivation behind this change. A step-by-step reproduction of the problem is
-helpful if there is no related issue.
--->
-
-##### Summary
-
-##### Component Name
-
-##### Additional Information
-
diff --git a/.github/stale.yml b/.github/stale.yml
deleted file mode 100644
index abf927a4..00000000
--- a/.github/stale.yml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-only: issues
-limitPerRun: 30
-daysUntilStale: 30
-daysUntilClose: 7
-exemptLabels:
- - bug
- - help wanted
- - feature request
-exemptProjects: true
-exemptMilestones: true
-staleLabel: stale
-markComment: >
- This issue has been inactive for 30 days.
- It will be closed in one week, unless it is updated.
-closeComment: >
- This issue has been automatically closed due to an extended period of inactivity.
- Please reopen if it is still valid. Thank you for your contributions.
diff --git a/.gitignore b/.gitignore
index 82000619..52a108f7 100644
--- a/.gitignore
+++ b/.gitignore
@@ -77,8 +77,7 @@ packaging/makeself/tmp/
# coverity
cov-int/
netdata-coverity-analysis.tgz
-.coverity-token
-.coverity-build
+.coverity-scan.conf
.cproject/
.idea/
@@ -87,7 +86,6 @@ netdata-coverity-analysis.tgz
.settings/
README
TODO.md
-netdata.conf
TODO.txt
web/gui/chart-info/
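This .gitignore change replaces the two separate Coverity credential files with a single .coverity-scan.conf and stops ignoring netdata.conf. Whether the new file is actually matched by an ignore rule can be verified in a working tree:

    # Print the .gitignore source (file, line number, pattern) that matches
    # the new Coverity config, if any rule does.
    git check-ignore -v .coverity-scan.conf
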
diff --git a/.lgtm.yml b/.lgtm.yml
deleted file mode 100644
index 38f6675c..00000000
--- a/.lgtm.yml
+++ /dev/null
@@ -1,24 +0,0 @@
----
-# LGTM does a good job at classifying files, but sometimes it needs some help.
-# To classify files which shouldn't be checked we need to define where such
-# files are located and manually assign them one of the possible categories:
-# docs, generated, library, template, test
-# More information can be found in lgtm documentation:
-# https://help.semmle.com/lgtm-enterprise/user/help/file-classification.html#built-in-tags
-# https://lgtm.com/help/lgtm/lgtm.yml-configuration-file
-path_classifiers:
- library:
- - collectors/python.d.plugin/python_modules/third_party/
- - collectors/python.d.plugin/python_modules/urllib3/
- - collectors/python.d.plugin/python_modules/pyyaml2/
- - collectors/python.d.plugin/python_modules/pyyaml3/
- - collectors/node.d.plugin/node_modules/lib/
- - collectors/node.d.plugin/node_modules/asn1-ber.js
- - collectors/node.d.plugin/node_modules/extend.js
- - collectors/node.d.plugin/node_modules/net-snmp.js
- - collectors/node.d.plugin/node_modules/pixl-xml.js
- - web/gui/lib/
- - web/gui/src/
- - web/gui/css/
- test:
- - tests/
diff --git a/.remarkrc.js b/.remarkrc.js
deleted file mode 100644
index a5d9d128..00000000
--- a/.remarkrc.js
+++ /dev/null
@@ -1,121 +0,0 @@
-// Source: https://github.com/codacy/codacy-remark-lint/raw/master/.remarkrc.js
-
-const fs = require("fs");
-const path = require("path");
-
-exports.settings = {
- gfm: true,
- commonmark: true,
- looseTable: false,
- spacedTable: false,
- paddedTable: false,
- fences: true,
- rule: '-',
- ruleRepetition: 3,
- emphasis: "*",
- strong: "*",
- bullet: "-",
- listItemIndent: 'tab',
- incrementListMarker: true
-};
-
-const personalDictionaryPath = path.join(__dirname, ".dictionary");
-const personalDictionary = fs.existsSync(personalDictionaryPath)
- ? {
- personal: fs.readFileSync(personalDictionaryPath, "utf8")
- }
- : {};
-
-const remarkPresetLintMarkdownStyleGuide = {
- plugins: require("remark-preset-lint-markdown-style-guide").plugins.filter(
- function(elem) {
- return elem != require("remark-lint-no-duplicate-headings");
- }
- )
-};
-
-exports.plugins = [
- require("remark-preset-lint-consistent"),
- require("remark-preset-lint-recommended"),
- remarkPresetLintMarkdownStyleGuide,
- [require("remark-lint-no-dead-urls"), { skipOffline: true }],
- require("remark-lint-heading-whitespace"),
- [require("remark-lint-maximum-line-length"), 120],
- [require("remark-lint-maximum-heading-length"), 120],
- [require("remark-lint-list-item-indent"), "tab-size"],
- [require("remark-lint-list-item-spacing"), false],
- [require("remark-lint-strong-marker"), "*"],
- [require("remark-lint-emphasis-marker"), "_"],
- [require("remark-lint-unordered-list-marker-style"), "-"],
- [require("remark-lint-ordered-list-marker-style"), "."],
- [require("remark-lint-ordered-list-marker-value"), "ordered"],
- /*[
- require("remark-lint-write-good"),
- [
- "warn",
- {
- passive: false,
- illusion: true,
- so: true,
- thereIs: true,
- weasel: true,
- adverb: true,
- tooWordy: true,
- cliches: true,
- eprime: false
- }
- ]
- ],*/
- require("remark-validate-links"),
- require("remark-frontmatter"),
- /*[
- require("remark-retext"),
- require("unified")().use({
- plugins: [
- require("retext-english"),
- require("retext-syntax-urls"),
- [
- require("retext-spell"),
- {
- ignoreLiteral: true,
- dictionary: require("dictionary-en-us"),
- ...personalDictionary
- }
- ],
- [
- require("retext-sentence-spacing"),
- {
- preferred: 1
- }
- ],
- require("retext-repeated-words"),
- require("retext-usage"),
- require("retext-indefinite-article"),
- require("retext-redundant-acronyms"),
- [
- require("retext-contractions"),
- {
- straight: true,
- allowLiteral: true
- }
- ],
- require("retext-diacritics"),
- [
- require("retext-quotes"),
- {
- preferred: "straight"
- }
- ],
- require("retext-equality"),
- require("retext-passive"),
- require("retext-profanities"),
- [
- require("retext-readability"),
- {
- age: 20
- }
- ]
- ]
- })
- ]*/
-];
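The deleted .remarkrc.js wired remark-lint presets into the project's Markdown checks, with the spell-checking and write-good sections left commented out. For context, remark picks such a config up automatically when run from the repository root; a sketch, assuming remark-cli and the referenced preset packages are installed:

    # Lint all Markdown files using the repository's remark configuration;
    # --frail turns warnings into a non-zero exit status, as CI would expect.
    npx remark . --frail
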
diff --git a/.travis.yml b/.travis.yml
deleted file mode 100644
index a239a0c6..00000000
--- a/.travis.yml
+++ /dev/null
@@ -1,638 +0,0 @@
-dist: xenial
-sudo: true
-language: c
-services:
- - docker
-
-
-
-# This is a hook to help us introduce "soft" errors into our process
-matrix:
- allow_failures:
- - env: ALLOW_SOFT_FAILURE_HERE=true
-
-
-
-# Install dependencies for all, once
-#
-install:
- - sudo apt-get install -y libcap2-bin zlib1g-dev uuid-dev fakeroot libipmimonitoring-dev libmnl-dev libnetfilter-acct-dev gnupg python-pip
- - sudo apt install -y --only-upgrade docker-ce
- - sudo pip install git-semver
- - docker info
- - source tests/installer/slack.sh
- - export NOTIF_CHANNEL="automation-beta"
- - if [ "${TRAVIS_REPO_SLUG}" = "netdata/netdata" ]; then export NOTIF_CHANNEL="automation"; fi;
- - export BUILD_VERSION="$(cat packaging/version | cut -d'-' -f1)"
- - export LATEST_RELEASE_VERSION="$(cat packaging/version | cut -d'-' -f1)"
- - export LATEST_RELEASE_DATE="$(git log -1 --format=%aD "${LATEST_RELEASE_VERSION}" | cat)"
- - if [[ "${TRAVIS_COMMIT_MESSAGE}" = *"[Build latest]"* ]]; then export BUILD_VERSION="$(cat packaging/version | cut -d'-' -f1,2 | sed -e 's/-/./g').latest"; fi;
- - export DEPLOY_REPO="netdata" # Default production packaging repository
- - if [[ "${TRAVIS_COMMIT_MESSAGE}" = *"[Build latest]"* ]]; then export DEPLOY_REPO="netdata-edge"; fi;
- - export PACKAGING_USER="$(echo ${TRAVIS_REPO_SLUG} | cut -d'/' -f1)"
-
-
-
-# Setup notification system
-#
-notifications:
- webhooks: https://app.fossa.io/hooks/travisci
-
-
-
-# Define the stage sequence and conditionals
-#
-stages:
- # Mandatory runs, we always want these executed
- - name: Code quality, linting, syntax, code style
- - name: Build process
- - name: Artifacts validation
- - name: Artifacts validation on bare OS, stable to current lifecycle checks
- if: branch = master AND (type = pull_request OR type = cron)
-
- # Nightly operations
- - name: Nightly operations
- if: branch = master AND type = cron
- - name: Nightly release
- if: branch = master AND type = cron
-
- # Scheduled releases
- - name: Packaging for release
- if: branch = master AND type != pull_request AND type != cron
-
- - name: Publish for release
- if: branch = master AND type != pull_request AND type != cron AND commit_message =~ /(\[netdata release candidate\]|\[netdata major release\]|\[netdata minor release\]|\[netdata patch release\])/
-
- # Build DEB packages under special conditions
- # Ubuntu
- - name: "Package ubuntu/disco"
- if: type != cron AND type != pull_request AND branch = master AND commit_message =~ /(\[Package arm64 DEB Ubuntu\]|\[Package arm64 DEB\]|\[Package i386 DEB Ubuntu\]|\[Package i386 DEB\]|\[Package amd64 DEB Ubuntu\]|\[Package amd64 DEB\])/
- - name: "Package ubuntu/cosmic"
- if: type != cron AND type != pull_request AND branch = master AND commit_message =~ /(\[Package arm64 DEB Ubuntu\]|\[Package arm64 DEB\]|\[Package i386 DEB Ubuntu\]|\[Package i386 DEB\]|\[Package amd64 DEB Ubuntu\]|\[Package amd64 DEB\])/
- - name: "Package ubuntu/bionic"
- if: type != cron AND type != pull_request AND branch = master AND commit_message =~ /(\[Package arm64 DEB Ubuntu\]|\[Package arm64 DEB\]|\[Package i386 DEB Ubuntu\]|\[Package i386 DEB\]|\[Package amd64 DEB Ubuntu\]|\[Package amd64 DEB\])/
-
- # Debian
- - name: "Package debian/buster"
- if: type != cron AND type != pull_request AND branch = master AND commit_message =~ /(\[Package arm64 DEB Debian\]|\[Package arm64 DEB\]|\[Package i386 DEB Debian\]|\[Package i386 DEB\]|\[Package amd64 DEB Debian\]|\[Package amd64 DEB\])/
- - name: "Package debian/stretch"
- if: type != cron AND type != pull_request AND branch = master AND commit_message =~ /(\[Package arm64 DEB Debian\]|\[Package arm64 DEB\]|\[Package i386 DEB Debian\]|\[Package i386 DEB\]|\[Package amd64 DEB Debian\]|\[Package amd64 DEB\])/
- - name: "Package debian/jessie"
- if: type != cron AND type != pull_request AND branch = master AND commit_message =~ /(\[Package arm64 DEB Debian\]|\[Package arm64 DEB\]|\[Package i386 DEB Debian\]|\[Package i386 DEB\]|\[Package amd64 DEB Debian\]|\[Package amd64 DEB\])/
-
- # Build RPM packages under special conditions
- # Enterprise linux (Covers CentOS, Redhat, Amazon linux)
- - name: "Package Enterprise Linux 7"
- if: type != cron AND type != pull_request AND branch = master AND commit_message =~ /(\[Package arm64 RPM Enterprise Linux\]|\[Package arm64 RPM\]|\[Package i386 RPM Enterprise Linux\]|\[Package i386 RPM\]|\[Package amd64 RPM Enterprise Linux\]|\[Package amd64 RPM\])/
- - name: "Package Enterprise linux 6"
- if: type != cron AND type != pull_request AND branch = master AND commit_message =~ /(\[Package i386 RPM Enterprise Linux\]|\[Package i386 RPM\]|\[Package amd64 RPM Enterprise Linux\]|\[Package amd64 RPM\])/
-
- # Fedora
- - name: "Package Fedora 30"
- if: type != cron AND type != pull_request AND branch = master AND commit_message =~ /(\[Package arm64 RPM Fedora\]|\[Package arm64 RPM\]|\[Package amd64 RPM Fedora\]|\[Package amd64 RPM\])/
- - name: "Package Fedora 29"
- if: type != cron AND type != pull_request AND branch = master AND commit_message =~ /(\[Package arm64 RPM Fedora\]|\[Package arm64 RPM\]|\[Package amd64 RPM Fedora\]|\[Package amd64 RPM\])/
- - name: "Package Fedora 28"
- if: type != cron AND type != pull_request AND branch = master AND commit_message =~ /(\[Package arm64 RPM Fedora\]|\[Package arm64 RPM\]|\[Package amd64 RPM Fedora\]|\[Package amd64 RPM\])/
-
- # OpenSuSE
- - name: "Package OpenSuSE 15.1"
- if: type != cron AND type != pull_request AND branch = master AND commit_message =~ /(\[Package arm64 RPM openSuSE\]|\[Package arm64 RPM\]|\[Package amd64 RPM openSuSE\]|\[Package amd64 RPM\])/
- - name: "Package OpenSuSE 15.0"
- if: type != cron AND type != pull_request AND branch = master AND commit_message =~ /(\[Package arm64 RPM openSuSE\]|\[Package arm64 RPM\]|\[Package amd64 RPM openSuSE\]|\[Package amd64 RPM\])/
-
-
-
- # DEB and RPM template flows
- - stage: &_RPM_TEMPLATE
- name: "Build & Publish RPM package"
- before_install:
- - sudo apt-get install -y wget lxc lxc-templates
- - source tests/installer/slack.sh
- before_script:
- - post_message "TRAVIS_MESSAGE" "Starting package preparation and publishing for ${BUILD_STRING}.${BUILD_ARCH}" "${NOTIF_CHANNEL}"
- - export PACKAGES_DIRECTORY="$(mktemp -d -t netdata-packaging-contents-dir-XXXXXX)" && echo "Created packaging directory ${PACKAGES_DIRECTORY}"
- script:
- - echo "GIT Branch:" && git branch
- - echo "Last commit:" && git log -1
- - echo "GIT Describe:" && git describe
- - echo "packaging/version:" && cat packaging/version
- - echo "Creating LXC environment for the build" && sudo -E .travis/package_management/create_lxc_for_build.sh
- - echo "Building package in container" && sudo -E .travis/package_management/build_package_in_container.sh
- - sudo chmod -R 755 "/var/lib/lxc"
- - echo "Preparing RPM packaging contents for upload" && sudo -E .travis/package_management/prepare_packages.sh
- git:
- depth: false
- after_failure: post_message "TRAVIS_MESSAGE" "Failed to build RPM for ${BUILD_STRING}.${BUILD_ARCH}"
- before_deploy:
- - .travis/package_management/yank_stale_rpm.sh "${PACKAGES_DIRECTORY}" "${BUILD_STRING}" || echo "No stale RPM found"
- deploy:
- - provider: packagecloud
- repository: "${DEPLOY_REPO}"
- username: "${PACKAGING_USER}"
- token: "${PKG_CLOUD_TOKEN}"
- dist: "${BUILD_STRING}"
- local_dir: "${PACKAGES_DIRECTORY}"
- skip_cleanup: true
- on:
- # Only deploy on ${USER}/netdata, master branch, when packages directory is created
- repo: ${TRAVIS_REPO_SLUG}
- branch: "master"
- condition: -d "${PACKAGES_DIRECTORY}"
- after_deploy:
- - if [ -n "${BUILDER_NAME}" ]; then rm -rf /home/${BUILDER_NAME}/* && echo "Cleared /home/${BUILDER_NAME} directory" || echo "Failed to clean /home/${BUILDER_NAME} directory"; fi;
- - if [ -d "${PACKAGES_DIRECTORY}" ]; then rm -rf "${PACKAGES_DIRECTORY}"; fi;
-
-
-
- # TODO: This section is stale, will be aligned with the RPM implementation when we get to DEB packaging
- - stage: &_DEB_TEMPLATE
- name: "Build & Publish DEB package"
- before_install:
- - sudo apt-get install -y wget lxc lxc-templates dh-make git-buildpackage build-essential libdistro-info-perl
- - source tests/installer/slack.sh
- before_script:
- - post_message "TRAVIS_MESSAGE" "Starting package preparation and publishing for ${BUILD_STRING}.${BUILD_ARCH}" "${NOTIF_CHANNEL}"
- - export PACKAGES_DIRECTORY="$(mktemp -d -t netdata-packaging-contents-dir-XXXXXX)" && echo "Created packaging directory ${PACKAGES_DIRECTORY}"
- script:
- - echo "GIT Branch:" && git branch
- - echo "Last commit:" && git log -1
- - echo "GIT Describe:" && git describe
- - echo "packaging/version:" && cat packaging/version
- - echo "Creating LXC environment for the build" && sudo -E .travis/package_management/create_lxc_for_build.sh
- - echo "Building package in container" && sudo -E .travis/package_management/build_package_in_container.sh
- - sudo chown -R root:travis "/var/lib/lxc"
- - sudo chmod -R 750 "/var/lib/lxc"
- - echo "Preparing DEB packaging contents for upload" && sudo -E .travis/package_management/prepare_packages.sh
- git:
- depth: false
- after_failure: post_message "TRAVIS_MESSAGE" "Failed to build DEB for ${BUILD_STRING}.${BUILD_ARCH}"
- before_deploy:
- - .travis/package_management/yank_stale_rpm.sh "${PACKAGES_DIRECTORY}" "${BUILD_STRING}" || echo "No stale DEB found"
- deploy:
- - provider: packagecloud
- repository: "${DEPLOY_REPO}"
- username: "${PACKAGING_USER}"
- token: "${PKG_CLOUD_TOKEN}"
- dist: "${BUILD_STRING}"
- local_dir: "${PACKAGES_DIRECTORY}"
- skip_cleanup: true
- on:
- # Only deploy on ${USER}/netdata, master branch, when build-area directory is created
- repo: ${TRAVIS_REPO_SLUG}
- branch: "master"
- condition: -d "${PACKAGES_DIRECTORY}"
- after_deploy:
- - if [ -n "${BUILDER_NAME}" ]; then rm -rf /home/${BUILDER_NAME}/* && echo "Cleared /home/${BUILDER_NAME} directory" || echo "Failed to clean /home/${BUILDER_NAME} directory"; fi;
- - if [ -d "${PACKAGES_DIRECTORY}" ]; then rm -rf "${PACKAGES_DIRECTORY}"; fi;
-
-
-
-# Define stage implementation details
-#
-jobs:
- include:
- # Do code quality, syntax checking and other pre-build activities
- - stage: Code quality, linting, syntax, code style
-
- name: Run shellchecking on BASH
- script: shellcheck --format=gcc $(find . -name '*.sh.in' -not -iwholename '*.git*')
-
- # This falls under same stage defined earlier
- - name: Run checksum checks on kickstart files
- script: ./tests/installer/checksums.sh
- env: LOCAL_ONLY="true"
-
- # This falls under same stage defined earlier
- - name: Web Dashboard pre-generated file consistency checks (dashboard.js)
- script: cp web/gui/dashboard.js /tmp/dashboard.js && ./build/build.sh && diff /tmp/dashboard.js web/gui/dashboard.js
-
-
-
- # Ensure netdata code builds successfully
- - stage: Build process
-
- name: Standard netdata build
- script: fakeroot ./netdata-installer.sh --install $HOME --dont-wait --dont-start-it --enable-plugin-nfacct --enable-plugin-freeipmi --disable-lto
- env: CFLAGS='-O1 -DNETDATA_INTERNAL_CHECKS=1 -DNETDATA_VERIFY_LOCKS=1'
- after_failure: post_message "TRAVIS_MESSAGE" "<!here> standard netdata build is failing (Still don't know which one, will improve soon)"
-
- - name: Docker container build process (alpine installation)
- script: packaging/docker/build.sh
- env: DEVEL="true"
- after_failure: post_message "TRAVIS_MESSAGE" "Docker build process failed"
-
- - name: Run 'make dist' validation
- before_script: mkdir /tmp/netdata-makedist-test
- script:
- - echo "GIT Branch:" && git branch
- - echo "Last commit:" && git log -1
- - echo "GIT Describe:" && git describe
- - echo "packaging/version:" && cat packaging/version
- - docker run -it -v "${PWD}:/netdata:rw" -v "/tmp/netdata-makedist-test:/netdata_install:rw" -w /netdata "netdata/os-test:ubuntu1804" make clean || echo "Nothing to clean"
- - docker run -it -v "${PWD}:/netdata:rw" -v "/tmp/netdata-makedist-test:/netdata_install:rw" -w /netdata "netdata/os-test:ubuntu1804" make distclean || echo "Nothing to distclean"
- - docker run -it -v "${PWD}:/netdata:rw" -v "/tmp/netdata-makedist-test:/netdata_install:rw" -w /netdata "netdata/os-test:ubuntu1804" /bin/bash -c "autoreconf -ivf && ./configure --prefix=/netdata_install/usr --sysconfdir=/netdata_install/etc --localstatedir=/netdata_install/var --with-zlib --with-math --with-user=netdata CFLAGS=-O2"
- - docker run -it -v "${PWD}:/netdata:rw" -v "/tmp/netdata-makedist-test:/netdata_install:rw" -w /netdata "netdata/os-test:ubuntu1804" make dist
- - docker run -it -v "${PWD}:/netdata:rw" -v "/tmp/netdata-makedist-test:/netdata_install:rw" -w /netdata "netdata/os-test:ubuntu1804" ls -ltr ./netdata-$(git describe).tar.gz || ls -ltr ./netdata-$(cat packaging/version | tr -d '\n').tar.gz
- - .travis/run_install_with_dist_file.sh
- - docker run -it -v "${PWD}:/netdata:rw" -v "/tmp/netdata-makedist-test:/netdata_install:rw" -w /netdata "netdata/os-test:ubuntu1804" make distclean
- git:
- depth: false
- after_script: rm -rf /tmp/netdata-makedist-test
- after_failure: post_message "TRAVIS_MESSAGE" "'make dist' failed"
-
-
-
- - stage: Artifacts validation
-
- name: Unit Testing
- script:
- - fakeroot ./netdata-installer.sh --install $HOME --dont-wait --dont-start-it --enable-plugin-nfacct --enable-plugin-freeipmi --disable-lto
- - $HOME/netdata/usr/sbin/netdata -W unittest
- env: CFLAGS='-O1 -DNETDATA_INTERNAL_CHECKS=1 -DNETDATA_VERIFY_LOCKS=1'
- after_failure: post_message "TRAVIS_MESSAGE" "Unit testing failed"
-
- - name: Build/install on ubuntu 14.04 (not containerized)
- script: fakeroot ./netdata-installer.sh --dont-wait --dont-start-it --install $HOME
- after_failure: post_message "TRAVIS_MESSAGE" "Build/Install failed on ubuntu 14.04"
-
- - name: Build/Install for ubuntu 18.04 (not containerized)
- script: fakeroot ./netdata-installer.sh --dont-wait --dont-start-it --install $HOME
- after_failure: post_message "TRAVIS_MESSAGE" "Build/Install failed on ubuntu 18.04"
-
- - name: Run netdata lifecycle, on ubuntu 18.04 (Containerized)
- script: docker run -it -v "${PWD}:/netdata:rw" -w /netdata "netdata/os-test:ubuntu1804" bats --tap tests/lifecycle.bats
- after_failure: post_message "TRAVIS_MESSAGE" "Netdata lifecycle test script failed on ubuntu 18.04"
-
- - name: Run netdata lifecycle from stable to current, on CentOS 7 (Containerized)
- script: docker run -it -v "${PWD}:/netdata:rw" -w /netdata "netdata/os-test:centos7" tests/updater_checks.sh
- after_failure: post_message "TRAVIS_MESSAGE" "Netdata updater process failed on CentOS 7"
-
- - name: Build/install for CentOS 6 (Containerized)
- script: docker run -it -v "${PWD}:/netdata:rw" -w /netdata "netdata/os-test:centos6" ./netdata-installer.sh --dont-wait --dont-start-it --install /tmp
- after_failure: post_message "TRAVIS_MESSAGE" "Build/Install failed on CentOS 6"
-
- - name: Build/install for CentOS 7 (Containerized)
- script: docker run -it -v "${PWD}:/netdata:rw" -w /netdata "netdata/os-test:centos7" ./netdata-installer.sh --dont-wait --dont-start-it --install /tmp
- after_failure: post_message "TRAVIS_MESSAGE" "Build/Install failed on CentOS 7"
-
-
-
- - stage: "Artifacts validation on bare OS, stable to current lifecycle checks"
-
- # Ubuntu runs
- name: Run netdata lifecycle on Ubuntu 16.04 (xenial)
- script: sudo -E tests/updater_checks.sh
- after_failure: post_message "TRAVIS_MESSAGE" "Netdata updater process failed on bare Ubuntu 16.04"
-
- - name: Run netdata lifecycle, on Ubuntu 19.04 (Containerized)
- script: docker run -it -v "${PWD}:/netdata:rw" -w /netdata "ubuntu:19.04" tests/updater_checks.sh
- after_failure: post_message "TRAVIS_MESSAGE" "Netdata updater process failed on bare Ubuntu 19.04"
-
- # Centos runs
- - name: Run netdata lifecycle on CentOS 7 (Containerized)
- script: docker run -it -v "${PWD}:/netdata:rw" -w /netdata "centos:7" tests/updater_checks.sh
- after_failure: post_message "TRAVIS_MESSAGE" "Netdata updater process failed on bare CentOS 7"
-
- # Debian runs
- - name: Run netdata lifecycle, on Debian 9 (Containerized)
- script: docker run -it -v "${PWD}:/netdata:rw" -w /netdata "debian:stretch" tests/updater_checks.sh
- after_failure: post_message "TRAVIS_MESSAGE" "Netdata updater process failed on bare Debian 9 (stretch)"
-
- # openSuSE runs
- - name: Run netdata lifecycle, on openSuSE 15.0
- script: docker run -it -v "${PWD}:/netdata:rw" -w /netdata "opensuse/leap:15.0" tests/updater_checks.sh
- after_failure: post_message "TRAVIS_MESSAGE" "Netdata updater process failed on bare opensuse/leap:15.0"
-
- - name: Run netdata lifecycle, on openSuSE 15.1
- script: docker run -it -v "${PWD}:/netdata:rw" -w /netdata "opensuse/leap:15.1" tests/updater_checks.sh
- after_failure: post_message "TRAVIS_MESSAGE" "Netdata updater process failed on bare opensuse/leap:15.1"
-
- - name: Run netdata lifecycle, on openSuSE Tumbleweed
- script: docker run -it -v "${PWD}:/netdata:rw" -w /netdata "opensuse/tumbleweed:latest" tests/updater_checks.sh
- after_failure: post_message "TRAVIS_MESSAGE" "Netdata updater process failed on bare opensuse/tumbleweed:latest"
-
- # Alpine runs
- - name: Run netdata lifecycle, on Alpine linux
- script: docker run -it -v "${PWD}:/netdata:rw" -w /netdata "alpine" tests/updater_checks.sh
- after_failure: post_message "TRAVIS_MESSAGE" "Netdata updater process failed on bare Alpine"
-
- # Arch linux runs
- - name: Run netdata lifecycle, on ArchLinux
- script: docker run -it -v "${PWD}:/netdata:rw" -w /netdata "archlinux/base:latest" tests/updater_checks.sh
- after_failure: post_message "TRAVIS_MESSAGE" "Netdata updater process failed on bare archlinux/base:latest"
-
- # Fedora runs
- - name: Run netdata lifecycle, on Fedora 28
- script: docker run -it -v "${PWD}:/netdata:rw" -w /netdata "fedora:28" tests/updater_checks.sh
- after_failure: post_message "TRAVIS_MESSAGE" "Netdata updater process failed on bare Fedora 28"
-
- - name: Run netdata lifecycle, on Fedora 29
- script: docker run -it -v "${PWD}:/netdata:rw" -w /netdata "fedora:29" tests/updater_checks.sh
- after_failure: post_message "TRAVIS_MESSAGE" "Netdata updater process failed on bare Fedora 29"
-
- - name: Run netdata lifecycle, on Fedora 30 (Containerized)
- script: docker run -it -v "${PWD}:/netdata:rw" -w /netdata "fedora:30" tests/updater_checks.sh
- after_failure: post_message "TRAVIS_MESSAGE" "Netdata updater process failed on bare Fedora 30"
-
-
-
- - stage: Packaging for release
-
- name: Generate changelog and TAG the release (only on special commit msg)
- before_script: post_message "TRAVIS_MESSAGE" "Packaging step for release initiated" "${NOTIF_CHANNEL}"
- script:
- - echo "GIT Branch:" && git branch
- - echo "Last commit:" && git log -1
- - echo "GIT Describe:" && git describe
- - echo "packaging/version:" && cat packaging/version
- - .travis/generate_changelog_and_tag_release.sh
- after_failure: post_message "TRAVIS_MESSAGE" "<!here> Packaging for release failed"
- git:
- depth: false
-
- - name: Run labeler on github issues
- script: .travis/labeler.sh # labeler should be replaced with GitHub Actions when they hit GA
-
-
-
- # ###### Packaging workflow section ######
- # References:
- # https://us.images.linuxcontainers.org
- # https://packagecloud.io/docs#install_repo
-
- # Ubuntu distros build
- #
- - stage:
- <<: *_DEB_TEMPLATE
- stage: "Package ubuntu/disco"
- env:
- - BUILDER_NAME="builder" BUILD_DISTRO="ubuntu" BUILD_RELEASE="disco" BUILD_STRING="ubuntu/disco"
- - PACKAGE_TYPE="deb" REPO_TOOL="apt-get"
- - ALLOW_SOFT_FAILURE_HERE=true
-
-
-
- - stage:
- <<: *_DEB_TEMPLATE
- stage: "Package ubuntu/cosmic"
- env:
- - BUILDER_NAME="builder" BUILD_DISTRO="ubuntu" BUILD_RELEASE="cosmic" BUILD_STRING="ubuntu/cosmic"
- - PACKAGE_TYPE="deb" REPO_TOOL="apt-get"
- - ALLOW_SOFT_FAILURE_HERE=true
-
-
-
- - stage:
- <<: *_DEB_TEMPLATE
- stage: "Package ubuntu/bionic"
- env:
- - BUILDER_NAME="builder" BUILD_DISTRO="ubuntu" BUILD_RELEASE="bionic" BUILD_STRING="ubuntu/bionic"
- - PACKAGE_TYPE="deb" REPO_TOOL="apt-get"
- - ALLOW_SOFT_FAILURE_HERE=true
-
-
-
- # Debian distros build
- - stage:
- <<: *_DEB_TEMPLATE
- stage: "Package debian/buster"
- env:
- - BUILDER_NAME="builder" BUILD_DISTRO="debian" BUILD_RELEASE="buster" BUILD_STRING="debian/buster"
- - PACKAGE_TYPE="deb" REPO_TOOL="apt-get"
- - ALLOW_SOFT_FAILURE_HERE=true
-
-
-
- - stage:
- <<: *_DEB_TEMPLATE
- stage: "Package debian/stretch"
- env:
- - BUILDER_NAME="builder" BUILD_DISTRO="debian" BUILD_RELEASE="stretch" BUILD_STRING="debian/stretch"
- - PACKAGE_TYPE="deb" REPO_TOOL="apt-get"
- - ALLOW_SOFT_FAILURE_HERE=true
-
-
-
- - stage:
- <<: *_DEB_TEMPLATE
- stage: "Package debian/jessie"
- env:
- - BUILDER_NAME="builder" BUILD_DISTRO="debian" BUILD_RELEASE="jessie" BUILD_STRING="debian/jessie"
- - PACKAGE_TYPE="deb" REPO_TOOL="apt-get"
- - ALLOW_SOFT_FAILURE_HERE=true
-
-
-
- # Enterprise linux builds (Centos, Redhat, Amazon linux (el/6))
- #
- - stage:
- <<: *_RPM_TEMPLATE
- stage: "Package Enterprise Linux 7"
- env:
- - BUILDER_NAME="builder" BUILD_DISTRO="centos" BUILD_RELEASE="7" BUILD_STRING="el/7"
- - PACKAGE_TYPE="rpm" REPO_TOOL="yum"
- - ALLOW_SOFT_FAILURE_HERE=true
-
-
-
- - stage:
- <<: *_RPM_TEMPLATE
- stage: "Package Enterprise linux 6"
- env:
- - BUILDER_NAME="builder" BUILD_DISTRO="centos" BUILD_RELEASE="6" BUILD_STRING="el/6"
- - PACKAGE_TYPE="rpm" REPO_TOOL="yum"
- - ALLOW_SOFT_FAILURE_HERE=true
-
-
-
- # Fedora distros build
- #
- - stage:
- <<: *_RPM_TEMPLATE
- stage: "Package Fedora 30"
- env:
- - BUILDER_NAME="builder" BUILD_DISTRO="fedora" BUILD_RELEASE="30" BUILD_STRING="fedora/30"
- - PACKAGE_TYPE="rpm" REPO_TOOL="dnf"
- - ALLOW_SOFT_FAILURE_HERE=true
-
-
-
- - stage:
- <<: *_RPM_TEMPLATE
- stage: "Package Fedora 29"
- env:
- - BUILDER_NAME="builder" BUILD_DISTRO="fedora" BUILD_RELEASE="29" BUILD_STRING="fedora/29"
- - PACKAGE_TYPE="rpm" REPO_TOOL="dnf"
- - ALLOW_SOFT_FAILURE_HERE=true
-
-
-
- - stage:
- <<: *_RPM_TEMPLATE
- stage: "Package Fedora 28"
- env:
- - BUILDER_NAME="builder" BUILD_DISTRO="fedora" BUILD_RELEASE="28" BUILD_STRING="fedora/28"
- - PACKAGE_TYPE="rpm" REPO_TOOL="dnf"
- - ALLOW_SOFT_FAILURE_HERE=true
-
-
-
- # Opensuse distros build
- #
- - stage:
- <<: *_RPM_TEMPLATE
- stage: "Package OpenSuSE 15.1"
- env:
- - BUILDER_NAME="builder" BUILD_DISTRO="opensuse" BUILD_RELEASE="15.0" BUILD_STRING="opensuse/15.1"
- - PACKAGE_TYPE="rpm" REPO_TOOL="zypper"
- - ALLOW_SOFT_FAILURE_HERE=true
-
-
-
- - stage:
- <<: *_RPM_TEMPLATE
- stage: "Package OpenSuSE 15.0"
- env:
- - BUILDER_NAME="builder" BUILD_DISTRO="opensuse" BUILD_RELEASE="15.0" BUILD_STRING="opensuse/15.0"
- - PACKAGE_TYPE="rpm" REPO_TOOL="zypper"
- - ALLOW_SOFT_FAILURE_HERE=true
- # ###### End of packaging workflow section ###### #
- # ############################################### #
-
-
-
- # We only publish if a TAG has been set during packaging
- - stage: Publish for release
-
- name: Build & Publish docker images
- before_script: post_message "TRAVIS_MESSAGE" "Publishing docker images" "${NOTIF_CHANNEL}"
- script:
- - echo "GIT Branch:" && git branch
- - echo "Last commit:" && git log -1
- - echo "GIT Describe:" && git describe
- - echo "packaging/version:" && cat packaging/version
- - packaging/docker/check_login.sh
- - echo "Switching to latest master branch, to pick up tagging if any" && git checkout master && git pull
- - packaging/docker/build.sh
- - packaging/docker/publish.sh
- after_failure: post_message "TRAVIS_MESSAGE" "<!here> Docker image publishing failed"
- git:
- depth: false
- env: ALLOW_SOFT_FAILURE_HERE=true
- # We don't run on release candidates
- if: tag !~ /(-rc)/
-
- - name: Create release draft
- before_script: post_message "TRAVIS_MESSAGE" "Drafting release on github" "${NOTIF_CHANNEL}"
- script:
- - echo "GIT Branch:" && git branch
- - echo "Last commit:" && git log -1
- - echo "GIT Describe:" && git describe
- - echo "packaging/version:" && cat packaging/version
- - echo "Generating release artifacts" && .travis/create_artifacts.sh # Could/should be a common storage to put this and share between jobs
- - .travis/draft_release.sh
- git:
- depth: false
- after_failure: post_message "TRAVIS_MESSAGE" "<!here> Draft release submission failed"
- # We don't run on release candidates
- if: tag !~ /(-rc)/
-
-
-
- # This is the nightly pre-execution step (Jobs, preparatory steps for nightly, etc)
- - stage: Nightly operations
-
- name: Run coverity scan
- # Just notify people that Nightly ops triggered, use the first step as a hook to do that
- before_script: post_message "TRAVIS_MESSAGE" "Starting nightly operations" "${NOTIF_CHANNEL}"
- script: ./coverity-install.sh && ./coverity-scan.sh || echo "Coverity failed :("
-
- - name: Kickstart files integrity testing (extended)
- script: ./tests/installer/checksums.sh
-
- - name: Run labeler on github issues
- script: .travis/labeler.sh # labeler should be replaced with GitHub Actions when they hit GA
-
- # This generates the changelog for the nightly release and publishes it
- - name: Generate nightly changelog
- before_script: post_message "TRAVIS_MESSAGE" "Starting changelog generation for nightlies" "${NOTIF_CHANNEL}"
- script:
- - ".travis/nightlies.sh"
- - ".travis/check_changelog_last_modification.sh"
- after_failure: post_message "TRAVIS_MESSAGE" "<!here> Nightly changelog generation failed"
- git:
- depth: false
-
-
-
- # This is the nightly execution step
- #
- - stage: Nightly release
-
- name: Build & Publish docker images
- before_script: post_message "TRAVIS_MESSAGE" "Publishing docker images for nightlies" "${NOTIF_CHANNEL}"
- script:
- - echo "GIT Branch:" && git branch
- - echo "Last commit:" && git log -1
- - echo "GIT Describe:" && git describe
- - echo "packaging/version:" && cat packaging/version
- - docker info
- - packaging/docker/check_login.sh
- - packaging/docker/build.sh
- - packaging/docker/publish.sh
- after_failure: post_message "TRAVIS_MESSAGE" "<!here> Nightly docker image publish failed"
- git:
- depth: false
- env: ALLOW_SOFT_FAILURE_HERE=true
-
- - name: Create nightly release artifacts, publish to GCS
- before_script: post_message "TRAVIS_MESSAGE" "Starting artifacts generation for nightlies" "${NOTIF_CHANNEL}"
- script:
- - echo "GIT Branch:" && git branch
- - echo "Last commit:" && git log -1
- - echo "GIT Describe:" && git describe
- - echo "packaging/version:" && cat packaging/version
- - .travis/create_artifacts.sh
- after_failure: post_message "TRAVIS_MESSAGE" "<!here> Nightly artifacts generation failed"
- git:
- depth: false
- before_deploy:
- echo "Preparing creds under ${TRAVIS_REPO_SLUG}";
- if [ "${TRAVIS_REPO_SLUG}" == "netdata/netdata" ]; then
- openssl aes-256-cbc -K $encrypted_8daf19481253_key -iv $encrypted_8daf19481253_iv -in .travis/gcs-credentials.json.enc -out .travis/gcs-credentials.json -d;
- else
- echo "Beta deployment stage in progress";
- openssl aes-256-cbc -K $encrypted_8daf19481253_key -iv $encrypted_8daf19481253_iv -in .travis/gcs-credentials.json.enc -out .travis/gcs-credentials.json -d;
- fi;
- deploy:
- # Beta storage, used for testing purposes
- - provider: gcs
- edge:
- branch: gcs-ng
- project_id: netdata-storage
- credentials: .travis/gcs-credentials.json
- bucket: "netdata-dev-nightlies"
- skip_cleanup: true
- local_dir: "artifacts"
- on:
- # Beta bucket: only deploy on forks (i.e. not netdata/netdata), master branch, when the artifacts directory exists
- repo: ${TRAVIS_REPO_SLUG}
- branch: master
- condition: -d "artifacts" && ${TRAVIS_REPO_SLUG} != "netdata/netdata"
-
- # Production storage
- - provider: gcs
- edge:
- branch: gcs-ng
- project_id: netdata-storage
- credentials: .travis/gcs-credentials.json
- bucket: "netdata-nightlies"
- skip_cleanup: true
- local_dir: "artifacts"
- on:
- # Only deploy on netdata/netdata, master branch, when artifacts directory is created
- repo: netdata/netdata
- branch: master
- condition: -d "artifacts" && ${TRAVIS_REPO_SLUG} = "netdata/netdata"
- after_deploy: rm -f .travis/gcs-credentials.json
diff --git a/.travis/README.md b/.travis/README.md
deleted file mode 100644
index 3b314fa1..00000000
--- a/.travis/README.md
+++ /dev/null
@@ -1,143 +0,0 @@
-# Description of CI build configuration
-
-## Variables needed by Travis
-
-- GITHUB_TOKEN - GitHub token with push access to the repository
-- DOCKER_USERNAME - Username (netdatabot) with write access to the Docker Hub repository
-- DOCKER_PWD - Password for Docker Hub
-- encrypted_8daf19481253_key - Key needed by openssl to decrypt the GCS credentials file
-- encrypted_8daf19481253_iv - IV needed by openssl to decrypt the GCS credentials file
-- COVERITY_SCAN_TOKEN - Token to allow Coverity test analysis uploads
-- SLACK_USERNAME - Required for the Slack notifications triggered by the Travis pipeline
-- SLACK_CHANNEL - The channel to which Travis will post messages
-- SLACK_NOTIFY_WEBHOOK_URL - The incoming webhook URL as provided by the Slack integration. Visit Apps integration in Slack to generate the required hook
-- SLACK_BOT_NAME - The name your bot will appear with on Slack
-
-## CI workflow details
-Our CI pipeline is designed to help us identify and mitigate risks at all stages of implementation.
-To accommodate this need, we use [Travis CI](http://www.travis-ci.com) as our CI/CD tool.
-Our main areas of concern are:
-1) Only push code that is working. That means fail fast so that we can improve before we reach the public
-
-2) Reduce the time to market to a minimum, by streamlining the release process.
- That means a lot of testing, a lot of consistency checks, a lot of validations
-
-3) Generated artifacts consistency. We should not allow broken software to reach the public.
- When this happens, it's embarrassing and we struggle to eliminate it.
-
-4) We are an innovative company, so we love to automate :)
-
-
-Having said that, here's a brief introduction to Netdata's improved CI/CD pipeline with Travis.
-Our CI/CD lifecycle contains three different execution entry points:
-1) A user opens a pull request to netdata/master: Travis will run a pipeline on the branch under that PR
-2) A merge or commit happens on netdata/master. This will trigger Travis to run, but we have two distinct cases in this scenario:
- a) A user merges a pull request to netdata/master: Travis will run on master, after the merge.
- b) A user runs a commit/merge with a special keyword (mentioned later).
- This triggers a release for either minor, major or release candidate versions, depending on the keyword (an example trigger commit is shown after this list)
-3) A scheduled job runs on master once per day: Travis will run on master at the scheduled interval
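-
-For illustration, entry point 2b could be exercised with an empty commit whose message carries one of the version keywords implemented in `.travis/tagger.sh` (`[patch]`/`[fix]`, `[minor]`/`[feature]`/`[feat]`, `[major]`/`[breaking change]`). A minimal sketch, assuming a push to master is intended and the message text is hypothetical:
-
-```bash
-# Hypothetical release trigger: bump the patch version and let Travis tag the release
-git commit --allow-empty -m '[patch] Fix crash in the disk space collector'
-git push origin master
-```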
-
-To accommodate all three entry points, our CI/CD workflow has a set of steps that run on all of them.
-Once all these steps are successful, our pipeline executes another subset of steps for entry points 2 and 3.
-In Travis terms, the "steps" are "stages", and within each stage we execute a set of activities called "jobs".
-
-### Always run: Stages that run on all three execution entry points
-
-## Code quality, linting, syntax, code style
-At this early stage we iterate through a set of basic quality control checks:
-- Shell checking: Run linters for our various BASH scripts
-- Checksum validators: Run validators to ensure our installers and documentation are in sync
-- Dashboard validator: We provide a pre-generated dashboard.js script file, and we validate that it is up to date.
-
-## Build process
-At this stage, basically, we build :-)
-We do a baseline check of our build artifacts to guarantee they are not broken.
-Briefly, our activities include:
-- Verify docker builds successfully
-- Run the standard netdata installer, to make sure we build & run properly
-- Do the same through 'make dist', as this is our stable channel for our kickstart files
-
-## Artifacts validation
-At this point we know our software builds; we still need to go through a set of checks to guarantee
-that our product meets certain expectations. At the current stage, we are focusing on basic capabilities
-like installing on different distributions and running the full lifecycle of install-run-update-install.
-We are still working on enriching this with more use cases, to get us closer to full stability of our software.
-Briefly, we currently evaluate the following activities:
-- Basic software unit testing
-- Non containerized build and install on ubuntu 14.04
-- Non containerized build and install on ubuntu 18.04
-- Running the full netdata lifecycle (install, update, uninstall) on ubuntu 18.04
-- Build and install on CentOS 6
-- Build and install on CentOS 7
-(More to come)
-
-### Nightly operations: Stages that run daily under a cronjob
-The nightly stages cover the daily activities that produce our latest nightly releases.
-We also maintain a couple of cronjobs that run during the night to provide us with deeper insights,
-for example coverity scanning or extended kickstart checksum checks.
-
-## Nightly operations
-At this stage we run scheduled jobs and execute the nightly changelog generator, coverity scans,
-labeler for our issues and extended kickstart files checksum validations.
-
-## Nightly release
-During this stage we build and publish the latest docker images, prepare the nightly artifacts
-and deploy them to our google cloud storage provider.
-
-
-### Publishing
-Publishing is responsible for executing the major/minor/patch releases and is separated
-into two stages: the packaging preparation process and publishing.
-
-## Packaging for release
-During packaging we prepare the release changelog information and run the labeler.
-
-## Publish for release
-The publishing stage is the most complex part of the release. This is the stage where we generate and publish the docker images,
-prepare the release artifacts and get ready with the release draft.
-
-### Package Management workflows
-As part of our goal to provide the best support to our customers, we have created a set of CI workflows that automatically produce
-DEB and RPM packages for multiple distributions. These workflows are implemented under the templated stages '_DEB_TEMPLATE' and '_RPM_TEMPLATE'.
-We currently plan to actively support the following Operating Systems, and we intend to further expand this list following our users' needs.
-
-### Operating systems supported
-The following distributions are supported:
-- Debian versions
- - Buster (TBD - not released yet, check [debian releases](https://www.debian.org/releases/) for details)
- - Stretch
- - Jessie
- - Wheezy
-
-- Ubuntu versions
- - Disco
- - Cosmic
- - Bionic
- - Artful
-
-- Enterprise Linux versions (Covers Redhat, CentOS, and Amazon Linux with version 6)
- - Version 8 (TBD)
- - Version 7
- - Version 6
-
-- Fedora versions
- - Version 31 (TBD)
- - Version 30
- - Version 29
- - Version 28
-
-- OpenSuSE versions
- - 15.1
- - 15.0
-
-- Gentoo distributions
- - TBD
-
-### Architectures supported
-We plan to support the amd64, x86 and arm64 architectures. As of June 2019, only amd64 and x86 are available; we are still working on solving issues with the arm64 architecture.
-
-Package deployment can be triggered manually by pushing an empty commit with the following message pattern: `[Package PACKAGE_ARCH PACKAGE_TYPE] DESCRIBE_THE_REASONING_HERE`.
-The Travis YAML configuration allows the user to combine package type and architecture as necessary to regenerate the current stable release (for example, tag v1.15.0 as of the 4th of May 2019).
-Sample patterns to trigger building of all supported amd64 packages (see the example after this list):
-- '[Package amd64 RPM]': Build & publish all available amd64 RPM packages
-- '[Package amd64 DEB]': Build & publish all available amd64 DEB packages
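-
-For illustration, such a trigger might look like the following; the branch and the free-text description are assumptions, while the message pattern is the one documented above:
-
-```bash
-# Hypothetical trigger commit: rebuild & publish all available amd64 DEB packages
-git commit --allow-empty -m '[Package amd64 DEB] Regenerate packages for the current stable release'
-git push origin master
-```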
diff --git a/.travis/check_changelog_last_modification.sh b/.travis/check_changelog_last_modification.sh
deleted file mode 100755
index 2665c062..00000000
--- a/.travis/check_changelog_last_modification.sh
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/usr/bin/env bash
-
-set -e
-
-LAST_MODIFICATION="$(git log -1 --pretty="format:%at" CHANGELOG.md)"
-CURRENT_TIME="$(date +"%s")"
-TWO_DAYS_IN_SECONDS=172800
-
-DIFF=$((CURRENT_TIME - LAST_MODIFICATION))
-
-echo "Checking CHANGELOG.md last modification time on GIT.."
-echo "CHANGELOG.md timestamp: ${LAST_MODIFICATION}"
-echo "Current timestamp: ${CURRENT_TIME}"
-echo "Diff: ${DIFF}"
-
-if [ ${DIFF} -gt ${TWO_DAYS_IN_SECONDS} ]; then
- echo "CHANGELOG.md is more than two days old!"
- post_message "TRAVIS_MESSAGE" "Hi <!here>, CHANGELOG.md was found more than two days old (Diff: ${DIFF} seconds)" "${NOTIF_CHANNEL}"
-else
- echo "CHANGELOG.md is less than two days old, fine"
-fi
diff --git a/.travis/create_artifacts.sh b/.travis/create_artifacts.sh
deleted file mode 100755
index 9670f229..00000000
--- a/.travis/create_artifacts.sh
+++ /dev/null
@@ -1,59 +0,0 @@
-#!/usr/bin/env bash
-#
-# Artifacts creation script.
-# This script generates two things:
-# 1) The static binary that can run on all linux distros (built-in dependencies etc)
-# 2) The distribution source tarball
-#
-# Copyright: SPDX-License-Identifier: GPL-3.0-or-later
-#
-# Author: Paul Emm. Katsoulakis <paul@netdata.cloud>
-#
-# shellcheck disable=SC2230
-
-set -e
-
-# If we are not in netdata git repo, at the top level directory, fail
-TOP_LEVEL=$(basename "$(git rev-parse --show-toplevel)")
-CWD=$(git rev-parse --show-cdup || echo "")
-if [ -n "${CWD}" ] || [ ! "${TOP_LEVEL}" == "netdata" ]; then
- echo "Run as .travis/$(basename "$0") from top level directory of netdata git repository"
- exit 1
-fi
-
-if [ ! "${TRAVIS_REPO_SLUG}" == "netdata/netdata" ]; then
- echo "Beta mode on ${TRAVIS_REPO_SLUG}, not running anything here"
- exit 0
-fi;
-
-
-echo "--- Initialize git configuration ---"
-git checkout "${1-master}"
-git pull
-
-# Everything from this directory will be uploaded to GCS
-mkdir -p artifacts
-BASENAME="netdata-$(git describe)"
-
-# Make sure stdout is in blocking mode. If we don't, then conda create will barf during downloads.
-# See https://github.com/travis-ci/travis-ci/issues/4704#issuecomment-348435959 for details.
-python -c 'import os,sys,fcntl; flags = fcntl.fcntl(sys.stdout, fcntl.F_GETFL); fcntl.fcntl(sys.stdout, fcntl.F_SETFL, flags&~os.O_NONBLOCK);'
-echo "--- Create tarball ---"
-autoreconf -ivf
-./configure --prefix=/usr --sysconfdir=/etc --localstatedir=/var --with-zlib --with-math --with-user=netdata CFLAGS=-O2
-make dist
-mv "${BASENAME}.tar.gz" artifacts/
-
-echo "--- Create self-extractor ---"
-./packaging/makeself/build-x86_64-static.sh
-
-# Needed for GCS
-echo "--- Copy artifacts to separate directory ---"
-#shellcheck disable=SC2164
-cp packaging/version artifacts/latest-version.txt
-cd artifacts
-ln -s "${BASENAME}.tar.gz" netdata-latest.tar.gz
-ln -s "${BASENAME}.gz.run" netdata-latest.gz.run
-sha256sum -b ./* >"sha256sums.txt"
-echo "checksums:"
-cat sha256sums.txt
diff --git a/.travis/draft_release.sh b/.travis/draft_release.sh
deleted file mode 100755
index ddc0f9ad..00000000
--- a/.travis/draft_release.sh
+++ /dev/null
@@ -1,65 +0,0 @@
-#!/bin/bash
-#
-# Draft release generator.
-# This utility is responsible for submitting a draft release to the GitHub repo
-# It is agnostic of other processes, when executed it will draft a release,
-# based on the most recent reachable tag.
-#
-# Requirements:
-# - GITHUB_TOKEN variable set with GitHub token. Access level: repo.public_repo
-# - artifacts directory in place
-# - The directory is created by create_artifacts.sh mechanism
-# - The artifacts need to be created with the same tag, obviously
-#
-# Copyright: SPDX-License-Identifier: GPL-3.0-or-later
-#
-# Author: Pavlos Emm. Katsoulakis <paul@netdata.cloud>
-
-set -e
-
-if [ ! -f .gitignore ]; then
- echo "Run as ./travis/$(basename "$0") from top level directory of git repository"
- exit 1
-fi
-
-echo "--- Initialize git configuration ---"
-git checkout master
-git pull
-
-
-if [[ $(git describe) =~ -rc ]]; then
- echo "This is a release candidate tag, we do not generate a release draft"
- exit 0
-fi
-
-# Load the tag, if any
-GIT_TAG=$(git describe)
-
-if [ ! "${TRAVIS_REPO_SLUG}" == "netdata/netdata" ]; then
- echo "Beta mode on ${TRAVIS_REPO_SLUG}, i was about to run for release (${GIT_TAG}), but i am emulating, so bye"
- exit 0
-fi;
-
-echo "---- CREATING RELEASE DRAFT WITH ASSETS -----"
-# Download hub
-HUB_VERSION=${HUB_VERSION:-"2.5.1"}
-wget "https://github.com/github/hub/releases/download/v${HUB_VERSION}/hub-linux-amd64-${HUB_VERSION}.tgz" -O "/tmp/hub-linux-amd64-${HUB_VERSION}.tgz"
-tar -C /tmp -xvf "/tmp/hub-linux-amd64-${HUB_VERSION}.tgz"
-export PATH=$PATH:"/tmp/hub-linux-amd64-${HUB_VERSION}/bin"
-
-# Create a release draft
-if [ -z ${GIT_TAG+x} ]; then
- echo "Variable GIT_TAG is not set. Something went terribly wrong! Exiting."
- exit 1
-fi
-if [ "${GIT_TAG}" != "$(git tag --points-at)" ]; then
- echo "ERROR! Current commit is not tagged. Stopping release creation."
- exit 1
-fi
-until hub release create --draft \
- -a "artifacts/netdata-${GIT_TAG}.tar.gz" \
- -a "artifacts/netdata-${GIT_TAG}.gz.run" \
- -a "artifacts/sha256sums.txt" \
- -m "${GIT_TAG}" "${GIT_TAG}"; do
- sleep 5
-done
diff --git a/.travis/gcs-credentials.json.enc b/.travis/gcs-credentials.json.enc
deleted file mode 100644
index 5d1e7b2d..00000000
--- a/.travis/gcs-credentials.json.enc
+++ /dev/null
Binary files differ
diff --git a/.travis/generate_changelog_and_tag_release.sh b/.travis/generate_changelog_and_tag_release.sh
deleted file mode 100755
index fb155b26..00000000
--- a/.travis/generate_changelog_and_tag_release.sh
+++ /dev/null
@@ -1,67 +0,0 @@
-#!/bin/bash
-#
-# Script to automatically do a couple of things:
-# - generate a new tag according to semver (https://semver.org/)
-# - generate CHANGELOG.md by using https://github.com/skywinder/github-changelog-generator
-#
-# Tags are generated by searching for a keyword in the last commit message. Keywords are:
-# - [patch] or [fix] to bump patch number
-# - [minor], [feature] or [feat] to bump minor number
-# - [major] or [breaking change] to bump major number
-# All keywords MUST be surrounded with square braces.
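-# Illustrative example: with an existing tag v1.16.0, a last commit message containing
-# "[minor]" would produce the new tag v1.17.0, following the semver rules above.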
-#
-# Script uses git mechanisms for locking, so it can be used in parallel builds
-#
-# Requirements:
-# - GITHUB_TOKEN variable set with GitHub token. Access level: repo.public_repo
-# - docker
-#
-# This is a modified version of:
-# https://github.com/paulfantom/travis-helper/blob/master/releasing/releaser.sh
-#
-# Copyright: SPDX-License-Identifier: GPL-3.0-or-later
-#
-# Author: Pavlos Emm. Katsoulakis <paul@netdata.cloud>
-# Author: Pawel Krupa (@paulfantom)
-set -e
-
-if [ ! -f .gitignore ]; then
- echo "Run as ./travis/$(basename "$0") from top level directory of git repository"
- exit 1
-fi
-
-echo "--- Executing Tagging facility to determine TAG ---"
-source .travis/tagger.sh
-
-echo "--- Changelog generator and tagger script starting ---"
-# If tagger script hasn't produced a TAG, there is nothing to do so bail out happy
-if [ -z "${GIT_TAG}" ]; then
- echo "GIT_TAG is empty, nothing to do for now (Value: $GIT_TAG)"
- exit 0
-fi
-
-if [ ! "${TRAVIS_REPO_SLUG}" == "netdata/netdata" ]; then
- echo "Beta mode on ${TRAVIS_REPO_SLUG}, nothing to do on the changelog generator and tagging script for (${GIT_TAG}), bye"
- exit 0
-fi
-
-echo "--- Initialize git configuration ---"
-export GIT_MAIL="bot@netdata.cloud"
-export GIT_USER="netdatabot"
-git checkout master
-git pull
-
-echo "---- UPDATE VERSION FILE ----"
-echo "$GIT_TAG" >packaging/version
-git add packaging/version
-
-echo "---- GENERATE CHANGELOG -----"
-./.travis/generate_changelog_for_release.sh
-git add CHANGELOG.md
-
-echo "---- COMMIT AND PUSH CHANGES ----"
-git commit -m "[ci skip] release $GIT_TAG" --author "${GIT_USER} <${GIT_MAIL}>"
-git tag "$GIT_TAG" -a -m "Automatic tag generation for travis build no. $TRAVIS_BUILD_NUMBER"
-git push "https://${GITHUB_TOKEN}:@$(git config --get remote.origin.url | sed -e 's/^https:\/\///')"
-git push "https://${GITHUB_TOKEN}:@$(git config --get remote.origin.url | sed -e 's/^https:\/\///')" --tags
-# After those operations, the output of `git describe` should be identical to the value of GIT_TAG
diff --git a/.travis/generate_changelog_for_nightlies.sh b/.travis/generate_changelog_for_nightlies.sh
deleted file mode 100755
index b9086288..00000000
--- a/.travis/generate_changelog_for_nightlies.sh
+++ /dev/null
@@ -1,70 +0,0 @@
-#!/bin/bash
-#
-# Changelog generation scriptlet.
-#
-# Copyright: SPDX-License-Identifier: GPL-3.0-or-later
-#
-# Author : Pawel Krupa (paulfantom)
-# Author : Pavlos Emm. Katsoulakis (paul@netdata.cloud)
-set -e
-
-# If we are not in netdata git repo, at the top level directory, fail
-TOP_LEVEL=$(basename "$(git rev-parse --show-toplevel)")
-CWD=$(git rev-parse --show-cdup || echo "")
-if [ -n "$CWD" ] || [ ! "${TOP_LEVEL}" == "netdata" ]; then
- echo "Run as .travis/$(basename "$0") from top level directory of netdata git repository"
- echo "Changelog generation process aborted"
- exit 1
-fi
-
-LAST_TAG="$1"
-COMMITS_SINCE_RELEASE="$2"
-NEW_VERSION="${LAST_TAG}-$((COMMITS_SINCE_RELEASE + 1))-nightly"
-ORG=$(echo "$TRAVIS_REPO_SLUG" | cut -d '/' -f1)
-PROJECT=$(echo "$TRAVIS_REPO_SLUG" | cut -d '/' -f 2)
-GIT_MAIL=${GIT_MAIL:-"bot@netdata.cloud"}
-GIT_USER=${GIT_USER:-"netdatabot"}
-PUSH_URL=$(git config --get remote.origin.url | sed -e 's/^https:\/\///')
-FAIL=0
-if [ -z ${GIT_TAG+x} ]; then
- OPTS=""
-else
- OPTS="--future-release ${GIT_TAG}"
-fi
-echo "We got $COMMITS_SINCE_RELEASE changes since $LAST_TAG, re-generating changelog"
-
-if [ ! "${TRAVIS_REPO_SLUG}" == "netdata/netdata" ]; then
- echo "Beta mode on ${TRAVIS_REPO_SLUG}, nothing else to do here"
- exit 0
-fi
-
-git checkout master
-git pull
-
-echo "Running project markmandel for github changelog generation"
-#docker run -it --rm -v "$(pwd)":/usr/local/src/your-app ferrarimarco/github-changelog-generator:1.14.3 \
-docker run -it -v "$(pwd)":/project markmandel/github-changelog-generator:latest \
- --user "${ORG}" \
- --project "${PROJECT}" \
- --token "${GITHUB_TOKEN}" \
- --since-tag "v1.10.0" \
- --unreleased-label "**Next release**" \
- --no-issues \
- --exclude-labels "stale,duplicate,question,invalid,wontfix,discussion,no changelog" \
- --no-compare-link ${OPTS}
-
-echo "Changelog created! Adding packaging/version(${NEW_VERSION}) and CHANGELOG.md to the repository"
-echo "${NEW_VERSION}" > packaging/version
-git add packaging/version && echo "1) Added packaging/version to repository" || FAIL=1
-git add CHANGELOG.md && echo "2) Added changelog file to repository" || FAIL=1
-git commit -m '[ci skip] create nightly packages and update changelog' --author "${GIT_USER} <${GIT_MAIL}>" && echo "3) Committed changes to repository" || FAIL=1
-git push "https://${GITHUB_TOKEN}:@${PUSH_URL}" && echo "4) Pushed changes to remote ${PUSH_URL}" || FAIL=1
-
-# In case of a failure, wrap it up and bail out cleanly
-if [ $FAIL -eq 1 ]; then
- git clean -xfd
- echo "Changelog generation failed during github UPDATE!"
- exit 1
-fi
-
-echo "Changelog generation completed successfully!"
diff --git a/.travis/generate_changelog_for_release.sh b/.travis/generate_changelog_for_release.sh
deleted file mode 100755
index de5d3e70..00000000
--- a/.travis/generate_changelog_for_release.sh
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/bin/bash
-
-set -e
-
-if [ ! -f .gitignore ]; then
- echo "Run as ./travis/$(basename "$0") from top level directory of git repository"
- exit 1
-fi
-
-ORGANIZATION=$(echo "$TRAVIS_REPO_SLUG" | awk -F '/' '{print $1}')
-PROJECT=$(echo "$TRAVIS_REPO_SLUG" | awk -F '/' '{print $2}')
-GIT_MAIL=${GIT_MAIL:-"bot@netdata.cloud"}
-GIT_USER=${GIT_USER:-"netdatabot"}
-if [ -z ${GIT_TAG+x} ]; then
- OPTS=""
-else
- OPTS="--future-release ${GIT_TAG}"
-fi
-
-if [ ! "${TRAVIS_REPO_SLUG}" == "netdata/netdata" ]; then
- echo "Beta mode on ${TRAVIS_REPO_SLUG}, nothing else to do"
- exit 0
-fi
-
-echo "--- Creating changelog ---"
-git checkout master
-git pull
-#docker run -it --rm -v "$(pwd)":/usr/local/src/your-app ferrarimarco/github-changelog-generator:1.14.3 \
-docker run -it -v "$(pwd)":/project markmandel/github-changelog-generator:latest \
- --user "${ORGANIZATION}" \
- --project "${PROJECT}" \
- --token "${GITHUB_TOKEN}" \
- --since-tag "v1.10.0" \
- --unreleased-label "**Next release**" \
- --exclude-labels "stale,duplicate,question,invalid,wontfix,discussion,no changelog" \
- --no-compare-link ${OPTS}
diff --git a/.travis/labeler.sh b/.travis/labeler.sh
deleted file mode 100755
index 7863084d..00000000
--- a/.travis/labeler.sh
+++ /dev/null
@@ -1,82 +0,0 @@
-#!/bin/bash
-
-# This is a simple script which should apply labels to unlabelled issues from the last 3 days.
-# It will soon be deprecated by GitHub Actions, so no further development on it is planned.
-
-# Previously this was using POST to only add labels, but that method seems to fail with a larger number of requests
-new_labels() {
- ISSUE="$1"
- URL="https://api.github.com/repos/netdata/netdata/issues/$ISSUE/labels"
- # deduplicate array and add quotes
- SET=( $(for i in "${@:2}"; do [ "$i" != "" ] && echo "\"$i\""; done | sort -u) )
- # implode array to string
- LABELS="${SET[*]}"
- # add commas between quotes (replace spaces)
- LABELS="${LABELS//\" \"/\",\"}"
- # remove duplicate quotes in case parameters were already quoted
- LABELS="${LABELS//\"\"/\"}"
- echo "-------- Assigning labels to #${ISSUE}: ${LABELS} --------"
- curl -H "Authorization: token $GITHUB_TOKEN" -d "{\"labels\":[${LABELS}]}" -X PUT "${URL}" &>/dev/null
-}
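-
-# Illustrative example: `new_labels 123 "area/docs" "area/ci"` issues a PUT whose JSON body is
-# {"labels":["area/ci","area/docs"]} (labels are deduplicated, sorted and quoted before the call).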
-
-if [ "$GITHUB_TOKEN" == "" ]; then
- echo "GITHUB_TOKEN is needed"
- exit 1
-fi
-
-if ! [ -x "$(command -v hub)" ]; then
- echo "===== Download HUB ====="
- HUB_VERSION=${HUB_VERSION:-"2.5.1"}
- wget "https://github.com/github/hub/releases/download/v${HUB_VERSION}/hub-linux-amd64-${HUB_VERSION}.tgz" -O "/tmp/hub-linux-amd64-${HUB_VERSION}.tgz"
- tar -C /tmp -xvf "/tmp/hub-linux-amd64-${HUB_VERSION}.tgz" &>/dev/null
- export PATH=$PATH:"/tmp/hub-linux-amd64-${HUB_VERSION}/bin"
-fi
-
-echo "===== Looking up available labels ====="
-LABELS_FILE=/tmp/labels
-hub issue labels >$LABELS_FILE
-
-# Change all 'area' labels assigned to PR saving non-area labels.
-echo "===== Categorizing PRs ====="
-NEW_LABELS=/tmp/new_labels
-for PR in $(hub pr list -s all -f "%I%n" -L 10); do
- echo "----- Processing PR #$PR -----"
- echo "" >$NEW_LABELS
- NEW_SET=""
- DIFF_URL="https://github.com/netdata/netdata/pull/$PR.diff"
- for FILE in $(curl -L "${DIFF_URL}" 2>/dev/null | grep "diff --git a/" | cut -d' ' -f3 | sort | uniq); do
- LABEL=""
- case "${FILE}" in
- *".md") AREA="docs" ;;
- *"/collectors/python.d.plugin/"*) AREA="external/python" ;;
- *"/collectors/charts.d.plugin/"*) AREA="external" ;;
- *"/collectors/node.d.plugin/"*) AREA="external" ;;
- *"/.travis"*) AREA="ci" ;;
- *"/.github/*.md"*) AREA="docs" ;;
- *"/.github/"*) AREA="ci" ;;
- *"/build/"*) AREA="packaging" ;;
- *"/contrib/"*) AREA="packaging" ;;
- *"/diagrams/"*) AREA="docs" ;;
- *"/installer/"*) AREA="packaging" ;;
- *"/makeself/"*) AREA="packaging" ;;
- *"/system/"*) AREA="packaging" ;;
- *"/netdata-installer.sh"*) AREA="packaging" ;;
- *) AREA=$(echo "$FILE" | cut -d'/' -f2) ;;
- esac
- LABEL="area/$AREA"
- echo "Selecting $LABEL due to $FILE"
- if grep "$LABEL" "$LABELS_FILE"; then
- echo "$LABEL" >>$NEW_LABELS
- if [[ $LABEL =~ "external" ]]; then
- echo "area/collectors" >>$NEW_LABELS
- fi
- else
- echo "-------- Label '$LABEL' not available --------"
- fi
- done
- NEW_SET=$(sort $NEW_LABELS | uniq)
- if [ ! -z "$NEW_SET" ]; then
- PREV=$(curl -H "Authorization: token $GITHUB_TOKEN" "https://api.github.com/repos/netdata/netdata/issues/$PR/labels" 2>/dev/null | jq '.[].name' | grep -v "area")
- new_labels "$PR" ${NEW_SET} "${PREV[*]}"
- fi
-done
diff --git a/.travis/nightlies.sh b/.travis/nightlies.sh
deleted file mode 100755
index 00246104..00000000
--- a/.travis/nightlies.sh
+++ /dev/null
@@ -1,49 +0,0 @@
-#!/bin/bash
-#
-# This is the nightly changelog generation script
-# It is responsible for two major activities:
-# 1) Update packaging/version with the current nightly version
-# 2) Generate the changelog for the mentioned version
-#
-# Copyright: SPDX-License-Identifier: GPL-3.0-or-later
-#
-# Author : Pawel Krupa (paulfantom)
-# Author : Pavlos Emm. Katsoulakis (paul@netdata.cloud)
-set -e
-
-FAIL=0
-
-# If we are not in netdata git repo, at the top level directory, fail
-TOP_LEVEL=$(basename "$(git rev-parse --show-toplevel)")
-CWD=$(git rev-parse --show-cdup || echo "")
-if [ -n "${CWD}" ] || [ ! "${TOP_LEVEL}" == "netdata" ]; then
- echo "Run as .travis/$(basename "$0") from top level directory of netdata git repository"
- echo "Changelog generation process aborted"
- exit 1
-fi
-
-LAST_TAG=$(git describe --abbrev=0 --tags)
-COMMITS_SINCE_RELEASE=$(git rev-list "${LAST_TAG}"..HEAD --count)
-PREVIOUS_NIGHTLY_COUNT="$(rev <packaging/version | cut -d- -f 2 | rev)"
-
-# If no commits since release, just stop
-if [ "${COMMITS_SINCE_RELEASE}" == "${PREVIOUS_NIGHTLY_COUNT}" ]; then
- echo "No changes since last nighthly release"
- exit 0
-fi
-
-if [ ! "${TRAVIS_REPO_SLUG}" == "netdata/netdata" ]; then
- echo "Beta mode -- nothing to do for ${TRAVIS_REPO_SLUG} on the nightlies script, bye"
- exit 0
-fi
-
-echo "--- Running Changelog generation ---"
-NIGHTLIES_CHANGELOG_FAILED=0
-.travis/generate_changelog_for_nightlies.sh "${LAST_TAG}" "${COMMITS_SINCE_RELEASE}" || NIGHTLIES_CHANGELOG_FAILED=1
-
-if [ ${NIGHTLIES_CHANGELOG_FAILED} -eq 1 ]; then
- echo "Changelog generation has failed, this is a soft error, process continues"
- post_message "TRAVIS_MESSAGE" "Changelog generation job for nightlies failed, possibly due to github issues" "${NOTIF_CHANNEL}"
-fi
-
-exit "${FAIL}"
diff --git a/.travis/package_management/build.sh b/.travis/package_management/build.sh
deleted file mode 100644
index beb522a3..00000000
--- a/.travis/package_management/build.sh
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/usr/bin/env bash
-
-UNPACKAGED_NETDATA_PATH="$1"
-LATEST_RELEASE_VERSION="$2"
-
-if [ -z "${LATEST_RELEASE_VERSION}" ]; then
- echo "Parameter 'LATEST_RELEASE_VERSION' not defined"
- exit 1
-fi
-
-if [ -z "${UNPACKAGED_NETDATA_PATH}" ]; then
- echo "Parameter 'UNPACKAGED_NETDATA_PATH' not defined"
- exit 1
-fi
-
-echo "Running changelog generation mechanism since ${LATEST_RELEASE_VERSION}"
-
-echo "Entering ${UNPACKAGED_NETDATA_PATH}"
-cd "${UNPACKAGED_NETDATA_PATH}"
-
-echo "Linking debian -> contrib/debian"
-ln -sf contrib/debian debian
-
-echo "Executing dpkg-buildpackage"
-if dpkg-buildpackage --version 2> /dev/null | grep -q "1.18"; then
- dpkg-buildpackage --post-clean --pre-clean --build=binary
-else
- dpkg-buildpackage -b
-fi
-
-echo "DEB build script completed!"
diff --git a/.travis/package_management/build_package_in_container.sh b/.travis/package_management/build_package_in_container.sh
deleted file mode 100755
index 95a68e7a..00000000
--- a/.travis/package_management/build_package_in_container.sh
+++ /dev/null
@@ -1,82 +0,0 @@
-#!/usr/bin/env bash
-#
-# Entry point for package build process
-#
-# Copyright: SPDX-License-Identifier: GPL-3.0-or-later
-#
-# Author : Pavlos Emm. Katsoulakis (paul@netdata.cloud)
-#shellcheck disable=SC1091
-set -e
-
-# If we are not in netdata git repo, at the top level directory, fail
-TOP_LEVEL=$(basename "$(git rev-parse --show-toplevel)")
-CWD=$(git rev-parse --show-cdup)
-if [ -n "$CWD" ] || [ ! "${TOP_LEVEL}" == "netdata" ]; then
- echo "Run as .travis/package_management/$(basename "$0") from top level directory of netdata git repository"
- echo "Docker build process aborted"
- exit 1
-fi
-
-source .travis/package_management/functions.sh || (echo "Failed to load packaging library" && exit 1)
-
-# Check for presence of mandatory environment variables
-if [ -z "${BUILD_STRING}" ]; then
- echo "No Distribution was defined. Make sure BUILD_STRING is set on the environment before running this script"
- exit 1
-fi
-
-if [ -z "${BUILDER_NAME}" ]; then
- echo "No builder account and container name defined. Make sure BUILDER_NAME is set on the environment before running this script"
- exit 1
-fi
-
-if [ -z "${BUILD_DISTRO}" ]; then
- echo "No build distro information defined. Make sure BUILD_DISTRO is set on the environment before running this script"
- exit 1
-fi
-
-if [ -z "${BUILD_RELEASE}" ]; then
- echo "No build release information defined. Make sure BUILD_RELEASE is set on the environment before running this script"
- exit 1
-fi
-
-if [ -z "${PACKAGE_TYPE}" ]; then
- echo "No build release information defined. Make sure PACKAGE_TYPE is set on the environment before running this script"
- exit 1
-fi
-
-# Detect architecture and load extra variables needed
-detect_arch_from_commit
-
-case "${BUILD_ARCH}" in
-"all")
- echo "* * * Building all architectures, amd64 and i386 * * *"
- echo "Building for amd64.."
- export CONTAINER_NAME="${BUILDER_NAME}-${BUILD_DISTRO}${BUILD_RELEASE}-amd64"
- export LXC_CONTAINER_ROOT="/var/lib/lxc/${CONTAINER_NAME}/rootfs"
- .travis/package_management/trigger_${PACKAGE_TYPE}_lxc_build.py "${CONTAINER_NAME}"
-
- echo "Building for arm64.."
- export CONTAINER_NAME="${BUILDER_NAME}-${BUILD_DISTRO}${BUILD_RELEASE}-arm64"
- export LXC_CONTAINER_ROOT="/var/lib/lxc/${CONTAINER_NAME}/rootfs"
- .travis/package_management/trigger_${PACKAGE_TYPE}_lxc_build.py "${CONTAINER_NAME}"
-
- echo "Building for i386.."
- export CONTAINER_NAME="${BUILDER_NAME}-${BUILD_DISTRO}${BUILD_RELEASE}-i386"
- export LXC_CONTAINER_ROOT="/var/lib/lxc/${CONTAINER_NAME}/rootfs"
- .travis/package_management/trigger_${PACKAGE_TYPE}_lxc_build.py "${CONTAINER_NAME}"
-
- ;;
-"amd64"|"arm64"|"i386")
- echo "Building for ${BUILD_ARCH}.."
- export CONTAINER_NAME="${BUILDER_NAME}-${BUILD_DISTRO}${BUILD_RELEASE}-${BUILD_ARCH}"
- export LXC_CONTAINER_ROOT="/var/lib/lxc/${CONTAINER_NAME}/rootfs"
- .travis/package_management/trigger_${PACKAGE_TYPE}_lxc_build.py "${CONTAINER_NAME}"
- ;;
-*)
- echo "Unknown build architecture '${BUILD_ARCH}', nothing to do for build"
- exit 1
- ;;
-esac
-
-echo "Build process completed!"
diff --git a/.travis/package_management/common.py b/.travis/package_management/common.py
deleted file mode 100755
index 6e7a2602..00000000
--- a/.travis/package_management/common.py
+++ /dev/null
@@ -1,162 +0,0 @@
-#
-#
-# Python library with commonly used functions within the package management scope
-#
-# Author : Pavlos Emm. Katsoulakis <paul@netdata.cloud>
-
-import lxc
-import subprocess
-import os
-import sys
-
-def fetch_version(orig_build_version):
- tag = None
- friendly_version = ""
-
- # TODO: Checksum validations
- if str(orig_build_version).count(".latest") == 1:
- version_list=str(orig_build_version).replace('v', '').split('.')
- friendly_version='.'.join(version_list[0:2]) + "." + version_list[3]
- else:
- friendly_version = orig_build_version.replace('v', '')
- tag = friendly_version # Go to stable tag
- print("Version set to %s from %s" % (friendly_version, orig_build_version))
-
- return friendly_version, tag
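-
-# Illustrative example for fetch_version (stable path, grounded in the code above):
-# fetch_version("v1.16.0") returns ("1.16.0", "1.16.0"), i.e. the friendly version
-# with the leading 'v' stripped, plus the matching stable tag.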
-
-def replace_tag(tag_name, spec, new_tag_content):
- print("Fixing tag %s in %s" % (tag_name, spec))
-
- ifp = open(spec, "r")
- config = ifp.readlines()
- ifp.close()
-
- source_line = -1
- for line in config:
- if str(line).count(tag_name + ":") > 0:
- source_line = config.index(line)
- print("Found line: %s in item %d" % (line, source_line))
- break
-
- if source_line >= 0:
- print("Replacing line %s with %s in spec file" %(config[source_line], new_tag_content))
- config[source_line] = "%s: %s\n" % (tag_name, new_tag_content)
- config_str = ''.join(config)
- ofp = open(spec, 'w')
- ofp.write(config_str)
- ofp.close()
-
-def run_command(container, command):
- print("Running command: %s" % command)
- command_result = container.attach_wait(lxc.attach_run_command, command)
-
- if command_result != 0:
- raise Exception("Command failed with exit code %d" % command_result)
-
-def run_command_in_host(cmd):
- print("Issue command in host: %s" % str(cmd))
-
- proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- o, e = proc.communicate()
- print('Output: ' + o.decode('ascii'))
- print('Error: ' + e.decode('ascii'))
- print('code: ' + str(proc.returncode))
-
-def prepare_repo(container):
- if str(os.environ["REPO_TOOL"]).count("zypper") == 1:
- run_command(container, [os.environ["REPO_TOOL"], "clean", "-a"])
- run_command(container, [os.environ["REPO_TOOL"], "--no-gpg-checks", "update", "-y"])
-
- elif str(os.environ["REPO_TOOL"]).count("yum") == 1:
- run_command(container, [os.environ["REPO_TOOL"], "clean", "all"])
- run_command(container, [os.environ["REPO_TOOL"], "update", "-y"])
-
- if os.environ["BUILD_STRING"].count("el/7") == 1 and os.environ["BUILD_ARCH"].count("i386") == 1:
- print ("Skipping epel-release install for %s-%s" % (os.environ["BUILD_STRING"], os.environ["BUILD_ARCH"]))
- else:
- run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "epel-release"])
-
- elif str(os.environ["REPO_TOOL"]).count("apt-get") == 1:
- run_command(container, [os.environ["REPO_TOOL"], "update", "-y"])
- else:
- run_command(container, [os.environ["REPO_TOOL"], "update", "-y"])
-
- run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "sudo"])
- run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "wget"])
- run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "bash"])
-
-def install_common_dependendencies(container):
- if str(os.environ["REPO_TOOL"]).count("zypper") == 1:
- run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "gcc-c++"])
- run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "json-glib-devel"])
- run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "freeipmi-devel"])
- run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "cups-devel"])
- run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "snappy-devel"])
- run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "protobuf-devel"])
- run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "protobuf-c"])
-
- elif str(os.environ["REPO_TOOL"]).count("yum") == 1:
- run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "gcc-c++"])
- run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "json-c-devel"])
- run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "freeipmi-devel"])
- run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "cups-devel"])
- run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "snappy-devel"])
- run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "protobuf-devel"])
- run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "protobuf-c-devel"])
- run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "protobuf-compiler"])
-
- elif str(os.environ["REPO_TOOL"]).count("apt-get") == 1:
- run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "g++"])
- run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "libipmimonitoring-dev"])
- run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "libjson-c-dev"])
- run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "libcups2-dev"])
- run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "libsnappy-dev"])
- run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "libprotobuf-dev"])
- run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "libprotoc-dev"])
- run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "protobuf-compiler"])
- if os.environ["BUILD_STRING"].count("debian/jessie") == 1:
- run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "snappy"])
- else:
- run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "gcc-c++"])
- run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "cups-devel"])
- run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "freeipmi-devel"])
- run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "json-c-devel"])
- run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "snappy-devel"])
- run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "protobuf-devel"])
- run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "protobuf-c-devel"])
- run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "protobuf-compiler"])
-
- if os.environ["BUILD_STRING"].count("el/6") <= 0:
- run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "autogen"])
-
-def prepare_version_source(dest_archive, pkg_friendly_version, tag=None):
- print(".0 Preparing local implementation tarball for version %s" % pkg_friendly_version)
- tar_file = os.environ['LXC_CONTAINER_ROOT'] + dest_archive
-
- if tag is not None:
- print(".1 Checking out tag %s" % tag)
- run_command_in_host(['git', 'fetch', '--all'])
-
- # TODO: Keep in mind that tricky 'v' there, needs to be removed once we clear our versioning scheme
- run_command_in_host(['git', 'checkout', 'v%s' % pkg_friendly_version])
-
- print(".2 Tagging the code with version: %s" % pkg_friendly_version)
- run_command_in_host(['git', 'tag', '-a', pkg_friendly_version, '-m', 'Tagging while packaging on %s' % os.environ["CONTAINER_NAME"]])
-
- print(".3 Run autoreconf -ivf")
- run_command_in_host(['autoreconf', '-ivf'])
-
- print(".4 Run configure")
- run_command_in_host(['./configure', '--with-math', '--with-zlib', '--with-user=netdata'])
-
- print(".5 Run make dist")
- run_command_in_host(['make', 'dist'])
-
- print(".6 Copy generated tarbal to desired path")
- if os.path.exists('netdata-%s.tar.gz' % pkg_friendly_version):
- run_command_in_host(['sudo', 'cp', 'netdata-%s.tar.gz' % pkg_friendly_version, tar_file])
-
- print(".7 Fixing permissions on tarball")
- run_command_in_host(['sudo', 'chmod', '777', tar_file])
- else:
- print("I could not find (%s) on the disk, stopping the build. Kindly check the logs and try again" % 'netdata-%s.tar.gz' % pkg_friendly_version)
- sys.exit(1)
diff --git a/.travis/package_management/configure_deb_lxc_environment.py b/.travis/package_management/configure_deb_lxc_environment.py
deleted file mode 100755
index 12328dde..00000000
--- a/.travis/package_management/configure_deb_lxc_environment.py
+++ /dev/null
@@ -1,95 +0,0 @@
-#!/usr/bin/env python3
-#
-# Prepare the build environment within the container
-# The script attaches to the running container and does the following:
-# 1) Create the container
-# 2) Start the container up
-# 3) Create the builder user
-# 4) Prepare the environment for DEB build
-#
-# Copyright: SPDX-License-Identifier: GPL-3.0-or-later
-#
-# Author : Pavlos Emm. Katsoulakis <paul@netdata.cloud>
-
-import common
-import os
-import sys
-import lxc
-
-if len(sys.argv) != 2:
- print('You need to provide a container name to get things started')
- sys.exit(1)
-container_name=sys.argv[1]
-
-# Setup the container object
-print("Defining container %s" % container_name)
-container = lxc.Container(container_name)
-if not container.defined:
- raise Exception("Container %s not defined!" % container_name)
-
-# Start the container
-if not container.start():
- raise Exception("Failed to start the container")
-
-if not container.running or not container.state == "RUNNING":
- raise Exception('Container %s is not running, configuration process aborted ' % container_name)
-
-# Wait for connectivity
-print("Waiting for container connectivity to start configuration sequence")
-if not container.get_ips(timeout=30):
- raise Exception("Timeout while waiting for container")
-
-build_path = "/home/%s" % os.environ['BUILDER_NAME']
-
-# Run the required activities now
-# 1. Create the builder user
-print("1. Adding user %s" % os.environ['BUILDER_NAME'])
-common.run_command(container, ["useradd", "-m", os.environ['BUILDER_NAME']])
-
-# Fetch package dependencies for the build
-print("2. Preparing repo on LXC container")
-common.prepare_repo(container)
-
-print("2.1 Install .DEB build support packages")
-common.run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "dpkg-dev"])
-common.run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "libdistro-info-perl"])
-common.run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "dh-make"])
-common.run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "dh-systemd"])
-common.run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "dh-autoreconf"])
-common.run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "git-buildpackage"])
-
-print("2.2 Add more dependencies")
-common.run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "libnetfilter-acct-dev"])
-common.run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "libcups2-dev"])
-
-print ("3.1 Run install-required-packages scriptlet")
-common.run_command(container, ["wget", "-T", "15", "-O", "%s/.install-required-packages.sh" % build_path, "https://raw.githubusercontent.com/netdata/netdata-demo-site/master/install-required-packages.sh"])
-common.run_command(container, ["bash", "%s/.install-required-packages.sh" % build_path, "netdata", "--dont-wait", "--non-interactive"])
-
-print("3.2 Installing package dependencies within LXC container")
-common.install_common_dependendencies(container)
-
-friendly_version=""
-dest_archive=""
-download_url=""
-tag = None
-friendly_version, tag = common.fetch_version(os.environ['BUILD_VERSION'])
-
-tar_file="%s/netdata-%s.tar.gz" % (os.path.dirname(dest_archive), friendly_version)
-
-print("5. I will be building version '%s' of netdata." % os.environ['BUILD_VERSION'])
-dest_archive="%s/netdata-%s.tar.gz" % (build_path, friendly_version)
-
-if str(os.environ["BUILD_STRING"]).count("debian/jessie") == 1:
- print("5.1 We are building for Jessie, adjusting control file")
- common.run_command_in_host(['sudo', 'rm', 'contrib/debian/control'])
- common.run_command_in_host(['sudo', 'cp', 'contrib/debian/control.jessie', 'contrib/debian/control'])
-
-common.prepare_version_source(dest_archive, friendly_version, tag=tag)
-
-print("6. Installing build.sh script to build path")
-common.run_command_in_host(['sudo', 'cp', '.travis/package_management/build.sh', "%s/%s/build.sh" % (os.environ['LXC_CONTAINER_ROOT'], build_path)])
-common.run_command_in_host(['sudo', 'chmod', '777', "%s/%s/build.sh" % (os.environ['LXC_CONTAINER_ROOT'], build_path)])
-common.run_command_in_host(['sudo', 'ln', '-sf', 'contrib/debian', 'debian'])
-
-print("Done!")
diff --git a/.travis/package_management/configure_rpm_lxc_environment.py b/.travis/package_management/configure_rpm_lxc_environment.py
deleted file mode 100755
index 79d34608..00000000
--- a/.travis/package_management/configure_rpm_lxc_environment.py
+++ /dev/null
@@ -1,102 +0,0 @@
-#!/usr/bin/env python3
-#
-#
-# Prepare the build environment within the container
-# The script attaches to the running container and does the following:
-# 1) Create the container
-# 2) Start the container up
-# 3) Create the builder user
-# 4) Prepare the environment for RPM build
-#
-# Copyright: SPDX-License-Identifier: GPL-3.0-or-later
-#
-# Author : Pavlos Emm. Katsoulakis <paul@netdata.cloud>
-
-import common
-import os
-import sys
-import lxc
-
-if len(sys.argv) != 2:
- print('You need to provide a container name to get things started')
- sys.exit(1)
-container_name=sys.argv[1]
-
-# Setup the container object
-print("Defining container %s" % container_name)
-container = lxc.Container(container_name)
-if not container.defined:
- raise Exception("Container %s not defined!" % container_name)
-
-# Start the container
-if not container.start():
- raise Exception("Failed to start the container")
-
-if not container.running or not container.state == "RUNNING":
- raise Exception('Container %s is not running, configuration process aborted ' % container_name)
-
-# Wait for connectivity
-print("Waiting for container connectivity to start configuration sequence")
-if not container.get_ips(timeout=30):
- raise Exception("Timeout while waiting for container")
-
-# Run the required activities now
-# Create the builder user
-print("1. Adding user %s" % os.environ['BUILDER_NAME'])
-common.run_command(container, ["useradd", "-m", os.environ['BUILDER_NAME']])
-
-# Fetch package dependencies for the build
-print("2.1 Preparing repo on LXC container")
-common.prepare_repo(container)
-
-common.run_command(container, ["wget", "-T", "15", "-O", "/home/%s/.install-required-packages.sh" % (os.environ['BUILDER_NAME']), "https://raw.githubusercontent.com/netdata/netdata-demo-site/master/install-required-packages.sh"])
-common.run_command(container, ["bash", "/home/%s/.install-required-packages.sh" % (os.environ['BUILDER_NAME']), "netdata", "--dont-wait", "--non-interactive"])
-
-# Exceptional cases, packages not available on every distribution
-#
-print("2.2 Installing distribution-specific dependencies and preparing LXC environment")
-# Not available on CentOS 7
-if os.environ["BUILD_STRING"].count("el/7") <= 0:
- common.run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "libnetfilter_acct-devel"])
-
-# Not available on CentOS 6
-if os.environ["BUILD_STRING"].count("el/6") <= 0:
- common.run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "autoconf-archive"])
-
-print("2.3 Installing common dependencies")
-common.install_common_dependendencies(container)
-
-print("3. Setting up macros")
-common.run_command(container, ["sudo", "-u", os.environ['BUILDER_NAME'], "/bin/echo", "'%_topdir %(echo /home/" + os.environ['BUILDER_NAME'] + ")/rpmbuild' > /home/" + os.environ['BUILDER_NAME'] + "/.rpmmacros"])
-
-print("4. Create rpmbuild directory")
-common.run_command(container, ["sudo", "-u", os.environ['BUILDER_NAME'], "mkdir", "-p", "/home/" + os.environ['BUILDER_NAME'] + "/rpmbuild/BUILD"])
-common.run_command(container, ["sudo", "-u", os.environ['BUILDER_NAME'], "mkdir", "-p", "/home/" + os.environ['BUILDER_NAME'] + "/rpmbuild/RPMS"])
-common.run_command(container, ["sudo", "-u", os.environ['BUILDER_NAME'], "mkdir", "-p", "/home/" + os.environ['BUILDER_NAME'] + "/rpmbuild/SOURCES"])
-common.run_command(container, ["sudo", "-u", os.environ['BUILDER_NAME'], "mkdir", "-p", "/home/" + os.environ['BUILDER_NAME'] + "/rpmbuild/SPECS"])
-common.run_command(container, ["sudo", "-u", os.environ['BUILDER_NAME'], "mkdir", "-p", "/home/" + os.environ['BUILDER_NAME'] + "/rpmbuild/SRPMS"])
-common.run_command(container, ["sudo", "-u", os.environ['BUILDER_NAME'], "ls", "-ltrR", "/home/" + os.environ['BUILDER_NAME'] + "/rpmbuild"])
-
-# Download the source
-rpm_friendly_version=""
-dest_archive=""
-download_url=""
-spec_file="/home/%s/rpmbuild/SPECS/netdata.spec" % os.environ['BUILDER_NAME']
-tag = None
-rpm_friendly_version, tag = common.fetch_version(os.environ['BUILD_VERSION'])
-tar_file="%s/netdata-%s.tar.gz" % (os.path.dirname(dest_archive), rpm_friendly_version)
-
-print("5. I will be building version '%s' of netdata." % os.environ['BUILD_VERSION'])
-dest_archive="/home/%s/rpmbuild/SOURCES/netdata-%s.tar.gz" % (os.environ['BUILDER_NAME'], rpm_friendly_version)
-
-common.prepare_version_source(dest_archive, rpm_friendly_version, tag=tag)
-
-# Extract the spec file in place
-print("6. Extract spec file from the source")
-common.run_command_in_host(['sudo', 'cp', 'netdata.spec', os.environ['LXC_CONTAINER_ROOT'] + spec_file])
-common.run_command_in_host(['sudo', 'chmod', '777', os.environ['LXC_CONTAINER_ROOT'] + spec_file])
-
-print("7. Temporary hack: Change Source0 to %s on spec file %s" % (dest_archive, spec_file))
-common.replace_tag("Source0", os.environ['LXC_CONTAINER_ROOT'] + spec_file, tar_file)
-
-print('Done!')
diff --git a/.travis/package_management/create_lxc_for_build.sh b/.travis/package_management/create_lxc_for_build.sh
deleted file mode 100755
index d733687a..00000000
--- a/.travis/package_management/create_lxc_for_build.sh
+++ /dev/null
@@ -1,101 +0,0 @@
-#!/usr/bin/env bash
-#
-# This script generates an LXC container and starts it up
-# Once the script completes successfully, a container is available for use
-# The container image to use and the container name to set are taken from environment
-# variables that must be present for the script to work
-#
-# Copyright: SPDX-License-Identifier: GPL-3.0-or-later
-#
-# Author : Pavlos Emm. Katsoulakis (paul@netdata.cloud)
-# shellcheck disable=SC1091
-set -e
-
-source .travis/package_management/functions.sh || (echo "Failed to load packaging library" && exit 1)
-
-# If we are not in netdata git repo, at the top level directory, fail
-TOP_LEVEL=$(basename "$(git rev-parse --show-toplevel)")
-CWD=$(git rev-parse --show-cdup)
-if [ -n "$CWD" ] || [ ! "${TOP_LEVEL}" == "netdata" ]; then
- echo "Run as .travis/package_management/$(basename "$0") from top level directory of netdata git repository"
- echo "LXC Container creation aborted"
- exit 1
-fi
-
-# Check for presence of mandatory environment variables
-if [ -z "${BUILD_STRING}" ]; then
- echo "No Distribution was defined. Make sure BUILD_STRING is set on the environment before running this script"
- exit 1
-fi
-
-if [ -z "${BUILDER_NAME}" ]; then
- echo "No builder account and container name defined. Make sure BUILDER_NAME is set on the environment before running this script"
- exit 1
-fi
-
-if [ -z "${BUILD_DISTRO}" ]; then
- echo "No build distro information defined. Make sure BUILD_DISTRO is set on the environment before running this script"
- exit 1
-fi
-
-if [ -z "${BUILD_RELEASE}" ]; then
- echo "No build release information defined. Make sure BUILD_RELEASE is set on the environment before running this script"
- exit 1
-fi
-
-if [ -z "${PACKAGE_TYPE}" ]; then
- echo "No package type defined. Make sure PACKAGE_TYPE is set on the environment before running this script"
- exit 1
-fi
-
-# Detect architecture and load extra variables needed
-detect_arch_from_commit
-
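-# Example invocation (hypothetical values):
-#   TRAVIS_COMMIT_MESSAGE="[Package amd64 DEB]" BUILD_STRING="ubuntu/18.04" \
-#   BUILDER_NAME="netdatabot" BUILD_DISTRO="ubuntu" BUILD_RELEASE="18.04" PACKAGE_TYPE="deb" \
-#   .travis/package_management/create_lxc_for_build.sh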
-echo "Creating LXC container ${BUILDER_NAME}/${BUILD_STRING}/${BUILD_ARCH}...."
-
-case "${BUILD_ARCH}" in
-"all")
- # i386
- echo "Creating LXC Container for i386.."
- export CONTAINER_NAME="${BUILDER_NAME}-${BUILD_DISTRO}${BUILD_RELEASE}-i386"
- export LXC_CONTAINER_ROOT="/var/lib/lxc/${CONTAINER_NAME}/rootfs"
- lxc-create -n "${CONTAINER_NAME}" -t "download" -- --dist "${BUILD_DISTRO}" --release "${BUILD_RELEASE}" --arch "i386" --no-validate
-
- echo "Container(s) ready. Configuring container(s).."
- .travis/package_management/configure_${PACKAGE_TYPE}_lxc_environment.py "${CONTAINER_NAME}"
-
- # amd64
- echo "Creating LXC Container for amd64.."
- export CONTAINER_NAME="${BUILDER_NAME}-${BUILD_DISTRO}${BUILD_RELEASE}-amd64"
- export LXC_CONTAINER_ROOT="/var/lib/lxc/${CONTAINER_NAME}/rootfs"
- lxc-create -n "${CONTAINER_NAME}" -t "download" -- --dist "${BUILD_DISTRO}" --release "${BUILD_RELEASE}" --arch "amd64" --no-validate
-
- echo "Container(s) ready. Configuring container(s).."
- .travis/package_management/configure_${PACKAGE_TYPE}_lxc_environment.py "${CONTAINER_NAME}"
-
- # arm64
- echo "Creating LXC Container for arm64.."
- export CONTAINER_NAME="${BUILDER_NAME}-${BUILD_DISTRO}${BUILD_RELEASE}-arm64"
- export LXC_CONTAINER_ROOT="/var/lib/lxc/${CONTAINER_NAME}/rootfs"
- lxc-create -n "${CONTAINER_NAME}" -t "download" -- --dist "${BUILD_DISTRO}" --release "${BUILD_RELEASE}" --arch "arm64" --no-validate
-
- echo "Container(s) ready. Configuring container(s).."
- .travis/package_management/configure_${PACKAGE_TYPE}_lxc_environment.py "${CONTAINER_NAME}"
- ;;
-"i386"|"amd64"|"arm64")
- # Single-architecture build: i386, amd64, or arm64
- echo "Creating LXC Container for ${BUILD_ARCH}.."
- export CONTAINER_NAME="${BUILDER_NAME}-${BUILD_DISTRO}${BUILD_RELEASE}-${BUILD_ARCH}"
- export LXC_CONTAINER_ROOT="/var/lib/lxc/${CONTAINER_NAME}/rootfs"
- lxc-create -n "${CONTAINER_NAME}" -t "download" -- --dist "${BUILD_DISTRO}" --release "${BUILD_RELEASE}" --arch "${BUILD_ARCH}" --no-validate
-
- echo "Container(s) ready. Configuring container(s).."
- .travis/package_management/configure_${PACKAGE_TYPE}_lxc_environment.py "${CONTAINER_NAME}"
- ;;
-*)
- echo "Unknown BUILD_ARCH value '${BUILD_ARCH}' given, process failed"
- exit 1
- ;;
-esac
-
-echo "..LXC creation complete!"
diff --git a/.travis/package_management/functions.sh b/.travis/package_management/functions.sh
deleted file mode 100644
index 0c4425fa..00000000
--- a/.travis/package_management/functions.sh
+++ /dev/null
@@ -1,33 +0,0 @@
-# no-shebang-needed-its-a-library
-#
-# Utility functions for packaging in Travis CI
-#
-# Copyright: SPDX-License-Identifier: GPL-3.0-or-later
-#
-# Author : Pavlos Emm. Katsoulakis (paul@netdata.cloud)
-#shellcheck disable=SC2148
-set -e
-
-function detect_arch_from_commit {
- case "${TRAVIS_COMMIT_MESSAGE}" in
- "[Package amd64"*)
- export BUILD_ARCH="amd64"
- ;;
- "[Package i386"*)
- export BUILD_ARCH="i386"
- ;;
- "[Package ALL"*)
- export BUILD_ARCH="all"
- ;;
- "[Package arm64"*)
- export BUILD_ARCH="arm64"
- ;;
-
- *)
- echo "Unknown build architecture in '${TRAVIS_COMMIT_MESSAGE}'. No BUILD_ARCH can be provided"
- exit 1
- ;;
- esac
-
- echo "Detected build architecture ${BUILD_ARCH}"
-}
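-
-# Example (hypothetical commit message):
-#   TRAVIS_COMMIT_MESSAGE="[Package arm64 RPM]" detect_arch_from_commit
-# exports BUILD_ARCH="arm64" and echoes "Detected build architecture arm64"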
diff --git a/.travis/package_management/package_cloud_wrapper.sh b/.travis/package_management/package_cloud_wrapper.sh
deleted file mode 100755
index 48a372d3..00000000
--- a/.travis/package_management/package_cloud_wrapper.sh
+++ /dev/null
@@ -1,48 +0,0 @@
-#!/usr/bin/env bash
-#
-# This is a tool that helps remove packages from packagecloud.io
-# It utilizes the package_cloud utility provided by packagecloud.io
-#
-# Depends on:
-# 1) package cloud gem (detects absence and installs it)
-#
-# Requires:
-# 1) PKG_CLOUD_TOKEN variable exported
-# 2) To install package_cloud when it is not found, the following are required: ruby gcc gcc-c++ ruby-devel
-#
-# Copyright: SPDX-License-Identifier: GPL-3.0-or-later
-#
-# Author : Pavlos Emm. Katsoulakis (paul@netdata.cloud)
-#shellcheck disable=SC2068,SC2145
-set -e
-PKG_CLOUD_CONFIG="$HOME/.package_cloud_configuration.cfg"
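-
-# Example invocation (hypothetical values):
-#   PKG_CLOUD_TOKEN="xxxx" .travis/package_management/package_cloud_wrapper.sh \
-#     yank netdata/netdata/el/7 netdata-1.17.0-1.x86_64.rpm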
-
-# If we are not in netdata git repo, at the top level directory, fail
-TOP_LEVEL=$(basename "$(git rev-parse --show-toplevel)")
-CWD=$(git rev-parse --show-cdup)
-if [ -n "$CWD" ] || [ ! "${TOP_LEVEL}" == "netdata" ]; then
- echo "Run as .travis/package_management/$(basename "$0") from top level directory of netdata git repository"
- echo "Package cloud wrapper aborted"
- exit 1
-fi
-
-# Install dependency if not there
-if ! command -v package_cloud > /dev/null 2>&1; then
- echo "No package cloud gem found, installing"
- gem install -V package_cloud || (echo "Package cloud installation failed. You might want to check that the required dependencies are installed (ruby gcc gcc-c++ ruby-devel)" && exit 1)
-else
- echo "Found package_cloud gem, continuing"
-fi
-
-# Check for required token and prepare config
-if [ -z "${PKG_CLOUD_TOKEN}" ]; then
- echo "Please set PKG_CLOUD_TOKEN to be able to use ${0}"
- exit 1
-fi
-echo "{\"url\":\"https://packagecloud.io\",\"token\":\"${PKG_CLOUD_TOKEN}\"}" > "${PKG_CLOUD_CONFIG}"
-
-echo "Executing package_cloud with config ${PKG_CLOUD_CONFIG} and parameters $@"
-package_cloud $@ --config="${PKG_CLOUD_CONFIG}"
-
-rm -rf "${PKG_CLOUD_CONFIG}"
-echo "Done!"
diff --git a/.travis/package_management/prepare_packages.sh b/.travis/package_management/prepare_packages.sh
deleted file mode 100755
index 12ed07cc..00000000
--- a/.travis/package_management/prepare_packages.sh
+++ /dev/null
@@ -1,63 +0,0 @@
-#!/usr/bin/env bash
-#
-# Utility that gathers generated packages and puts them together
-# in a local folder for the deploy facility to pick up
-#
-# Copyright: SPDX-License-Identifier: GPL-3.0-or-later
-#
-# Author : Pavlos Emm. Katsoulakis (paul@netdata.cloud)
-#shellcheck disable=SC2068
-set -e
-
-# If we are not in netdata git repo, at the top level directory, fail
-TOP_LEVEL=$(basename "$(git rev-parse --show-toplevel)")
-CWD=$(git rev-parse --show-cdup)
-if [ -n "$CWD" ] || [ ! "${TOP_LEVEL}" == "netdata" ]; then
- echo "Run as .travis/package_management/$(basename "$0") from top level directory of netdata git repository"
- echo "Package preparation aborted"
- exit 1
-fi
-
-export LXC_ROOT="/var/lib/lxc"
-
-# Go through the containers created for packaging and pick up all generated packages
-CREATED_CONTAINERS=$(ls -A "${LXC_ROOT}")
-for d in ${CREATED_CONTAINERS[@]}; do
- echo "Picking up packaging contents from ${d}"
-
- # Pick up any RPMS from builder
- RPM_BUILD_PATH="${LXC_ROOT}/${d}/rootfs/home/${BUILDER_NAME}/rpmbuild"
- if [ -d "${RPM_BUILD_PATH}" ]; then
- echo "Checking folder ${RPM_BUILD_PATH} for RPMS and SRPMS"
-
- if [ -d "${RPM_BUILD_PATH}/RPMS" ]; then
- echo "Copying RPMS from '${RPM_BUILD_PATH}'; the following will be copied:"
- ls -ltrR "${RPM_BUILD_PATH}/RPMS"
- [[ -d "${RPM_BUILD_PATH}/RPMS/x86_64" ]] && cp -r "${RPM_BUILD_PATH}"/RPMS/x86_64/* "${PACKAGES_DIRECTORY}"
- [[ -d "${RPM_BUILD_PATH}/RPMS/i386" ]] && cp -r "${RPM_BUILD_PATH}"/RPMS/i386/* "${PACKAGES_DIRECTORY}"
- [[ -d "${RPM_BUILD_PATH}/RPMS/i686" ]] && cp -r "${RPM_BUILD_PATH}"/RPMS/i686/* "${PACKAGES_DIRECTORY}"
- fi
-
- if [ -d "${RPM_BUILD_PATH}/SRPMS" ]; then
- echo "Copying SRPMS from '${RPM_BUILD_PATH}'; the following will be copied:"
- ls -ltrR "${RPM_BUILD_PATH}/SRPMS"
- [[ -d "${RPM_BUILD_PATH}/SRPMS/x86_64" ]] && cp -r "${RPM_BUILD_PATH}"/SRPMS/x86_64/* "${PACKAGES_DIRECTORY}"
- [[ -d "${RPM_BUILD_PATH}/SRPMS/i386" ]] && cp -r "${RPM_BUILD_PATH}"/SRPMS/i386/* "${PACKAGES_DIRECTORY}"
- [[ -d "${RPM_BUILD_PATH}/SRPMS/i686" ]] && cp -r "${RPM_BUILD_PATH}"/SRPMS/i686/* "${PACKAGES_DIRECTORY}"
- fi
- else
- DEB_BUILD_PATH="${LXC_ROOT}/${d}/rootfs/home/${BUILDER_NAME}"
- echo "Checking folder ${DEB_BUILD_PATH} for DEB packages"
- if [ -d "${DEB_BUILD_PATH}" ]; then
- cp "${DEB_BUILD_PATH}"/netdata*.ddeb "${PACKAGES_DIRECTORY}" || echo "Could not copy any .ddeb files"
- cp "${DEB_BUILD_PATH}"/netdata*.deb "${PACKAGES_DIRECTORY}" || echo "Could not copy any .deb files"
- cp "${DEB_BUILD_PATH}"/netdata*.buildinfo "${PACKAGES_DIRECTORY}" || echo "Could not copy any .buildinfo files"
- cp "${DEB_BUILD_PATH}"/netdata*.changes "${PACKAGES_DIRECTORY}" || echo "Could not copy any .changes files"
- else
- echo "Folder ${DEB_BUILD_PATH} does not exist or is not a directory, nothing to do for package preparation"
- fi
- fi
-done
-
-chmod -R 777 "${PACKAGES_DIRECTORY}"
-echo "Packaging contents ready to ship!"
diff --git a/.travis/package_management/trigger_deb_lxc_build.py b/.travis/package_management/trigger_deb_lxc_build.py
deleted file mode 100755
index a0235a73..00000000
--- a/.travis/package_management/trigger_deb_lxc_build.py
+++ /dev/null
@@ -1,73 +0,0 @@
-#!/usr/bin/env python3
-#
-# This script is responsible for running the DEB build on the running container
-#
-# Copyright: SPDX-License-Identifier: GPL-3.0-or-later
-#
-# Author : Pavlos Emm. Katsoulakis <paul@netdata.cloud>
-
-import common
-import os
-import sys
-import lxc
-
-if len(sys.argv) != 2:
- print('You need to provide a container name to get things started')
- sys.exit(1)
-container_name=sys.argv[1]
-
-# Load the container; bail out if it's not there
-print("Starting up container %s" % container_name)
-container = lxc.Container(container_name)
-if not container.defined:
- raise Exception("Container %s does not exist!" % container_name)
-
-# Check if the container is running; attempt to start it up in case it's not running
-if not container.running or not container.state == "RUNNING":
- print('Container %s is not running, attempting to start it up' % container_name)
-
- # Start the container
- if not container.start():
- raise Exception("Failed to start the container")
-
- if not container.running or not container.state == "RUNNING":
- raise Exception('Container %s is not running, configuration process aborted ' % container_name)
-
-# Wait for connectivity
-if not container.get_ips(timeout=30):
- raise Exception("Timeout while waiting for container")
-
-build_path = "/home/%s" % os.environ['BUILDER_NAME']
-
-print("Setting up EMAIL and DEBFULLNAME variables required by the build tools")
-os.environ["EMAIL"] = "bot@netdata.cloud"
-os.environ["DEBFULLNAME"] = "Netdata builder"
-
-# Run the build process on the container
-new_version, tag = common.fetch_version(os.environ['BUILD_VERSION'])
-print("Starting DEB build process for version %s" % new_version)
-
-netdata_tarball = "%s/netdata-%s.tar.gz" % (build_path, new_version)
-unpacked_netdata = netdata_tarball.replace(".tar.gz", "")
-
-print("Extracting tarball %s" % netdata_tarball)
-common.run_command(container, ["sudo", "-u", os.environ['BUILDER_NAME'], "tar", "xf", netdata_tarball, "-C", build_path])
-
-print("Fixing changelog tags")
-changelog_in_host = "contrib/debian/changelog"
-common.run_command_in_host(['sed', '-i', 's/PREVIOUS_PACKAGE_VERSION/%s-1/g' % os.environ["LATEST_RELEASE_VERSION"].replace("v", ""), changelog_in_host])
-common.run_command_in_host(['sed', '-i', 's/PREVIOUS_PACKAGE_DATE/%s/g' % os.environ["LATEST_RELEASE_DATE"], changelog_in_host])
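-# Illustrative effect: with LATEST_RELEASE_VERSION="v1.16.1", occurrences of
-# PREVIOUS_PACKAGE_VERSION in the changelog become "1.16.1-1", and
-# PREVIOUS_PACKAGE_DATE becomes the value of LATEST_RELEASE_DATE.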
-
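-# Note: gbp dch (from git-buildpackage) regenerates debian/changelog entries from the
-# git history since LATEST_RELEASE_VERSION, stamping them with the new version.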
-print("Executing gbp dch command..")
-common.run_command_in_host(['gbp', 'dch', '--release', '--ignore-branch', '--spawn-editor=snapshot', '--since=%s' % os.environ["LATEST_RELEASE_VERSION"], '--new-version=%s' % new_version])
-
-print("Copying over changelog to the destination machine")
-common.run_command_in_host(['sudo', 'cp', 'debian/changelog', "%s/%s/netdata-%s/contrib/debian/" % (os.environ['LXC_CONTAINER_ROOT'], build_path, new_version)])
-
-print("Running debian build script since %s" % os.environ["LATEST_RELEASE_VERSION"])
-common.run_command(container, ["sudo", "-u", os.environ['BUILDER_NAME'], "%s/build.sh" % build_path, unpacked_netdata, new_version])
-
-print("Listing contents on build path")
-common.run_command(container, ["sudo", "-u", os.environ['BUILDER_NAME'], "ls", "-ltr", build_path])
-
-print('Done!')
diff --git a/.travis/package_management/trigger_rpm_lxc_build.py b/.travis/package_management/trigger_rpm_lxc_build.py
deleted file mode 100755
index f9e109c7..00000000
--- a/.travis/package_management/trigger_rpm_lxc_build.py
+++ /dev/null
@@ -1,55 +0,0 @@
-#!/usr/bin/env python3
-#
-# This script is responsible for running the RPM build on the running container
-#
-# Copyright: SPDX-License-Identifier: GPL-3.0-or-later
-#
-# Author : Pavlos Emm. Katsoulakis <paul@netdata.cloud>
-
-import common
-import os
-import sys
-import lxc
-
-if len(sys.argv) != 2:
- print('You need to provide a container name to get things started')
- sys.exit(1)
-container_name=sys.argv[1]
-
-# Load the container; bail out if it's not there
-print("Starting up container %s" % container_name)
-container = lxc.Container(container_name)
-if not container.defined:
- raise Exception("Container %s does not exist!" % container_name)
-
-# Check if the container is running; attempt to start it up in case it's not running
-if not container.running or not container.state == "RUNNING":
- print('Container %s is not running, attempting to start it up' % container_name)
-
- # Start the container
- if not container.start():
- raise Exception("Failed to start the container")
-
- if not container.running or not container.state == "RUNNING":
- raise Exception('Container %s is not running, configuration process aborted ' % container_name)
-
-# Wait for connectivity
-if not container.get_ips(timeout=30):
- raise Exception("Timeout while waiting for container")
-
-print("Adding builder specific dependencies to the LXC container")
-common.run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "rpm-build"])
-common.run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "rpm-devel"])
-common.run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "rpmlint"])
-common.run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "make"])
-common.run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "python"])
-common.run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "bash"])
-common.run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "diffutils"])
-common.run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "patch"])
-common.run_command(container, [os.environ["REPO_TOOL"], "install", "-y", "rpmdevtools"])
-
-# Run the build process on the container
-print("Starting RPM build process")
-common.run_command(container, ["sudo", "-u", os.environ['BUILDER_NAME'], "rpmbuild", "-ba", "--rebuild", "/home/%s/rpmbuild/SPECS/netdata.spec" % os.environ['BUILDER_NAME']])
-
-print('Done!')
diff --git a/.travis/package_management/yank_stale_rpm.sh b/.travis/package_management/yank_stale_rpm.sh
deleted file mode 100755
index 3f766971..00000000
--- a/.travis/package_management/yank_stale_rpm.sh
+++ /dev/null
@@ -1,35 +0,0 @@
-#!/usr/bin/env bash
-#
-# This script is responsible for the removal of stale RPM/DEB files.
-# It runs during the pre-deploy step and removes the old files
-# prior to the upload of the freshly built ones
-#
-# Copyright: SPDX-License-Identifier: GPL-3.0-or-later
-#
-# Author : Pavlos Emm. Katsoulakis (paul@netdata.cloud)
-#shellcheck disable=SC2010,SC2068
-set -e
-
-# If we are not in netdata git repo, at the top level directory, fail
-TOP_LEVEL=$(basename "$(git rev-parse --show-toplevel)")
-CWD=$(git rev-parse --show-cdup)
-if [ -n "$CWD" ] || [ ! "${TOP_LEVEL}" == "netdata" ]; then
- echo "Run as .travis/package_management/$(basename "$0") from top level directory of netdata git repository"
- echo "Package yanking cancelled"
- exit 1
-fi
-
-PACKAGES_DIR="$1"
-DISTRO="$2"
-PACKAGES_LIST="$(ls -AR "${PACKAGES_DIR}" | grep -e '\.rpm' -e '\.deb' -e '\.ddeb' )"
-
-if [ ! -d "${PACKAGES_DIR}" ] || [ -z "${PACKAGES_LIST}" ]; then
- echo "Folder ${PACKAGES_DIR} does not seem to be a valid directory or is empty. No packages to check for yanking"
- exit 1
-fi
-
-for pkg in ${PACKAGES_LIST[@]}; do
- echo "Attempting yank on ${pkg}.."
- .travis/package_management/package_cloud_wrapper.sh yank "${PACKAGING_USER}/${DEPLOY_REPO}/${DISTRO}" "${pkg}" || echo "Nothing to yank or error on ${pkg}"
-done
-
diff --git a/.travis/run_install_with_dist_file.sh b/.travis/run_install_with_dist_file.sh
deleted file mode 100755
index ccad627c..00000000
--- a/.travis/run_install_with_dist_file.sh
+++ /dev/null
@@ -1,48 +0,0 @@
-#!/usr/bin/env bash
-#
-# This script validates the netdata installation using the source archive produced by make dist
-#
-# Copyright: SPDX-License-Identifier: GPL-3.0-or-later
-#
-# Author : Pavlos Emm. Katsoulakis <paul@netdata.cloud>
-#
-set -e
-
-# If we are not in netdata git repo, at the top level directory, fail
-TOP_LEVEL=$(basename "$(git rev-parse --show-toplevel)")
-CWD=$(git rev-parse --show-cdup || echo "")
-if [ -n "${CWD}" ] || [ ! "${TOP_LEVEL}" == "netdata" ]; then
- echo "Run as .travis/$(basename "$0") from top level directory of netdata git repository"
- echo "Dist file installation test aborted"
- exit 1
-fi
-
-echo "Initiating dist archive contents validation"
-DIST_FILE_FROM_GIT="netdata-$(git describe).tar.gz"
-DIST_FILE_FROM_FILE="netdata-$(tr -d '\n' < packaging/version).tar.gz"
-if [ -f ${DIST_FILE_FROM_GIT} ]; then
- DIST_FILE="${DIST_FILE_FROM_GIT}"
-elif [ -f ${DIST_FILE_FROM_FILE} ]; then
- DIST_FILE="${DIST_FILE_FROM_FILE}"
-else
- echo "I could not find the netdata dist file. Neither ${DIST_FILE_FROM_GIT} nor ${DIST_FILE_FROM_FILE} exists"
- exit 1
-fi
-
-echo "Opening dist archive ${DIST_FILE}"
-tar -xovf "${DIST_FILE}"
-NETDATA_DIST_FOLDER=$(echo ${DIST_FILE} | cut -d. -f1,2,3)
-if [ ! -d ${NETDATA_DIST_FOLDER} ]; then
- echo "I could not locate folder ${NETDATA_DIST_FOLDER}; something went wrong, failing the test"
- exit 1
-fi
-
-echo "Entering ${NETDATA_DIST_FOLDER} and starting docker compilation"
-cd ${NETDATA_DIST_FOLDER}
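-# The one-liner below compiles and installs netdata inside the centos7 test image, then
-# queries the local API (http://127.0.0.1:19999/api/v1/info) to verify the fresh agent responds.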
-docker run -it -v "${PWD}:/code:rw" -w /code "netdata/os-test:centos7" /bin/bash -c "./netdata-installer.sh --dont-wait --install /tmp && echo \"Validating netdata instance is running\" && wget -O'-' 'http://127.0.0.1:19999/api/v1/info' | grep version"
-
-echo "Installation completed with no errors! Removing temporary folders"
-
-# TODO: Travis gives me a permission denied on some files here; I made it a soft error until I figure out what is wrong
-cd -
-rm -rf ${NETDATA_DIST_FOLDER} || echo "I could not remove the temporary directory; make sure you delete ${NETDATA_DIST_FOLDER} yourself if this wasn't run on ephemeral storage"
diff --git a/.travis/tagger.sh b/.travis/tagger.sh
deleted file mode 100755
index adac35b4..00000000
--- a/.travis/tagger.sh
+++ /dev/null
@@ -1,77 +0,0 @@
-#!/bin/bash
-#
-# Original script is available at https://github.com/paulfantom/travis-helper/blob/master/releasing/releaser.sh
-#
-# Tags are generated by searching for a keyword in the last commit message. Keywords are:
-# - [netdata patch release] to bump the patch number
-# - [netdata minor release] to bump the minor number
-# - [netdata major release] to bump the major number
-# - [netdata release candidate] to cut the next release candidate tag
-# All keywords MUST be surrounded with square brackets.
-#
-# Requirements:
-# - GITHUB_TOKEN variable set with GitHub token. Access level: repo.public_repo
-# - git-semver python package (pip install git-semver)
-#
-# Note: Exported variables needed by .travis/draft_release.sh
-#
-# Copyright: SPDX-License-Identifier: GPL-3.0-or-later
-#
-# Author : Pawel Krupa (paulfantom)
-# Author : Pavlos Emm. Katsoulakis (paul@netdata.cloud)
-
-set -e
-
-
-# If we are not in netdata git repo, at the top level directory, fail
-TOP_LEVEL=$(basename "$(git rev-parse --show-toplevel)")
-CWD=$(git rev-parse --show-cdup || echo "")
-if [ -n "${CWD}" ] || [ ! "${TOP_LEVEL}" == "netdata" ]; then
- echo "Run as .travis/$(basename "$0") from top level directory of netdata git repository"
- echo "Tagging process aborted"
- exit 1
-fi
-
-
-# Figure out what will be new release candidate tag based only on previous ones.
-# This assumes that releases use the format "v0.1.2" and prereleases (RCs) use "v0.1.2-rc0"
-function set_tag_release_candidate() {
- LAST_TAG=$(git semver)
- echo "Last tag found is: ${LAST_TAG}"
-
- if [[ $LAST_TAG =~ -rc* ]]; then
- VERSION=$(echo "$LAST_TAG" | cut -d'-' -f 1)
- LAST_RC=$(echo "$LAST_TAG" | cut -d'c' -f 2)
- RC=$((LAST_RC + 1))
- else
- VERSION="$(git semver --next-minor)"
- RC=0
- echo "Warning: no previous RC found; setting version to ${VERSION} (last tag: ${LAST_TAG}) for release candidate generation"
- fi
- GIT_TAG="v${VERSION}-rc${RC}"
-}
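-
-# Worked example (illustrative, assuming git semver reports tags without a "v" prefix):
-#   LAST_TAG=1.16.0-rc2 -> VERSION=1.16.0, RC=3, GIT_TAG=v1.16.0-rc3
-# A non-RC LAST_TAG starts a fresh series at the next minor version with rc0.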
-
-echo "Determining TAG"
-
-# Check if current commit is tagged or not
-GIT_TAG=$(git tag --points-at)
-
-if [ -z "${GIT_TAG}" ]; then
- git semver
- # Figure out next tag based on commit message
- echo "Last commit message: ${TRAVIS_COMMIT_MESSAGE}"
- case "${TRAVIS_COMMIT_MESSAGE}" in
- *"[netdata patch release]"*) GIT_TAG="v$(git semver --next-patch)" ;;
- *"[netdata minor release]"*) GIT_TAG="v$(git semver --next-minor)" ;;
- *"[netdata major release]"*) GIT_TAG="v$(git semver --next-major)" ;;
- *"[netdata release candidate]"*) set_tag_release_candidate ;;
- *)
- echo "Keyword not detected. Exiting..."
- exit 0
- ;;
- esac
-fi
-
-echo "Setting up GIT_TAG to ${GIT_TAG}"
-export GIT_TAG
diff --git a/CHANGELOG.md b/CHANGELOG.md
index d66fc666..c883c208 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,67 +1,85 @@
# Changelog
+## [v1.17.0](https://github.com/netdata/netdata/tree/v1.17.0) (2019-09-02)
+
+**Fixed bugs:**
+
+- netdata/installer: fix static64 installer always overwriting configuration [\#6710](https://github.com/netdata/netdata/pull/6710) ([paulkatsoulakis](https://github.com/paulkatsoulakis))
+- netdata/packaging: put go.d version in one place [\#6557](https://github.com/netdata/netdata/pull/6557) ([paulkatsoulakis](https://github.com/paulkatsoulakis))
+
+**Merged pull requests:**
+
+- Skip issues from release changelog [\#6759](https://github.com/netdata/netdata/pull/6759) ([cakrit](https://github.com/cakrit))
+- Increase minimum release for changelog [\#6758](https://github.com/netdata/netdata/pull/6758) ([cakrit](https://github.com/cakrit))
+- netdata/packaging: Add python3-lxc dependency [\#6753](https://github.com/netdata/netdata/pull/6753) ([paulkatsoulakis](https://github.com/paulkatsoulakis))
+- make coverity-scan.sh usable by hand [\#6747](https://github.com/netdata/netdata/pull/6747) ([ktsaou](https://github.com/ktsaou))
+- netdata/packaging: fix coverity problem in travis \(2 issues\) [\#6743](https://github.com/netdata/netdata/pull/6743) ([paulkatsoulakis](https://github.com/paulkatsoulakis))
+- Add command-line option descriptions for apps.plugin [\#6738](https://github.com/netdata/netdata/pull/6738) ([vlvkobal](https://github.com/vlvkobal))
+- netdata/packaging: Add purging logic for package cloud repositories [\#6732](https://github.com/netdata/netdata/pull/6732) ([paulkatsoulakis](https://github.com/paulkatsoulakis))
+- Fix corrupted transaction payload handling [\#6731](https://github.com/netdata/netdata/pull/6731) ([mfundul](https://github.com/mfundul))
+- Netdata-installer warning removed [\#6715](https://github.com/netdata/netdata/pull/6715) ([thiagoftsm](https://github.com/thiagoftsm))
+- Removal of unnecessary NULL web server [\#6714](https://github.com/netdata/netdata/pull/6714) ([thiagoftsm](https://github.com/thiagoftsm))
+- History tips [\#6711](https://github.com/netdata/netdata/pull/6711) ([jacekkolasa](https://github.com/jacekkolasa))
+- exact path to netdata.conf in .gitignore [\#6709](https://github.com/netdata/netdata/pull/6709) ([sunflowerbofh](https://github.com/sunflowerbofh))
+- Tabulation on health/health.c [\#6691](https://github.com/netdata/netdata/pull/6691) ([thiagoftsm](https://github.com/thiagoftsm))
+- netdata/packaging: Align libdir in all configure commands [\#6682](https://github.com/netdata/netdata/pull/6682) ([paulkatsoulakis](https://github.com/paulkatsoulakis))
+- spigotmc module: :pencil2: fix typos. [\#6680](https://github.com/netdata/netdata/pull/6680) ([Cat7373](https://github.com/Cat7373))
+- netdata/packaging: \[ci skip\] Correlate configure command [\#6678](https://github.com/netdata/netdata/pull/6678) ([paulkatsoulakis](https://github.com/paulkatsoulakis))
+- netdata/packaging: Add auto-generation of packages for nightly [\#6675](https://github.com/netdata/netdata/pull/6675) ([paulkatsoulakis](https://github.com/paulkatsoulakis))
+- .travis.yml: Improve template and commit\_message predicates readability [\#6673](https://github.com/netdata/netdata/pull/6673) ([knatsakis](https://github.com/knatsakis))
+- .travis.yml: Build docker images in parallel [\#6672](https://github.com/netdata/netdata/pull/6672) ([knatsakis](https://github.com/knatsakis))
+- add CHANGELOG.md to .remarkignore [\#6671](https://github.com/netdata/netdata/pull/6671) ([prhomhyse](https://github.com/prhomhyse))
+- netdata/packaging: separately identify dependency resolution for HTTPS and DB engine [\#6670](https://github.com/netdata/netdata/pull/6670) ([paulkatsoulakis](https://github.com/paulkatsoulakis))
+- Bugfix 931843 1 [\#6667](https://github.com/netdata/netdata/pull/6667) ([sunflowerbofh](https://github.com/sunflowerbofh))
+- Fix Markdown Lint warnings [\#6664](https://github.com/netdata/netdata/pull/6664) ([prhomhyse](https://github.com/prhomhyse))
+- Remove texts inside tests/Makefile [\#6660](https://github.com/netdata/netdata/pull/6660) ([thiagoftsm](https://github.com/thiagoftsm))
+- .travis.yml: packaging/docker/check\_login.sh can cause the build to fail [\#6655](https://github.com/netdata/netdata/pull/6655) ([knatsakis](https://github.com/knatsakis))
+- Display uptime for processes [\#6654](https://github.com/netdata/netdata/pull/6654) ([vlvkobal](https://github.com/vlvkobal))
+- Prometheus version update [\#6652](https://github.com/netdata/netdata/pull/6652) ([prhomhyse](https://github.com/prhomhyse))
+- Accept \<\> around links in markdown [\#6646](https://github.com/netdata/netdata/pull/6646) ([cakrit](https://github.com/cakrit))
+- netdata/installer: Fix error running kickstart as a non-privileged user [\#6642](https://github.com/netdata/netdata/pull/6642) ([paulkatsoulakis](https://github.com/paulkatsoulakis))
+- fix spigotmc plugin bugs [\#6635](https://github.com/netdata/netdata/pull/6635) ([Cat7373](https://github.com/Cat7373))
+- Fix 'None' in left nav [\#6632](https://github.com/netdata/netdata/pull/6632) ([joelhans](https://github.com/joelhans))
+- Update terms of use for U.S. legal reasons [\#6631](https://github.com/netdata/netdata/pull/6631) ([cakrit](https://github.com/cakrit))
+- Fix a segmentation fault in backends [\#6627](https://github.com/netdata/netdata/pull/6627) ([vlvkobal](https://github.com/vlvkobal))
+- .travis.yml: Remove 'sudo: true' as it is now deprecated [\#6624](https://github.com/netdata/netdata/pull/6624) ([knatsakis](https://github.com/knatsakis))
+- Change "netdata" to "Netdata" in all docs [\#6621](https://github.com/netdata/netdata/pull/6621) ([joelhans](https://github.com/joelhans))
+- netdata: Add cosmix as codeowner, wherever cakrit is [\#6618](https://github.com/netdata/netdata/pull/6618) ([paulkatsoulakis](https://github.com/paulkatsoulakis))
+- mysql: do not remove `slave\_status` from queries in module [\#6617](https://github.com/netdata/netdata/pull/6617) ([ilyam8](https://github.com/ilyam8))
+- Add alarm variables to the response of chart and data \(\#6054\) [\#6615](https://github.com/netdata/netdata/pull/6615) ([alpes214](https://github.com/alpes214))
+- Better system OS detection for RHEL6 and Mac OS X [\#6612](https://github.com/netdata/netdata/pull/6612) ([dex4er](https://github.com/dex4er))
+- update package version requirements for LZ4 and libuv [\#6607](https://github.com/netdata/netdata/pull/6607) ([mfundul](https://github.com/mfundul))
+- netdata/installer: do not enable netdata, if it was previously disabled [\#6606](https://github.com/netdata/netdata/pull/6606) ([paulkatsoulakis](https://github.com/paulkatsoulakis))
+- Fix typos in: 'README.md' file. [\#6604](https://github.com/netdata/netdata/pull/6604) ([coffeina](https://github.com/coffeina))
+- mysql collector: slave\_status charts per replication channel [\#6597](https://github.com/netdata/netdata/pull/6597) ([ilyam8](https://github.com/ilyam8))
+- Install Netdata with Docker [\#6596](https://github.com/netdata/netdata/pull/6596) ([prhomhyse](https://github.com/prhomhyse))
+- netdata/packaging: Expect .tar.gz version of go.d plugin [\#6590](https://github.com/netdata/netdata/pull/6590) ([paulkatsoulakis](https://github.com/paulkatsoulakis))
+- Fix configure.ac allowing deprecated LZ4 library call [\#6589](https://github.com/netdata/netdata/pull/6589) ([mfundul](https://github.com/mfundul))
+- Fix crash in malloc [\#6583](https://github.com/netdata/netdata/pull/6583) ([thiagoftsm](https://github.com/thiagoftsm))
+- Build DEB and RPM packages in parallel [\#6579](https://github.com/netdata/netdata/pull/6579) ([knatsakis](https://github.com/knatsakis))
+- Fixed typo 'follwing' -\> 'following' [\#6575](https://github.com/netdata/netdata/pull/6575) ([vvanouytsel](https://github.com/vvanouytsel))
+- netdata/packaging: Bare OS validations [\#6574](https://github.com/netdata/netdata/pull/6574) ([paulkatsoulakis](https://github.com/paulkatsoulakis))
+- vsphere collector: charts description and alarms [\#6572](https://github.com/netdata/netdata/pull/6572) ([ilyam8](https://github.com/ilyam8))
+- Documentation style guide & build instructions [\#6563](https://github.com/netdata/netdata/pull/6563) ([joelhans](https://github.com/joelhans))
+- Add alarm status counter api call [\#6554](https://github.com/netdata/netdata/pull/6554) ([alpes214](https://github.com/alpes214))
+- netdata/packaging: Documentation on distribution support matrix and functionality availability [\#6552](https://github.com/netdata/netdata/pull/6552) ([paulkatsoulakis](https://github.com/paulkatsoulakis))
+- mongodb: ssl connection [\#6546](https://github.com/netdata/netdata/pull/6546) ([ilyam8](https://github.com/ilyam8))
+- Add MongoDB backend [\#6524](https://github.com/netdata/netdata/pull/6524) ([vlvkobal](https://github.com/vlvkobal))
+- Netdata Cloud documentation [\#6476](https://github.com/netdata/netdata/pull/6476) ([joelhans](https://github.com/joelhans))
+- Variable Granularity support for data collection [\#6430](https://github.com/netdata/netdata/pull/6430) ([mfundul](https://github.com/mfundul))
+- \(re-open\) ZRAM info collector module \(proc.plugin\) [\#6424](https://github.com/netdata/netdata/pull/6424) ([RaZeR-RBI](https://github.com/RaZeR-RBI))
+- Update plugin\_nfacct.c [\#6098](https://github.com/netdata/netdata/pull/6098) ([fun04wr0ng](https://github.com/fun04wr0ng))
+
## [v1.16.1](https://github.com/netdata/netdata/tree/v1.16.1) (2019-07-31)
**Fixed bugs:**
-- /etc/netdata/.environment gets updated constantly with auto-updates [\#6550](https://github.com/netdata/netdata/issues/6550)
-- pluginsd python.d slowly eating more and more CPU due to smartd collection [\#6532](https://github.com/netdata/netdata/issues/6532)
-- Special characters in configuration files can break the UI [\#6531](https://github.com/netdata/netdata/issues/6531)
-- Plugin httpcheck heavily changes the WebUI [\#6530](https://github.com/netdata/netdata/issues/6530)
-- Valid let's encrypt certifcate considerated as invalid by streaming client [\#6529](https://github.com/netdata/netdata/issues/6529)
-- Can't start netdata in Amazon Linux [\#6522](https://github.com/netdata/netdata/issues/6522)
-- Missing file when install the netdata [\#6519](https://github.com/netdata/netdata/issues/6519)
-- netdata tengine invalid response length [\#6490](https://github.com/netdata/netdata/issues/6490)
-- Snappy library is not detected correctly in all Linux distributions [\#6478](https://github.com/netdata/netdata/issues/6478)
-- python sensors collector: sensors chips filtering doesnt work [\#6462](https://github.com/netdata/netdata/issues/6462)
-- Streaming not working with ^SSL option in bind to = [\#6457](https://github.com/netdata/netdata/issues/6457)
-- fix CRC error handling in dbengine [\#6451](https://github.com/netdata/netdata/issues/6451)
-- python.d ERROR: unbound\[local\] : \[Errno 57\] Socket is not connected FreeBSD [\#6434](https://github.com/netdata/netdata/issues/6434)
-- Issue with rethinkdbs plugin [\#6429](https://github.com/netdata/netdata/issues/6429)
-- Double free or corruption \(again\) [\#6412](https://github.com/netdata/netdata/issues/6412)
-- rpm version should be 1.16.0 not v1.16.0 \(extra "v"\) [\#6409](https://github.com/netdata/netdata/issues/6409)
-- Netdata does not detect pkg-config under automated install [\#6405](https://github.com/netdata/netdata/issues/6405)
-- No LVM disk space usage on CentOS [\#6401](https://github.com/netdata/netdata/issues/6401)
-- Cannot see charts in an imported snapshot [\#6384](https://github.com/netdata/netdata/issues/6384)
-- netdata does not send notifications for alarms which fail with \(errno 12, Out of memory\) [\#6335](https://github.com/netdata/netdata/issues/6335)
-- netdata/packaging: Fine tune documentation regarding package dependencies per distribution [\#6300](https://github.com/netdata/netdata/issues/6300)
-- netdata/web: layout getting messed up on certain dimensions when resizing the window [\#6269](https://github.com/netdata/netdata/issues/6269)
-- charts.d kills process twice [\#6190](https://github.com/netdata/netdata/issues/6190)
-- MacOS Path Issues [\#6165](https://github.com/netdata/netdata/issues/6165)
-- Memory leak in power supply module [\#6132](https://github.com/netdata/netdata/issues/6132)
-- Question for the Netdata-Dashboard [\#6037](https://github.com/netdata/netdata/issues/6037)
-- Fatal errors sometimes fail to halt the netdata daemon [\#5896](https://github.com/netdata/netdata/issues/5896)
-- my-netdata menu dynamic sizing [\#5812](https://github.com/netdata/netdata/issues/5812)
-- \[Security\] Docker socket exported writable; better use docker socket proxy [\#5680](https://github.com/netdata/netdata/issues/5680)
-- Cant see user name when run netdata in docker [\#5585](https://github.com/netdata/netdata/issues/5585)
-- No netdata-updater.sh in cron.daily after installing with kickstart-static64.sh [\#4122](https://github.com/netdata/netdata/issues/4122)
-- If trailing slash is not included dashboard.js fails to load for slave dashboard for master [\#3820](https://github.com/netdata/netdata/issues/3820)
-- Alarm "system.softnet\_stat" is very strict. [\#1076](https://github.com/netdata/netdata/issues/1076)
- netdata/packaging: Move tarball checksum information into lib dir of netdata [\#6555](https://github.com/netdata/netdata/pull/6555) ([paulkatsoulakis](https://github.com/paulkatsoulakis))
- netdata/packaging: Adopt netdata-updater to run properly for static64 installations. [\#6520](https://github.com/netdata/netdata/pull/6520) ([paulkatsoulakis](https://github.com/paulkatsoulakis))
- netdata/packaging: Do not deliver edit-config as part of the distribution tarball [\#6507](https://github.com/netdata/netdata/pull/6507) ([paulkatsoulakis](https://github.com/paulkatsoulakis))
- Stop anonymous stats from writing log in /tmp [\#6491](https://github.com/netdata/netdata/pull/6491) ([cakrit](https://github.com/cakrit))
- netdata/packaging: Fix RPM packaging workflow issues, plus draft changes for .DEB packaging [\#6415](https://github.com/netdata/netdata/pull/6415) ([paulkatsoulakis](https://github.com/paulkatsoulakis))
-**Closed issues:**
-
-- Nodes Views redirects to the agent instead of the nodes view upon login [\#6542](https://github.com/netdata/netdata/issues/6542)
-- adding node.js for building web server [\#6521](https://github.com/netdata/netdata/issues/6521)
-- Support for ScaleIO/VxFlexOS v3 [\#6475](https://github.com/netdata/netdata/issues/6475)
-- Disable HTML email for alarms [\#6458](https://github.com/netdata/netdata/issues/6458)
-- netdata/packaging: Make go.d plugin an independent module [\#6367](https://github.com/netdata/netdata/issues/6367)
-- Global option for enabling charts with zero metrics [\#6315](https://github.com/netdata/netdata/issues/6315)
-- Netdata variable granularity support in netdata daemon - with basic unit testing [\#6255](https://github.com/netdata/netdata/issues/6255)
-- Netdata variable granularity support in dbengine [\#6254](https://github.com/netdata/netdata/issues/6254)
-- Alarms on system boot [\#6114](https://github.com/netdata/netdata/issues/6114)
-- \[Binary releases\] Create a script that will containerise the DEB build process [\#5968](https://github.com/netdata/netdata/issues/5968)
-- health\_alarm\_notify Being Overwritten [\#5669](https://github.com/netdata/netdata/issues/5669)
-- HTML sanitizer for dashboard\_info.js [\#5652](https://github.com/netdata/netdata/issues/5652)
-- badge do not suppport other language [\#3117](https://github.com/netdata/netdata/issues/3117)
-- Feature Request: Linux zram device statistics. [\#2578](https://github.com/netdata/netdata/issues/2578)
-- Provide an include configuration mechanism [\#2360](https://github.com/netdata/netdata/issues/2360)
-- Authentication support [\#70](https://github.com/netdata/netdata/issues/70)
-
**Merged pull requests:**
- Handle disconnected sockets in unbound collector. [\#6561](https://github.com/netdata/netdata/pull/6561) ([Ferroin](https://github.com/Ferroin))
@@ -125,67 +143,6 @@
**Fixed bugs:**
-- Double free or corruption \(fasttop\) [\#6370](https://github.com/netdata/netdata/issues/6370)
-- Netdata not ignoring /dev and /run by default [\#6361](https://github.com/netdata/netdata/issues/6361)
-- \[critical\] netdata segfault when restart service [\#6356](https://github.com/netdata/netdata/issues/6356)
-- Backend as-collected values from statsd are 1000 time to high [\#6347](https://github.com/netdata/netdata/issues/6347)
-- Tomcat collector break on invalid XML caused by single quotes in Memory Pool names \(code solution inside\) [\#6343](https://github.com/netdata/netdata/issues/6343)
-- adaptec\_raid: failed to parse `arcconf GETCONFIG 1 LD` output [\#6337](https://github.com/netdata/netdata/issues/6337)
-- Cannot reinstall netdata [\#6329](https://github.com/netdata/netdata/issues/6329)
-- collectors/perf.plugin/perf\_plugin.c:171: error: 'PERF\_COUNT\_HW\_REF\_CPU\_CYCLES' undeclared here \(not in a function\) [\#6321](https://github.com/netdata/netdata/issues/6321)
-- Never able to sign in [\#6306](https://github.com/netdata/netdata/issues/6306)
-- /dev/fd/63: line 113: : command not found when trying to update [\#6289](https://github.com/netdata/netdata/issues/6289)
-- Redirect cannot overwrite netdata. [\#6288](https://github.com/netdata/netdata/issues/6288)
-- Netdata lateral menu hidden [\#6287](https://github.com/netdata/netdata/issues/6287)
-- How to remove/unregister a streaming node to prevent the health alarms from triggering [\#6266](https://github.com/netdata/netdata/issues/6266)
-- netdata/daemon: Service down with multiple "too many open files" occurring on DB engine [\#6265](https://github.com/netdata/netdata/issues/6265)
-- RFE: Minor change to the spec.in file in order to help with building rpms on RHEL [\#6256](https://github.com/netdata/netdata/issues/6256)
-- addgroup: gid '999' in use [\#6253](https://github.com/netdata/netdata/issues/6253)
-- Starting netdata official container fails with `addgroup: gid '999' in use` [\#6251](https://github.com/netdata/netdata/issues/6251)
-- Elasticsearch plugin error - 'module' object has no attribute 'Retry' [\#6248](https://github.com/netdata/netdata/issues/6248)
-- "Missing charts" on v1.15.0-70-nightly \(\*\*solved\*\*\) [\#6244](https://github.com/netdata/netdata/issues/6244)
-- netdata daemon collectors stuck [\#6239](https://github.com/netdata/netdata/issues/6239)
-- URL Parser changes, correct health cmd api conditions to create silencer [\#6238](https://github.com/netdata/netdata/issues/6238)
-- Error during installation in devuan [\#6230](https://github.com/netdata/netdata/issues/6230)
-- kickstart.sh: Getting Nightly SHA256 sums from storage.googleapis.com times out [\#6227](https://github.com/netdata/netdata/issues/6227)
-- Use major/minor from sys/types.h [\#6218](https://github.com/netdata/netdata/issues/6218)
-- Update fails because of new system dependencies \(\*\*Resolved\*\*\) [\#6200](https://github.com/netdata/netdata/issues/6200)
-- netdata/database: db engine crashing under certain conditions [\#6199](https://github.com/netdata/netdata/issues/6199)
-- Update frequency for x509check alarm [\#6193](https://github.com/netdata/netdata/issues/6193)
-- kickstart.sh: Getting Nightly SHA256 sums from storage.googleapis.com times out [\#6191](https://github.com/netdata/netdata/issues/6191)
-- tv.html errors on https [\#6188](https://github.com/netdata/netdata/issues/6188)
-- Error messages on old database files [\#6186](https://github.com/netdata/netdata/issues/6186)
-- netdata/packaging: Sporadic job failures in Travis CI [\#6185](https://github.com/netdata/netdata/issues/6185)
-- Fix date in pushbullet alarm notifications [\#6178](https://github.com/netdata/netdata/issues/6178)
-- Chart's "name" not streamed [\#6177](https://github.com/netdata/netdata/issues/6177)
-- Why is auto-update not working? V1.14.0 [\#6170](https://github.com/netdata/netdata/issues/6170)
-- \[solved\] ZFS database \(was: netdata FATAL : MAIN :uv\_fs\_fsopen: invalid argument \# : Invalid argument\) [\#6161](https://github.com/netdata/netdata/issues/6161)
-- httpcheck does not export some metrics in version 1.15 [\#6157](https://github.com/netdata/netdata/issues/6157)
-- opensuse - installation by hand issues due to hardcoded libexec in netdata-installer.sh [\#6153](https://github.com/netdata/netdata/issues/6153)
-- httpcheck causes lots of SYN\_SENT dangling sockets [\#6152](https://github.com/netdata/netdata/issues/6152)
-- varnish plugin not showing \(varnish version 4\) [\#6149](https://github.com/netdata/netdata/issues/6149)
-- Reduce number of codacy issues [\#6131](https://github.com/netdata/netdata/issues/6131)
-- Optimize calls that gather system-info [\#6130](https://github.com/netdata/netdata/issues/6130)
-- Fix telemetry config in installer [\#6129](https://github.com/netdata/netdata/issues/6129)
-- web\_log reports unmatched lines [\#6125](https://github.com/netdata/netdata/issues/6125)
-- Netdata 1.15 crashes on Ubuntu 16.04 [\#6117](https://github.com/netdata/netdata/issues/6117)
-- netdata doesn't start with the new dbengine [\#6110](https://github.com/netdata/netdata/issues/6110)
-- "mdstat.mdX\_disks" chart's total is misleading [\#6108](https://github.com/netdata/netdata/issues/6108)
-- Telemetry rarely received from docker containers [\#6095](https://github.com/netdata/netdata/issues/6095)
-- netdata/packaging/installer: Fine tuning based on user feedback and findings from testing [\#6094](https://github.com/netdata/netdata/issues/6094)
-- opensuse - installation by hand issues due to hardcoded libexec in netdata-installer.sh [\#6092](https://github.com/netdata/netdata/issues/6092)
-- Problem updating [\#6088](https://github.com/netdata/netdata/issues/6088)
-- invalid help links on platform \(web log nginx/apache\) [\#6086](https://github.com/netdata/netdata/issues/6086)
-- install on debian jessie [\#6083](https://github.com/netdata/netdata/issues/6083)
-- error "cgroup-network-helper.sh: DEBUG: virsh command is not available" with the lastest docker image [\#6073](https://github.com/netdata/netdata/issues/6073)
-- Kickstart script verification md5sum is out of date, verification fails [\#6049](https://github.com/netdata/netdata/issues/6049)
-- AWS Kinesis dependency [\#6002](https://github.com/netdata/netdata/issues/6002)
-- HTTP requests are classified as AF\_UNIX [\#5987](https://github.com/netdata/netdata/issues/5987)
-- swapio chart is missing on CentOS 7 [\#5845](https://github.com/netdata/netdata/issues/5845)
-- Netdata behind caddy reverse proxy wont login. [\#5794](https://github.com/netdata/netdata/issues/5794)
-- netdata/packaging/installer: error when running the kickstart and also when uninstalling it with uninstaller [\#5745](https://github.com/netdata/netdata/issues/5745)
-- cgroups name resolution doesn't work \(on Raspbian\) [\#5314](https://github.com/netdata/netdata/issues/5314)
-- Old Monit metrics still remains in Netdata even after Netdata restarted [\#5074](https://github.com/netdata/netdata/issues/5074)
- netdata/packaging/docker: Fix docker documentation and a fix to avoid failures [\#6344](https://github.com/netdata/netdata/pull/6344) ([paulkatsoulakis](https://github.com/paulkatsoulakis))
- Fix devuan support for initd [\#6275](https://github.com/netdata/netdata/pull/6275) ([paulkatsoulakis](https://github.com/paulkatsoulakis))
- netdata/packaging/docker: Fix docker socket utilization, first pass [\#6233](https://github.com/netdata/netdata/pull/6233) ([paulkatsoulakis](https://github.com/paulkatsoulakis))
@@ -196,48 +153,6 @@
- netdata/packaging/installer: nits and fixes [\#6121](https://github.com/netdata/netdata/pull/6121) ([paulkatsoulakis](https://github.com/paulkatsoulakis))
- netdata: Fix labels usage, quotes are not needed [\#6091](https://github.com/netdata/netdata/pull/6091) ([paulkatsoulakis](https://github.com/paulkatsoulakis))
-**Closed issues:**
-
-- \[QUESTION\] Show Docker Container without ID [\#6358](https://github.com/netdata/netdata/issues/6358)
-- Allow user to configure the maximum number of File Descriptors for the netdata service [\#6313](https://github.com/netdata/netdata/issues/6313)
-- XMPP/Jabber notification support [\#6292](https://github.com/netdata/netdata/issues/6292)
-- collector/freeipmi - option to remove the ID added to the label [\#6284](https://github.com/netdata/netdata/issues/6284)
-- FreeIPMI - option to remove the ID added to the label [\#6283](https://github.com/netdata/netdata/issues/6283)
-- Scope of 1.16-rc2 [\#6243](https://github.com/netdata/netdata/issues/6243)
-- Documentation improvements [\#6214](https://github.com/netdata/netdata/issues/6214)
-- monitor dnsmasq dhcp server leases [\#6206](https://github.com/netdata/netdata/issues/6206)
-- Pihole stats modules [\#6204](https://github.com/netdata/netdata/issues/6204)
-- Scope of 1.16-rc1 [\#6159](https://github.com/netdata/netdata/issues/6159)
-- Netdata variable granularity Research [\#6148](https://github.com/netdata/netdata/issues/6148)
-- SN\_EMPTY\_SLOT RRDDIMM value detection inside DB engine [\#6105](https://github.com/netdata/netdata/issues/6105)
-- force page alignment per dimension of chart [\#6104](https://github.com/netdata/netdata/issues/6104)
-- When the collector restarts after having stopped a long time ago fill the chart gaps efficiently [\#6103](https://github.com/netdata/netdata/issues/6103)
-- Introduce cross-distro release testing on our CI [\#6102](https://github.com/netdata/netdata/issues/6102)
-- Optimize the memory footprint of the Database Engine [\#6010](https://github.com/netdata/netdata/issues/6010)
-- phpdaemon monitoring [\#6006](https://github.com/netdata/netdata/issues/6006)
-- Secure streaming via SSL [\#6004](https://github.com/netdata/netdata/issues/6004)
-- \[Binary releases\] Optimise netdata.spec file [\#5969](https://github.com/netdata/netdata/issues/5969)
-- \[Binary releases\] Create a script to containerise the RPM build process [\#5967](https://github.com/netdata/netdata/issues/5967)
-- Clearer communiation of telemetry [\#5863](https://github.com/netdata/netdata/issues/5863)
-- alarm-notify.sh should respect the cloud base url setting [\#5791](https://github.com/netdata/netdata/issues/5791)
-- Design k8s collector endpoint autodiscovery [\#5729](https://github.com/netdata/netdata/issues/5729)
-- notify package maintainers of the new netdata releases [\#5682](https://github.com/netdata/netdata/issues/5682)
-- \[preparation\] VMware Vsphere monitoring [\#5635](https://github.com/netdata/netdata/issues/5635)
-- Feature: add VictoriaMetrics backend for long-term archiving [\#5619](https://github.com/netdata/netdata/issues/5619)
-- Update docs for obsolete Python modules [\#5339](https://github.com/netdata/netdata/issues/5339)
-- Hide: timestamps, memory values left, background and use graph as a gif [\#5186](https://github.com/netdata/netdata/issues/5186)
-- Cookie consent for the Netdata sites [\#4798](https://github.com/netdata/netdata/issues/4798)
-- netdata startup order on boot \(systemd\) [\#4266](https://github.com/netdata/netdata/issues/4266)
-- RFC: registry v2 [\#3990](https://github.com/netdata/netdata/issues/3990)
-- mail notifications wiki points to a non-existing file [\#3433](https://github.com/netdata/netdata/issues/3433)
-- Simple way to disable alerts [\#3414](https://github.com/netdata/netdata/issues/3414)
-- CPU performance monitoring [\#3232](https://github.com/netdata/netdata/issues/3232)
-- \[RFE\] send notifications repeatedly until the alert is resolved [\#2956](https://github.com/netdata/netdata/issues/2956)
-- allow netdata to know the plugin that collects each chart [\#2692](https://github.com/netdata/netdata/issues/2692)
-- riak-rv support [\#2413](https://github.com/netdata/netdata/issues/2413)
-- alarms to monitor the number of processes in a system [\#2239](https://github.com/netdata/netdata/issues/2239)
-- Support OpenTSDB back-end via HTTP\(S\) API [\#1642](https://github.com/netdata/netdata/issues/1642)
-
**Merged pull requests:**
- Ignore /dev and /run space/inode usage [\#6399](https://github.com/netdata/netdata/pull/6399) ([vlvkobal](https://github.com/vlvkobal))
@@ -275,7 +190,7 @@
- Add note regarding libexecdir [\#6301](https://github.com/netdata/netdata/pull/6301) ([cakrit](https://github.com/cakrit))
- stale bot limits update [\#6297](https://github.com/netdata/netdata/pull/6297) ([ilyam8](https://github.com/ilyam8))
- \[freeipmi\] Remove id in sensor name when already unique [\#6296](https://github.com/netdata/netdata/pull/6296) ([Saruspete](https://github.com/Saruspete))
-- Web [\#6294](https://github.com/netdata/netdata/pull/6294) ([thiagoftsm](https://github.com/thiagoftsm))
+- Agent Frontend footer hidden [\#6294](https://github.com/netdata/netdata/pull/6294) ([thiagoftsm](https://github.com/thiagoftsm))
- doc: remove single/multi-threaded web server configuration [\#6291](https://github.com/netdata/netdata/pull/6291) ([nodiscc](https://github.com/nodiscc))
- Add a riak plugin [\#6286](https://github.com/netdata/netdata/pull/6286) ([jchristgit](https://github.com/jchristgit))
- netdata/packaging: Separate beta messages from production messages [\#6282](https://github.com/netdata/netdata/pull/6282) ([paulkatsoulakis](https://github.com/paulkatsoulakis))
@@ -361,44 +276,6 @@
**Fixed bugs:**
-- Fix rrdengineapi compiler warning [\#6075](https://github.com/netdata/netdata/issues/6075)
-- New dbengine still creates directories for individual charts in the cache directory. [\#6067](https://github.com/netdata/netdata/issues/6067)
-- v1.15.0 fails to build due to missing CFLAGS [\#6066](https://github.com/netdata/netdata/issues/6066)
-- netdata/dbengine: constant restarts on octopuscs [\#6053](https://github.com/netdata/netdata/issues/6053)
-- Nodes are inconsistently unreachable [\#6051](https://github.com/netdata/netdata/issues/6051)
-- BUG when compiling code in mac OS [\#6043](https://github.com/netdata/netdata/issues/6043)
-- Unexpected change to metrics sent to backends with new dbengine [\#6039](https://github.com/netdata/netdata/issues/6039)
-- packages installer failed [\#6027](https://github.com/netdata/netdata/issues/6027)
-- netdata/packaging: Fix installer failure on missing packages [\#6026](https://github.com/netdata/netdata/issues/6026)
-- Prowl notifications ignored by alarm-notify.sh [\#6021](https://github.com/netdata/netdata/issues/6021)
-- Using with Docker: got error after update [\#6018](https://github.com/netdata/netdata/issues/6018)
-- Segfault on NetData v1.14.0-51-g18336910 [\#6013](https://github.com/netdata/netdata/issues/6013)
-- Slack does not send to channel [\#6003](https://github.com/netdata/netdata/issues/6003)
-- api/v1/chart and api/v1/data calls don't return chart variables names, values [\#5990](https://github.com/netdata/netdata/issues/5990)
-- elasticsearch throws exception and kills whole python.d [\#5978](https://github.com/netdata/netdata/issues/5978)
-- System info doesn't show OS name and version in Mac OS X [\#5950](https://github.com/netdata/netdata/issues/5950)
-- nvidia\_smi wrong power draw numbers [\#5939](https://github.com/netdata/netdata/issues/5939)
-- Coverity Scan issues on get\_system\_info [\#5937](https://github.com/netdata/netdata/issues/5937)
-- `timeout` requires flag [\#5935](https://github.com/netdata/netdata/issues/5935)
-- docker image is missing nut package [\#5934](https://github.com/netdata/netdata/issues/5934)
-- mongodb error exception on check: ‘locks’ [\#5929](https://github.com/netdata/netdata/issues/5929)
-- v1.14.0 is released but status is draft [\#5921](https://github.com/netdata/netdata/issues/5921)
-- Weird WD values [\#5919](https://github.com/netdata/netdata/issues/5919)
-- Alert badge not showing after reverse proxy [\#5900](https://github.com/netdata/netdata/issues/5900)
-- Docker image has plugins not executable for netdata user [\#5890](https://github.com/netdata/netdata/issues/5890)
-- cgroup-network-helper.sh issues in our Docker images [\#5880](https://github.com/netdata/netdata/issues/5880)
-- Fix dependency installation for Fedora 30 [\#5831](https://github.com/netdata/netdata/issues/5831)
-- netdata/packaging: Fix pipeline so that it detects possible updater issues [\#5786](https://github.com/netdata/netdata/issues/5786)
-- netdata/packaging/installer: User management during install/uninstall broken for macOS [\#5737](https://github.com/netdata/netdata/issues/5737)
-- netdata/packaging/ci: Investigate pipeline failure [\#5733](https://github.com/netdata/netdata/issues/5733)
-- netdata-updater should remain after all installations [\#5673](https://github.com/netdata/netdata/issues/5673)
-- \[Question\] Are python-pymongo and python-yaml needed for a barebones install of netdata? [\#5632](https://github.com/netdata/netdata/issues/5632)
-- python SocketService: lack of connect timeout, python.d.plugin hangs [\#5541](https://github.com/netdata/netdata/issues/5541)
-- installer shows wrong message on CentOS [\#5474](https://github.com/netdata/netdata/issues/5474)
-- Misleading information on memory consumption [\#5203](https://github.com/netdata/netdata/issues/5203)
-- File not found by glob when building an rpm \(latest code - 20181218\) [\#5033](https://github.com/netdata/netdata/issues/5033)
-- Issue creating deb package from v1.11.1 [\#4979](https://github.com/netdata/netdata/issues/4979)
-- netdata service fails to start - pfsense [\#3469](https://github.com/netdata/netdata/issues/3469)
- netdata/packaging: Fix kickstart/kickstart-static64 formatting bugs [\#6048](https://github.com/netdata/netdata/pull/6048) ([paulkatsoulakis](https://github.com/paulkatsoulakis))
- netdata/packaging: install/uninstall fixes for macOS case [\#6045](https://github.com/netdata/netdata/pull/6045) ([paulkatsoulakis](https://github.com/paulkatsoulakis))
- netdata/packaging: Adjust auto-updater installation logic [\#6035](https://github.com/netdata/netdata/pull/6035) ([paulkatsoulakis](https://github.com/paulkatsoulakis))
@@ -406,38 +283,6 @@
- Fix segmentation fault [\#6011](https://github.com/netdata/netdata/pull/6011) ([vlvkobal](https://github.com/vlvkobal))
- netdata/packaging: Fix failing CI nightly process [\#6007](https://github.com/netdata/netdata/pull/6007) ([paulkatsoulakis](https://github.com/paulkatsoulakis))
-**Closed issues:**
-
-- \[feature\] collect Windows metrics from wmi\_exporter [\#6001](https://github.com/netdata/netdata/issues/6001)
-- \[Binary releases\] Establish RPM repository servers and software [\#5966](https://github.com/netdata/netdata/issues/5966)
-- \[Binary releases\] Establish Debian/Ubuntu repository servers and software [\#5965](https://github.com/netdata/netdata/issues/5965)
-- Add collectors list and system information to /api/v1/info [\#5888](https://github.com/netdata/netdata/issues/5888)
-- Database engine internal stress testing [\#5879](https://github.com/netdata/netdata/issues/5879)
-- Database engine testing by the netdata team [\#5878](https://github.com/netdata/netdata/issues/5878)
-- research: docker swarm [\#5877](https://github.com/netdata/netdata/issues/5877)
-- research: ScaleIO [\#5876](https://github.com/netdata/netdata/issues/5876)
-- Implement CPU limits for cgroup v2 [\#5850](https://github.com/netdata/netdata/issues/5850)
-- feature: kubelet collector improvements \(leftovers from \#5720\) [\#5824](https://github.com/netdata/netdata/issues/5824)
-- fresh install of Ubuntu 18.04.1 and netdata: no "read" showing up in Disk [\#5817](https://github.com/netdata/netdata/issues/5817)
-- netdata/packaging: \[SPIKE\] Investigate CPack/CMake status, clarify and estimate the available approaches [\#5775](https://github.com/netdata/netdata/issues/5775)
-- "Healthy" entrypoint for HTTP-interface [\#5764](https://github.com/netdata/netdata/issues/5764)
-- netdata/packaging/ci: Create and document a manual nightly deployment procedure [\#5762](https://github.com/netdata/netdata/issues/5762)
-- feature: collect docker swarm metrics [\#5710](https://github.com/netdata/netdata/issues/5710)
-- snmp-trap receiver [\#5597](https://github.com/netdata/netdata/issues/5597)
-- AWS Kinesis as backend collector [\#5596](https://github.com/netdata/netdata/issues/5596)
-- Travis CI should upload docker images for PRs to docker hub [\#5583](https://github.com/netdata/netdata/issues/5583)
-- Custom dashboard - Just displaying a number [\#5578](https://github.com/netdata/netdata/issues/5578)
-- Prototype for netdata internal database v2.0 [\#5303](https://github.com/netdata/netdata/issues/5303)
-- Tengine monitoring [\#4886](https://github.com/netdata/netdata/issues/4886)
-- ScaleIO monitoring [\#4825](https://github.com/netdata/netdata/issues/4825)
-- Monitor disk access latency \(e.g. ioping\) [\#4660](https://github.com/netdata/netdata/issues/4660)
-- provide binary repos for main distros [\#2728](https://github.com/netdata/netdata/issues/2728)
-- RPM Packaging Discussion [\#1306](https://github.com/netdata/netdata/issues/1306)
-- Setup PPA for Debian systems [\#1302](https://github.com/netdata/netdata/issues/1302)
-- Binary release [\#87](https://github.com/netdata/netdata/issues/87)
-- Provide Ubuntu PPA [\#69](https://github.com/netdata/netdata/issues/69)
-- Build packaging for debian [\#42](https://github.com/netdata/netdata/issues/42)
-
**Merged pull requests:**
- Shorten netdata version and correctly send OS\_VERSION\_ID [\#6082](https://github.com/netdata/netdata/pull/6082) ([cakrit](https://github.com/cakrit))
@@ -499,51 +344,11 @@
**Fixed bugs:**
-- Follow up adjustment of plugin permissions [\#5858](https://github.com/netdata/netdata/issues/5858)
-- netdata/packaging: In particular cases netdata binary produces broken version string [\#5855](https://github.com/netdata/netdata/issues/5855)
-- python module autodetection\_retry doesn't work as expected [\#5832](https://github.com/netdata/netdata/issues/5832)
-- CouchDB collector exception [\#5830](https://github.com/netdata/netdata/issues/5830)
-- Segfault when using postgres plugin [\#5829](https://github.com/netdata/netdata/issues/5829)
-- macOS: python.d.plugin crashes on start [\#5821](https://github.com/netdata/netdata/issues/5821)
-- Travis: Nightlies job fails when no changes in the code [\#5815](https://github.com/netdata/netdata/issues/5815)
-- Erroneous netdata version [\#5808](https://github.com/netdata/netdata/issues/5808)
-- Kickstart-static64.sh installation problems [\#5807](https://github.com/netdata/netdata/issues/5807)
-- Netdata Installer : $NETDATA\_USER not defined ? [\#5803](https://github.com/netdata/netdata/issues/5803)
-- Typo in netdata-updater [\#5781](https://github.com/netdata/netdata/issues/5781)
-- Kickstart installer fails due to missing function [\#5768](https://github.com/netdata/netdata/issues/5768)
-- smstools 3 notifications error handling [\#5767](https://github.com/netdata/netdata/issues/5767)
-- netdata-installer.sh: sha256sum binaries do not exist in FreeBSD [\#5747](https://github.com/netdata/netdata/issues/5747)
-- netdata/packaging/build: Build getting broken sporadically at times in py2-psycopg2 package [\#5713](https://github.com/netdata/netdata/issues/5713)
-- netdata-updater broke [\#5529](https://github.com/netdata/netdata/issues/5529)
-- centos6: python.d.plugin zombie process after netdata restart [\#5491](https://github.com/netdata/netdata/issues/5491)
-- Socket proxying through unix socket doesn't work in FreeBSD 12 [\#5178](https://github.com/netdata/netdata/issues/5178)
- netdata/packaging/ci: Do not trigger deployment if certain conditions are not met [\#5816](https://github.com/netdata/netdata/pull/5816) ([paulkatsoulakis](https://github.com/paulkatsoulakis))
- netdata/packaging/installer: HoS situation - Fix broken install-or-update script [\#5806](https://github.com/netdata/netdata/pull/5806) ([paulkatsoulakis](https://github.com/paulkatsoulakis))
- netdata/packaging: Fix non compatible function declaration [\#5789](https://github.com/netdata/netdata/pull/5789) ([paulkatsoulakis](https://github.com/paulkatsoulakis))
- Make netdata.spec more futureproof [\#5766](https://github.com/netdata/netdata/pull/5766) ([nichivo](https://github.com/nichivo))
-**Closed issues:**
-
-- API Key not enabled [\#5869](https://github.com/netdata/netdata/issues/5869)
-- last\_collected\_secs health check for phpfpm [\#5820](https://github.com/netdata/netdata/issues/5820)
-- update /packaging/docker/README.md in Mac OS run build [\#5818](https://github.com/netdata/netdata/issues/5818)
-- feature: LDAPS support for openldap-collector [\#5783](https://github.com/netdata/netdata/issues/5783)
-- Add execution of updated kickstart scripts to the CI pipeline [\#5769](https://github.com/netdata/netdata/issues/5769)
-- netdata/packaging/ci: re-design travis pipeline [\#5761](https://github.com/netdata/netdata/issues/5761)
-- feature: kubelet collector improvements [\#5720](https://github.com/netdata/netdata/issues/5720)
-- How can I set the syslog facility for the netdata daemon? [\#5717](https://github.com/netdata/netdata/issues/5717)
-- netdata/packaging: Master issue - overall building pipelines cleanup: improvements, bug fixes, etc [\#5716](https://github.com/netdata/netdata/issues/5716)
-- feature: openvpn module - use management interface [\#5699](https://github.com/netdata/netdata/issues/5699)
-- feature: postgres module add connection via URI [\#5684](https://github.com/netdata/netdata/issues/5684)
-- Status of netdata/binary-packages [\#5671](https://github.com/netdata/netdata/issues/5671)
-- Full integration with netdata of the new database implementation [\#5641](https://github.com/netdata/netdata/issues/5641)
-- Ensure that `jq` is available in netdata installations [\#5621](https://github.com/netdata/netdata/issues/5621)
-- Can I check monthly or daily reports or history [\#5287](https://github.com/netdata/netdata/issues/5287)
-- SSL support for Elasticsearch plugin [\#5179](https://github.com/netdata/netdata/issues/5179)
-- support filtering disk naming by id [\#4957](https://github.com/netdata/netdata/issues/4957)
-- CoreDNS monitoring [\#4861](https://github.com/netdata/netdata/issues/4861)
-- Netdata's cgroup tracking does not work with version 2 cgroups. [\#3750](https://github.com/netdata/netdata/issues/3750)
-
**Merged pull requests:**
- Pipeline redesign master [\#5885](https://github.com/netdata/netdata/pull/5885) ([paulkatsoulakis](https://github.com/paulkatsoulakis))
@@ -589,40 +394,6 @@
## [v1.14.0-rc0](https://github.com/netdata/netdata/tree/v1.14.0-rc0) (2019-03-30)
-**Fixed bugs:**
-
-- NetData no longer installs after recent xenstat commit [\#5726](https://github.com/netdata/netdata/issues/5726)
-- Errant netdata-updater logs on root "/" directory [\#5679](https://github.com/netdata/netdata/issues/5679)
-- remove obsolete python modules [\#5647](https://github.com/netdata/netdata/issues/5647)
-- Reinstalling with kickstart.sh fails [\#5584](https://github.com/netdata/netdata/issues/5584)
-- Uninstaller fixes and instructions [\#5290](https://github.com/netdata/netdata/issues/5290)
-- Installer problem with config files under 'orig' symlink [\#5039](https://github.com/netdata/netdata/issues/5039)
-
-**Closed issues:**
-
-- netdata/packaging/installer: Finish up netdata installer refactoring [\#5705](https://github.com/netdata/netdata/issues/5705)
-- Feature: Netdata alarms as POST events to a configurable endpoint in health\_alarm\_notify.conf [\#5702](https://github.com/netdata/netdata/issues/5702)
-- feature cgroups plugin: make containers priority user configurable [\#5697](https://github.com/netdata/netdata/issues/5697)
-- Basic kubeproxy collector [\#5683](https://github.com/netdata/netdata/issues/5683)
-- push kickstart.sh to https://my-netdata.io/kickstart.sh [\#5681](https://github.com/netdata/netdata/issues/5681)
-- Slack footer link breaks when the URL is too long [\#5654](https://github.com/netdata/netdata/issues/5654)
-- netdata build error [\#5649](https://github.com/netdata/netdata/issues/5649)
-- Scalable page cache, metrics index and space management [\#5640](https://github.com/netdata/netdata/issues/5640)
-- Basic kubelet collector [\#5639](https://github.com/netdata/netdata/issues/5639)
-- Kubernetes helmchart improvements [\#5637](https://github.com/netdata/netdata/issues/5637)
-- Isn't it a security risk not having SSL mode for postgres plugin? [\#5629](https://github.com/netdata/netdata/issues/5629)
-- feature: collect docker-engine metrics [\#5622](https://github.com/netdata/netdata/issues/5622)
-- nightlies.sh should stop on error [\#5580](https://github.com/netdata/netdata/issues/5580)
-- /etc/cron.daily/netdata-updater -f $'\E\(B\E\[0m\\n' /dev/fd/63: line 86: : command not found [\#5570](https://github.com/netdata/netdata/issues/5570)
-- NetData on XCP-ng dom0 CentOS [\#5357](https://github.com/netdata/netdata/issues/5357)
-- Kubelet monitoring [\#4859](https://github.com/netdata/netdata/issues/4859)
-- Docker Hub monitoring [\#4843](https://github.com/netdata/netdata/issues/4843)
-- fping 4.1 released [\#4227](https://github.com/netdata/netdata/issues/4227)
-- support gauges without a needle [\#4206](https://github.com/netdata/netdata/issues/4206)
-- RFC: a new design for alert system [\#3963](https://github.com/netdata/netdata/issues/3963)
-- notifications with smstools \(direct connection to mobile phone\) [\#2720](https://github.com/netdata/netdata/issues/2720)
-- remove charts that are no longer updated after some time [\#688](https://github.com/netdata/netdata/issues/688)
-
**Merged pull requests:**
- netdata/packaging/ci: small improvements in logging [\#5746](https://github.com/netdata/netdata/pull/5746) ([paulkatsoulakis](https://github.com/paulkatsoulakis))
@@ -667,43 +438,6 @@
## [v1.13.0](https://github.com/netdata/netdata/tree/v1.13.0) (2019-03-14)
-**Fixed bugs:**
-
-- ssl\_check memory leak [\#5624](https://github.com/netdata/netdata/issues/5624)
-- postgresql: invalid connection option "autodetection\_retry" [\#5615](https://github.com/netdata/netdata/issues/5615)
-- Proxmox containers no longer being renamed [\#5606](https://github.com/netdata/netdata/issues/5606)
-- spigotmc py2: load source error: 'module' object has no attribute 'A' [\#5595](https://github.com/netdata/netdata/issues/5595)
-- swap chart should use stack mode instead of area mode [\#5567](https://github.com/netdata/netdata/issues/5567)
-- streaming master is incorrectly detected \(home icon of streamed hosts\) [\#5560](https://github.com/netdata/netdata/issues/5560)
-- netdata segfault since 1.12.1 [\#5553](https://github.com/netdata/netdata/issues/5553)
-- Alert if process isn't alive [\#5547](https://github.com/netdata/netdata/issues/5547)
-- sha256sum: go.d.plugin-v0.1.0.linux-arm64: No such file or directory [\#5538](https://github.com/netdata/netdata/issues/5538)
-- RESOLVED - NS\_ERROR\_FILE\_CORRUPTED: [\#5535](https://github.com/netdata/netdata/issues/5535)
-- Reliability of Prometheus exposed metrics names [\#5527](https://github.com/netdata/netdata/issues/5527)
-- Unexpected host seen in list of hosts [\#5518](https://github.com/netdata/netdata/issues/5518)
-- apps\_plugin plots zeroes for empty groups [\#5514](https://github.com/netdata/netdata/issues/5514)
-- Docker container resources missing unless a full netdata restart is executed [\#5512](https://github.com/netdata/netdata/issues/5512)
-- Problem loading module sensors [\#5490](https://github.com/netdata/netdata/issues/5490)
-- Need more graceful handling of duplicate machine GUIDs [\#5488](https://github.com/netdata/netdata/issues/5488)
-- Used ram for container shows incorrectly [\#5477](https://github.com/netdata/netdata/issues/5477)
-- SpigotMC Chart won't show [\#4131](https://github.com/netdata/netdata/issues/4131)
-- Memory leak in python.d.plugin [\#3817](https://github.com/netdata/netdata/issues/3817)
-
-**Closed issues:**
-
-- ssl check module [\#5631](https://github.com/netdata/netdata/issues/5631)
-- Support connecting to mysql via SSL [\#5608](https://github.com/netdata/netdata/issues/5608)
-- Document how to install netdata on kubernetes [\#5526](https://github.com/netdata/netdata/issues/5526)
-- decrease python.d.plugin memory usage [\#5525](https://github.com/netdata/netdata/issues/5525)
-- Per User statistics from apps.plugin disappear / stopped working [\#5517](https://github.com/netdata/netdata/issues/5517)
-- google is blocked. [\#5410](https://github.com/netdata/netdata/issues/5410)
-- Kubernetes pod/containers monitoring [\#5387](https://github.com/netdata/netdata/issues/5387)
-- Configuration translator [\#5144](https://github.com/netdata/netdata/issues/5144)
-- Move to CMake [\#4610](https://github.com/netdata/netdata/issues/4610)
-- Modularize installer script [\#4609](https://github.com/netdata/netdata/issues/4609)
-- Normalize data to use standard units [\#4380](https://github.com/netdata/netdata/issues/4380)
-- RFC: Remove node and bash plugins [\#4036](https://github.com/netdata/netdata/issues/4036)
-
**Merged pull requests:**
- installer: include go.d.plugin version v0.2.0 [\#5638](https://github.com/netdata/netdata/pull/5638) ([paulfantom](https://github.com/paulfantom))
@@ -743,25 +477,10 @@
- Prevent data corruption upon GUID duplication between master and slave netdata instances [\#5511](https://github.com/netdata/netdata/pull/5511) ([paulkatsoulakis](https://github.com/paulkatsoulakis))
- Convert SpigotMC module to use regexes for parsing. [\#5507](https://github.com/netdata/netdata/pull/5507) ([Ferroin](https://github.com/Ferroin))
- Add zero minimum in linux power supply module [\#5395](https://github.com/netdata/netdata/pull/5395) ([vlvkobal](https://github.com/vlvkobal))
-- sslcheck module: \(remote\) SSL certificate expiry time check [\#5365](https://github.com/netdata/netdata/pull/5365) ([p-thurner](https://github.com/p-thurner))
+- sslcheck module: \(remote\) SSL certificate expiry time check [\#5365](https://github.com/netdata/netdata/pull/5365) ([ghost](https://github.com/ghost))
## [v1.12.2](https://github.com/netdata/netdata/tree/v1.12.2) (2019-02-28)
-**Fixed bugs:**
-
-- Installer at https://my-netdata.io/kickstart.sh isn't updated to master branch? [\#5492](https://github.com/netdata/netdata/issues/5492)
-- Verbose curl output causes unwanted emails from netdata-updater cronjob [\#5484](https://github.com/netdata/netdata/issues/5484)
-- Invalid URL [\#5479](https://github.com/netdata/netdata/issues/5479)
-- \[FIX\] RocketChat notifications not working [\#5470](https://github.com/netdata/netdata/issues/5470)
-- go.d.plugin installation fails due to insufficient timeout [\#5467](https://github.com/netdata/netdata/issues/5467)
-- netdata segfault when restarting the service [\#5366](https://github.com/netdata/netdata/issues/5366)
-
-**Closed issues:**
-
-- tc.plugin: use high-precision bash sleep as loadable [\#5161](https://github.com/netdata/netdata/issues/5161)
-- Unexpected data exposure by default value of "\[registry\].registry to announce" [\#2760](https://github.com/netdata/netdata/issues/2760)
-- Oracle monitoring [\#1996](https://github.com/netdata/netdata/issues/1996)
-
**Merged pull requests:**
- Show streamed servers even for users that are not signed in! \#5482 [\#5519](https://github.com/netdata/netdata/pull/5519) ([gmosx](https://github.com/gmosx))
@@ -784,40 +503,8 @@
**Fixed bugs:**
-- \#5457 broke `go.d.plugin` installation. [\#5459](https://github.com/netdata/netdata/issues/5459)
-- wrong netdata version in response header [\#5438](https://github.com/netdata/netdata/issues/5438)
-- Readme - News section: shows 1.11.1 as the latest release [\#5436](https://github.com/netdata/netdata/issues/5436)
-- Under python3 the LogService.\_get\_raw\_data fails on undecodable data [\#5430](https://github.com/netdata/netdata/issues/5430)
-- apcupsd chart not updating when on battery [\#5428](https://github.com/netdata/netdata/issues/5428)
-- /usr/bin/ld: cannot find -lcupsimage [\#5426](https://github.com/netdata/netdata/issues/5426)
-- ERROR 405: Cannot download charts index from server [\#5418](https://github.com/netdata/netdata/issues/5418)
-- Installer/Updater download via curl redirect [\#5411](https://github.com/netdata/netdata/issues/5411)
-- Cannot update netdata [\#5405](https://github.com/netdata/netdata/issues/5405)
-- sensors plugin only plots sensors that have hard-coded limits [\#5402](https://github.com/netdata/netdata/issues/5402)
-- kickstart-static64.sh should be looking in GCS for the binary package [\#5390](https://github.com/netdata/netdata/issues/5390)
-- Docker container networking [\#5383](https://github.com/netdata/netdata/issues/5383)
-- Race condition in UI may result in streamed hosts not showing when not signed in [\#5370](https://github.com/netdata/netdata/issues/5370)
-- clock\_gettime\(7, &timespec\) failed. \(errno 22, Invalid argument\) [\#5367](https://github.com/netdata/netdata/issues/5367)
-- Missing build deps for automake and autoconf from the spec file [\#5362](https://github.com/netdata/netdata/issues/5362)
-- wget command that downloads go.d plugin creates lots of output in headless install [\#5356](https://github.com/netdata/netdata/issues/5356)
-- Update checker false positive [\#5352](https://github.com/netdata/netdata/issues/5352)
-- cups.plugin fails to compile [\#5324](https://github.com/netdata/netdata/issues/5324)
-- Codacy is unconfigured [\#5320](https://github.com/netdata/netdata/issues/5320)
-- netdata-installer.sh fails to detect netdata is restarted [\#5304](https://github.com/netdata/netdata/issues/5304)
-- bcache writeback\_rate unknown units [\#4719](https://github.com/netdata/netdata/issues/4719)
-- Split NFACCT handling to a dedicated helper program. [\#3749](https://github.com/netdata/netdata/issues/3749)
- Don't send host/port in to anonymous stats in fatal of STREAM\_RECEIVER [\#5378](https://github.com/netdata/netdata/pull/5378) ([cakrit](https://github.com/cakrit))
-**Closed issues:**
-
-- Alert alarm flapping [\#5346](https://github.com/netdata/netdata/issues/5346)
-- Localization of HTML Docs [\#5197](https://github.com/netdata/netdata/issues/5197)
-- move node.js module named to go.d [\#5048](https://github.com/netdata/netdata/issues/5048)
-- Fluentd monitoring [\#4847](https://github.com/netdata/netdata/issues/4847)
-- collect CPU, memory and disk limits from cgroups and provide alarms using thresholds as percentages [\#2401](https://github.com/netdata/netdata/issues/2401)
-- Feature request: ability to process detailed `ipcs -q` output [\#1890](https://github.com/netdata/netdata/issues/1890)
-- Add multi-language support \(such as Chinese\) [\#219](https://github.com/netdata/netdata/issues/219)
-
**Merged pull requests:**
- Fix timer crash during exit [\#5464](https://github.com/netdata/netdata/pull/5464) ([mfundul](https://github.com/mfundul))
@@ -874,45 +561,6 @@
## [v1.12.0](https://github.com/netdata/netdata/tree/v1.12.0) (2019-02-06)
-**Fixed bugs:**
-
-- Slack alert displaying URL after manual update of netdata [\#5301](https://github.com/netdata/netdata/issues/5301)
-- Netdata update in a /tmp hardened system [\#5289](https://github.com/netdata/netdata/issues/5289)
-- Certificate error while running netdata kickstart script [\#5273](https://github.com/netdata/netdata/issues/5273)
-- Netdata won't update anymore [\#5272](https://github.com/netdata/netdata/issues/5272)
-- alarm-notify.sh not working with latest update of netdata [\#5261](https://github.com/netdata/netdata/issues/5261)
-- /etc/netdata/edit-config charts.d.conf [\#5252](https://github.com/netdata/netdata/issues/5252)
-- Cannot install netdata from source \(the source directory does not include netdata-installer.sh\) [\#5251](https://github.com/netdata/netdata/issues/5251)
-- Non-interactive install fails if required packages are already present [\#5240](https://github.com/netdata/netdata/issues/5240)
-- apps.plugin memory usage bug [\#5237](https://github.com/netdata/netdata/issues/5237)
-- Automatic updates \(via CRON\) giving error [\#5229](https://github.com/netdata/netdata/issues/5229)
-- Updater script no longer seems to be working after a recent update [\#5228](https://github.com/netdata/netdata/issues/5228)
-- Cron Update fails \(again\) [\#5208](https://github.com/netdata/netdata/issues/5208)
-- Is netdata installation hacked? [\#5207](https://github.com/netdata/netdata/issues/5207)
-- Wrong version string in GUI [\#5204](https://github.com/netdata/netdata/issues/5204)
-- GUI links to github wiki [\#5202](https://github.com/netdata/netdata/issues/5202)
-- Version checker shouldn't compare commits [\#5201](https://github.com/netdata/netdata/issues/5201)
-- python.d/dockerd plugin update error [\#5200](https://github.com/netdata/netdata/issues/5200)
-- Netdata registry with basic auth \(behind nginx proxy\) results in error 409 [\#5180](https://github.com/netdata/netdata/issues/5180)
-- alarm-notify.sh: WARNING: Cannot find file [\#5136](https://github.com/netdata/netdata/issues/5136)
-- zfs charts appear, even when they are zero [\#4115](https://github.com/netdata/netdata/issues/4115)
-- Ceph - No JSON object could be decoded [\#3563](https://github.com/netdata/netdata/issues/3563)
-
-**Closed issues:**
-
-- integrate go-orchestrator into go.d.plugin [\#5308](https://github.com/netdata/netdata/issues/5308)
-- Update not working or UI just showing wrong information? How to uninstall? [\#5285](https://github.com/netdata/netdata/issues/5285)
-- Slack Notifications Ignored by alarm-notify.sh [\#5267](https://github.com/netdata/netdata/issues/5267)
-- varnish plugin doesn't support custom varnishd working directory [\#5262](https://github.com/netdata/netdata/issues/5262)
-- Developing a new plugin questions [\#5235](https://github.com/netdata/netdata/issues/5235)
-- Split go.d plugin into two packages [\#5195](https://github.com/netdata/netdata/issues/5195)
-- move python module nvidia\_smi to go.d [\#5190](https://github.com/netdata/netdata/issues/5190)
-- Logstash monitoring [\#5147](https://github.com/netdata/netdata/issues/5147)
-- integrate go.d into netdata [\#5006](https://github.com/netdata/netdata/issues/5006)
-- new database format design [\#4687](https://github.com/netdata/netdata/issues/4687)
-- \[REQUEST\] Prowl integration for iOS users? [\#3788](https://github.com/netdata/netdata/issues/3788)
-- CUPS information [\#857](https://github.com/netdata/netdata/issues/857)
-
**Merged pull requests:**
- Fix Codacy issues for FreeBSD plugin [\#5334](https://github.com/netdata/netdata/pull/5334) ([vlvkobal](https://github.com/vlvkobal))
@@ -968,38 +616,8 @@
**Fixed bugs:**
-- megacli isn't included in python.d.conf [\#5191](https://github.com/netdata/netdata/issues/5191)
-- Unix Domain Socket no longer working. Permission denied [\#5181](https://github.com/netdata/netdata/issues/5181)
-- netdata-updater.sh doesn't have exec perms [\#5175](https://github.com/netdata/netdata/issues/5175)
-- FireQoS name not showing due to recent change [\#5171](https://github.com/netdata/netdata/issues/5171)
-- python go\_expvar: reuse same expvar key in different charts [\#5133](https://github.com/netdata/netdata/issues/5133)
-- hddtemp.chart.py is hardcoded to only use /dev/sdX [\#5129](https://github.com/netdata/netdata/issues/5129)
-- RabbitMQ Plugin wrong metrics for nodes in cluster [\#5118](https://github.com/netdata/netdata/issues/5118)
-- cannot install netdata [\#5117](https://github.com/netdata/netdata/issues/5117)
-- Anomalous \(big\) values on graphite/carbon [\#5104](https://github.com/netdata/netdata/issues/5104)
-- \[Bug\] Stale metrics being exported to prometheus [\#5064](https://github.com/netdata/netdata/issues/5064)
-- Uninstaller script should be self-contained [\#5031](https://github.com/netdata/netdata/issues/5031)
-- Netdata doesn't properly lookup docker container name when running in ECS with task level cpu/memory limits enabled [\#4981](https://github.com/netdata/netdata/issues/4981)
-- Dashboard TV white page [\#4710](https://github.com/netdata/netdata/issues/4710)
-- Review of system.ram plugin: treat Slab memory as Cached \(PR 3288\) [\#3929](https://github.com/netdata/netdata/issues/3929)
- Fix for unix sockets after addition of port ACLs [\#5184](https://github.com/netdata/netdata/pull/5184) ([cakrit](https://github.com/cakrit))
-**Closed issues:**
-
-- Remove support for multi-threaded and single-threaded web servers [\#5154](https://github.com/netdata/netdata/issues/5154)
-- Use GCS instead of git for updating netdata [\#5110](https://github.com/netdata/netdata/issues/5110)
-- error.log: IPv6 not properly shown in error messages [\#5067](https://github.com/netdata/netdata/issues/5067)
-- Introduce Polymorphic Linux in the Docker Image [\#5034](https://github.com/netdata/netdata/issues/5034)
-- Allow netdata to listen to multiple ports [\#5017](https://github.com/netdata/netdata/issues/5017)
-- SNMP section not visible [\#4021](https://github.com/netdata/netdata/issues/4021)
-- allow different ports for streaming reception and API requests [\#3830](https://github.com/netdata/netdata/issues/3830)
-- Consul monitoring service health checks [\#3674](https://github.com/netdata/netdata/issues/3674)
-- maintenance time and silence time [\#3187](https://github.com/netdata/netdata/issues/3187)
-- Suppressing alerts programmatically [\#2673](https://github.com/netdata/netdata/issues/2673)
-- include chart values in alarm info text [\#2351](https://github.com/netdata/netdata/issues/2351)
-- allow streamed data to be received on dedicated port [\#2149](https://github.com/netdata/netdata/issues/2149)
-- alarm notifications should state a count of active alarms per state [\#946](https://github.com/netdata/netdata/issues/946)
-
**Merged pull requests:**
- update bug\_report.md [\#5205](https://github.com/netdata/netdata/pull/5205) ([ilyam8](https://github.com/ilyam8))
@@ -1033,37 +651,6 @@
## [v1.12.0-rc2](https://github.com/netdata/netdata/tree/v1.12.0-rc2) (2019-01-03)
-**Fixed bugs:**
-
-- smartd\_log: check\(\) unhandled exception: list index out of range [\#5079](https://github.com/netdata/netdata/issues/5079)
-- Additional character in Counter64 hex string [\#5028](https://github.com/netdata/netdata/issues/5028)
-- Error every second PLUGIN\[proc\] [\#4994](https://github.com/netdata/netdata/issues/4994)
-- Inconsistency in netdata.spec.in when comparing logdir permission with git-installation [\#4963](https://github.com/netdata/netdata/issues/4963)
-- Docker-compose: a lot of errors; Connection refused, Can't establish connection to MySQL... [\#4956](https://github.com/netdata/netdata/issues/4956)
-- Log flooding with new proc plugin [\#4945](https://github.com/netdata/netdata/issues/4945)
-- Free memory shows as 'inactive' in FreeBSD [\#4737](https://github.com/netdata/netdata/issues/4737)
-- Should use IEC-compliant abbreviations, e.g. KiB, MiB, etc. [\#4711](https://github.com/netdata/netdata/issues/4711)
-- FreeBSD: \(apps.cpu\) does not show a specific program [\#4037](https://github.com/netdata/netdata/issues/4037)
-- Apcupsd: data collection continues after connection loss, but it should stop [\#3927](https://github.com/netdata/netdata/issues/3927)
-- FreeBSD: apps.plugin reports spikes and apps.cpu less user CPU [\#3245](https://github.com/netdata/netdata/issues/3245)
-
-**Closed issues:**
-
-- disable respect `Retry-After` response header in python UrlService by default [\#5078](https://github.com/netdata/netdata/issues/5078)
-- move freeradius module to go.d [\#5063](https://github.com/netdata/netdata/issues/5063)
-- move python module dns\_query\_time to go.d [\#5047](https://github.com/netdata/netdata/issues/5047)
-- move python module web\_log to go.d [\#5046](https://github.com/netdata/netdata/issues/5046)
-- R&D: Collectors landscape page [\#5045](https://github.com/netdata/netdata/issues/5045)
-- Copy updater script instead of linking it [\#4924](https://github.com/netdata/netdata/issues/4924)
-- ActiveMQ monitoring [\#4818](https://github.com/netdata/netdata/issues/4818)
-- Move packaging related code into `packaging/` directory [\#4611](https://github.com/netdata/netdata/issues/4611)
-- Simplify makeself [\#4527](https://github.com/netdata/netdata/issues/4527)
-- new netdata logo [\#4476](https://github.com/netdata/netdata/issues/4476)
-- Add info on disabling alarms for specific target - part 2 [\#4324](https://github.com/netdata/netdata/issues/4324)
-- Add info on disabling alarms for specific target - part 1 [\#4323](https://github.com/netdata/netdata/issues/4323)
-- Document how to monitor log files [\#4318](https://github.com/netdata/netdata/issues/4318)
-- Solr monitoring [\#3218](https://github.com/netdata/netdata/issues/3218)
-
**Merged pull requests:**
- postgres: fix WAL query [\#5105](https://github.com/netdata/netdata/pull/5105) ([anayrat](https://github.com/anayrat))
@@ -1093,38 +680,6 @@
## [v1.12.0-rc1](https://github.com/netdata/netdata/tree/v1.12.0-rc1) (2018-12-19)
-**Fixed bugs:**
-
-- mdstat module causing netdata segv and crash [\#4990](https://github.com/netdata/netdata/issues/4990)
-- Cannot read /proc/mdstat line. Expected 7 params, read 6. [\#4975](https://github.com/netdata/netdata/issues/4975)
-- custom notification method does not work [\#4968](https://github.com/netdata/netdata/issues/4968)
-- Info logging command in netdata-updater.sh contains command substitution. [\#4950](https://github.com/netdata/netdata/issues/4950)
-- No data in charts [\#4920](https://github.com/netdata/netdata/issues/4920)
-- Postgres module: detect server version and use the right query [\#4910](https://github.com/netdata/netdata/issues/4910)
-- Uninstaller script is always interactive [\#4791](https://github.com/netdata/netdata/issues/4791)
-- Cannot update & cannot disable mail logging of events [\#4557](https://github.com/netdata/netdata/issues/4557)
-- web\_log plugin cannot handle high load traffic [\#4354](https://github.com/netdata/netdata/issues/4354)
-- \[bug\] some metrics don't report to /allmetrics endpoint with prometheus format [\#3866](https://github.com/netdata/netdata/issues/3866)
-
-**Closed issues:**
-
-- move python module portcheck to go.d [\#5005](https://github.com/netdata/netdata/issues/5005)
-- move python module httpcheck to go.d [\#5004](https://github.com/netdata/netdata/issues/5004)
-- move python module lighttpd to go.d [\#5003](https://github.com/netdata/netdata/issues/5003)
-- move python module rabbitmq to go.d [\#5002](https://github.com/netdata/netdata/issues/5002)
-- move python module nginx to go.d [\#5001](https://github.com/netdata/netdata/issues/5001)
-- move python module apache to go.d [\#5000](https://github.com/netdata/netdata/issues/5000)
-- Pass cloud\_base\_url from netdata.conf to web/gui [\#4980](https://github.com/netdata/netdata/issues/4980)
-- Improve configuration documentation [\#4781](https://github.com/netdata/netdata/issues/4781)
-- Python.d.plugin infinite retries, ignore penalty, and plotting 'None' [\#4756](https://github.com/netdata/netdata/issues/4756)
-- move `/proc` and `/sys` python modules to `proc` plugin [\#4541](https://github.com/netdata/netdata/issues/4541)
-- mdstat RAID0 support [\#4010](https://github.com/netdata/netdata/issues/4010)
-- FreeIPMI Plugin can't graph the wattage [\#3977](https://github.com/netdata/netdata/issues/3977)
-- web\_log: charts per URL [\#3111](https://github.com/netdata/netdata/issues/3111)
-- FQDN in alert sending [\#2477](https://github.com/netdata/netdata/issues/2477)
-- on frontend, if JavaScript is disabled, there's no graceful degradation [\#2422](https://github.com/netdata/netdata/issues/2422)
-- netdata dead but pid file exists [\#2266](https://github.com/netdata/netdata/issues/2266)
-
**Merged pull requests:**
- Non-interactive uninstaller [\#5021](https://github.com/netdata/netdata/pull/5021) ([paulfantom](https://github.com/paulfantom))
@@ -1175,60 +730,6 @@
## [v1.12.0-rc0](https://github.com/netdata/netdata/tree/v1.12.0-rc0) (2018-12-06)
-**Fixed bugs:**
-
-- nvidia\_smi module bug [\#4892](https://github.com/netdata/netdata/issues/4892)
-- No alarms are running in some systems [\#4809](https://github.com/netdata/netdata/issues/4809)
-- netdata-updater.sh cron report [\#4808](https://github.com/netdata/netdata/issues/4808)
-- Netdata is not generating any alarms [\#4793](https://github.com/netdata/netdata/issues/4793)
-- Fail2ban: Read "Restore Ban" for persistent bans [\#4769](https://github.com/netdata/netdata/issues/4769)
-- Change in Incoming Webhooks Slack API breaks alerts [\#4755](https://github.com/netdata/netdata/issues/4755)
-- registry items are clickable, but no action is taken [\#4721](https://github.com/netdata/netdata/issues/4721)
-- Enabled default alarms are disabled after restarting the netdata service [\#4636](https://github.com/netdata/netdata/issues/4636)
-- Spec file doesn't generate configure script before build [\#4570](https://github.com/netdata/netdata/issues/4570)
-- sensors.chart.py ignores fans running at 0 RPM when netdata was started [\#4158](https://github.com/netdata/netdata/issues/4158)
-- Postgres plugin lock output incorrect [\#4090](https://github.com/netdata/netdata/issues/4090)
-- python plugins got behind by 5 seconds [\#3752](https://github.com/netdata/netdata/issues/3752)
-- Constant stream of "chart took too long to be updated" INFO messages in error.log [\#3505](https://github.com/netdata/netdata/issues/3505)
-- SNMP 64bit Counter Issue - Far from correct bandwidth values [\#3488](https://github.com/netdata/netdata/issues/3488)
-- Update health reference documentation [\#3468](https://github.com/netdata/netdata/issues/3468)
-- Alarm badge link escaping for disk paths in default dashboard [\#3253](https://github.com/netdata/netdata/issues/3253)
-
-**Closed issues:**
-
-- Docker netdata documentation [\#4899](https://github.com/netdata/netdata/issues/4899)
-- Tiny Proxy monitoring [\#4834](https://github.com/netdata/netdata/issues/4834)
-- Phusion Passenger monitoring [\#4833](https://github.com/netdata/netdata/issues/4833)
-- IIS monitoring [\#4832](https://github.com/netdata/netdata/issues/4832)
-- ScaleIO monitoring [\#4828](https://github.com/netdata/netdata/issues/4828)
-- LeoFS monitoring [\#4826](https://github.com/netdata/netdata/issues/4826)
-- Jumpy data when running on kubernetes [\#4778](https://github.com/netdata/netdata/issues/4778)
-- Create documentation on how to opt-out of anonymous data collection [\#4746](https://github.com/netdata/netdata/issues/4746)
-- Use `--future-release` in changelog generation [\#4718](https://github.com/netdata/netdata/issues/4718)
-- requirements.txt in TLD are not related to netdata [\#4693](https://github.com/netdata/netdata/issues/4693)
-- What is The Right Role for Netdata MongoDB Python Plugins? [\#4666](https://github.com/netdata/netdata/issues/4666)
-- Store nightly build artifacts somewhere [\#4628](https://github.com/netdata/netdata/issues/4628)
-- Remove old packaging scripts [\#4608](https://github.com/netdata/netdata/issues/4608)
-- Use the new logo in web/gui [\#4598](https://github.com/netdata/netdata/issues/4598)
-- Labelling bot [\#4528](https://github.com/netdata/netdata/issues/4528)
-- Extract registry functionality from dashboard.js [\#4474](https://github.com/netdata/netdata/issues/4474)
-- HTML Documentation [\#4439](https://github.com/netdata/netdata/issues/4439)
-- New documentation structure [\#4321](https://github.com/netdata/netdata/issues/4321)
-- Add instructions to debug alarm notifications [\#4319](https://github.com/netdata/netdata/issues/4319)
-- Fix file classification in LGTM [\#4259](https://github.com/netdata/netdata/issues/4259)
-- Add CONTRIBUTING.md [\#4146](https://github.com/netdata/netdata/issues/4146)
-- Send alerts via Slack to a single user \(direct message\)? [\#3722](https://github.com/netdata/netdata/issues/3722)
-- golang orchestrator [\#3589](https://github.com/netdata/netdata/issues/3589)
-- \[web api\] Add /api/v1/version [\#3540](https://github.com/netdata/netdata/issues/3540)
-- Feature: UKSM support [\#2994](https://github.com/netdata/netdata/issues/2994)
-- web\_log reports unmatched lines [\#2295](https://github.com/netdata/netdata/issues/2295)
-- Ceph support [\#1673](https://github.com/netdata/netdata/issues/1673)
-- Misaligned option points of REST API v1 endpoint data [\#1628](https://github.com/netdata/netdata/issues/1628)
-- Adding support for time markers [\#1195](https://github.com/netdata/netdata/issues/1195)
-- Scheduled “downtime” for a type of check? [\#1133](https://github.com/netdata/netdata/issues/1133)
-- split snmp.conf into several conf files possible? [\#1126](https://github.com/netdata/netdata/issues/1126)
-- sensu/collectd integration [\#174](https://github.com/netdata/netdata/issues/174)
-
**Merged pull requests:**
- run shfmt on CI scripts [\#4928](https://github.com/netdata/netdata/pull/4928) ([paulfantom](https://github.com/paulfantom))
@@ -1299,63 +800,6 @@
## [v1.11.1](https://github.com/netdata/netdata/tree/v1.11.1) (2018-11-22)
-**Fixed bugs:**
-
-- Sensors module of python plugin not working \(again?\) [\#4692](https://github.com/netdata/netdata/issues/4692)
-- Ubuntu 18.04 apt package is still on v1.9.0, though apt is the recommended installation method [\#4675](https://github.com/netdata/netdata/issues/4675)
-- pre-built static binary install script does not detect SLES as systemd OS [\#4641](https://github.com/netdata/netdata/issues/4641)
-- Sensors don't work [\#4602](https://github.com/netdata/netdata/issues/4602)
-- smartd\_log check\(\) unhandled exception: 'list' object has no attribute 'clear' [\#4583](https://github.com/netdata/netdata/issues/4583)
-- 1m\_received\_traffic\_overflow alarm is faulty on 10G or 40G network interfaces [\#4577](https://github.com/netdata/netdata/issues/4577)
-- 1.11 release reports as 1.10.0\_rolling [\#4572](https://github.com/netdata/netdata/issues/4572)
-- update netdata error [\#4560](https://github.com/netdata/netdata/issues/4560)
-- edit-config uses vi, even if it isn't the system editor [\#4549](https://github.com/netdata/netdata/issues/4549)
-- inbound packets dropped [\#4536](https://github.com/netdata/netdata/issues/4536)
-- incremental chart algorithm doesn't handle counter wrap properly [\#4533](https://github.com/netdata/netdata/issues/4533)
-- Disk full \(inodes\) due to netdata [\#4518](https://github.com/netdata/netdata/issues/4518)
-- Systemd not working on Ubuntu 14.04 [\#4465](https://github.com/netdata/netdata/issues/4465)
-- Links on the wiki are returning 404s [\#4408](https://github.com/netdata/netdata/issues/4408)
-- It figures [\#4184](https://github.com/netdata/netdata/issues/4184)
-- netdata stream clients disconnecting from netdata server [\#4049](https://github.com/netdata/netdata/issues/4049)
-- False positive alarm for RAM [\#4013](https://github.com/netdata/netdata/issues/4013)
-- Occasional rm "cannot remove" on netdata-updater [\#3457](https://github.com/netdata/netdata/issues/3457)
-- opensuse - installation by hand issues due to hardcoded libexec in netdata-installer.sh [\#3346](https://github.com/netdata/netdata/issues/3346)
-- Netdata installation failed in Manjaro \(Arch\) latest [\#2812](https://github.com/netdata/netdata/issues/2812)
-- undefined applications show up in system category? [\#2385](https://github.com/netdata/netdata/issues/2385)
-- memory mode map initialization slow when database is too big [\#2382](https://github.com/netdata/netdata/issues/2382)
-- Long hostnames cause alignment issues in my-netdata [\#2335](https://github.com/netdata/netdata/issues/2335)
-- don't get snmp running properly [\#1734](https://github.com/netdata/netdata/issues/1734)
-- Plugins continue to log to old error.log after a SIGHUP [\#805](https://github.com/netdata/netdata/issues/805)
-
-**Closed issues:**
-
-- Improve footer of web/gui [\#4708](https://github.com/netdata/netdata/issues/4708)
-- Ignores EMAIL\_SENDER [\#4695](https://github.com/netdata/netdata/issues/4695)
-- Add option to do pre-releases in GitHub [\#4684](https://github.com/netdata/netdata/issues/4684)
-- Invalid links in \*.md files [\#4672](https://github.com/netdata/netdata/issues/4672)
-- Replace all wiki links with repo links in netdata files [\#4650](https://github.com/netdata/netdata/issues/4650)
-- Replace http URLs with https in markdown files [\#4626](https://github.com/netdata/netdata/issues/4626)
-- Extract JS and CSS from index.html [\#4586](https://github.com/netdata/netdata/issues/4586)
-- Improved management of netdata urls in the `my-netdata` menu [\#4582](https://github.com/netdata/netdata/issues/4582)
-- Ignore web/gui/src in LGTM and Codacy checks. [\#4516](https://github.com/netdata/netdata/issues/4516)
-- Remove excessive requestAnimationFrame\(\) compatibility checks [\#4501](https://github.com/netdata/netdata/issues/4501)
-- Remove obsolete chart renderers [\#4492](https://github.com/netdata/netdata/issues/4492)
-- Split dashboard.js into multiple files [\#4479](https://github.com/netdata/netdata/issues/4479)
-- Hdd temperature monitoring on FreeBSD [\#4463](https://github.com/netdata/netdata/issues/4463)
-- Modernize dashboard.js [\#4461](https://github.com/netdata/netdata/issues/4461)
-- Documentation links sanity checker [\#4416](https://github.com/netdata/netdata/issues/4416)
-- Write a blog entry about monitoring and performance tuning mysql with netdata [\#4326](https://github.com/netdata/netdata/issues/4326)
-- Document supported python versions [\#4322](https://github.com/netdata/netdata/issues/4322)
-- Add coverity scans to Travis [\#4248](https://github.com/netdata/netdata/issues/4248)
-- Lint all shell scripts [\#4166](https://github.com/netdata/netdata/issues/4166)
-- Include tests in CI pipeline [\#4133](https://github.com/netdata/netdata/issues/4133)
-- Runfile installation doesn't fix earlier incorrect netdata init script [\#4009](https://github.com/netdata/netdata/issues/4009)
-- http://IP:19999/lib/bootstrap-3.3.7.min.js [\#3908](https://github.com/netdata/netdata/issues/3908)
-- Netdata - Spring boot plugin [\#2074](https://github.com/netdata/netdata/issues/2074)
-- support standard deviation in reduce functions [\#808](https://github.com/netdata/netdata/issues/808)
-- web server optimization [\#532](https://github.com/netdata/netdata/issues/532)
-- Containers: running plugins in different namespaces to allow netdata collect application metrics from containers [\#474](https://github.com/netdata/netdata/issues/474)
-
**Merged pull requests:**
- Cleanup of web/gui footer [\#4709](https://github.com/netdata/netdata/pull/4709) ([gmosx](https://github.com/gmosx))
@@ -1398,121 +842,8 @@
**Fixed bugs:**
-- Cannot use oidname in snmp config [\#4512](https://github.com/netdata/netdata/issues/4512)
-- config.status: error: cannot find input file: `web/api/badges/Makefile.in' [\#4502](https://github.com/netdata/netdata/issues/4502)
-- Diskspace plugin accesses excluded filesystem and stalls netdata process [\#4491](https://github.com/netdata/netdata/issues/4491)
-- netdata allocates 170MB memory after startup \(without the database\) [\#4487](https://github.com/netdata/netdata/issues/4487)
-- Logcheck security alert: netdata : command not allowed ; TTY=unknown ; PWD=/etc/netdata ; USER=root ; COMMAND=validate [\#4473](https://github.com/netdata/netdata/issues/4473)
-- duplicate name in cgroup if dash present in container name [\#4468](https://github.com/netdata/netdata/issues/4468)
-- Wrong logos in infographic [\#4455](https://github.com/netdata/netdata/issues/4455)
-- Netdata in Docker cannot load stock config \(permission denied\) [\#4453](https://github.com/netdata/netdata/issues/4453)
-- Icecast module not working [\#4432](https://github.com/netdata/netdata/issues/4432)
-- Installer does not detect systemd on Ubuntu 14.04 [\#4421](https://github.com/netdata/netdata/issues/4421)
-- netdata.spec seems to reference missing files [\#4409](https://github.com/netdata/netdata/issues/4409)
-- mongodb.chart.py does not check pymongo version [\#4407](https://github.com/netdata/netdata/issues/4407)
-- node.d.plugin issue after modularizing plugins commit [\#4406](https://github.com/netdata/netdata/issues/4406)
-- netdata: CONFIG: cannot load user config ‘/etc/netdata/stream.conf’. Will try stock config. [\#4403](https://github.com/netdata/netdata/issues/4403)
-- netdata \(20181015\) compiles fine but 'make dist' aborts [\#4400](https://github.com/netdata/netdata/issues/4400)
-- netdata does not compile on FreeBSD 11.2-RELEASE-p4 [\#4393](https://github.com/netdata/netdata/issues/4393)
-- API documentation cannot be read [\#4371](https://github.com/netdata/netdata/issues/4371)
-- Error message: Cannot open file stream.conf [\#4341](https://github.com/netdata/netdata/issues/4341)
-- MegaCli Plugin fails to parse [\#4278](https://github.com/netdata/netdata/issues/4278)
-- Apps plugin: wrong open\_sockets counter when fd type changes [\#4233](https://github.com/netdata/netdata/issues/4233)
-- Logind bug [\#4230](https://github.com/netdata/netdata/issues/4230)
-- Should netdata identify the js binary as NodeJS by default? [\#4217](https://github.com/netdata/netdata/issues/4217)
-- Cannot load jQuery: ERROR 101 [\#4212](https://github.com/netdata/netdata/issues/4212)
-- redis.chart.py stops with error "check\(\) unhandled exception: 'rdb\_bgsave\_in\_progress'" [\#4204](https://github.com/netdata/netdata/issues/4204)
-- Failed to start netdata.service: Exec format error [\#4169](https://github.com/netdata/netdata/issues/4169)
-- python clocks don't work under FreeBSD [\#4152](https://github.com/netdata/netdata/issues/4152)
-- error: cannot take the address of an rvalue of type 'FILE \*' when building on OpenBSD [\#4145](https://github.com/netdata/netdata/issues/4145)
-- packages installer failed [\#4119](https://github.com/netdata/netdata/issues/4119)
-- update\_every in postgres plugin [\#4089](https://github.com/netdata/netdata/issues/4089)
-- /proc/interrupts plugin memory leak [\#4051](https://github.com/netdata/netdata/issues/4051)
-- Problem with logrotate config \(PID discovery\) [\#4020](https://github.com/netdata/netdata/issues/4020)
-- \[SECURITY\] Mitigate CVE-2017-18342 [\#4012](https://github.com/netdata/netdata/issues/4012)
-- Netdata looks in ../../../../ to get its config [\#3988](https://github.com/netdata/netdata/issues/3988)
-- Statsd counters/gauges stuck on -167,772,150,000,000 [\#3978](https://github.com/netdata/netdata/issues/3978)
-- After netdata slave is rebooted, timestamp doesn't match [\#3966](https://github.com/netdata/netdata/issues/3966)
-- netdata-updater.sh fails due to missing './' on 'netdata-installer.sh' line [\#3940](https://github.com/netdata/netdata/issues/3940)
-- Problem with running any python.d plugin? [\#3854](https://github.com/netdata/netdata/issues/3854)
-- Alert email syntax problem [\#3843](https://github.com/netdata/netdata/issues/3843)
-- kickstart-static64.sh fails with sh as root [\#3840](https://github.com/netdata/netdata/issues/3840)
-- Illegal characters in URLs [\#3819](https://github.com/netdata/netdata/issues/3819)
-- Use bash loadable sleep in tc-qos-helper.sh [\#3754](https://github.com/netdata/netdata/issues/3754)
-- btrfs shows wrong disk space when the filesystem has sector size 4k but the logical disk sector size is 512B [\#3746](https://github.com/netdata/netdata/issues/3746)
-- ERROR 405 with squid and web\_logs plugin [\#3738](https://github.com/netdata/netdata/issues/3738)
-- tcp listen alarm integer expression expected [\#3733](https://github.com/netdata/netdata/issues/3733)
-- Cannot load required JS library: http://ipaddress:19999/dashboard\_info.js?v20180510-2 after update or fresh install [\#3707](https://github.com/netdata/netdata/issues/3707)
-- IPv4 UDPLite stats are always visible, even if UDPLite is not used on a system [\#3706](https://github.com/netdata/netdata/issues/3706)
-- When listening on a unix socket, web server still attempts to set TCP\_NODELAY. [\#3682](https://github.com/netdata/netdata/issues/3682)
-- httpcheck does not accept URLs that do not end with .com [\#3656](https://github.com/netdata/netdata/issues/3656)
-- httpcheck python.d plugin fails [\#3641](https://github.com/netdata/netdata/issues/3641)
-- Issue with statsd sample rate [\#3630](https://github.com/netdata/netdata/issues/3630)
-- netdata-uninstaller.sh not working \(with macOS 10.13\) [\#2941](https://github.com/netdata/netdata/issues/2941)
-- Problem with plugins in debug mode \(wrong path to cfgs\) [\#2593](https://github.com/netdata/netdata/issues/2593)
-- dashboard with thousands of charts [\#2275](https://github.com/netdata/netdata/issues/2275)
- fix docker image tagging problem [\#4250](https://github.com/netdata/netdata/pull/4250) ([paulfantom](https://github.com/paulfantom))
-**Closed issues:**
-
-- Feature request: Support for Adaptec RAID [\#4396](https://github.com/netdata/netdata/issues/4396)
-- Is there any way to disable the example chart? [\#4384](https://github.com/netdata/netdata/issues/4384)
-- modularize c source [\#4339](https://github.com/netdata/netdata/issues/4339)
-- Diff migration of Wiki updates to new documentation [\#4320](https://github.com/netdata/netdata/issues/4320)
-- Change GPL-3.0+ to GPL-3.0-or-later in all SPDX headers [\#4274](https://github.com/netdata/netdata/issues/4274)
-- How to stop some metrics to save bandwidth [\#4223](https://github.com/netdata/netdata/issues/4223)
-- UTC Timezone [\#4202](https://github.com/netdata/netdata/issues/4202)
-- stock config files should be in `/usr/lib/netdata/` [\#4182](https://github.com/netdata/netdata/issues/4182)
-- Lint python code \(PEP8 standard\) [\#4167](https://github.com/netdata/netdata/issues/4167)
-- Fail2ban'd IPv6 addresses are not processed [\#4144](https://github.com/netdata/netdata/issues/4144)
-- httpcheck support for HTTP methods \(e.g. GET, OPTIONS, HEAD, etc...\) [\#4127](https://github.com/netdata/netdata/issues/4127)
-- Naming for Diskstats for Veritas Volume Manger disks [\#4116](https://github.com/netdata/netdata/issues/4116)
-- Raise an alarm when a docker container is unhealthy [\#4111](https://github.com/netdata/netdata/issues/4111)
-- elasticsearch plugin python json exception if another service running on port 9200 [\#4092](https://github.com/netdata/netdata/issues/4092)
-- varnish 5 support [\#4073](https://github.com/netdata/netdata/issues/4073)
-- 'Other' is the Largest Category Under Applications \> Mem due to Node processes [\#4063](https://github.com/netdata/netdata/issues/4063)
-- send netdata health monitoring variables to backends [\#4035](https://github.com/netdata/netdata/issues/4035)
-- Badges - seconds units [\#4029](https://github.com/netdata/netdata/issues/4029)
-- Web\_log doesn't support response times in nanoseconds [\#4003](https://github.com/netdata/netdata/issues/4003)
-- 400 error when netdata tries to send slack notification [\#3989](https://github.com/netdata/netdata/issues/3989)
-- Disable probing device mapper [\#3974](https://github.com/netdata/netdata/issues/3974)
-- MySQL Python Plugin not work [\#3968](https://github.com/netdata/netdata/issues/3968)
-- How to enable sensor plugin? [\#3953](https://github.com/netdata/netdata/issues/3953)
-- netdata does not appear to send host tags via graphite backend [\#3936](https://github.com/netdata/netdata/issues/3936)
-- Netdata breaks suspend in debian stretch [\#3842](https://github.com/netdata/netdata/issues/3842)
-- NUT ups names [\#3829](https://github.com/netdata/netdata/issues/3829)
-- New at netdata and a lot of alarms [\#3826](https://github.com/netdata/netdata/issues/3826)
-- \[REQUEST\] Add Fleep/webhook notifications [\#3792](https://github.com/netdata/netdata/issues/3792)
-- Enhancement Redis protocol\(Pika\) ? [\#3783](https://github.com/netdata/netdata/issues/3783)
-- Plugin for Litespeed stats [\#3781](https://github.com/netdata/netdata/issues/3781)
-- Do you have in plan to implement Megacli \(hardware RAID\) support metrics ? [\#3757](https://github.com/netdata/netdata/issues/3757)
-- Add a Safari pinned tab icon [\#3743](https://github.com/netdata/netdata/issues/3743)
-- Colors for BTRFS graphs are inconsistent [\#3719](https://github.com/netdata/netdata/issues/3719)
-- \[Information\] Adding tutorial for Netdata in HTTPS for Plesk systems [\#3717](https://github.com/netdata/netdata/issues/3717)
-- hddtemp module fails: received data doesn't have needed records [\#3683](https://github.com/netdata/netdata/issues/3683)
-- "alarm-notify.sh test" produces error exit code on success [\#3667](https://github.com/netdata/netdata/issues/3667)
-- init file is not installed on Amazon Linux 2018.03 [\#3650](https://github.com/netdata/netdata/issues/3650)
-- Option to prevent netdata dashboard.js from downloading FontAwesome [\#3644](https://github.com/netdata/netdata/issues/3644)
-- FYI: Homebrew formula \(package\) of netdata for macOS [\#3642](https://github.com/netdata/netdata/issues/3642)
-- python.d nginx module -- stub status from https server block on localhost? [\#3628](https://github.com/netdata/netdata/issues/3628)
-- mdadm mismatch\_cnt statistic/alarm [\#3622](https://github.com/netdata/netdata/issues/3622)
-- Python.d postgres unhandled exception [\#3614](https://github.com/netdata/netdata/issues/3614)
-- Support for RethinkDB stats [\#3422](https://github.com/netdata/netdata/issues/3422)
-- Notifications to Microsoft Teams [\#3330](https://github.com/netdata/netdata/issues/3330)
-- enable system alarms on freebsd [\#3267](https://github.com/netdata/netdata/issues/3267)
-- web\_log: response time should support summary or histgram [\#3102](https://github.com/netdata/netdata/issues/3102)
-- Alarm for big system load [\#3003](https://github.com/netdata/netdata/issues/3003)
-- Illegal instruction - Debian Stretch i586 [\#2909](https://github.com/netdata/netdata/issues/2909)
-- New documentation \[bounty\] [\#2638](https://github.com/netdata/netdata/issues/2638)
-- web\_log: support squid logs [\#2235](https://github.com/netdata/netdata/issues/2235)
-- Monitoring PHP APCu [\#2199](https://github.com/netdata/netdata/issues/2199)
-- MySQLService \(or DatabaseService\) for python.d [\#1906](https://github.com/netdata/netdata/issues/1906)
-- RocketChat notifications [\#1811](https://github.com/netdata/netdata/issues/1811)
-- SCTP Information [\#1218](https://github.com/netdata/netdata/issues/1218)
-- python.d enhancements [\#692](https://github.com/netdata/netdata/issues/692)
-- feature request: pause all data processing if noone is watching the graphs [\#656](https://github.com/netdata/netdata/issues/656)
-- netdata package maintainers [\#651](https://github.com/netdata/netdata/issues/651)
-
**Merged pull requests:**
- Changed swagger editor url to the correct one [\#4539](https://github.com/netdata/netdata/pull/4539) ([infeeeee](https://github.com/infeeeee))
diff --git a/CMakeLists.txt b/CMakeLists.txt
deleted file mode 100644
index 4d631f2f..00000000
--- a/CMakeLists.txt
+++ /dev/null
@@ -1,828 +0,0 @@
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-# This file is only used for development (netdata in CLion)
-# It can build netdata, but you are on your own...
-
-cmake_minimum_required(VERSION 3.1.0)
-project(netdata C CXX)
-
-find_package(Threads REQUIRED)
-find_package(PkgConfig REQUIRED)
-
-# default is "Debug"
-#set(CMAKE_BUILD_TYPE "Release")
-
-# set this to see the compilation commands
-# set(CMAKE_VERBOSE_MAKEFILE 1)
-
-
-# -----------------------------------------------------------------------------
-# Set compilation options according to build type
-
-IF("${CMAKE_BUILD_TYPE}" MATCHES "Debug")
- message(STATUS "building for: debugging")
-
- ## unfortunately these produce errors
- #include(CheckCXXCompilerFlag)
- #CHECK_CXX_COMPILER_FLAG("-Wformat-signedness" CXX_FORMAT_SIGNEDNESS)
- #CHECK_CXX_COMPILER_FLAG("-Werror=format-security" CXX_FORMAT_SECURITY)
- #CHECK_CXX_COMPILER_FLAG("-fstack-protector-all" CXX_STACK_PROTECTOR)
- set(CXX_FORMAT_SIGNEDNESS "-Wformat-signedness")
- set(CXX_FORMAT_SECURITY "-Werror=format-security")
- set(CXX_STACK_PROTECTOR "-fstack-protector-all")
- set(CXX_FLAGS_DEBUG "-O0")
- set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -O1 -ggdb -Wall -Wextra -DNETDATA_INTERNAL_CHECKS=1 -DNETDATA_VERIFY_LOCKS=1 ${CXX_FORMAT_SIGNEDNESS} ${CXX_FORMAT_SECURITY} ${CXX_STACK_PROTECTOR} ${CXX_FLAGS_DEBUG}")
-ELSE()
- message(STATUS "building for: release")
- cmake_policy(SET CMP0069 "NEW")
- include(CheckIPOSupported)
- check_ipo_supported(RESULT ipo_supported OUTPUT error)
- IF(${ipo_supported})
- message(STATUS "link time optimization: supported")
- set(CMAKE_INTERPROCEDURAL_OPTIMIZATION TRUE)
- ELSE()
- message(STATUS "link time optimization: not supported")
- ENDIF()
-ENDIF()
-
-
-# -----------------------------------------------------------------------------
-# O/S Detection
-
-# these are defined in common.h too
-SET(LINUX False)
-SET(FREEBSD False)
-SET(MACOS False)
-
-# Detect the operating system
-IF(${CMAKE_SYSTEM_NAME} MATCHES "Darwin")
- SET(TARGET_OS_NAME "macos")
- SET(TARGET_OS 3)
- SET(MACOS True)
-ELSEIF(${CMAKE_SYSTEM_NAME} MATCHES "FreeBSD")
- SET(TARGET_OS_NAME "freebsd")
- SET(TARGET_OS 2)
- SET(FREEBSD True)
-ELSE()
- SET(TARGET_OS_NAME "linux")
- SET(TARGET_OS 1)
- SET(LINUX True)
-ENDIF()
-
-# show the operating system on the console
-message(STATUS "operating system: ${TARGET_OS_NAME} (TARGET_OS=${TARGET_OS})")
-
-
-# -----------------------------------------------------------------------------
-# Detect libuuid
-
-pkg_check_modules(UUID REQUIRED uuid)
-set(NETDATA_COMMON_CFLAGS ${NETDATA_COMMON_CFLAGS} ${UUID_CFLAGS_OTHER})
-set(NETDATA_COMMON_LIBRARIES ${NETDATA_COMMON_LIBRARIES} ${UUID_LIBRARIES})
-set(NETDATA_COMMON_INCLUDE_DIRS ${NETDATA_COMMON_INCLUDE_DIRS} ${UUID_INCLUDE_DIRS})
-
-# -----------------------------------------------------------------------------
-# Detect libz
-
-pkg_check_modules(ZLIB REQUIRED zlib)
-set(NETDATA_COMMON_CFLAGS ${NETDATA_COMMON_CFLAGS} ${ZLIB_CFLAGS_OTHER})
-set(NETDATA_COMMON_LIBRARIES ${NETDATA_COMMON_LIBRARIES} ${ZLIB_LIBRARIES})
-set(NETDATA_COMMON_INCLUDE_DIRS ${NETDATA_COMMON_INCLUDE_DIRS} ${ZLIB_INCLUDE_DIRS})
-
-# -----------------------------------------------------------------------------
-# libuv multi-platform support library with a focus on asynchronous I/O
-
-pkg_check_modules(LIBUV REQUIRED libuv)
-set(NETDATA_COMMON_CFLAGS ${NETDATA_COMMON_CFLAGS} ${LIBUV_CFLAGS_OTHER})
-set(NETDATA_COMMON_LIBRARIES ${NETDATA_COMMON_LIBRARIES} ${LIBUV_LIBRARIES})
-set(NETDATA_COMMON_INCLUDE_DIRS ${NETDATA_COMMON_INCLUDE_DIRS} ${LIBUV_INCLUDE_DIRS})
-
-# -----------------------------------------------------------------------------
-# lz4 Extremely Fast Compression algorithm
-
-pkg_check_modules(LIBLZ4 REQUIRED liblz4)
-set(NETDATA_COMMON_CFLAGS ${NETDATA_COMMON_CFLAGS} ${LIBLZ4_CFLAGS_OTHER})
-set(NETDATA_COMMON_LIBRARIES ${NETDATA_COMMON_LIBRARIES} ${LIBLZ4_LIBRARIES})
-set(NETDATA_COMMON_INCLUDE_DIRS ${NETDATA_COMMON_INCLUDE_DIRS} ${LIBLZ4_INCLUDE_DIRS})
-
-# -----------------------------------------------------------------------------
-# Judy General purpose dynamic array
-
-# pkgconfig not working in Ubuntu, why? upstream package broken?
-#pkg_check_modules(JUDY REQUIRED Judy)
-#set(NETDATA_COMMON_CFLAGS ${NETDATA_COMMON_CFLAGS} ${JUDY_CFLAGS_OTHER})
-#set(NETDATA_COMMON_LIBRARIES ${NETDATA_COMMON_LIBRARIES} ${JUDY_LIBRARIES})
-#set(NETDATA_COMMON_INCLUDE_DIRS ${NETDATA_COMMON_INCLUDE_DIRS} ${JUDY_INCLUDE_DIRS})
-set(NETDATA_COMMON_LIBRARIES ${NETDATA_COMMON_LIBRARIES} "-lJudy")
-include(CheckSymbolExists)
-set(CMAKE_REQUIRED_LIBRARIES "Judy")
-check_symbol_exists("JudyLLast" "Judy.h" HAVE_JUDY)
-IF(HAVE_JUDY)
- message(STATUS "Judy library found")
-ELSE()
- message( FATAL_ERROR "libJudy required but not found. Try installing 'libjudy-dev' or 'Judy-devel'." )
-ENDIF()
-
-# -----------------------------------------------------------------------------
-# OpenSSL Cryptography and SSL/TLS Toolkit
-
-pkg_check_modules(OPENSSL REQUIRED openssl)
-set(NETDATA_COMMON_CFLAGS ${NETDATA_COMMON_CFLAGS} ${OPENSSL_CFLAGS_OTHER})
-set(NETDATA_COMMON_LIBRARIES ${NETDATA_COMMON_LIBRARIES} ${OPENSSL_LIBRARIES})
-set(NETDATA_COMMON_INCLUDE_DIRS ${NETDATA_COMMON_INCLUDE_DIRS} ${OPENSSL_INCLUDE_DIRS})
-
-# -----------------------------------------------------------------------------
-# JSON-C, used by health
-
-pkg_check_modules(JSON REQUIRED json-c)
-set(NETDATA_COMMON_CFLAGS ${NETDATA_COMMON_CFLAGS} ${JSON_CFLAGS_OTHER})
-set(NETDATA_COMMON_LIBRARIES ${NETDATA_COMMON_LIBRARIES} ${JSON_LIBRARIES})
-set(NETDATA_COMMON_INCLUDE_DIRS ${NETDATA_COMMON_INCLUDE_DIRS} ${JSON_INCLUDE_DIRS})
-
-
-# -----------------------------------------------------------------------------
-# Detect libcap
-
-IF(LINUX)
- pkg_check_modules(CAP QUIET libcap)
- # later we use:
- # ${CAP_LIBRARIES}
- # ${CAP_CFLAGS_OTHER}
- # ${CAP_INCLUDE_DIRS}
-ENDIF(LINUX)
-
-
-# -----------------------------------------------------------------------------
-# Detect libipmimonitoring
-
-IF(LINUX)
- pkg_check_modules(IPMI libipmimonitoring)
- # later we use:
- # ${IPMI_LIBRARIES}
- # ${IPMI_CFLAGS_OTHER}
- # ${IPMI_INCLUDE_DIRS}
-ENDIF(LINUX)
-
-
-# -----------------------------------------------------------------------------
-# Detect libmnl
-IF(LINUX)
- pkg_check_modules(MNL libmnl)
- # later we use:
- # ${MNL_LIBRARIES}
- # ${MNL_CFLAGS_OTHER}
- # ${MNL_INCLUDE_DIRS}
-ENDIF(LINUX)
-
-
-# -----------------------------------------------------------------------------
-# Detect libnetfilter_acct
-
-pkg_check_modules(NFACCT libnetfilter_acct)
-# later we use:
-# ${NFACCT_LIBRARIES}
-# ${NFACCT_CFLAGS_OTHER}
-# ${NFACCT_INCLUDE_DIRS}
-
-
-# -----------------------------------------------------------------------------
-# Detect libxenstat
-
-pkg_check_modules(XENSTAT libxenstat)
-# later we use:
-# ${XENSTAT_LIBRARIES}
-# ${XENSTAT_CFLAGS_OTHER}
-# ${XENSTAT_INCLUDE_DIRS}
-
-
-# -----------------------------------------------------------------------------
-# Detect MacOS IOKit/Foundation framework
-
-IF(MACOS)
- find_library(IOKIT IOKit)
- find_library(FOUNDATION Foundation)
- # later we use:
- # ${FOUNDATION}
- # ${IOKIT}
-ENDIF(MACOS)
-
-
-# -----------------------------------------------------------------------------
-# Detect libcrypto
-
-pkg_check_modules(CRYPTO libcrypto)
-# later we use:
-# ${CRYPTO_LIBRARIES}
-# ${CRYPTO_CFLAGS_OTHER}
-# ${CRYPTO_INCLUDE_DIRS}
-
-
-# -----------------------------------------------------------------------------
-# Detect libssl
-
-pkg_check_modules(SSL libssl)
-# later we use:
-# ${SSL_LIBRARIES}
-# ${SSL_CFLAGS_OTHER}
-# ${SSL_INCLUDE_DIRS}
-
-
-# -----------------------------------------------------------------------------
-# Detect libcurl
-
-pkg_check_modules(CURL libcurl)
-# later we use:
-# ${CURL_LIBRARIES}
-# ${CURL_CFLAGS_OTHER}
-# ${CURL_INCLUDE_DIRS}
-
-
-# -----------------------------------------------------------------------------
-# Detect libaws-cpp-sdk-core
-
-find_library(HAVE_AWS aws-cpp-sdk-core)
-# later we use:
-# ${HAVE_AWS}
-
-# -----------------------------------------------------------------------------
-# Detect libaws-cpp-sdk-kinesis
-
-find_library(HAVE_KINESIS aws-cpp-sdk-kinesis)
-# later we use:
-# ${HAVE_KINESIS}
-
-
-# -----------------------------------------------------------------------------
-# Detect libprotobuf
-
-pkg_check_modules(PROTOBUF protobuf)
-# later we use:
-# ${PROTOBUF_LIBRARIES}
-# ${PROTOBUF_CFLAGS_OTHER}
-# ${PROTOBUF_INCLUDE_DIRS}
-
-
-# -----------------------------------------------------------------------------
-# Detect libsnappy
-
-pkg_check_modules(SNAPPY snappy)
-# later we use:
-# ${SNAPPY_LIBRARIES}
-# ${SNAPPY_CFLAGS_OTHER}
-# ${SNAPPY_INCLUDE_DIRS}
-
-
-# -----------------------------------------------------------------------------
-# netdata files
-
-set(LIBNETDATA_FILES
- libnetdata/adaptive_resortable_list/adaptive_resortable_list.c
- libnetdata/adaptive_resortable_list/adaptive_resortable_list.h
- libnetdata/config/appconfig.c
- libnetdata/config/appconfig.h
- libnetdata/avl/avl.c
- libnetdata/avl/avl.h
- libnetdata/buffer/buffer.c
- libnetdata/buffer/buffer.h
- libnetdata/clocks/clocks.c
- libnetdata/clocks/clocks.h
- libnetdata/dictionary/dictionary.c
- libnetdata/dictionary/dictionary.h
- libnetdata/eval/eval.c
- libnetdata/eval/eval.h
- libnetdata/inlined.h
- libnetdata/libnetdata.c
- libnetdata/libnetdata.h
- libnetdata/locks/locks.c
- libnetdata/locks/locks.h
- libnetdata/log/log.c
- libnetdata/log/log.h
- libnetdata/os.c
- libnetdata/os.h
- libnetdata/popen/popen.c
- libnetdata/popen/popen.h
- libnetdata/procfile/procfile.c
- libnetdata/procfile/procfile.h
- libnetdata/simple_pattern/simple_pattern.c
- libnetdata/simple_pattern/simple_pattern.h
- libnetdata/socket/socket.c
- libnetdata/socket/socket.h
- libnetdata/statistical/statistical.c
- libnetdata/statistical/statistical.h
- libnetdata/storage_number/storage_number.c
- libnetdata/storage_number/storage_number.h
- libnetdata/threads/threads.c
- libnetdata/threads/threads.h
- libnetdata/url/url.c
- libnetdata/url/url.h
- libnetdata/json/json.c
- libnetdata/json/json.h
- libnetdata/json/jsmn.c
- libnetdata/json/jsmn.h
- libnetdata/health/health.c
- libnetdata/health/health.h
- libnetdata/string/utf8.h
- libnetdata/socket/security.c
- libnetdata/socket/security.h)
-
-add_library(libnetdata OBJECT ${LIBNETDATA_FILES})
-
-set(APPS_PLUGIN_FILES
- collectors/apps.plugin/apps_plugin.c
- )
-
-set(CHECKS_PLUGIN_FILES
- collectors/checks.plugin/plugin_checks.c
- collectors/checks.plugin/plugin_checks.h
- )
-
-set(FREEBSD_PLUGIN_FILES
- collectors/freebsd.plugin/plugin_freebsd.c
- collectors/freebsd.plugin/plugin_freebsd.h
- collectors/freebsd.plugin/freebsd_sysctl.c
- collectors/freebsd.plugin/freebsd_getmntinfo.c
- collectors/freebsd.plugin/freebsd_getifaddrs.c
- collectors/freebsd.plugin/freebsd_devstat.c
- collectors/freebsd.plugin/freebsd_kstat_zfs.c
- collectors/freebsd.plugin/freebsd_ipfw.c
- collectors/proc.plugin/zfs_common.c
- collectors/proc.plugin/zfs_common.h
- )
-
-set(HEALTH_PLUGIN_FILES
- health/health.c
- health/health.h
- health/health_config.c
- health/health_json.c
- health/health_log.c)
-
-set(IDLEJITTER_PLUGIN_FILES
- collectors/idlejitter.plugin/plugin_idlejitter.c
- collectors/idlejitter.plugin/plugin_idlejitter.h
- )
-
-set(CGROUPS_PLUGIN_FILES
- collectors/cgroups.plugin/sys_fs_cgroup.c
- collectors/cgroups.plugin/sys_fs_cgroup.h
- )
-
-set(CGROUP_NETWORK_FILES
- collectors/cgroups.plugin/cgroup-network.c
- )
-
-set(DISKSPACE_PLUGIN_FILES
- collectors/diskspace.plugin/plugin_diskspace.h
- collectors/diskspace.plugin/plugin_diskspace.c
- )
-
-set(FREEIPMI_PLUGIN_FILES
- collectors/freeipmi.plugin/freeipmi_plugin.c
- )
-
-set(NFACCT_PLUGIN_FILES
- collectors/nfacct.plugin/plugin_nfacct.c
- )
-
-set(XENSTAT_PLUGIN_FILES
- collectors/xenstat.plugin/xenstat_plugin.c
- )
-
-set(PERF_PLUGIN_FILES
- collectors/perf.plugin/perf_plugin.c
- )
-
-set(PROC_PLUGIN_FILES
- collectors/proc.plugin/ipc.c
- collectors/proc.plugin/plugin_proc.c
- collectors/proc.plugin/plugin_proc.h
- collectors/proc.plugin/proc_diskstats.c
- collectors/proc.plugin/proc_mdstat.c
- collectors/proc.plugin/proc_interrupts.c
- collectors/proc.plugin/proc_softirqs.c
- collectors/proc.plugin/proc_loadavg.c
- collectors/proc.plugin/proc_meminfo.c
- collectors/proc.plugin/proc_net_dev.c
- collectors/proc.plugin/proc_net_ip_vs_stats.c
- collectors/proc.plugin/proc_net_netstat.c
- collectors/proc.plugin/proc_net_rpc_nfs.c
- collectors/proc.plugin/proc_net_rpc_nfsd.c
- collectors/proc.plugin/proc_net_snmp.c
- collectors/proc.plugin/proc_net_snmp6.c
- collectors/proc.plugin/proc_net_sctp_snmp.c
- collectors/proc.plugin/proc_net_sockstat.c
- collectors/proc.plugin/proc_net_sockstat6.c
- collectors/proc.plugin/proc_net_softnet_stat.c
- collectors/proc.plugin/proc_net_stat_conntrack.c
- collectors/proc.plugin/proc_net_stat_synproxy.c
- collectors/proc.plugin/proc_self_mountinfo.c
- collectors/proc.plugin/proc_self_mountinfo.h
- collectors/proc.plugin/zfs_common.c
- collectors/proc.plugin/zfs_common.h
- collectors/proc.plugin/proc_spl_kstat_zfs.c
- collectors/proc.plugin/proc_stat.c
- collectors/proc.plugin/proc_sys_kernel_random_entropy_avail.c
- collectors/proc.plugin/proc_vmstat.c
- collectors/proc.plugin/proc_uptime.c
- collectors/proc.plugin/sys_kernel_mm_ksm.c
- collectors/proc.plugin/sys_devices_system_edac_mc.c
- collectors/proc.plugin/sys_devices_system_node.c
- collectors/proc.plugin/sys_fs_btrfs.c
- collectors/proc.plugin/sys_class_power_supply.c
- )
-
-set(TC_PLUGIN_FILES
- collectors/tc.plugin/plugin_tc.c
- collectors/tc.plugin/plugin_tc.h
- )
-
-set(MACOS_PLUGIN_FILES
- collectors/macos.plugin/plugin_macos.c
- collectors/macos.plugin/plugin_macos.h
- collectors/macos.plugin/macos_sysctl.c
- collectors/macos.plugin/macos_mach_smi.c
- collectors/macos.plugin/macos_fw.c
- )
-
-set(PLUGINSD_PLUGIN_FILES
- collectors/plugins.d/plugins_d.c
- collectors/plugins.d/plugins_d.h
- )
-
-set(REGISTRY_PLUGIN_FILES
- registry/registry.c
- registry/registry.h
- registry/registry_db.c
- registry/registry_init.c
- registry/registry_internals.c
- registry/registry_internals.h
- registry/registry_log.c
- registry/registry_machine.c
- registry/registry_machine.h
- registry/registry_person.c
- registry/registry_person.h
- registry/registry_url.c
- registry/registry_url.h
- )
-
-set(STATSD_PLUGIN_FILES
- collectors/statsd.plugin/statsd.c
- collectors/statsd.plugin/statsd.h
- )
-
-set(RRD_PLUGIN_FILES
- database/rrdcalc.c
- database/rrdcalc.h
- database/rrdcalctemplate.c
- database/rrdcalctemplate.h
- database/rrddim.c
- database/rrddimvar.c
- database/rrddimvar.h
- database/rrdfamily.c
- database/rrdhost.c
- database/rrd.c
- database/rrd.h
- database/rrdset.c
- database/rrdsetvar.c
- database/rrdsetvar.h
- database/rrdvar.c
- database/rrdvar.h
- database/engine/rrdengine.c
- database/engine/rrdengine.h
- database/engine/rrddiskprotocol.h
- database/engine/datafile.c
- database/engine/datafile.h
- database/engine/journalfile.c
- database/engine/journalfile.h
- database/engine/rrdenginelib.c
- database/engine/rrdenginelib.h
- database/engine/rrdengineapi.c
- database/engine/rrdengineapi.h
- database/engine/pagecache.c
- database/engine/pagecache.h
- database/engine/rrdenglocking.c
- database/engine/rrdenglocking.h
- )
-
-set(WEB_PLUGIN_FILES
- web/server/web_client.c
- web/server/web_client.h
- web/server/web_server.c
- web/server/web_server.h
- web/server/static/static-threaded.c
- web/server/static/static-threaded.h
- web/server/web_client_cache.c
- web/server/web_client_cache.h
- )
-
-set(API_PLUGIN_FILES
- web/api/web_api_v1.c
- web/api/web_api_v1.h
- web/api/badges/web_buffer_svg.c
- web/api/badges/web_buffer_svg.h
- web/api/exporters/allmetrics.c
- web/api/exporters/allmetrics.h
- web/api/exporters/shell/allmetrics_shell.c
- web/api/exporters/shell/allmetrics_shell.h
- web/api/queries/rrdr.c
- web/api/queries/rrdr.h
- web/api/queries/query.c
- web/api/queries/query.h
- web/api/queries/average/average.c
- web/api/queries/average/average.h
- web/api/queries/incremental_sum/incremental_sum.c
- web/api/queries/incremental_sum/incremental_sum.h
- web/api/queries/max/max.c
- web/api/queries/max/max.h
- web/api/queries/min/min.c
- web/api/queries/min/min.h
- web/api/queries/sum/sum.c
- web/api/queries/sum/sum.h
- web/api/queries/median/median.c
- web/api/queries/median/median.h
- web/api/queries/stddev/stddev.c
- web/api/queries/stddev/stddev.h
- web/api/queries/ses/ses.c
- web/api/queries/ses/ses.h
- web/api/queries/des/des.c
- web/api/queries/des/des.h
- web/api/formatters/rrd2json.c
- web/api/formatters/rrd2json.h
- web/api/formatters/csv/csv.c
- web/api/formatters/csv/csv.h
- web/api/formatters/json/json.c
- web/api/formatters/json/json.h
- web/api/formatters/ssv/ssv.c
- web/api/formatters/ssv/ssv.h
- web/api/formatters/value/value.c
- web/api/formatters/value/value.h
- web/api/formatters/json_wrapper.c
- web/api/formatters/json_wrapper.h
- web/api/formatters/charts2json.c
- web/api/formatters/charts2json.h
- web/api/formatters/rrdset2json.c
- web/api/formatters/rrdset2json.h
- web/api/health/health_cmdapi.c
- )
-
-set(STREAMING_PLUGIN_FILES
- streaming/rrdpush.c
- streaming/rrdpush.h
- )
-
-set(BACKENDS_PLUGIN_FILES
- backends/backends.c
- backends/backends.h
- backends/graphite/graphite.c
- backends/graphite/graphite.h
- backends/json/json.c
- backends/json/json.h
- backends/opentsdb/opentsdb.c
- backends/opentsdb/opentsdb.h
- backends/prometheus/backend_prometheus.c
- backends/prometheus/backend_prometheus.h
- )
-
-set(KINESIS_BACKEND_FILES
- backends/aws_kinesis/aws_kinesis.c
- backends/aws_kinesis/aws_kinesis.h
- backends/aws_kinesis/aws_kinesis_put_record.cc
- backends/aws_kinesis/aws_kinesis_put_record.h
- )
-
-set(PROMETHEUS_REMOTE_WRITE_BACKEND_FILES
- backends/prometheus/remote_write/remote_write.cc
- backends/prometheus/remote_write/remote_write.h
- )
-
-set(DAEMON_FILES
- daemon/common.c
- daemon/common.h
- daemon/daemon.c
- daemon/daemon.h
- daemon/global_statistics.c
- daemon/global_statistics.h
- daemon/main.c
- daemon/main.h
- daemon/signals.c
- daemon/signals.h
- daemon/unit_test.c
- daemon/unit_test.h
- )
-
-set(NETDATA_FILES
- collectors/all.h
- ${DAEMON_FILES}
- ${API_PLUGIN_FILES}
- ${BACKENDS_PLUGIN_FILES}
- ${CHECKS_PLUGIN_FILES}
- ${HEALTH_PLUGIN_FILES}
- ${IDLEJITTER_PLUGIN_FILES}
- ${PLUGINSD_PLUGIN_FILES}
- ${RRD_PLUGIN_FILES}
- ${REGISTRY_PLUGIN_FILES}
- ${STATSD_PLUGIN_FILES}
- ${STREAMING_PLUGIN_FILES}
- ${WEB_PLUGIN_FILES}
- )
-
-include_directories(AFTER .)
-
-add_definitions(
- -DHAVE_CONFIG_H
- -DTARGET_OS=${TARGET_OS}
- -DCACHE_DIR="/var/cache/netdata"
- -DCONFIG_DIR="/etc/netdata"
- -DLIBCONFIG_DIR="/usr/lib/netdata/conf.d"
- -DLOG_DIR="/var/log/netdata"
- -DPLUGINS_DIR="/usr/libexec/netdata/plugins.d"
- -DWEB_DIR="/usr/share/netdata/web"
- -DVARLIB_DIR="/var/lib/netdata"
-)
-
-# -----------------------------------------------------------------------------
-# kinesis backend
-
-IF(HAVE_KINESIS AND HAVE_AWS AND CRYPTO_LIBRARIES AND SSL_LIBRARIES AND CURL_LIBRARIES)
- SET(ENABLE_BACKEND_KINESIS True)
-ELSE()
- SET(ENABLE_BACKEND_KINESIS False)
-ENDIF()
-
-IF(ENABLE_BACKEND_KINESIS)
- message(STATUS "kinesis backend: enabled")
- list(APPEND NETDATA_FILES ${KINESIS_BACKEND_FILES})
- list(APPEND NETDATA_COMMON_LIBRARIES aws-cpp-sdk-kinesis aws-cpp-sdk-core ${CRYPTO_LIBRARIES} ${SSL_LIBRARIES} ${CURL_LIBRARIES})
- list(APPEND NETDATA_COMMON_INCLUDE_DIRS ${CRYPTO_INCLUDE_DIRS} ${SSL_INCLUDE_DIRS} ${CURL_INCLUDE_DIRS})
- list(APPEND NETDATA_COMMON_CFLAGS ${CRYPTO_CFLAGS_OTHER} ${SSL_CFLAGS_OTHER} ${CURL_CFLAGS_OTHER})
-ELSE()
- message(STATUS "kinesis backend: disabled (requires AWS SDK for C++)")
-ENDIF()
-
-# -----------------------------------------------------------------------------
-# prometheus remote write backend
-
-IF(PROTOBUF_LIBRARIES AND SNAPPY_LIBRARIES)
- SET(ENABLE_BACKEND_PROMETHEUS_REMOTE_WRITE True)
-ELSE()
- SET(ENABLE_BACKEND_PROMETHEUS_REMOTE_WRITE False)
-ENDIF()
-
-IF(ENABLE_BACKEND_PROMETHEUS_REMOTE_WRITE)
- message(STATUS "prometheus remote write backend: enabled")
-
- find_package(Protobuf REQUIRED)
- protobuf_generate_cpp(PROTO_SRCS PROTO_HDRS backends/prometheus/remote_write/remote_write.proto)
-
- list(APPEND NETDATA_FILES ${PROMETHEUS_REMOTE_WRITE_BACKEND_FILES} ${PROTO_SRCS} ${PROTO_HDRS})
- list(APPEND NETDATA_COMMON_LIBRARIES ${PROTOBUF_LIBRARIES} ${SNAPPY_LIBRARIES})
- list(APPEND NETDATA_COMMON_INCLUDE_DIRS ${PROTOBUF_INCLUDE_DIRS} ${SNAPPY_INCLUDE_DIRS} ${CMAKE_CURRENT_BINARY_DIR})
- list(APPEND NETDATA_COMMON_CFLAGS ${PROTOBUF_CFLAGS_OTHER} ${SNAPPY_CFLAGS_OTHER})
-ELSE()
- message(STATUS "prometheus remote write backend: disabled (requires protobuf and snappy libraries)")
-ENDIF()
-
-# -----------------------------------------------------------------------------
-# netdata
-
-set(NETDATA_COMMON_LIBRARIES ${NETDATA_COMMON_LIBRARIES} m ${CMAKE_THREAD_LIBS_INIT})
-
-IF(LINUX)
- add_executable(netdata config.h ${NETDATA_FILES}
- ${CGROUPS_PLUGIN_FILES}
- ${DISKSPACE_PLUGIN_FILES}
- ${PROC_PLUGIN_FILES}
- ${TC_PLUGIN_FILES}
- )
- target_link_libraries (netdata libnetdata ${NETDATA_COMMON_LIBRARIES})
- target_include_directories(netdata PUBLIC ${NETDATA_COMMON_INCLUDE_DIRS})
- target_compile_options(netdata PUBLIC ${NETDATA_COMMON_CFLAGS})
-
- SET(ENABLE_PLUGIN_CGROUP_NETWORK True)
- SET(ENABLE_PLUGIN_APPS True)
- SET(ENABLE_PLUGIN_PERF True)
-
-ELSEIF(FREEBSD)
- add_executable(netdata config.h ${NETDATA_FILES} ${FREEBSD_PLUGIN_FILES})
- target_link_libraries (netdata libnetdata ${NETDATA_COMMON_LIBRARIES})
- target_include_directories(netdata PUBLIC ${NETDATA_COMMON_INCLUDE_DIRS})
- target_compile_options(netdata PUBLIC ${NETDATA_COMMON_CFLAGS})
- SET(ENABLE_PLUGIN_CGROUP_NETWORK False)
- SET(ENABLE_PLUGIN_APPS True)
- SET(ENABLE_PLUGIN_PERF False)
-
-ELSEIF(MACOS)
- add_executable(netdata config.h ${NETDATA_FILES} ${MACOS_PLUGIN_FILES})
- target_link_libraries (netdata libnetdata ${NETDATA_COMMON_LIBRARIES} ${IOKIT} ${FOUNDATION})
- target_include_directories(netdata PUBLIC ${NETDATA_COMMON_INCLUDE_DIRS})
- target_compile_options(netdata PUBLIC ${NETDATA_COMMON_CFLAGS})
- SET(ENABLE_PLUGIN_CGROUP_NETWORK False)
- SET(ENABLE_PLUGIN_APPS False)
- SET(ENABLE_PLUGIN_PERF False)
-
-ENDIF()
-
-IF(ENABLE_BACKEND_KINESIS OR ENABLE_BACKEND_PROMETHEUS_REMOTE_WRITE)
- set_property(TARGET netdata PROPERTY CXX_STANDARD 11)
- set_property(TARGET netdata PROPERTY CMAKE_CXX_STANDARD_REQUIRED ON)
-ENDIF()
-
-IF(IPMI_LIBRARIES)
- SET(ENABLE_PLUGIN_FREEIPMI True)
-ELSE()
- SET(ENABLE_PLUGIN_FREEIPMI False)
-ENDIF()
-
-IF(LINUX AND MNL_LIBRARIES AND NFACCT_LIBRARIES)
- SET(ENABLE_PLUGIN_NFACCT True)
-ELSE()
- SET(ENABLE_PLUGIN_NFACCT False)
-ENDIF()
-
-IF(XENSTAT_LIBRARIES)
- SET(ENABLE_PLUGIN_XENSTAT True)
-ELSE()
- SET(ENABLE_PLUGIN_XENSTAT False)
-ENDIF()
-
-
-# -----------------------------------------------------------------------------
-# apps.plugin
-
-IF(ENABLE_PLUGIN_APPS)
- message(STATUS "apps.plugin: enabled")
- add_executable(apps.plugin config.h ${APPS_PLUGIN_FILES})
- target_link_libraries (apps.plugin libnetdata ${NETDATA_COMMON_LIBRARIES} ${CAP_LIBRARIES})
- target_include_directories(apps.plugin PUBLIC ${NETDATA_COMMON_INCLUDE_DIRS} ${CAP_INCLUDE_DIRS})
- target_compile_options(apps.plugin PUBLIC ${NETDATA_COMMON_CFLAGS} ${CAP_CFLAGS_OTHER})
-ELSE()
- message(STATUS "apps.plugin: disabled")
-ENDIF()
-
-
-# -----------------------------------------------------------------------------
-# freeipmi.plugin
-
-IF(ENABLE_PLUGIN_FREEIPMI)
- message(STATUS "freeipmi.plugin: enabled")
- add_executable(freeipmi.plugin config.h ${FREEIPMI_PLUGIN_FILES})
- target_link_libraries (freeipmi.plugin libnetdata ${NETDATA_COMMON_LIBRARIES} ${IPMI_LIBRARIES})
- target_include_directories(freeipmi.plugin PUBLIC ${NETDATA_COMMON_INCLUDE_DIRS} ${IPMI_INCLUDE_DIRS})
- target_compile_options(freeipmi.plugin PUBLIC ${NETDATA_COMMON_CFLAGS} ${IPMI_CFLAGS_OTHER})
-ELSE()
- message(STATUS "freeipmi.plugin: disabled (depends on libipmimonitoring)")
-ENDIF()
-
-
-# -----------------------------------------------------------------------------
-# nfacct.plugin
-
-IF(ENABLE_PLUGIN_NFACCT)
- message(STATUS "nfacct.plugin: enabled")
- add_executable(nfacct.plugin config.h ${NFACCT_PLUGIN_FILES})
- target_link_libraries (nfacct.plugin libnetdata ${NETDATA_COMMON_LIBRARIES} ${MNL_LIBRARIES} ${NFACCT_LIBRARIES})
- target_include_directories(nfacct.plugin PUBLIC ${NETDATA_COMMON_INCLUDE_DIRS} ${MNL_INCLUDE_DIRS} ${NFACCT_INCLUDE_DIRS})
- target_compile_options(nfacct.plugin PUBLIC ${NETDATA_COMMON_CFLAGS} ${MNL_CFLAGS_OTHER} ${NFACCT_CFLAGS_OTHER})
-ELSE()
- message(STATUS "nfacct.plugin: disabled (requires libmnl and libnetfilter_acct)")
-ENDIF()
-
-
-# -----------------------------------------------------------------------------
-# xenstat.plugin
-
-IF(ENABLE_PLUGIN_XENSTAT)
- message(STATUS "xenstat.plugin: enabled")
- add_executable(xenstat.plugin config.h ${XENSTAT_PLUGIN_FILES})
- target_link_libraries (xenstat.plugin libnetdata ${NETDATA_COMMON_LIBRARIES} ${XENSTAT_LIBRARIES})
- target_include_directories(xenstat.plugin PUBLIC ${NETDATA_COMMON_INCLUDE_DIRS} ${XENSTAT_INCLUDE_DIRS})
- target_compile_options(xenstat.plugin PUBLIC ${NETDATA_COMMON_CFLAGS} ${XENSTAT_CFLAGS_OTHER})
-ELSE()
- message(STATUS "xenstat.plugin: disabled (requires libxenstat)")
-ENDIF()
-
-
-# -----------------------------------------------------------------------------
-# perf.plugin
-
-IF(ENABLE_PLUGIN_PERF)
- message(STATUS "perf.plugin: enabled")
- add_executable(perf.plugin config.h ${PERF_PLUGIN_FILES})
- target_link_libraries (perf.plugin libnetdata ${NETDATA_COMMON_LIBRARIES})
- target_include_directories(perf.plugin PUBLIC ${NETDATA_COMMON_INCLUDE_DIRS})
- target_compile_options(perf.plugin PUBLIC ${NETDATA_COMMON_CFLAGS})
-ELSE()
- message(STATUS "perf.plugin: disabled")
-ENDIF()
-
-
-# -----------------------------------------------------------------------------
-# cgroup-network
-
-IF(ENABLE_PLUGIN_CGROUP_NETWORK)
- message(STATUS "cgroup-network: enabled")
- add_executable(cgroup-network config.h ${CGROUP_NETWORK_FILES})
- target_link_libraries (cgroup-network libnetdata ${NETDATA_COMMON_LIBRARIES})
- target_include_directories(cgroup-network PUBLIC ${NETDATA_COMMON_INCLUDE_DIRS})
- target_compile_options(cgroup-network PUBLIC ${NETDATA_COMMON_CFLAGS})
-ELSE()
- message(STATUS "cgroup-network: disabled (requires Linux)")
-ENDIF()
diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md
index 13424ce2..c3427a50 100644
--- a/CODE_OF_CONDUCT.md
+++ b/CODE_OF_CONDUCT.md
@@ -14,22 +14,22 @@ appearance, race, religion, or sexual identity and orientation.
Examples of behavior that contributes to creating a positive environment
include:
-* Using welcoming and inclusive language
-* Being respectful of differing viewpoints and experiences
-* Gracefully accepting constructive criticism
-* Focusing on what is best for the community
-* Showing empathy towards other community members
+- Using welcoming and inclusive language
+- Being respectful of differing viewpoints and experiences
+- Gracefully accepting constructive criticism
+- Focusing on what is best for the community
+- Showing empathy towards other community members
Examples of unacceptable behavior by participants include:
-* The use of sexualized language or imagery and unwelcome sexual attention or
- advances
-* Trolling, insulting/derogatory comments, and personal or political attacks
-* Public or private harassment
-* Publishing others' private information, such as a physical or electronic
- address, without explicit permission
-* Other conduct which could reasonably be considered inappropriate in a
- professional setting
+- The use of sexualized language or imagery and unwelcome sexual attention or
+ advances
+- Trolling, insulting/derogatory comments, and personal or political attacks
+- Public or private harassment
+- Publishing others' private information, such as a physical or electronic
+ address, without explicit permission
+- Other conduct which could reasonably be considered inappropriate in a
+ professional setting
## Our Responsibilities
@@ -68,9 +68,8 @@ members of the project's leadership.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
-available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
+available at <https://www.contributor-covenant.org/version/1/4/code-of-conduct.html>
[homepage]: https://www.contributor-covenant.org
-
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2FCODE_OF_CONDUCT&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2FCODE_OF_CONDUCT&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 8847f0c2..ab7ddff8 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -15,15 +15,15 @@ This is the minimum open-source users should contribute back to the projects the
### Spread the word
-Community growth allows the project to attract new talent willing to contribute. This talent is then developing new features and improves the project. These new features and improvements attract more users and so on. It is a loop. So, post about netdata, present it to local meetups you attend, let your online social network or twitter, facebook, reddit, etc. know you are using it. **The more people involved, the faster the project evolves**.
+Community growth allows the project to attract new talent willing to contribute. This talent then develops new features and improves the project. These new features and improvements attract more users, and so on. It is a loop. So, post about Netdata, present it to local meetups you attend, and let your online social network (Twitter, Facebook, Reddit, etc.) know you are using it. **The more people involved, the faster the project evolves**.
### Provide feedback
-Is there anything that bothers you about netdata? Did you experience an issue while installing it or using it? Would you like to see it evolve to you need? Let us know. [Open a github issue](https://github.com/netdata/netdata/issues) to discuss it. Feedback is very important for open-source projects. We can't commit we will do everything, but your feedback influences our road-map significantly. **We rely on your feedback to make Netdata better**.
+Is there anything that bothers you about Netdata? Did you experience an issue while installing it or using it? Would you like to see it evolve to your needs? Let us know. [Open a GitHub issue](https://github.com/netdata/netdata/issues) to discuss it. Feedback is very important for open-source projects. We can't commit to doing everything, but your feedback influences our roadmap significantly. **We rely on your feedback to make Netdata better**.
### Translate some documentation
-The [netdata localization project](https://github.com/netdata/localization) contains instructions on how to provide translations for parts of our documentation. Translating the entire documentation is a daunting task, but you can contribute as much as you like, even a single file. The Chinese translation effort has already begun and we are looking forward to more contributions.
+The [Netdata localization project](https://github.com/netdata/localization) contains instructions on how to provide translations for parts of our documentation. Translating the entire documentation is a daunting task, but you can contribute as much as you like, even a single file. The Chinese translation effort has already begun and we are looking forward to more contributions.
### Sponsor a part of Netdata
@@ -32,38 +32,45 @@ Netdata is a complex system, with many integrations for the various collectors,
#### Sponsor a collector
Netdata is all about simplicity and meaningful presentation. A "sponsor" for a collector does the following:
- - Assists the devs with feedback on the charts.
- - Specifies the alarms that would make sense for each metric.
- - When the implementation passes QA, tests the implementation in production.
- - Uses the charts and alarms in his/her day to day work and provides additional feedback.
- - Requests additional improvements as things change (e.g. new versions of an API are available).
+
+- Assists the devs with feedback on the charts.
+- Specifies the alarms that would make sense for each metric.
+- When the implementation passes QA, tests the implementation in production.
+- Uses the charts and alarms in his/her day-to-day work and provides additional feedback.
+- Requests additional improvements as things change (e.g. new versions of an API are available).
#### Sponsor a backend
We already support various [backends](backends) and we intend to support more. A "sponsor" for a backend:
-- Suggests ways in which the information in Netdata could best be exposed to the particular backend, to facilitate meaningful presentation.
- - When the implementation passes QA, tests the implementation in production.
-- Uses the backend in his/her day to day work and provides additional feedback, after the backend is delivered.
- - Requests additional improvements as things change (e.g. new versions of the backend API are available).
+
+- Suggests ways in which the information in Netdata could best be exposed to the particular backend, to facilitate meaningful presentation.
+- When the implementation passes QA, tests the implementation in production.
+- Uses the backend in his/her day-to-day work and provides additional feedback, after the backend is delivered.
+- Requests additional improvements as things change (e.g. new versions of the backend API are available).
#### Sponsor a notification method
Netdata delivers alarms via various [notification methods](health/notifications). A "sponsor" for a notification method:
-- Points the devs to the documentation for the API and identifies any unusual features of interest (e.g. the ability in Slack to send a notification either to a channel or to a user).
-- Uses the notification method in production and provides feedback.
-- Requests additional improvements as things change (e.g. new versions of the API are available).
+
+- Points the devs to the documentation for the API and identifies any unusual features of interest (e.g. the ability in Slack to send a notification either to a channel or to a user).
+- Uses the notification method in production and provides feedback.
+- Requests additional improvements as things change (e.g. new versions of the API are available).
## Experienced Users
### Help other users
-As the project grows, an increasing share of our time is spent on supporting this community of users in terms of answering questions, of helping users understand how netdata works and find their way with it. Helping other users is crucial. It allows the developers and maintainers of the project to focus on improving it.
+As the project grows, an increasing share of our time is spent supporting this community of users: answering questions and helping users understand how Netdata works and find their way with it. Helping other users is crucial. It allows the developers and maintainers of the project to focus on improving it.
### Improve documentation
-All of our documentation is in markdown (.md) files inside the netdata GitHub project. All of our [HTML documentation](https://docs.netdata.cloud) is generated from these files. At the top right of each documentation page you will see a pencil, that leads you directly to the markdown file that was used to generated it. Don't be afraid to click it and edit any of these documents and submit a GitHub Pull Request with your corrections/additions.
+Our documentation is in need of constant improvement and expansion. As Netdata's features grow, we need to clearly explain how each feature works and document all the possible configurations. And as Netdata's community grows, we need to improve existing documentation to make it more accessible to people of all skill levels.
+
+We also need to produce beginner-level tutorials on using Netdata to monitor common applications, web servers, and more.
+
+Start with the [guide for contributing to documentation](docs/contributing/contributing-documentation.md), and then review the [documentation style guide](docs/contributing/style-guide.md) for specifics on how we write our documentation.
-We also need help to [document each chart in the default dashboard](https://github.com/netdata/netdata/issues/279).
+Don't be afraid to submit a pull request with your corrections or additions! We need a lot of help and are willing to guide new contributors through the process.
## Developers
@@ -71,16 +78,15 @@ We expect most contributions to be for new data collection plugins. You can read
Of course we appreciate contributions for any other part of the Netdata agent, including the [daemon](daemon), [backends for long-term archiving](backends/), innovative ways of using the [REST API](web/api) to create cool [Custom Dashboards](web/gui/custom/), or to include Netdata charts in other applications, similarly to what can be done with [Confluence](web/gui/confluence/).
-
### Contributions Ground Rules
#### Code of Conduct and CLA
-We expect all contributors to abide by the [Contributor Covenant Code of Conduct](CODE_OF_CONDUCT.md). For a pull request to be accepted, you will also need to accept the [netdata contributors license agreement](CONTRIBUTORS.md), as part of the PR process.
+We expect all contributors to abide by the [Contributor Covenant Code of Conduct](CODE_OF_CONDUCT.md). For a pull request to be accepted, you will also need to accept the [Netdata contributors license agreement](CONTRIBUTORS.md), as part of the PR process.
#### Performance and efficiency
-Everything on Netdata is about efficiency. We need netdata to always be the most lightweight monitoring solution available. We will reject to merge PRs that are not optimal in resource utilization and efficiency.
+Everything on Netdata is about efficiency. We need Netdata to always be the most lightweight monitoring solution available. We will not merge PRs that are not optimal in resource utilization and efficiency.
Of course there are cases where such technical excellence is either not reasonable or not feasible. In these cases, we may require the feature or code submitted to be disabled by default.
@@ -88,9 +94,9 @@ Of course there are cases that such technical excellence is either not reasonabl
Unlike other monitoring solutions, Netdata requires all metrics collected to have some structure attached to them. So, Netdata metrics have a name, units, belong to a chart that has a title, a family, a context, belong to an application, etc.
-This structure is what makes netdata different. Most other monitoring solution collect bulk metrics in terms of name-value pairs and then expect their users to give meaning to these metrics during visualization. This does not work. It is neither practical nor reasonable to give to someone 2000 metrics and let him/her visualize them in a meaningful way.
+This structure is what makes Netdata different. Most other monitoring solutions collect bulk metrics as name-value pairs and then expect their users to give meaning to these metrics during visualization. This does not work. It is neither practical nor reasonable to give someone 2000 metrics and let him/her visualize them in a meaningful way.
-So, netdata requires all metrics to have a meaning at the time they are collected. We will reject to merge PRs that loosely collect just a "bunch of metrics", but we are very keen to help you fix this.
+So, Netdata requires all metrics to have a meaning at the time they are collected. We will not merge PRs that loosely collect just a "bunch of metrics", but we are very keen to help you fix this.
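
As a concrete illustration, external collectors communicate this structure to the agent over the `plugins.d` text protocol: a chart, with its title, units, family, and context, is declared before any value is sent. A minimal sketch in C, using a made-up `example.random` chart purely for illustration (see `collectors/plugins.d` for the full protocol):

```c
/* Minimal external-plugin sketch: every metric is born with a chart,
 * title, units, family and context. The chart and dimension names here
 * are hypothetical. */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void) {
    /* CHART type.id name title units family context charttype priority update_every */
    printf("CHART example.random '' \"A random number\" value random random.number line 90000 1\n");
    /* DIMENSION id name algorithm multiplier divisor */
    printf("DIMENSION number '' absolute 1 1\n");
    fflush(stdout);

    for (;;) {                          /* one iteration per data collection */
        printf("BEGIN example.random\n");
        printf("SET number = %d\n", rand() % 100);
        printf("END\n");
        fflush(stdout);
        sleep(1);
    }
}
```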
#### Automated Testing
@@ -102,7 +108,7 @@ Of course, manual testing is always required.
#### Netdata is a distributed application
-Netdata is a distributed monitoring application. A few basic features can become quite complicated for such applications. We may reject features that alter or influence the nature of netdata, though we usually discuss the requirements with contributors and help them adapt their code to be better suited for Netdata.
+Netdata is a distributed monitoring application. A few basic features can become quite complicated for such applications. We may reject features that alter or influence the nature of Netdata, though we usually discuss the requirements with contributors and help them adapt their code to be better suited for Netdata.
#### Operating systems supported
@@ -127,16 +133,18 @@ The single most important rule when writing code is this: *check the surrounding
We use several different languages and have had contributions from several people with different styles. When in doubt, you can check similar existing code.
For C contributions in particular, we try to respect the [Linux kernel style](https://www.kernel.org/doc/html/v4.10/process/coding-style.html), with the following exceptions:
- - Use 4 space indentation instead of 8
- - We occassionally have multiple statements on a single line (e.g. `if (a) b;`)
- - Allow max line length of 120 chars
- - Allow opening brace at the end of a function declaration: `function() {`.
+
+- Use 4 space indentation instead of 8
+- We occasionally have multiple statements on a single line (e.g. `if (a) b;`)
+- Allow max line length of 120 chars
+- Allow opening brace at the end of a function declaration: `function() {` (see the short sketch below).
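
A tiny, made-up C fragment illustrating these exceptions:

```c
/* hypothetical helper, shown only to illustrate the style above:
 * 4-space indentation, occasional one-line statements, and the
 * opening brace at the end of the function declaration */
static int clamp(int value, int min, int max) {
    if (value < min) value = min;
    if (value > max) value = max;
    return value;
}
```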
### Your first pull request
There are several guides for pull requests, such as the following:
-- https://thenewstack.io/getting-legit-with-git-and-github-your-first-pull-request/
-- https://github.com/firstcontributions/first-contributions#first-contributions
+
+- <https://thenewstack.io/getting-legit-with-git-and-github-your-first-pull-request/>
+- <https://github.com/firstcontributions/first-contributions#first-contributions>
However, it's not always that simple. Our [PR approval process](#pr-approval-process) and the several merges we do every day may cause your fork to fall behind the Netdata master. If you worked on something that has changed in the meantime, you will be required to do a git rebase to bring your fork to the correct state. A very easy-to-follow guide on how to do it without learning all the intricacies of GitHub can be found [here](https://medium.com/@ruthmpardee/git-fork-workflow-using-rebase-587a144be470).
@@ -150,5 +158,4 @@ We also have a series of automated checks running, such as linters to check code
One special type of automated check is the "WIP" check. You may add "[WIP]" to the title of the PR to tell us that the particular request is "Work In Progress" and should not be merged. You're still not done with it; you created it to get some feedback. When you're ready to get the final approvals and get it merged, just remove the "[WIP]" string from the title of your PR and the "WIP" check will pass.
-
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2FCONTRIBUTING&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2FCONTRIBUTING&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/CONTRIBUTORS.md b/CONTRIBUTORS.md
index 1cb02caa..2cfdee4e 100644
--- a/CONTRIBUTORS.md
+++ b/CONTRIBUTORS.md
@@ -2,9 +2,9 @@
SPDX-License-Identifier: GPL-3.0-or-later
-->
-# netdata contributors license agreement
+# Netdata contributors license agreement
-**Thank you for contributing to netdata!**
+**Thank you for contributing to Netdata!**
This agreement is part of the legal framework of the open-source ecosystem
that adds some red tape, but protects both the contributor and the project.
@@ -17,115 +17,117 @@ contributions for any other purpose.
## copyright license
-The Contributor (*you*) grants netdata Inc. a perpetual, worldwide, non-exclusive,
+The Contributor (_you_) grants Netdata Inc. a perpetual, worldwide, non-exclusive,
no-charge, royalty-free, irrevocable copyright license to reproduce,
prepare derivative works of, publicly display, publicly perform, sublicense,
and distribute his contributions and such derivative works.
## copyright transfer
-The Contributor (*you*) hereby assigns netdata Inc. copyright in his
+The Contributor (_you_) hereby assigns Netdata Inc. copyright in his
contributions, to be licensed under the same terms as the rest of the code.
-> *Note: this means we may re-license netdata (your contributions included)
+> _Note: this means we may re-license Netdata (your contributions included)
> any way we see fit, without asking your permission.
-> We intend to keep the netdata agent forever FOSS.
+> We intend to keep the Netdata agent forever FOSS.
> But open-source licenses have significant differences and in our attempt to
-> help netdata grow we may have to distribute it under a different license.
-> For example, CNCF, the Cloud Native Computing Foundation, requires netdata
+> help Netdata grow we may have to distribute it under a different license.
+> For example, CNCF, the Cloud Native Computing Foundation, requires Netdata
> to be licensed under Apache-2.0 for it to be accepted as a member of the
-> Foundation. We want to be free to do it.*
+> Foundation. We want to be free to do it._
## original work
-The Contributor (*you*) represent that each of his contributions is his
+The Contributor (_you_) represent that each of his contributions is his
original creation and that he is legally entitled to grant the above license.
-> *Note: if you are committing third party code, please make sure the third party
+> _Note: if you are committing third party code, please make sure the third party
> license or any other restrictions are also included with your commits.
-> netdata includes many third party libraries and tools and this is not a
+> Netdata includes many third party libraries and tools and this is not a
> problem, provided that the license of the third party code is compatible with
-> the one we use for netdata.*
+> the one we use for Netdata._
## signature
-Since Sep 17th 2018, we use https://cla-assistant.io/netdata/netdata for signing the CLA, on all pull requests.
+Since Sep 17th 2018, we use <https://cla-assistant.io/netdata/netdata> for signing the CLA on all pull requests.
Old contributors can sign the CLA at any time using this link.
## HISTORICAL SIGNATURES
-(they have been imported to https://cla-assistant.io/netdata/netdata already)
-The Contributor (*you*) signs this agreement by adding his personal data in
+(they have been imported to <https://cla-assistant.io/netdata/netdata> already)
+
+The Contributor (_you_) signs this agreement by adding his personal data in
this document and committing it to the project repo
(the same way contributions are submitted to the project).
-By signing once, all contributions (past and future) of The Contributor (*you*),
+By signing once, all contributions (past and future) of The Contributor (_you_),
are subject to this agreement.
-> *Note: so you have to:*
-> 1. add your github username and name in this file
-> 2. commit it to the repo with a PR, using the same github username, or include this change in your first PR.
+> _Note: so you have to:_
+>
+> 1. add your github username and name in this file
+> 2. commit it to the repo with a PR, using the same github username, or include this change in your first PR.
-# netdata contributors
+# Netdata contributors
This is the list of contributors that have signed this agreement:
-username|name|email (optional)
-:--------:|:----:|:----------------
-@lets00|Luís Eduardo|leduardo@lsd.ufcg.edu.br
-@ktsaou|Costa Tsaousis|costa@tsaousis.gr
-@tycho|Steven Noonan|steven@uplinklabs.net
-@philwhineray|Phil Whineray|
-@paulfantom|Paweł Krupa|pawel@krupa.net.pl
-@Ferroin|Austin S. Hemmelgarn|ahferroin7@gmail.com
-@glensc|Elan Ruusamäe|
-@l2isbad|Ilya Mashchenko|ilyamaschenko@gmail.com
-@rlefevre|Rémi Lefèvre|
-@vlvkobal|Vladimir Kobal|vlad@prokk.net
-@simonnagl|Simon Nagl|
-@manosf|Emmanouil Fokas|manosf@protonmail.com
-@user501254|Ashesh Singh|user501254@gmail.com
-@t-h-e|Stefan Forstenlechner|
-@facetoe|Facetoe|
-@ntlug|Christopher Cox|ccox@endlessnow.com
-@alonbl|Alon Bar-Lev|alon.barlev@gmail.com
-@Wing924|Wei He|weihe924stephen@gmail.com
-@NeonSludge|Kirill Buev|kirill.buev@gmx.com
-@kmlucy|Kyle Lucy|kmlucy@gmail.com
-@RicardoSette|Ricardo Sette|ricardosette@freebsdbrasil.com.br
-@383c57|Shinichi Tagashira|
-@davidak|David Kleuker|netdata-contributors+vyff@davidak.de
-@ccremer|Christian Cremer|
-@jimcooley|Jim Cooley|jim.cooley@healthvana.com
-@Chocobo1|Mike Tzou|
-@vinyasmusic|Vinyas Malagaudanavar|vinyasmusic@gmail.com
-@cosmix|Dimosthenis Kaponis|
-@shadycuz|Levi Blaney|shadycuz+spam@gmail.com
-@Flums|Philip Gabrielsen|philip@digno.no
-@domschl|Dominik Schlösser|dominik.schloesser@gmail.com
-@tioumen|Guillaume Hospital|
-@arch273|Jacob Ayres
-@x4FF3|David Fuellgraf|
-@jasonwbarnett|Jason Barnett|
-@ecowed|Ed Wade|
-@wungad|Rob Man|
-@rda0|Sven Mäder|maeder@phys.ethz.ch
-@alibo|Ali Borhani|aliborhani1@gmail.com
-@Nani-o|Sofiane Medjkoune|sofiane@medjkoune.fr
-@n0guest|Evgeniy K.|ask@osshelp.ru
-@amichelic|Adalbert Michelic|
-@abalabahaha|abalabahaha|hi@abal.moe
-@illes|Illes S.|
-@plasticrake|Patrick Seal
-@jonfairbanks|Jon Fairbanks
-@pjz|Paul Jimenez|pj@place.org
-@jgrossiord|Julien Grossiord|julien@grossiord.net
-@pohzipohzi|Poh Zi How
-@vladmovchan|Vladyslav Movchan|vladislav.movchan@gmail.com
-@gmosx|George Moschovitis
-@adherzog|Adam Herzog|adam@adamherzog.com
-@skrzyp1|Jerzy S.|
-@akwan|Alan Kwan|
-@underhood|Timotej Šiškovič|
-
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2FCONTRIBUTORS&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+|username|name|email (optional)|
+|:------:|:--:|:---------------|
+|@lets00|Luís Eduardo|leduardo@lsd.ufcg.edu.br|
+|@ktsaou|Costa Tsaousis|costa@tsaousis.gr|
+|@tycho|Steven Noonan|steven@uplinklabs.net|
+|@philwhineray|Phil Whineray||
+|@paulfantom|Paweł Krupa|pawel@krupa.net.pl|
+|@Ferroin|Austin S. Hemmelgarn|ahferroin7@gmail.com|
+|@glensc|Elan Ruusamäe||
+|@l2isbad|Ilya Mashchenko|ilyamaschenko@gmail.com|
+|@rlefevre|Rémi Lefèvre||
+|@vlvkobal|Vladimir Kobal|vlad@prokk.net|
+|@simonnagl|Simon Nagl||
+|@manosf|Emmanouil Fokas|manosf@protonmail.com|
+|@user501254|Ashesh Singh|user501254@gmail.com|
+|@t-h-e|Stefan Forstenlechner||
+|@facetoe|Facetoe||
+|@ntlug|Christopher Cox|ccox@endlessnow.com|
+|@alonbl|Alon Bar-Lev|alon.barlev@gmail.com|
+|@Wing924|Wei He|weihe924stephen@gmail.com|
+|@NeonSludge|Kirill Buev|kirill.buev@gmx.com|
+|@kmlucy|Kyle Lucy|kmlucy@gmail.com|
+|@RicardoSette|Ricardo Sette|ricardosette@freebsdbrasil.com.br|
+|@383c57|Shinichi Tagashira||
+|@davidak|David Kleuker|netdata-contributors+vyff@davidak.de|
+|@ccremer|Christian Cremer||
+|@jimcooley|Jim Cooley|jim.cooley@healthvana.com|
+|@Chocobo1|Mike Tzou||
+|@vinyasmusic|Vinyas Malagaudanavar|vinyasmusic@gmail.com|
+|@cosmix|Dimosthenis Kaponis||
+|@shadycuz|Levi Blaney|shadycuz+spam@gmail.com|
+|@Flums|Philip Gabrielsen|philip@digno.no|
+|@domschl|Dominik Schlösser|dominik.schloesser@gmail.com|
+|@tioumen|Guillaume Hospital||
+|@arch273|Jacob Ayres||
+|@x4FF3|David Fuellgraf||
+|@jasonwbarnett|Jason Barnett||
+|@ecowed|Ed Wade||
+|@wungad|Rob Man||
+|@rda0|Sven Mäder|maeder@phys.ethz.ch|
+|@alibo|Ali Borhani|aliborhani1@gmail.com|
+|@Nani-o|Sofiane Medjkoune|sofiane@medjkoune.fr|
+|@n0guest|Evgeniy K.|ask@osshelp.ru|
+|@amichelic|Adalbert Michelic||
+|@abalabahaha|abalabahaha|hi@abal.moe|
+|@illes|Illes S.||
+|@plasticrake|Patrick Seal||
+|@jonfairbanks|Jon Fairbanks||
+|@pjz|Paul Jimenez|pj@place.org|
+|@jgrossiord|Julien Grossiord|julien@grossiord.net|
+|@pohzipohzi|Poh Zi How||
+|@vladmovchan|Vladyslav Movchan|vladislav.movchan@gmail.com|
+|@gmosx|George Moschovitis||
+|@adherzog|Adam Herzog|adam@adamherzog.com|
+|@skrzyp1|Jerzy S.||
+|@akwan|Alan Kwan||
+|@underhood|Timotej Šiškovič||
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2FCONTRIBUTORS&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/DOCUMENTATION.md b/DOCUMENTATION.md
deleted file mode 100644
index df18a6d4..00000000
--- a/DOCUMENTATION.md
+++ /dev/null
@@ -1,53 +0,0 @@
-# Netdata Documentation
-
-**Netdata is real-time health monitoring and performance troubleshooting for systems and applications.** It helps you instantly diagnose slowdowns and anomalies in your infrastructure with thousands of metrics, interactive visualizations, and insightful health alarms.
-
-
-## Navigating the Netdata documentation
-
-Welcome! You've arrived at the documentation for Netdata. Use the links below to find answers to the most common questions about Netdata, such as how to install it, getting started guides, basic configuration, and adding more charts. Or, explore all of Netdata's documentation using the table of contents to your left.
-
-<div class="homepage-nav">
-
- <div class="nav-install">
- <a class="nav-button" href="packaging/installer/#one-line-installation">One-line installation</a>
- <p>Use our completely automated one-line installation process to get Netdata on all Linux distributions. Or, find detailed instructions for binary packages, Kubernetes, macOS, and more.</p>
-
- </div>
- <div class="nav-getting-started">
- <a class="nav-button" href="docs/GettingStarted/">Getting started guide</a>
- <p>The perfect place for Netdata beginners to start. Learn how to access Netdata's dashboard, start and stop the service, basic configuration, and more.</p>
-
- </div>
- <div class="nav-configuration">
- <a class="nav-button" href="docs/configuration-guide/">Configuration guide</a>
- <p>Take your configuration options from the <em>getting started guide</em> to the next level. Increase metrics retention, modify how charts are displayed, disable collectors, and modify alarms.</p>
- </div>
-
-</div>
-
-**Advanced users**: For those who already understand how to access a Netdata dashboard and perform basic configuration, feel free to see what's behind any of these other doors.
-
- - [Netdata Behind Nginx](docs/Running-behind-nginx.md): Use an Nginx web server instead of Netdata's built-in server to enable TLS, HTTPS, and basic authentication.
- - [Add More Charts](docs/Add-more-charts-to-netdata.md): Enable new internal or external plugins and understand when auto-detection works.
- - [Performance](docs/Performance.md): Tips on running Netdata on devices with limited CPU and RAM resources, such as embedded devices, IoT, and edge devices.
- - [Streaming](streaming/): Information for those who want to centralize Netdata metrics from any number of distributed agents.
- - [Backends](backends/): Learn how to archive Netdata's real-time metrics to a time-series database (like Prometheus) for long-term storage.
-
-
-Visit the [contributing](CONTRIBUTING.md) page to find guides about the Netdata code of conduct, our community, and how you can get started contributing to Netdata.
-
-
-## Subscribe for news and tips from monitoring pros
-
-<script charset="utf-8" type="text/javascript" src="//js.hsforms.net/forms/shell.js"></script>
-<script>
- hbspt.forms.create({
- portalId: "4567453",
- formId: "6a20deb5-a1e6-4312-9c4d-f6862f947fe0"
-});
-</script>
-
----
-
-![A GIF of the standard Netdata dashboard](https://user-images.githubusercontent.com/2662304/48346998-96cf3180-e685-11e8-9f4e-059d23aa3aa5.gif) \ No newline at end of file
diff --git a/HISTORICAL_CHANGELOG.md b/HISTORICAL_CHANGELOG.md
deleted file mode 100644
index 3e7688f3..00000000
--- a/HISTORICAL_CHANGELOG.md
+++ /dev/null
@@ -1,655 +0,0 @@
-netdata (1.10.0) - 2018-03-27
-
- Please check full changelog at github.
- https://github.com/netdata/netdata/releases
-
-
-netdata (1.9.0) - 2017-12-17
-
- Please check full changelog at github.
- https://github.com/netdata/netdata/releases
-
-
-netdata (1.8.0) - 2017-09-17
-
- This is mainly a bugfix release.
- Please check full changelog at github.
-
-
-netdata (1.7.0) - 2017-07-16
-
- * netdata is still spreading fast
-
- we are at 320.000 users and 132.000 servers
-
- Almost 100k new users, 52k new installations and 800k docker pulls
- since the previous release, 4 and a half months ago.
-
- The netdata user base grows by about 1000 new users and 600 new servers
- per day. Thank you. You are awesome.
-
- * The next release (v1.8) will be focused on providing a global health
- monitoring service, for all netdata users, for free.
-
- * netdata is now a (very fast) fully featured statsd server and the
- only one with automatic visualization: push a statsd metric and hit
- F5 on the netdata dashboard: your metric is visualized. It also supports
- synthetic charts, defined by you, so that you can correlate and
- visualize your application the way you like it (a minimal push
- example follows this entry).
-
- * netdata got new installation options
- It is now easier than ever to install netdata - we also distribute a
- statically linked netdata x86_64 binary, including key dependencies
- (like bash, curl, etc) that can run everywhere a Linux kernel runs
- (CoreOS, CirrOS, etc).
-
- * metrics streaming and replication have been improved significantly.
- All known issues have been solved and key enhancements have been added.
- Headless collectors and proxies can now send metrics to backends when
- data source = as collected.
-
- * backends have got quite a few enhancements, including host tags and
- metrics filtering at the netdata side;
- prometheus support has been re-written to utilize more prometheus
- features and provide more flexibility and integration options.
-
- * netdata now monitors ZFS (on Linux and FreeBSD), ElasticSearch,
- RabbitMQ, Go applications (via expvar), ipfw (on FreeBSD 11), samba,
- squid logs (with web_log plugin).
-
- * netdata dashboard loading times have been improved significantly
- (hit F5 a few times on a netdata dashboard - it is now amazingly fast),
- to support dashboards with thousands of charts.
-
- * netdata alarms now support custom hooks, so you can run whatever you
- like in parallel with netdata alarms.
-
- * As usual, this release brings dozens of more improvements, enhancements
- and compatibility fixes.
-
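A minimal sketch of the statsd push described above, assuming netdata's built-in statsd server listens on its default port (8125/udp) on localhost; the metric name `myapp.requests` is just an example:

```python
import socket

# Send one counter metric ("name:value|type" statsd wire format) to the
# netdata statsd server; refresh the dashboard to see it visualized.
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.sendto(b"myapp.requests:1|c", ("localhost", 8125))
sock.close()
```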
-
-netdata (1.6.0) - 2017-03-20
-
- * birthday release: 1 year netdata
-
- netdata was first published on March 30th, 2016.
- It has been a crazy year since then:
-
- 225.000 unique netdata users
- currently, at 1.000 new unique users per day
-
- 80.000 unique netdata installations
- currently, at 500 new installations per day
-
- 610.000 docker pulls on docker hub
-
- 4.000.000 netdata sessions served
- currently, at 15.000 sessions served per day
-
- 20.000 github stars
-
- Thank you!
- You are awesome!
-
- * central netdata is here
-
- This is the first release that supports real-time streaming of
- metrics between netdata servers.
-
- netdata can now be:
-
- - autonomous host monitoring
- (like it always has been)
-
- - headless data collector
- (collect and stream metrics in real-time to another netdata)
-
- - headless proxy
- (collect metrics from multiple netdata and stream them to another netdata)
-
- - store and forward proxy
- (like headless proxy, but with a local database)
-
- - central database
- (metrics from multiple hosts are aggregated)
-
- metrics databases can be configured on all nodes; each node that
- maintains a database may have a different retention policy and may
- run its own (possibly different) alarms on them.
-
- * monitoring ephemeral nodes
-
- netdata now supports monitoring autoscaled ephemeral nodes
- that are started and stopped on demand (their IP is not known).
-
- When the ephemeral nodes start streaming metrics to the central
- netdata, the central netdata will register them at the "my-netdata"
- menu on the dashboard.
-
- For more information check:
- https://github.com/netdata/netdata/tree/master/streaming#monitoring-ephemeral-nodes
-
- * monitoring ephemeral containers and VM guests
-
- netdata now cleans up container, guest VM, network interface and mounted
- disk metrics, automatically disabling their alarms too.
-
- For more information check:
- https://github.com/netdata/netdata/tree/master/collectors/cgroups.plugin#monitoring-ephemeral-containers
-
- * apps.plugin ported for FreeBSD
-
- @vlvkobal has ported "apps.plugin" to FreeBSD. netdata can now provide
- "Applications", "Users" and "User Groups" on FreeBSD.
-
- * web_log plugin
-
- @l2isbad has done a wonderful job creating a unified web log parsing plugin
- for all kinds of web server logs. With it, netdata provides real-time
- performance information and health monitoring alarms for web applications
- and web sites!
-
- For more information check:
- https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/web_log#web_log
-
- * backends
-
- netdata can now archive metrics to `JSON` backends
- (both push, by @lfdominguez, and pull modes).
-
- * IPMI monitoring
-
- netdata now has an IPMI plugin (based on freeipmi)
- for monitoring server hardware.
-
- The plugin creates (up to) 8 charts:
-
- 1. number of sensors by state
- 2. number of events in SEL
- 3. Temperatures (Celsius)
- 4. Temperatures (Fahrenheit)
- 5. Voltages
- 6. Currents
- 7. Power
- 8. Fans
-
- It also supports alarms (including the number of sensors in critical state).
-
- For more information, check:
- https://github.com/netdata/netdata/tree/master/collectors/freeipmi.plugin
-
- * new plugins
-
- @l2isbad builds python data collection plugins for netdata at a
- wonderful rate! He rocks!
-
- - **web_log** for monitoring in real-time all kinds of web server log files @l2isbad
- - **freeipmi** for monitoring IPMI (server hardware)
- - **nsd** (the [name server daemon](https://www.nlnetlabs.nl/projects/nsd/)) @383c57
- - **mongodb** @l2isbad
- - **smartd_log** (monitoring disk S.M.A.R.T. values) @l2isbad
-
- * improved plugins
-
- - **nfacct** reworked and now collects connection tracker information using netlink.
- - **ElasticSearch** re-worked @l2isbad
- - **mysql** re-worked to allow faster development of custom mysql based plugins (MySQLService) @l2isbad
- - **SNMP**
- - **tomcat** @NMcCloud
- - **ap** (monitoring hostapd access points)
- - **php_fpm** @l2isbad
- - **postgres** @l2isbad
- - **isc_dhcpd** @l2isbad
- - **bind_rndc** @l2isbad
- - **numa**
- - **apps.plugin** improvements and freebsd support @vlvkobal
- - **fail2ban** @l2isbad
- - **freeradius** @l2isbad
- - **nut** (monitoring UPSes)
- - **tc** (Linux QoS) now works on qdiscs instead of classes for the same result (a lot faster) @t-h-e
- - **varnish** @l2isbad
-
- * new and improved alarms
- - **web_log**, many alarms to detect common web site/API issues
- - **fping**, alarms to detect packet loss, disconnects and unusually high latency
- - **cpu**, cpu utilization alarm now ignores `nice`
-
- * new and improved alarm notification methods
- - **HipChat** to allow hosted HipChat @frei-style
- - **discordapp** @lowfive
-
- * dashboard improvements
- - dashboard now works on HiDPi screens
- - dashboard now shows the version of netdata
- - dashboard now resets charts properly
- - dashboard updated to use latest gauge.js release
-
- * other improvements
- - thanks to @rlefevre netdata now uses a lot of different high resolution system clocks.
-
- netdata has received a lot more improvements from many more contributors!
-
- Thank you all!
-
-
-netdata (1.5.0) - 2017-01-22
-
- * yet another release that makes netdata the fastest
- netdata ever!
-
- * netdata runs on FreeBSD, FreeNAS and MacOS!
-
- Vladimir Kobal (@vlvkobal) has done magnificent work
- porting netdata to FreeBSD and MacOS.
-
- Everything works: cpu, memory, disk performance, disk space,
- network interfaces, interrupts, IPv4 metrics, IPv6 metrics,
- processes, context switches, softnet, IPC queues,
- IPC semaphores, IPC shared memory, uptime, etc. Wow!
-
- * netdata supports data archiving to backend databases:
-
- - Graphite
- - OpenTSDB
- - Prometheus
-
- and of course all the compatible ones
- (KairosDB, InfluxDB, Blueflood, etc)
-
- * new plugins:
-
- Ilya Mashchenko (@l2isbad) has created most of the python
- data collection plugins in this release!
-
- - systemd Services (using cgroups!)
- - FPing (yes, network latency in netdata!)
- - postgres databases @facetoe, @moumoul
- - Varnish disk cache (v3 and v4) @l2isbad
- - ElasticSearch @l2isbad
- - HAproxy @l2isbad
- - FreeRadius @l2isbad, @lgz
- - mdstat (RAID) @l2isbad
- - ISC bind (via rndc) @l2isbad
- - ISC dhcpd @l2isbad, @lgz
- - Fail2Ban @l2isbad
- - OpenVPN status log @l2isbad, @lgz
- - NUMA memory @tycho
- - CPU Idle @tycho
- - gunicorn log @deltaskelta
- - ECC memory hardware errors
- - IPC semaphores
- - uptime plugin (with a nice badge too)
-
- * improved plugins:
-
- - netfilter conntrack
- - mysql (replication) @l2isbad
- - ipfs @pjz
- - cpufreq @tycho
- - hddtemp @l2isbad
- - sensors @l2isbad
- - nginx @leolovenet
- - nginx_log @paulfantom
- - phpfpm @leolovenet
- - redis @leolovenet
- - dovecot @justohall
- - cgroups
- - disk space
- - apps.plugin
- - /proc/interrupts @rlefevre
- - /proc/softirqs @rlefevre
- - /proc/vmstat (system memory charts)
- - /proc/net/snmp6 (IPv6 charts)
- - /proc/self/meminfo (system memory charts)
- - /proc/net/dev (network interfaces)
- - tc (linux QoS)
-
- * new/improved alarms:
-
- - MySQL / MariaDB alarms (incl. replication)
- - IPFS alarms
- - HAproxy alarms
- - UDP buffer alarms
- - TCP AttemptFails
- - ECC memory alarms
- - netfilter connections alarms
- - SNMP
-
- * new alarm notifications:
-
- - messagebird.com @tech-no-logical
- - pagerduty.com @jimcooley
- - pushbullet.com @tperalta82
- - twilio.com @shadycuz
- - HipChat
- - kafka
-
- * shell integration
-
- - shell scripts can now query netdata easily!
- (a minimal example query follows this entry)
-
- * dashboard improvements:
- - dashboard is now faster on firefox, safari, opera, edge
- (edge is still the slowest)
- - dashboard now has a little bigger fonts
- - SHIFT + mouse wheel to zoom charts, works on all browsers
- - perfect-scrollbar on the dashboard
- - dashboard 4K resolution fixes
- - dashboard compatibility fixes for embedding charts in
- third party web sites
- - charts on custom dashboards can have common min/max
- even if they come from different netdata servers
- - alarm log is now saved and loaded back so that
- the alarm history is available at the dashboard
-
- * other improvements:
- - python.d.plugin has received way too many improvements
- from many contributors!
- - charts.d.plugin can now be forked to support
- multiple independent instances
- - registry has been re-factored to lower its memory
- requirements (required for the public registry)
- - simple patterns in cgroups, disks and alarms
- - netdata-installer.sh can now correctly install
- netdata in containers
- - supplied logrotate script compatibility fixes
- - spec cleanup @breed808
- - clocks and timers reworked @rlefevre
-
- netdata has received a lot more improvements from many more
- contributors!
-
- Thank you all!
-
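A minimal sketch of the shell-style query described above, assuming a netdata listening on the default localhost:19999 and a chart named `system.cpu` (present on most hosts):

```python
import json
import urllib.request

# Ask the data API for the last 10 seconds of system.cpu, averaged into
# 10 points, as JSON -- the same call a shell script would make with curl.
url = ("http://localhost:19999/api/v1/data"
       "?chart=system.cpu&after=-10&points=10&group=average&format=json")
with urllib.request.urlopen(url) as resp:
    reply = json.load(resp)
print(reply["labels"])   # dimension names
print(reply["data"][0])  # the most recent row of values
```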
-
-netdata (1.4.0) - 2016-10-04
-
- At a glance:
-
- - the fastest netdata ever (with a better look too)!
- - improved IoT and containers support!
- - alarms improved in almost every way!
-
- - new plugins:
- softnet netdev,
- extended TCP metrics,
- UDPLite
- NFS v2, v3 client (server was there already),
- NFS v4 server & client,
- APCUPSd,
- RetroShare
-
- - improved plugins:
- mysql,
- cgroups,
- hddtemp,
- sensors,
- phpfpm,
- tc (QoS)
-
- In detail:
-
- * improved alarms
-
- Many new alarms have been added to detect common kernel
- configuration errors and old alarms have been re-worked
- to avoid notification floods.
-
- Alarms now support notification hysteresis (both static
- and dynamic), notification self-cancellation, and dynamic
- thresholds based on the current alarm status (a conceptual
- hysteresis sketch follows this entry).
-
- * improved alarm notifications
-
- netdata now supports:
-
- - email notifications
- - slack.com notifications on slack channels
- - pushover.net notifications (mobile push notifications)
- - telegram.org notifications
-
- For all the above methods, netdata supports role-based
- notifications, with multiple recipients for each role
- and severity filtering per recipient!
-
- Also, netdata supports HTML5 notifications while the
- dashboard is open in a browser window (it does not need to be
- the active one).
-
- All notifications are now clickable to get to the chart
- that raised the alarm.
-
- * improved IoT support!
-
- netdata builds and runs with musl libc, and works on systems
- based on busybox.
-
- * improved containers support!
-
- netdata runs on alpine linux (a low-profile linux distribution
- used in containers).
-
- * Dozens of other improvements and bugfixes
-
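The static form of that hysteresis can be sketched in a few lines (a conceptual illustration, not netdata's actual implementation): an alarm raises at one threshold and clears only at a lower one, so a value oscillating around a single threshold cannot flood notifications.

```python
# Conceptual sketch of static notification hysteresis.
def evaluate(value, raised, raise_at=90.0, clear_at=75.0):
    """Return the new alarm state for one metric sample."""
    if raised:
        # Stay raised until the value drops below the clear threshold.
        return value >= clear_at
    # Raise only once the value exceeds the higher threshold.
    return value >= raise_at

state = False
for sample in (70, 88, 92, 89, 80, 74, 91):
    state = evaluate(sample, state)
    print(sample, "RAISED" if state else "clear")
```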
-
-netdata (1.3.0) - 2016-08-28
-
- At a glance:
-
- - netdata has health monitoring / alarms!
- - netdata has badges that can be embedded anywhere!
- - netdata plugins are now written in Python!
- - new plugins: redis, memcached, nginx_log, ipfs, apache_cache
-
- IMPORTANT:
- Since netdata now uses Python plugins, new packages are
- required to be installed on a system to allow it to work.
- For more information, please check the installation page:
-
- https://github.com/netdata/netdata/tree/master/installer#installation
-
- In detail:
-
- * netdata has alarms!
-
- Based on the POLL we made on github
- (https://github.com/netdata/netdata/issues/436),
- health monitoring was the winner. So here it is!
-
- netdata now has a powerful health monitoring system embedded.
- Please check the wiki page:
-
- https://github.com/netdata/netdata/tree/master/health
-
- * netdata has badges!
-
- netdata can generate badges with live information from the
- collected metrics (a minimal fetch example follows this entry).
- Please check the wiki page:
-
- https://github.com/netdata/netdata/tree/master/web/api/badges
-
- * netdata plugins are now written in Python!
-
- Thanks to the great work of Paweł Krupa (@paulfantom), most BASH
- plugins have been ported to Python.
-
- The new python.d.plugin supports both python2 and python3 and
- data collection from multiple sources for all modules.
-
- The following pre-existing modules have been ported to Python:
-
- - apache
- - cpufreq
- - example
- - exim
- - hddtemp
- - mysql
- - nginx
- - phpfpm
- - postfix
- - sensors
- - squid
- - tomcat
-
- The following new modules have been added:
-
- - apache_cache
- - dovecot
- - ipfs
- - memcached
- - nginx_log
- - redis
-
- * other data collectors:
-
- - Thanks to @simonnagl netdata now reports disk space usage.
-
- * dashboards now transfer certain settings from server to server
- when changing servers via the my-netdata menu.
-
- The settings transferred are the dashboard theme, the online
- help status, and the current pan-and-zoom timeframe of the dashboard.
-
- * API improvements:
-
- - reduction functions now support 'min', 'sum' and 'incremental-sum'.
-
- - netdata now offers a multi-threaded and a single-threaded
- web server (single-threaded is better for IoT).
-
- * apps.plugin improvements:
-
- - can now run with the command line argument 'without-files'
- to prevent it from enumerating all the open files/sockets/pipes
- of all running processes.
-
- - apps.plugin now scales the collected values to match the
- total system usage.
-
- - apps.plugin can now report guest CPU usage per process.
-
- - repeating errors are now logged once per process.
-
- * netdata now runs with IDLE process priority (lower than nice 19)
-
- * netdata now instructs the kernel to kill it first when the system
- starves for memory.
-
- * netdata listens for signals (a minimal example follows this entry):
-
- - SIGHUP instructs netdata to re-open its log files
- (new logrotate files have been added too).
-
- - SIGUSR1 instructs netdata to save the database.
-
- - SIGUSR2 instructs netdata to reload the health / alarms configuration.
-
- * netdata can now bind to multiple IPs and ports.
-
- * netdata now has new systemd service file (it starts as user
- netdata and does not fork).
-
- * Dozens of other improvements and bugfixes
-
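A minimal sketch of fetching one of the badges described above, assuming the default dashboard address; the endpoint renders the latest value of any collected chart as an SVG image:

```python
import urllib.request

# Fetch a live badge for the system.cpu chart as SVG text.
url = "http://localhost:19999/api/v1/badge.svg?chart=system.cpu"
svg = urllib.request.urlopen(url).read().decode("utf-8")
print(svg[:80], "...")
```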
-
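And a minimal sketch of driving the signals listed above from code, assuming a typical pid file location (/var/run/netdata/netdata.pid; adjust to your install):

```python
import os
import signal

# Ask the running netdata to reload its health / alarms configuration.
with open("/var/run/netdata/netdata.pid") as f:
    pid = int(f.read().strip())
os.kill(pid, signal.SIGUSR2)
```
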
-netdata (1.2.0) - 2016-05-16
-
- At a glance:
-
- - netdata is now 30% faster
- - netdata now has a registry (my-netdata dashboard menu)
- - netdata now monitors Linux Containers (docker, lxc, etc)
-
- IMPORTANT:
- This version requires libuuid. The package you need is:
-
- - uuid-dev (debian/ubuntu), or
- - libuuid-devel (centos/fedora/redhat)
-
- In detail:
-
- * netdata is now 30% faster!
-
- - Patches submitted by @fredericopissarra improved overall
- netdata performance by 10%.
-
- - A new improved search function in the internal indexes
- made all searches faster by 50%, resulting in about
- 20% better performance for the core of netdata.
-
- - More efficient thread locking in key components
- contributed to the overall efficiency.
-
- * netdata now has a CENTRAL REGISTRY!
-
- The central registry tracks all your netdata servers
- and bookmarks them for you at the 'my-netdata' menu
- on all dashboards.
-
- Every netdata can act as a registry, but there is also
- a global registry provided for free for all netdata users!
-
- * netdata now monitors CONTAINERS!
-
- docker, lxc, or anything else. For each container it monitors
- CPU, RAM, DISK I/O (network interfaces were already monitored)
-
- * apps.plugin: now uses linux capabilities by default
- without setuid to root
-
- * netdata now has an improved signal handler,
- thanks to @simonnagl
-
- * API: new improved CORS support
-
- * SNMP: counter64 support fixed
-
- * MYSQL: more charts, about QCache, MyISAM key cache,
- InnoDB buffer pools, open files
-
- * DISK charts now show mount point when available
-
- * Dashboard: improved support for older web browsers
- and mobile web browsers (thanks to @simonnagl)
-
- * Multi-server dashboards now allow de-coupled refreshes for
- each chart, so that if one netdata has network latency,
- the other charts are not affected
-
- * Several other minor improvements and bugfixes
-
-
-netdata (1.1.0) - 2016-04-20
-
- Dozens of commits that improve netdata in several ways:
-
- - Data collection: added IPv6 monitoring
- - Data collection: added SYNPROXY DDoS protection monitoring
- - Data collection: apps.plugin: added charts for users and user groups
- - Data collection: apps.plugin: grouping of processes now supports patterns
- - Data collection: apps.plugin: now faster, even with the new features added
- - Data collection: better auto-detection of partitions for disk monitoring
- - Data collection: better fireqos integration for QoS monitoring
- - Data collection: squid monitoring now uses squidclient
- - Data collection: SNMP monitoring now supports 64-bit counters
- - API: fixed issues in CSV output generation
- - API: netdata can now be restricted to listen on a specific IP
- - Core and apps.plugin: error log flood protection
- - Dashboard: better error handling when the netdata server is unreachable
- - Dashboard: each chart now has a toolbox
- - Dashboard: on-line help support
- - Dashboard: check for netdata updates button
- - Dashboard: added example /tv.html dashboard
- - Packaging: now compiles with musl libc (alpine linux)
- - Packaging: added debian packaging
- - Packaging: support non-root installations
- - Packaging: the installer generates an uninstall script
-
-netdata (1.0.0) - 2016-03-22
-
- - first public release
-
-netdata (1.0.0-rc.1) - 2015-11-28
-
- - initial packaging
diff --git a/Makefile.am b/Makefile.am
index de161c84..fa12a757 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -60,6 +60,7 @@ dist_noinst_DATA= \
package.json \
docs \
packaging/version \
+ packaging/go.d.version \
packaging/go.d.checksums \
packaging/installer/README.md \
packaging/installer/UNINSTALL.md \
@@ -279,6 +280,7 @@ PROC_PLUGIN_FILES = \
collectors/proc.plugin/proc_vmstat.c \
collectors/proc.plugin/proc_uptime.c \
collectors/proc.plugin/sys_kernel_mm_ksm.c \
+ collectors/proc.plugin/sys_block_zram.c \
collectors/proc.plugin/sys_devices_system_edac_mc.c \
collectors/proc.plugin/sys_devices_system_node.c \
collectors/proc.plugin/sys_fs_btrfs.c \
@@ -456,6 +458,11 @@ PROMETHEUS_REMOTE_WRITE_BACKEND_FILES = \
backends/prometheus/remote_write/remote_write.proto \
$(NULL)
+MONGODB_BACKEND_FILES = \
+ backends/mongodb/mongodb.c \
+ backends/mongodb/mongodb.h \
+ $(NULL)
+
DAEMON_FILES = \
daemon/common.c \
daemon/common.h \
@@ -613,3 +620,8 @@ backends/prometheus/remote_write/remote_write.pb.h: backends/prometheus/remote_w
$(PROTOC) --proto_path=$(srcdir) --cpp_out=$(builddir) $^
endif
+
+if ENABLE_BACKEND_MONGODB
+ netdata_SOURCES += $(MONGODB_BACKEND_FILES)
+ netdata_LDADD += $(OPTIONAL_MONGOC_LIBS)
+endif
diff --git a/Makefile.in b/Makefile.in
new file mode 100644
index 00000000..0d9d0955
--- /dev/null
+++ b/Makefile.in
@@ -0,0 +1,3601 @@
+# Makefile.in generated by automake 1.15.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2017 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+
+
+VPATH = @srcdir@
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+sbin_PROGRAMS = netdata$(EXEEXT)
+plugins_PROGRAMS = $(am__EXEEXT_1) $(am__EXEEXT_2) $(am__EXEEXT_3) \
+ $(am__EXEEXT_4) $(am__EXEEXT_5) $(am__EXEEXT_6) \
+ $(am__EXEEXT_7)
+@ENABLE_DBENGINE_TRUE@am__append_1 = \
+@ENABLE_DBENGINE_TRUE@ database/engine/rrdengine.c \
+@ENABLE_DBENGINE_TRUE@ database/engine/rrdengine.h \
+@ENABLE_DBENGINE_TRUE@ database/engine/rrddiskprotocol.h \
+@ENABLE_DBENGINE_TRUE@ database/engine/datafile.c \
+@ENABLE_DBENGINE_TRUE@ database/engine/datafile.h \
+@ENABLE_DBENGINE_TRUE@ database/engine/journalfile.c \
+@ENABLE_DBENGINE_TRUE@ database/engine/journalfile.h \
+@ENABLE_DBENGINE_TRUE@ database/engine/rrdenginelib.c \
+@ENABLE_DBENGINE_TRUE@ database/engine/rrdenginelib.h \
+@ENABLE_DBENGINE_TRUE@ database/engine/rrdengineapi.c \
+@ENABLE_DBENGINE_TRUE@ database/engine/rrdengineapi.h \
+@ENABLE_DBENGINE_TRUE@ database/engine/pagecache.c \
+@ENABLE_DBENGINE_TRUE@ database/engine/pagecache.h \
+@ENABLE_DBENGINE_TRUE@ database/engine/rrdenglocking.c \
+@ENABLE_DBENGINE_TRUE@ database/engine/rrdenglocking.h \
+@ENABLE_DBENGINE_TRUE@ $(NULL)
+
+@FREEBSD_TRUE@am__append_2 = \
+@FREEBSD_TRUE@ $(FREEBSD_PLUGIN_FILES) \
+@FREEBSD_TRUE@ $(NULL)
+
+@MACOS_TRUE@am__append_3 = \
+@MACOS_TRUE@ $(MACOS_PLUGIN_FILES) \
+@MACOS_TRUE@ $(NULL)
+
+@LINUX_TRUE@am__append_4 = \
+@LINUX_TRUE@ $(CGROUPS_PLUGIN_FILES) \
+@LINUX_TRUE@ $(DISKSPACE_PLUGIN_FILES) \
+@LINUX_TRUE@ $(PROC_PLUGIN_FILES) \
+@LINUX_TRUE@ $(TC_PLUGIN_FILES) \
+@LINUX_TRUE@ $(NULL)
+
+@ENABLE_PLUGIN_APPS_TRUE@am__append_5 = apps.plugin
+@ENABLE_PLUGIN_CGROUP_NETWORK_TRUE@am__append_6 = cgroup-network
+@ENABLE_PLUGIN_FREEIPMI_TRUE@am__append_7 = freeipmi.plugin
+@ENABLE_PLUGIN_CUPS_TRUE@am__append_8 = cups.plugin
+@ENABLE_PLUGIN_NFACCT_TRUE@am__append_9 = nfacct.plugin
+@ENABLE_PLUGIN_XENSTAT_TRUE@am__append_10 = xenstat.plugin
+@ENABLE_PLUGIN_PERF_TRUE@am__append_11 = perf.plugin
+@ENABLE_BACKEND_KINESIS_TRUE@am__append_12 = $(KINESIS_BACKEND_FILES)
+@ENABLE_BACKEND_KINESIS_TRUE@am__append_13 = $(OPTIONAL_KINESIS_LIBS)
+@ENABLE_BACKEND_PROMETHEUS_REMOTE_WRITE_TRUE@am__append_14 = $(PROMETHEUS_REMOTE_WRITE_BACKEND_FILES)
+@ENABLE_BACKEND_PROMETHEUS_REMOTE_WRITE_TRUE@am__append_15 = $(OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS)
+@ENABLE_BACKEND_MONGODB_TRUE@am__append_16 = $(MONGODB_BACKEND_FILES)
+@ENABLE_BACKEND_MONGODB_TRUE@am__append_17 = $(OPTIONAL_MONGOC_LIBS)
+subdir = .
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(top_srcdir)/configure \
+ $(am__configure_deps) $(dist_noinst_SCRIPTS) \
+ $(dist_cache_DATA) $(dist_log_DATA) $(dist_noinst_DATA) \
+ $(dist_registry_DATA) $(dist_varlib_DATA) $(am__DIST_COMMON)
+am__CONFIG_DISTCLEAN_FILES = config.status config.cache config.log \
+ configure.lineno config.status.lineno
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = config.h
+CONFIG_CLEAN_FILES = netdata.spec
+CONFIG_CLEAN_VPATH_FILES =
+@ENABLE_PLUGIN_APPS_TRUE@am__EXEEXT_1 = apps.plugin$(EXEEXT)
+@ENABLE_PLUGIN_CGROUP_NETWORK_TRUE@am__EXEEXT_2 = \
+@ENABLE_PLUGIN_CGROUP_NETWORK_TRUE@ cgroup-network$(EXEEXT)
+@ENABLE_PLUGIN_FREEIPMI_TRUE@am__EXEEXT_3 = freeipmi.plugin$(EXEEXT)
+@ENABLE_PLUGIN_CUPS_TRUE@am__EXEEXT_4 = cups.plugin$(EXEEXT)
+@ENABLE_PLUGIN_NFACCT_TRUE@am__EXEEXT_5 = nfacct.plugin$(EXEEXT)
+@ENABLE_PLUGIN_XENSTAT_TRUE@am__EXEEXT_6 = xenstat.plugin$(EXEEXT)
+@ENABLE_PLUGIN_PERF_TRUE@am__EXEEXT_7 = perf.plugin$(EXEEXT)
+am__installdirs = "$(DESTDIR)$(pluginsdir)" "$(DESTDIR)$(sbindir)" \
+ "$(DESTDIR)$(cachedir)" "$(DESTDIR)$(logdir)" \
+ "$(DESTDIR)$(registrydir)" "$(DESTDIR)$(varlibdir)"
+PROGRAMS = $(plugins_PROGRAMS) $(sbin_PROGRAMS)
+am__apps_plugin_SOURCES_DIST = collectors/apps.plugin/apps_plugin.c \
+ libnetdata/adaptive_resortable_list/adaptive_resortable_list.c \
+ libnetdata/adaptive_resortable_list/adaptive_resortable_list.h \
+ libnetdata/config/appconfig.c libnetdata/config/appconfig.h \
+ libnetdata/avl/avl.c libnetdata/avl/avl.h \
+ libnetdata/buffer/buffer.c libnetdata/buffer/buffer.h \
+ libnetdata/clocks/clocks.c libnetdata/clocks/clocks.h \
+ libnetdata/dictionary/dictionary.c \
+ libnetdata/dictionary/dictionary.h libnetdata/eval/eval.c \
+ libnetdata/eval/eval.h libnetdata/inlined.h \
+ libnetdata/libnetdata.c libnetdata/libnetdata.h \
+ libnetdata/locks/locks.c libnetdata/locks/locks.h \
+ libnetdata/log/log.c libnetdata/log/log.h \
+ libnetdata/popen/popen.c libnetdata/popen/popen.h \
+ libnetdata/procfile/procfile.c libnetdata/procfile/procfile.h \
+ libnetdata/os.c libnetdata/os.h \
+ libnetdata/simple_pattern/simple_pattern.c \
+ libnetdata/simple_pattern/simple_pattern.h \
+ libnetdata/socket/socket.c libnetdata/socket/socket.h \
+ libnetdata/socket/security.c libnetdata/socket/security.h \
+ libnetdata/statistical/statistical.c \
+ libnetdata/statistical/statistical.h \
+ libnetdata/storage_number/storage_number.c \
+ libnetdata/storage_number/storage_number.h \
+ libnetdata/threads/threads.c libnetdata/threads/threads.h \
+ libnetdata/url/url.c libnetdata/url/url.h \
+ libnetdata/json/json.c libnetdata/json/json.h \
+ libnetdata/json/jsmn.c libnetdata/json/jsmn.h \
+ libnetdata/health/health.c libnetdata/health/health.h \
+ libnetdata/string/utf8.h
+am__dirstamp = $(am__leading_dot)dirstamp
+am__objects_1 = libnetdata/adaptive_resortable_list/adaptive_resortable_list.$(OBJEXT) \
+ libnetdata/config/appconfig.$(OBJEXT) \
+ libnetdata/avl/avl.$(OBJEXT) \
+ libnetdata/buffer/buffer.$(OBJEXT) \
+ libnetdata/clocks/clocks.$(OBJEXT) \
+ libnetdata/dictionary/dictionary.$(OBJEXT) \
+ libnetdata/eval/eval.$(OBJEXT) libnetdata/libnetdata.$(OBJEXT) \
+ libnetdata/locks/locks.$(OBJEXT) libnetdata/log/log.$(OBJEXT) \
+ libnetdata/popen/popen.$(OBJEXT) \
+ libnetdata/procfile/procfile.$(OBJEXT) libnetdata/os.$(OBJEXT) \
+ libnetdata/simple_pattern/simple_pattern.$(OBJEXT) \
+ libnetdata/socket/socket.$(OBJEXT) \
+ libnetdata/socket/security.$(OBJEXT) \
+ libnetdata/statistical/statistical.$(OBJEXT) \
+ libnetdata/storage_number/storage_number.$(OBJEXT) \
+ libnetdata/threads/threads.$(OBJEXT) \
+ libnetdata/url/url.$(OBJEXT) libnetdata/json/json.$(OBJEXT) \
+ libnetdata/json/jsmn.$(OBJEXT) \
+ libnetdata/health/health.$(OBJEXT)
+am__objects_2 = collectors/apps.plugin/apps_plugin.$(OBJEXT) \
+ $(am__objects_1)
+@ENABLE_PLUGIN_APPS_TRUE@am_apps_plugin_OBJECTS = $(am__objects_2)
+apps_plugin_OBJECTS = $(am_apps_plugin_OBJECTS)
+am__DEPENDENCIES_1 =
+am__DEPENDENCIES_2 = $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \
+ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \
+ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \
+ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \
+ $(am__DEPENDENCIES_1)
+@ENABLE_PLUGIN_APPS_TRUE@apps_plugin_DEPENDENCIES = \
+@ENABLE_PLUGIN_APPS_TRUE@ $(am__DEPENDENCIES_2) \
+@ENABLE_PLUGIN_APPS_TRUE@ $(am__DEPENDENCIES_1)
+am__cgroup_network_SOURCES_DIST = \
+ collectors/cgroups.plugin/cgroup-network.c \
+ libnetdata/adaptive_resortable_list/adaptive_resortable_list.c \
+ libnetdata/adaptive_resortable_list/adaptive_resortable_list.h \
+ libnetdata/config/appconfig.c libnetdata/config/appconfig.h \
+ libnetdata/avl/avl.c libnetdata/avl/avl.h \
+ libnetdata/buffer/buffer.c libnetdata/buffer/buffer.h \
+ libnetdata/clocks/clocks.c libnetdata/clocks/clocks.h \
+ libnetdata/dictionary/dictionary.c \
+ libnetdata/dictionary/dictionary.h libnetdata/eval/eval.c \
+ libnetdata/eval/eval.h libnetdata/inlined.h \
+ libnetdata/libnetdata.c libnetdata/libnetdata.h \
+ libnetdata/locks/locks.c libnetdata/locks/locks.h \
+ libnetdata/log/log.c libnetdata/log/log.h \
+ libnetdata/popen/popen.c libnetdata/popen/popen.h \
+ libnetdata/procfile/procfile.c libnetdata/procfile/procfile.h \
+ libnetdata/os.c libnetdata/os.h \
+ libnetdata/simple_pattern/simple_pattern.c \
+ libnetdata/simple_pattern/simple_pattern.h \
+ libnetdata/socket/socket.c libnetdata/socket/socket.h \
+ libnetdata/socket/security.c libnetdata/socket/security.h \
+ libnetdata/statistical/statistical.c \
+ libnetdata/statistical/statistical.h \
+ libnetdata/storage_number/storage_number.c \
+ libnetdata/storage_number/storage_number.h \
+ libnetdata/threads/threads.c libnetdata/threads/threads.h \
+ libnetdata/url/url.c libnetdata/url/url.h \
+ libnetdata/json/json.c libnetdata/json/json.h \
+ libnetdata/json/jsmn.c libnetdata/json/jsmn.h \
+ libnetdata/health/health.c libnetdata/health/health.h \
+ libnetdata/string/utf8.h
+am__objects_3 = collectors/cgroups.plugin/cgroup-network.$(OBJEXT) \
+ $(am__objects_1)
+@ENABLE_PLUGIN_CGROUP_NETWORK_TRUE@am_cgroup_network_OBJECTS = \
+@ENABLE_PLUGIN_CGROUP_NETWORK_TRUE@ $(am__objects_3)
+cgroup_network_OBJECTS = $(am_cgroup_network_OBJECTS)
+@ENABLE_PLUGIN_CGROUP_NETWORK_TRUE@cgroup_network_DEPENDENCIES = \
+@ENABLE_PLUGIN_CGROUP_NETWORK_TRUE@ $(am__DEPENDENCIES_2)
+am__cups_plugin_SOURCES_DIST = collectors/cups.plugin/cups_plugin.c \
+ libnetdata/adaptive_resortable_list/adaptive_resortable_list.c \
+ libnetdata/adaptive_resortable_list/adaptive_resortable_list.h \
+ libnetdata/config/appconfig.c libnetdata/config/appconfig.h \
+ libnetdata/avl/avl.c libnetdata/avl/avl.h \
+ libnetdata/buffer/buffer.c libnetdata/buffer/buffer.h \
+ libnetdata/clocks/clocks.c libnetdata/clocks/clocks.h \
+ libnetdata/dictionary/dictionary.c \
+ libnetdata/dictionary/dictionary.h libnetdata/eval/eval.c \
+ libnetdata/eval/eval.h libnetdata/inlined.h \
+ libnetdata/libnetdata.c libnetdata/libnetdata.h \
+ libnetdata/locks/locks.c libnetdata/locks/locks.h \
+ libnetdata/log/log.c libnetdata/log/log.h \
+ libnetdata/popen/popen.c libnetdata/popen/popen.h \
+ libnetdata/procfile/procfile.c libnetdata/procfile/procfile.h \
+ libnetdata/os.c libnetdata/os.h \
+ libnetdata/simple_pattern/simple_pattern.c \
+ libnetdata/simple_pattern/simple_pattern.h \
+ libnetdata/socket/socket.c libnetdata/socket/socket.h \
+ libnetdata/socket/security.c libnetdata/socket/security.h \
+ libnetdata/statistical/statistical.c \
+ libnetdata/statistical/statistical.h \
+ libnetdata/storage_number/storage_number.c \
+ libnetdata/storage_number/storage_number.h \
+ libnetdata/threads/threads.c libnetdata/threads/threads.h \
+ libnetdata/url/url.c libnetdata/url/url.h \
+ libnetdata/json/json.c libnetdata/json/json.h \
+ libnetdata/json/jsmn.c libnetdata/json/jsmn.h \
+ libnetdata/health/health.c libnetdata/health/health.h \
+ libnetdata/string/utf8.h
+am__objects_4 = collectors/cups.plugin/cups_plugin.$(OBJEXT) \
+ $(am__objects_1)
+@ENABLE_PLUGIN_CUPS_TRUE@am_cups_plugin_OBJECTS = $(am__objects_4)
+cups_plugin_OBJECTS = $(am_cups_plugin_OBJECTS)
+@ENABLE_PLUGIN_CUPS_TRUE@cups_plugin_DEPENDENCIES = \
+@ENABLE_PLUGIN_CUPS_TRUE@ $(am__DEPENDENCIES_2) \
+@ENABLE_PLUGIN_CUPS_TRUE@ $(am__DEPENDENCIES_1)
+am__freeipmi_plugin_SOURCES_DIST = \
+ collectors/freeipmi.plugin/freeipmi_plugin.c \
+ libnetdata/adaptive_resortable_list/adaptive_resortable_list.c \
+ libnetdata/adaptive_resortable_list/adaptive_resortable_list.h \
+ libnetdata/config/appconfig.c libnetdata/config/appconfig.h \
+ libnetdata/avl/avl.c libnetdata/avl/avl.h \
+ libnetdata/buffer/buffer.c libnetdata/buffer/buffer.h \
+ libnetdata/clocks/clocks.c libnetdata/clocks/clocks.h \
+ libnetdata/dictionary/dictionary.c \
+ libnetdata/dictionary/dictionary.h libnetdata/eval/eval.c \
+ libnetdata/eval/eval.h libnetdata/inlined.h \
+ libnetdata/libnetdata.c libnetdata/libnetdata.h \
+ libnetdata/locks/locks.c libnetdata/locks/locks.h \
+ libnetdata/log/log.c libnetdata/log/log.h \
+ libnetdata/popen/popen.c libnetdata/popen/popen.h \
+ libnetdata/procfile/procfile.c libnetdata/procfile/procfile.h \
+ libnetdata/os.c libnetdata/os.h \
+ libnetdata/simple_pattern/simple_pattern.c \
+ libnetdata/simple_pattern/simple_pattern.h \
+ libnetdata/socket/socket.c libnetdata/socket/socket.h \
+ libnetdata/socket/security.c libnetdata/socket/security.h \
+ libnetdata/statistical/statistical.c \
+ libnetdata/statistical/statistical.h \
+ libnetdata/storage_number/storage_number.c \
+ libnetdata/storage_number/storage_number.h \
+ libnetdata/threads/threads.c libnetdata/threads/threads.h \
+ libnetdata/url/url.c libnetdata/url/url.h \
+ libnetdata/json/json.c libnetdata/json/json.h \
+ libnetdata/json/jsmn.c libnetdata/json/jsmn.h \
+ libnetdata/health/health.c libnetdata/health/health.h \
+ libnetdata/string/utf8.h
+am__objects_5 = collectors/freeipmi.plugin/freeipmi_plugin.$(OBJEXT) \
+ $(am__objects_1)
+@ENABLE_PLUGIN_FREEIPMI_TRUE@am_freeipmi_plugin_OBJECTS = \
+@ENABLE_PLUGIN_FREEIPMI_TRUE@ $(am__objects_5)
+freeipmi_plugin_OBJECTS = $(am_freeipmi_plugin_OBJECTS)
+@ENABLE_PLUGIN_FREEIPMI_TRUE@freeipmi_plugin_DEPENDENCIES = \
+@ENABLE_PLUGIN_FREEIPMI_TRUE@ $(am__DEPENDENCIES_2) \
+@ENABLE_PLUGIN_FREEIPMI_TRUE@ $(am__DEPENDENCIES_1)
+am__netdata_SOURCES_DIST = collectors/all.h daemon/common.c \
+ daemon/common.h daemon/daemon.c daemon/daemon.h \
+ daemon/global_statistics.c daemon/global_statistics.h \
+ daemon/main.c daemon/main.h daemon/signals.c daemon/signals.h \
+ daemon/unit_test.c daemon/unit_test.h \
+ libnetdata/adaptive_resortable_list/adaptive_resortable_list.c \
+ libnetdata/adaptive_resortable_list/adaptive_resortable_list.h \
+ libnetdata/config/appconfig.c libnetdata/config/appconfig.h \
+ libnetdata/avl/avl.c libnetdata/avl/avl.h \
+ libnetdata/buffer/buffer.c libnetdata/buffer/buffer.h \
+ libnetdata/clocks/clocks.c libnetdata/clocks/clocks.h \
+ libnetdata/dictionary/dictionary.c \
+ libnetdata/dictionary/dictionary.h libnetdata/eval/eval.c \
+ libnetdata/eval/eval.h libnetdata/inlined.h \
+ libnetdata/libnetdata.c libnetdata/libnetdata.h \
+ libnetdata/locks/locks.c libnetdata/locks/locks.h \
+ libnetdata/log/log.c libnetdata/log/log.h \
+ libnetdata/popen/popen.c libnetdata/popen/popen.h \
+ libnetdata/procfile/procfile.c libnetdata/procfile/procfile.h \
+ libnetdata/os.c libnetdata/os.h \
+ libnetdata/simple_pattern/simple_pattern.c \
+ libnetdata/simple_pattern/simple_pattern.h \
+ libnetdata/socket/socket.c libnetdata/socket/socket.h \
+ libnetdata/socket/security.c libnetdata/socket/security.h \
+ libnetdata/statistical/statistical.c \
+ libnetdata/statistical/statistical.h \
+ libnetdata/storage_number/storage_number.c \
+ libnetdata/storage_number/storage_number.h \
+ libnetdata/threads/threads.c libnetdata/threads/threads.h \
+ libnetdata/url/url.c libnetdata/url/url.h \
+ libnetdata/json/json.c libnetdata/json/json.h \
+ libnetdata/json/jsmn.c libnetdata/json/jsmn.h \
+ libnetdata/health/health.c libnetdata/health/health.h \
+ libnetdata/string/utf8.h web/api/badges/web_buffer_svg.c \
+ web/api/badges/web_buffer_svg.h web/api/exporters/allmetrics.c \
+ web/api/exporters/allmetrics.h \
+ web/api/exporters/shell/allmetrics_shell.c \
+ web/api/exporters/shell/allmetrics_shell.h \
+ web/api/queries/average/average.c \
+ web/api/queries/average/average.h web/api/queries/des/des.c \
+ web/api/queries/des/des.h \
+ web/api/queries/incremental_sum/incremental_sum.c \
+ web/api/queries/incremental_sum/incremental_sum.h \
+ web/api/queries/max/max.c web/api/queries/max/max.h \
+ web/api/queries/median/median.c \
+ web/api/queries/median/median.h web/api/queries/min/min.c \
+ web/api/queries/min/min.h web/api/queries/query.c \
+ web/api/queries/query.h web/api/queries/rrdr.c \
+ web/api/queries/rrdr.h web/api/queries/ses/ses.c \
+ web/api/queries/ses/ses.h web/api/queries/stddev/stddev.c \
+ web/api/queries/stddev/stddev.h web/api/queries/sum/sum.c \
+ web/api/queries/sum/sum.h web/api/formatters/rrd2json.c \
+ web/api/formatters/rrd2json.h web/api/formatters/csv/csv.c \
+ web/api/formatters/csv/csv.h web/api/formatters/json/json.c \
+ web/api/formatters/json/json.h web/api/formatters/ssv/ssv.c \
+ web/api/formatters/ssv/ssv.h web/api/formatters/value/value.c \
+ web/api/formatters/value/value.h \
+ web/api/formatters/json_wrapper.c \
+ web/api/formatters/json_wrapper.h \
+ web/api/formatters/charts2json.c \
+ web/api/formatters/charts2json.h \
+ web/api/formatters/rrdset2json.c \
+ web/api/formatters/rrdset2json.h \
+ web/api/health/health_cmdapi.c web/api/health/health_cmdapi.h \
+ web/api/web_api_v1.c web/api/web_api_v1.h backends/backends.c \
+ backends/backends.h backends/graphite/graphite.c \
+ backends/graphite/graphite.h backends/json/json.c \
+ backends/json/json.h backends/opentsdb/opentsdb.c \
+ backends/opentsdb/opentsdb.h \
+ backends/prometheus/backend_prometheus.c \
+ backends/prometheus/backend_prometheus.h \
+ collectors/checks.plugin/plugin_checks.c \
+ collectors/checks.plugin/plugin_checks.h health/health.c \
+ health/health.h health/health_config.c health/health_json.c \
+ health/health_log.c \
+ collectors/idlejitter.plugin/plugin_idlejitter.c \
+ collectors/idlejitter.plugin/plugin_idlejitter.h \
+ collectors/plugins.d/plugins_d.c \
+ collectors/plugins.d/plugins_d.h registry/registry.c \
+ registry/registry.h registry/registry_db.c \
+ registry/registry_init.c registry/registry_internals.c \
+ registry/registry_internals.h registry/registry_log.c \
+ registry/registry_machine.c registry/registry_machine.h \
+ registry/registry_person.c registry/registry_person.h \
+ registry/registry_url.c registry/registry_url.h \
+ database/rrdcalc.c database/rrdcalc.h \
+ database/rrdcalctemplate.c database/rrdcalctemplate.h \
+ database/rrddim.c database/rrddimvar.c database/rrddimvar.h \
+ database/rrdfamily.c database/rrdhost.c database/rrd.c \
+ database/rrd.h database/rrdset.c database/rrdsetvar.c \
+ database/rrdsetvar.h database/rrdvar.c database/rrdvar.h \
+ database/engine/rrdengine.c database/engine/rrdengine.h \
+ database/engine/rrddiskprotocol.h database/engine/datafile.c \
+ database/engine/datafile.h database/engine/journalfile.c \
+ database/engine/journalfile.h database/engine/rrdenginelib.c \
+ database/engine/rrdenginelib.h database/engine/rrdengineapi.c \
+ database/engine/rrdengineapi.h database/engine/pagecache.c \
+ database/engine/pagecache.h database/engine/rrdenglocking.c \
+ database/engine/rrdenglocking.h streaming/rrdpush.c \
+ streaming/rrdpush.h collectors/statsd.plugin/statsd.c \
+ collectors/statsd.plugin/statsd.h web/server/web_client.c \
+ web/server/web_client.h web/server/web_server.c \
+ web/server/web_server.h web/server/web_client_cache.c \
+ web/server/web_client_cache.h \
+ web/server/static/static-threaded.c \
+ web/server/static/static-threaded.h \
+ collectors/freebsd.plugin/plugin_freebsd.c \
+ collectors/freebsd.plugin/plugin_freebsd.h \
+ collectors/freebsd.plugin/freebsd_sysctl.c \
+ collectors/freebsd.plugin/freebsd_getmntinfo.c \
+ collectors/freebsd.plugin/freebsd_getifaddrs.c \
+ collectors/freebsd.plugin/freebsd_devstat.c \
+ collectors/freebsd.plugin/freebsd_kstat_zfs.c \
+ collectors/freebsd.plugin/freebsd_ipfw.c \
+ collectors/proc.plugin/zfs_common.c \
+ collectors/proc.plugin/zfs_common.h \
+ collectors/macos.plugin/plugin_macos.c \
+ collectors/macos.plugin/plugin_macos.h \
+ collectors/macos.plugin/macos_sysctl.c \
+ collectors/macos.plugin/macos_mach_smi.c \
+ collectors/macos.plugin/macos_fw.c \
+ collectors/cgroups.plugin/sys_fs_cgroup.c \
+ collectors/cgroups.plugin/sys_fs_cgroup.h \
+ collectors/diskspace.plugin/plugin_diskspace.h \
+ collectors/diskspace.plugin/plugin_diskspace.c \
+ collectors/proc.plugin/ipc.c \
+ collectors/proc.plugin/plugin_proc.c \
+ collectors/proc.plugin/plugin_proc.h \
+ collectors/proc.plugin/proc_diskstats.c \
+ collectors/proc.plugin/proc_mdstat.c \
+ collectors/proc.plugin/proc_interrupts.c \
+ collectors/proc.plugin/proc_softirqs.c \
+ collectors/proc.plugin/proc_loadavg.c \
+ collectors/proc.plugin/proc_meminfo.c \
+ collectors/proc.plugin/proc_net_dev.c \
+ collectors/proc.plugin/proc_net_ip_vs_stats.c \
+ collectors/proc.plugin/proc_net_netstat.c \
+ collectors/proc.plugin/proc_net_rpc_nfs.c \
+ collectors/proc.plugin/proc_net_rpc_nfsd.c \
+ collectors/proc.plugin/proc_net_snmp.c \
+ collectors/proc.plugin/proc_net_snmp6.c \
+ collectors/proc.plugin/proc_net_sctp_snmp.c \
+ collectors/proc.plugin/proc_net_sockstat.c \
+ collectors/proc.plugin/proc_net_sockstat6.c \
+ collectors/proc.plugin/proc_net_softnet_stat.c \
+ collectors/proc.plugin/proc_net_stat_conntrack.c \
+ collectors/proc.plugin/proc_net_stat_synproxy.c \
+ collectors/proc.plugin/proc_self_mountinfo.c \
+ collectors/proc.plugin/proc_self_mountinfo.h \
+ collectors/proc.plugin/proc_spl_kstat_zfs.c \
+ collectors/proc.plugin/proc_stat.c \
+ collectors/proc.plugin/proc_sys_kernel_random_entropy_avail.c \
+ collectors/proc.plugin/proc_vmstat.c \
+ collectors/proc.plugin/proc_uptime.c \
+ collectors/proc.plugin/sys_kernel_mm_ksm.c \
+ collectors/proc.plugin/sys_block_zram.c \
+ collectors/proc.plugin/sys_devices_system_edac_mc.c \
+ collectors/proc.plugin/sys_devices_system_node.c \
+ collectors/proc.plugin/sys_fs_btrfs.c \
+ collectors/proc.plugin/sys_class_power_supply.c \
+ collectors/tc.plugin/plugin_tc.c \
+ collectors/tc.plugin/plugin_tc.h \
+ backends/aws_kinesis/aws_kinesis.c \
+ backends/aws_kinesis/aws_kinesis.h \
+ backends/aws_kinesis/aws_kinesis_put_record.cc \
+ backends/aws_kinesis/aws_kinesis_put_record.h \
+ backends/prometheus/remote_write/remote_write.cc \
+ backends/prometheus/remote_write/remote_write.h \
+ backends/prometheus/remote_write/remote_write.proto \
+ backends/mongodb/mongodb.c backends/mongodb/mongodb.h
+am__objects_6 = daemon/common.$(OBJEXT) daemon/daemon.$(OBJEXT) \
+ daemon/global_statistics.$(OBJEXT) daemon/main.$(OBJEXT) \
+ daemon/signals.$(OBJEXT) daemon/unit_test.$(OBJEXT)
+am__objects_7 = web/api/badges/web_buffer_svg.$(OBJEXT) \
+ web/api/exporters/allmetrics.$(OBJEXT) \
+ web/api/exporters/shell/allmetrics_shell.$(OBJEXT) \
+ web/api/queries/average/average.$(OBJEXT) \
+ web/api/queries/des/des.$(OBJEXT) \
+ web/api/queries/incremental_sum/incremental_sum.$(OBJEXT) \
+ web/api/queries/max/max.$(OBJEXT) \
+ web/api/queries/median/median.$(OBJEXT) \
+ web/api/queries/min/min.$(OBJEXT) \
+ web/api/queries/query.$(OBJEXT) web/api/queries/rrdr.$(OBJEXT) \
+ web/api/queries/ses/ses.$(OBJEXT) \
+ web/api/queries/stddev/stddev.$(OBJEXT) \
+ web/api/queries/sum/sum.$(OBJEXT) \
+ web/api/formatters/rrd2json.$(OBJEXT) \
+ web/api/formatters/csv/csv.$(OBJEXT) \
+ web/api/formatters/json/json.$(OBJEXT) \
+ web/api/formatters/ssv/ssv.$(OBJEXT) \
+ web/api/formatters/value/value.$(OBJEXT) \
+ web/api/formatters/json_wrapper.$(OBJEXT) \
+ web/api/formatters/charts2json.$(OBJEXT) \
+ web/api/formatters/rrdset2json.$(OBJEXT) \
+ web/api/health/health_cmdapi.$(OBJEXT) \
+ web/api/web_api_v1.$(OBJEXT)
+am__objects_8 = backends/backends.$(OBJEXT) \
+ backends/graphite/graphite.$(OBJEXT) \
+ backends/json/json.$(OBJEXT) \
+ backends/opentsdb/opentsdb.$(OBJEXT) \
+ backends/prometheus/backend_prometheus.$(OBJEXT)
+am__objects_9 = collectors/checks.plugin/plugin_checks.$(OBJEXT)
+am__objects_10 = health/health.$(OBJEXT) \
+ health/health_config.$(OBJEXT) health/health_json.$(OBJEXT) \
+ health/health_log.$(OBJEXT)
+am__objects_11 = \
+ collectors/idlejitter.plugin/plugin_idlejitter.$(OBJEXT)
+am__objects_12 = collectors/plugins.d/plugins_d.$(OBJEXT)
+am__objects_13 = registry/registry.$(OBJEXT) \
+ registry/registry_db.$(OBJEXT) \
+ registry/registry_init.$(OBJEXT) \
+ registry/registry_internals.$(OBJEXT) \
+ registry/registry_log.$(OBJEXT) \
+ registry/registry_machine.$(OBJEXT) \
+ registry/registry_person.$(OBJEXT) \
+ registry/registry_url.$(OBJEXT)
+@ENABLE_DBENGINE_TRUE@am__objects_14 = \
+@ENABLE_DBENGINE_TRUE@ database/engine/rrdengine.$(OBJEXT) \
+@ENABLE_DBENGINE_TRUE@ database/engine/datafile.$(OBJEXT) \
+@ENABLE_DBENGINE_TRUE@ database/engine/journalfile.$(OBJEXT) \
+@ENABLE_DBENGINE_TRUE@ database/engine/rrdenginelib.$(OBJEXT) \
+@ENABLE_DBENGINE_TRUE@ database/engine/rrdengineapi.$(OBJEXT) \
+@ENABLE_DBENGINE_TRUE@ database/engine/pagecache.$(OBJEXT) \
+@ENABLE_DBENGINE_TRUE@ database/engine/rrdenglocking.$(OBJEXT)
+am__objects_15 = database/rrdcalc.$(OBJEXT) \
+ database/rrdcalctemplate.$(OBJEXT) database/rrddim.$(OBJEXT) \
+ database/rrddimvar.$(OBJEXT) database/rrdfamily.$(OBJEXT) \
+ database/rrdhost.$(OBJEXT) database/rrd.$(OBJEXT) \
+ database/rrdset.$(OBJEXT) database/rrdsetvar.$(OBJEXT) \
+ database/rrdvar.$(OBJEXT) $(am__objects_14)
+am__objects_16 = streaming/rrdpush.$(OBJEXT)
+am__objects_17 = collectors/statsd.plugin/statsd.$(OBJEXT)
+am__objects_18 = web/server/web_client.$(OBJEXT) \
+ web/server/web_server.$(OBJEXT) \
+ web/server/web_client_cache.$(OBJEXT) \
+ web/server/static/static-threaded.$(OBJEXT)
+am__objects_19 = collectors/freebsd.plugin/plugin_freebsd.$(OBJEXT) \
+ collectors/freebsd.plugin/freebsd_sysctl.$(OBJEXT) \
+ collectors/freebsd.plugin/freebsd_getmntinfo.$(OBJEXT) \
+ collectors/freebsd.plugin/freebsd_getifaddrs.$(OBJEXT) \
+ collectors/freebsd.plugin/freebsd_devstat.$(OBJEXT) \
+ collectors/freebsd.plugin/freebsd_kstat_zfs.$(OBJEXT) \
+ collectors/freebsd.plugin/freebsd_ipfw.$(OBJEXT) \
+ collectors/proc.plugin/zfs_common.$(OBJEXT)
+@FREEBSD_TRUE@am__objects_20 = $(am__objects_19)
+am__objects_21 = collectors/macos.plugin/plugin_macos.$(OBJEXT) \
+ collectors/macos.plugin/macos_sysctl.$(OBJEXT) \
+ collectors/macos.plugin/macos_mach_smi.$(OBJEXT) \
+ collectors/macos.plugin/macos_fw.$(OBJEXT)
+@MACOS_TRUE@am__objects_22 = $(am__objects_21)
+am__objects_23 = collectors/cgroups.plugin/sys_fs_cgroup.$(OBJEXT)
+am__objects_24 = \
+ collectors/diskspace.plugin/plugin_diskspace.$(OBJEXT)
+am__objects_25 = collectors/proc.plugin/ipc.$(OBJEXT) \
+ collectors/proc.plugin/plugin_proc.$(OBJEXT) \
+ collectors/proc.plugin/proc_diskstats.$(OBJEXT) \
+ collectors/proc.plugin/proc_mdstat.$(OBJEXT) \
+ collectors/proc.plugin/proc_interrupts.$(OBJEXT) \
+ collectors/proc.plugin/proc_softirqs.$(OBJEXT) \
+ collectors/proc.plugin/proc_loadavg.$(OBJEXT) \
+ collectors/proc.plugin/proc_meminfo.$(OBJEXT) \
+ collectors/proc.plugin/proc_net_dev.$(OBJEXT) \
+ collectors/proc.plugin/proc_net_ip_vs_stats.$(OBJEXT) \
+ collectors/proc.plugin/proc_net_netstat.$(OBJEXT) \
+ collectors/proc.plugin/proc_net_rpc_nfs.$(OBJEXT) \
+ collectors/proc.plugin/proc_net_rpc_nfsd.$(OBJEXT) \
+ collectors/proc.plugin/proc_net_snmp.$(OBJEXT) \
+ collectors/proc.plugin/proc_net_snmp6.$(OBJEXT) \
+ collectors/proc.plugin/proc_net_sctp_snmp.$(OBJEXT) \
+ collectors/proc.plugin/proc_net_sockstat.$(OBJEXT) \
+ collectors/proc.plugin/proc_net_sockstat6.$(OBJEXT) \
+ collectors/proc.plugin/proc_net_softnet_stat.$(OBJEXT) \
+ collectors/proc.plugin/proc_net_stat_conntrack.$(OBJEXT) \
+ collectors/proc.plugin/proc_net_stat_synproxy.$(OBJEXT) \
+ collectors/proc.plugin/proc_self_mountinfo.$(OBJEXT) \
+ collectors/proc.plugin/zfs_common.$(OBJEXT) \
+ collectors/proc.plugin/proc_spl_kstat_zfs.$(OBJEXT) \
+ collectors/proc.plugin/proc_stat.$(OBJEXT) \
+ collectors/proc.plugin/proc_sys_kernel_random_entropy_avail.$(OBJEXT) \
+ collectors/proc.plugin/proc_vmstat.$(OBJEXT) \
+ collectors/proc.plugin/proc_uptime.$(OBJEXT) \
+ collectors/proc.plugin/sys_kernel_mm_ksm.$(OBJEXT) \
+ collectors/proc.plugin/sys_block_zram.$(OBJEXT) \
+ collectors/proc.plugin/sys_devices_system_edac_mc.$(OBJEXT) \
+ collectors/proc.plugin/sys_devices_system_node.$(OBJEXT) \
+ collectors/proc.plugin/sys_fs_btrfs.$(OBJEXT) \
+ collectors/proc.plugin/sys_class_power_supply.$(OBJEXT)
+am__objects_26 = collectors/tc.plugin/plugin_tc.$(OBJEXT)
+@LINUX_TRUE@am__objects_27 = $(am__objects_23) $(am__objects_24) \
+@LINUX_TRUE@ $(am__objects_25) $(am__objects_26)
+am__objects_28 = $(am__objects_6) $(am__objects_1) $(am__objects_7) \
+ $(am__objects_8) $(am__objects_9) $(am__objects_10) \
+ $(am__objects_11) $(am__objects_12) $(am__objects_13) \
+ $(am__objects_15) $(am__objects_16) $(am__objects_17) \
+ $(am__objects_18) $(am__objects_20) $(am__objects_22) \
+ $(am__objects_27)
+am__objects_29 = backends/aws_kinesis/aws_kinesis.$(OBJEXT) \
+ backends/aws_kinesis/aws_kinesis_put_record.$(OBJEXT)
+@ENABLE_BACKEND_KINESIS_TRUE@am__objects_30 = $(am__objects_29)
+am__objects_31 = \
+ backends/prometheus/remote_write/remote_write.$(OBJEXT)
+@ENABLE_BACKEND_PROMETHEUS_REMOTE_WRITE_TRUE@am__objects_32 = $(am__objects_31)
+am__objects_33 = backends/mongodb/mongodb.$(OBJEXT)
+@ENABLE_BACKEND_MONGODB_TRUE@am__objects_34 = $(am__objects_33)
+am_netdata_OBJECTS = $(am__objects_28) $(am__objects_30) \
+ $(am__objects_32) $(am__objects_34)
+@ENABLE_BACKEND_PROMETHEUS_REMOTE_WRITE_TRUE@am__objects_35 = backends/prometheus/remote_write/remote_write.pb.$(OBJEXT)
+@ENABLE_BACKEND_PROMETHEUS_REMOTE_WRITE_TRUE@nodist_netdata_OBJECTS = $(am__objects_35)
+netdata_OBJECTS = $(am_netdata_OBJECTS) $(nodist_netdata_OBJECTS)
+@ENABLE_BACKEND_KINESIS_TRUE@am__DEPENDENCIES_3 = \
+@ENABLE_BACKEND_KINESIS_TRUE@ $(am__DEPENDENCIES_1)
+@ENABLE_BACKEND_PROMETHEUS_REMOTE_WRITE_TRUE@am__DEPENDENCIES_4 = $(am__DEPENDENCIES_1)
+@ENABLE_BACKEND_MONGODB_TRUE@am__DEPENDENCIES_5 = \
+@ENABLE_BACKEND_MONGODB_TRUE@ $(am__DEPENDENCIES_1)
+netdata_DEPENDENCIES = $(am__DEPENDENCIES_2) $(am__DEPENDENCIES_3) \
+ $(am__DEPENDENCIES_4) $(am__DEPENDENCIES_5)
+am__nfacct_plugin_SOURCES_DIST = \
+ collectors/nfacct.plugin/plugin_nfacct.c \
+ libnetdata/adaptive_resortable_list/adaptive_resortable_list.c \
+ libnetdata/adaptive_resortable_list/adaptive_resortable_list.h \
+ libnetdata/config/appconfig.c libnetdata/config/appconfig.h \
+ libnetdata/avl/avl.c libnetdata/avl/avl.h \
+ libnetdata/buffer/buffer.c libnetdata/buffer/buffer.h \
+ libnetdata/clocks/clocks.c libnetdata/clocks/clocks.h \
+ libnetdata/dictionary/dictionary.c \
+ libnetdata/dictionary/dictionary.h libnetdata/eval/eval.c \
+ libnetdata/eval/eval.h libnetdata/inlined.h \
+ libnetdata/libnetdata.c libnetdata/libnetdata.h \
+ libnetdata/locks/locks.c libnetdata/locks/locks.h \
+ libnetdata/log/log.c libnetdata/log/log.h \
+ libnetdata/popen/popen.c libnetdata/popen/popen.h \
+ libnetdata/procfile/procfile.c libnetdata/procfile/procfile.h \
+ libnetdata/os.c libnetdata/os.h \
+ libnetdata/simple_pattern/simple_pattern.c \
+ libnetdata/simple_pattern/simple_pattern.h \
+ libnetdata/socket/socket.c libnetdata/socket/socket.h \
+ libnetdata/socket/security.c libnetdata/socket/security.h \
+ libnetdata/statistical/statistical.c \
+ libnetdata/statistical/statistical.h \
+ libnetdata/storage_number/storage_number.c \
+ libnetdata/storage_number/storage_number.h \
+ libnetdata/threads/threads.c libnetdata/threads/threads.h \
+ libnetdata/url/url.c libnetdata/url/url.h \
+ libnetdata/json/json.c libnetdata/json/json.h \
+ libnetdata/json/jsmn.c libnetdata/json/jsmn.h \
+ libnetdata/health/health.c libnetdata/health/health.h \
+ libnetdata/string/utf8.h
+am__objects_36 = collectors/nfacct.plugin/plugin_nfacct.$(OBJEXT) \
+ $(am__objects_1)
+@ENABLE_PLUGIN_NFACCT_TRUE@am_nfacct_plugin_OBJECTS = \
+@ENABLE_PLUGIN_NFACCT_TRUE@ $(am__objects_36)
+nfacct_plugin_OBJECTS = $(am_nfacct_plugin_OBJECTS)
+@ENABLE_PLUGIN_NFACCT_TRUE@nfacct_plugin_DEPENDENCIES = \
+@ENABLE_PLUGIN_NFACCT_TRUE@ $(am__DEPENDENCIES_2) \
+@ENABLE_PLUGIN_NFACCT_TRUE@ $(am__DEPENDENCIES_1)
+am__perf_plugin_SOURCES_DIST = collectors/perf.plugin/perf_plugin.c \
+ libnetdata/adaptive_resortable_list/adaptive_resortable_list.c \
+ libnetdata/adaptive_resortable_list/adaptive_resortable_list.h \
+ libnetdata/config/appconfig.c libnetdata/config/appconfig.h \
+ libnetdata/avl/avl.c libnetdata/avl/avl.h \
+ libnetdata/buffer/buffer.c libnetdata/buffer/buffer.h \
+ libnetdata/clocks/clocks.c libnetdata/clocks/clocks.h \
+ libnetdata/dictionary/dictionary.c \
+ libnetdata/dictionary/dictionary.h libnetdata/eval/eval.c \
+ libnetdata/eval/eval.h libnetdata/inlined.h \
+ libnetdata/libnetdata.c libnetdata/libnetdata.h \
+ libnetdata/locks/locks.c libnetdata/locks/locks.h \
+ libnetdata/log/log.c libnetdata/log/log.h \
+ libnetdata/popen/popen.c libnetdata/popen/popen.h \
+ libnetdata/procfile/procfile.c libnetdata/procfile/procfile.h \
+ libnetdata/os.c libnetdata/os.h \
+ libnetdata/simple_pattern/simple_pattern.c \
+ libnetdata/simple_pattern/simple_pattern.h \
+ libnetdata/socket/socket.c libnetdata/socket/socket.h \
+ libnetdata/socket/security.c libnetdata/socket/security.h \
+ libnetdata/statistical/statistical.c \
+ libnetdata/statistical/statistical.h \
+ libnetdata/storage_number/storage_number.c \
+ libnetdata/storage_number/storage_number.h \
+ libnetdata/threads/threads.c libnetdata/threads/threads.h \
+ libnetdata/url/url.c libnetdata/url/url.h \
+ libnetdata/json/json.c libnetdata/json/json.h \
+ libnetdata/json/jsmn.c libnetdata/json/jsmn.h \
+ libnetdata/health/health.c libnetdata/health/health.h \
+ libnetdata/string/utf8.h
+am__objects_37 = collectors/perf.plugin/perf_plugin.$(OBJEXT) \
+ $(am__objects_1)
+@ENABLE_PLUGIN_PERF_TRUE@am_perf_plugin_OBJECTS = $(am__objects_37)
+perf_plugin_OBJECTS = $(am_perf_plugin_OBJECTS)
+@ENABLE_PLUGIN_PERF_TRUE@perf_plugin_DEPENDENCIES = \
+@ENABLE_PLUGIN_PERF_TRUE@ $(am__DEPENDENCIES_2)
+am__xenstat_plugin_SOURCES_DIST = \
+ collectors/xenstat.plugin/xenstat_plugin.c \
+ libnetdata/adaptive_resortable_list/adaptive_resortable_list.c \
+ libnetdata/adaptive_resortable_list/adaptive_resortable_list.h \
+ libnetdata/config/appconfig.c libnetdata/config/appconfig.h \
+ libnetdata/avl/avl.c libnetdata/avl/avl.h \
+ libnetdata/buffer/buffer.c libnetdata/buffer/buffer.h \
+ libnetdata/clocks/clocks.c libnetdata/clocks/clocks.h \
+ libnetdata/dictionary/dictionary.c \
+ libnetdata/dictionary/dictionary.h libnetdata/eval/eval.c \
+ libnetdata/eval/eval.h libnetdata/inlined.h \
+ libnetdata/libnetdata.c libnetdata/libnetdata.h \
+ libnetdata/locks/locks.c libnetdata/locks/locks.h \
+ libnetdata/log/log.c libnetdata/log/log.h \
+ libnetdata/popen/popen.c libnetdata/popen/popen.h \
+ libnetdata/procfile/procfile.c libnetdata/procfile/procfile.h \
+ libnetdata/os.c libnetdata/os.h \
+ libnetdata/simple_pattern/simple_pattern.c \
+ libnetdata/simple_pattern/simple_pattern.h \
+ libnetdata/socket/socket.c libnetdata/socket/socket.h \
+ libnetdata/socket/security.c libnetdata/socket/security.h \
+ libnetdata/statistical/statistical.c \
+ libnetdata/statistical/statistical.h \
+ libnetdata/storage_number/storage_number.c \
+ libnetdata/storage_number/storage_number.h \
+ libnetdata/threads/threads.c libnetdata/threads/threads.h \
+ libnetdata/url/url.c libnetdata/url/url.h \
+ libnetdata/json/json.c libnetdata/json/json.h \
+ libnetdata/json/jsmn.c libnetdata/json/jsmn.h \
+ libnetdata/health/health.c libnetdata/health/health.h \
+ libnetdata/string/utf8.h
+am__objects_38 = collectors/xenstat.plugin/xenstat_plugin.$(OBJEXT) \
+ $(am__objects_1)
+@ENABLE_PLUGIN_XENSTAT_TRUE@am_xenstat_plugin_OBJECTS = \
+@ENABLE_PLUGIN_XENSTAT_TRUE@ $(am__objects_38)
+xenstat_plugin_OBJECTS = $(am_xenstat_plugin_OBJECTS)
+@ENABLE_PLUGIN_XENSTAT_TRUE@xenstat_plugin_DEPENDENCIES = \
+@ENABLE_PLUGIN_XENSTAT_TRUE@ $(am__DEPENDENCIES_2) \
+@ENABLE_PLUGIN_XENSTAT_TRUE@ $(am__DEPENDENCIES_1)
+SCRIPTS = $(dist_noinst_SCRIPTS)
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+DEFAULT_INCLUDES = -I.@am__isrc@
+depcomp = $(SHELL) $(top_srcdir)/depcomp
+am__depfiles_maybe = depfiles
+am__mv = mv -f
+COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \
+ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
+AM_V_CC = $(am__v_CC_@AM_V@)
+am__v_CC_ = $(am__v_CC_@AM_DEFAULT_V@)
+am__v_CC_0 = @echo " CC " $@;
+am__v_CC_1 =
+CCLD = $(CC)
+LINK = $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@
+AM_V_CCLD = $(am__v_CCLD_@AM_V@)
+am__v_CCLD_ = $(am__v_CCLD_@AM_DEFAULT_V@)
+am__v_CCLD_0 = @echo " CCLD " $@;
+am__v_CCLD_1 =
+CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \
+ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS)
+AM_V_CXX = $(am__v_CXX_@AM_V@)
+am__v_CXX_ = $(am__v_CXX_@AM_DEFAULT_V@)
+am__v_CXX_0 = @echo " CXX " $@;
+am__v_CXX_1 =
+CXXLD = $(CXX)
+CXXLINK = $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) \
+ -o $@
+AM_V_CXXLD = $(am__v_CXXLD_@AM_V@)
+am__v_CXXLD_ = $(am__v_CXXLD_@AM_DEFAULT_V@)
+am__v_CXXLD_0 = @echo " CXXLD " $@;
+am__v_CXXLD_1 =
+SOURCES = $(apps_plugin_SOURCES) $(cgroup_network_SOURCES) \
+ $(cups_plugin_SOURCES) $(freeipmi_plugin_SOURCES) \
+ $(netdata_SOURCES) $(nodist_netdata_SOURCES) \
+ $(nfacct_plugin_SOURCES) $(perf_plugin_SOURCES) \
+ $(xenstat_plugin_SOURCES)
+DIST_SOURCES = $(am__apps_plugin_SOURCES_DIST) \
+ $(am__cgroup_network_SOURCES_DIST) \
+ $(am__cups_plugin_SOURCES_DIST) \
+ $(am__freeipmi_plugin_SOURCES_DIST) \
+ $(am__netdata_SOURCES_DIST) $(am__nfacct_plugin_SOURCES_DIST) \
+ $(am__perf_plugin_SOURCES_DIST) \
+ $(am__xenstat_plugin_SOURCES_DIST)
+RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \
+ ctags-recursive dvi-recursive html-recursive info-recursive \
+ install-data-recursive install-dvi-recursive \
+ install-exec-recursive install-html-recursive \
+ install-info-recursive install-pdf-recursive \
+ install-ps-recursive install-recursive installcheck-recursive \
+ installdirs-recursive pdf-recursive ps-recursive \
+ tags-recursive uninstall-recursive
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
+am__vpath_adj = case $$p in \
+ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
+ *) f=$$p;; \
+ esac;
+am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
+am__install_max = 40
+am__nobase_strip_setup = \
+ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
+am__nobase_strip = \
+ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
+am__nobase_list = $(am__nobase_strip_setup); \
+ for p in $$list; do echo "$$p $$p"; done | \
+ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
+ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
+ if (++n[$$2] == $(am__install_max)) \
+ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
+ END { for (dir in files) print dir, files[dir] }'
+am__base_list = \
+ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
+ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
+am__uninstall_files_from_dir = { \
+ test -z "$$files" \
+ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \
+ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \
+ $(am__cd) "$$dir" && rm -f $$files; }; \
+ }
+DATA = $(dist_cache_DATA) $(dist_log_DATA) $(dist_noinst_DATA) \
+ $(dist_registry_DATA) $(dist_varlib_DATA)
+RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \
+ distclean-recursive maintainer-clean-recursive
+am__recursive_targets = \
+ $(RECURSIVE_TARGETS) \
+ $(RECURSIVE_CLEAN_TARGETS) \
+ $(am__extra_recursive_targets)
+AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \
+ cscope distdir dist dist-all distcheck
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) \
+ $(LISP)config.h.in
+# Read a list of newline-separated strings from the standard input,
+# and print each of them once, without duplicates. Input order is
+# *not* preserved.
+am__uniquify_input = $(AWK) '\
+ BEGIN { nonempty = 0; } \
+ { items[$$0] = 1; nonempty = 1; } \
+ END { if (nonempty) { for (i in items) print i; }; } \
+'
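+# For illustration only (not used by any rule): piping
+#   printf 'a.c\nb.c\na.c\n'
+# through the script above prints "a.c" and "b.c" exactly once each, in
+# whatever order awk happens to enumerate its array keys.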
+# Make sure the list of sources is unique. This is necessary because,
+# e.g., the same source file might be shared among _SOURCES variables
+# for different programs/libraries.
+am__define_uniq_tagged_files = \
+ list='$(am__tagged_files)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | $(am__uniquify_input)`
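+# In effect: files that exist are kept as-is, anything else is assumed to
+# live in $(srcdir) (a VPATH-build detail), and the combined list is fed
+# through am__uniquify_input before being handed to the tags tools below.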
+ETAGS = etags
+CTAGS = ctags
+CSCOPE = cscope
+DIST_SUBDIRS = $(SUBDIRS)
+am__DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/config.h.in \
+ $(srcdir)/netdata.spec.in compile config.guess config.sub \
+ depcomp install-sh missing
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+distdir = $(PACKAGE)-$(VERSION)
+top_distdir = $(distdir)
+am__remove_distdir = \
+ if test -d "$(distdir)"; then \
+ find "$(distdir)" -type d ! -perm -200 -exec chmod u+w {} ';' \
+ && rm -rf "$(distdir)" \
+ || { sleep 5 && rm -rf "$(distdir)"; }; \
+ else :; fi
+am__post_remove_distdir = $(am__remove_distdir)
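+# am__relativize below rewrites the path in $$dir1 to be relative to
+# $$dir0 (the current directory), leaving the result in $$reldir; the
+# recursive distdir rule uses it to derive ../-prefixed subdirectory paths.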
+am__relativize = \
+ dir0=`pwd`; \
+ sed_first='s,^\([^/]*\)/.*$$,\1,'; \
+ sed_rest='s,^[^/]*/*,,'; \
+ sed_last='s,^.*/\([^/]*\)$$,\1,'; \
+ sed_butlast='s,/*[^/]*$$,,'; \
+ while test -n "$$dir1"; do \
+ first=`echo "$$dir1" | sed -e "$$sed_first"`; \
+ if test "$$first" != "."; then \
+ if test "$$first" = ".."; then \
+ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \
+ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \
+ else \
+ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \
+ if test "$$first2" = "$$first"; then \
+ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \
+ else \
+ dir2="../$$dir2"; \
+ fi; \
+ dir0="$$dir0"/"$$first"; \
+ fi; \
+ fi; \
+ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \
+ done; \
+ reldir="$$dir2"
+DIST_ARCHIVES = $(distdir).tar.gz
+GZIP_ENV = --best
+DIST_TARGETS = dist-gzip
+distuninstallcheck_listfiles = find . -type f -print
+am__distuninstallcheck_listfiles = $(distuninstallcheck_listfiles) \
+ | sed 's|^\./|$(prefix)/|' | grep -v '$(infodir)/dir$$'
+distcleancheck_listfiles = find . -type f -print
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CUPSCONFIG = @CUPSCONFIG@
+CXX = @CXX@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CXX_BINARY = @CXX_BINARY@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+JSON_CFLAGS = @JSON_CFLAGS@
+JSON_LIBS = @JSON_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
+LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
+LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
+LIBCURL_LIBS = @LIBCURL_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
+LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
+LIBSSL_LIBS = @LIBSSL_LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
+OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
+OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
+OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
+OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
+OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
+OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
+OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
+OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
+OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
+OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
+OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
+OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
+PROTOBUF_LIBS = @PROTOBUF_LIBS@
+PROTOC = @PROTOC@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
+XENLIGHT_LIBS = @XENLIGHT_LIBS@
+YAJL_CFLAGS = @YAJL_CFLAGS@
+YAJL_LIBS = @YAJL_LIBS@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = foreign subdir-objects 1.11
+ACLOCAL_AMFLAGS = -I build/m4
+MAINTAINERCLEANFILES = \
+ config.log config.status \
+ $(srcdir)/Makefile.in \
+ $(srcdir)/config.h.in $(srcdir)/config.h.in~ $(srcdir)/configure \
+ $(srcdir)/install-sh $(srcdir)/ltmain.sh $(srcdir)/missing \
+ $(srcdir)/compile $(srcdir)/depcomp $(srcdir)/aclocal.m4 \
+ $(srcdir)/config.guess $(srcdir)/config.sub \
+ $(srcdir)/m4/ltsugar.m4 $(srcdir)/m4/libtool.m4 \
+ $(srcdir)/m4/ltversion.m4 $(srcdir)/m4/lt~obsolete.m4 \
+	$(srcdir)/m4/ltoptions.m4
+
+CLEANFILES = \
+ $(srcdir)/$(distdir).tar.gz
+
+EXTRA_DIST = \
+ .gitignore \
+ .csslintrc \
+ .eslintignore \
+ .eslintrc \
+ .github/CODEOWNERS \
+ build/m4/jemalloc.m4 \
+ build/m4/ax_c___atomic.m4 \
+ build/m4/ax_check_enable_debug.m4 \
+ build/m4/ax_c_mallinfo.m4 \
+ build/m4/ax_gcc_func_attribute.m4 \
+ build/m4/ax_check_compile_flag.m4 \
+ build/m4/ax_c_statement_expressions.m4 \
+ build/m4/ax_pthread.m4 \
+ build/m4/ax_c_lto.m4 \
+ build/m4/ax_c_mallopt.m4 \
+ build/m4/tcmalloc.m4 \
+ build/m4/ax_c__generic.m4 \
+ README.md \
+ CONTRIBUTORS.md \
+ CODE_OF_CONDUCT.md \
+ LICENSE \
+ REDISTRIBUTED.md \
+ CONTRIBUTING.md \
+ $(NULL)
+
+
+# -----------------------------------------------------------------------------
+# Compile netdata binaries
+SUBDIRS = diagrams system tests $(NULL) backends collectors daemon \
+ database health libnetdata registry streaming web $(NULL)
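+# (the mid-list $(NULL) words above are empty Automake list terminators
+# left over from flattening conditional fragments; they expand to nothing)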
+dist_noinst_DATA = \
+ CHANGELOG.md \
+ cppcheck.sh \
+ configs.signatures \
+ contrib \
+ netdata.cppcheck \
+ netdata.spec \
+ package.json \
+ docs \
+ packaging/version \
+ packaging/go.d.version \
+ packaging/go.d.checksums \
+ packaging/installer/README.md \
+ packaging/installer/UNINSTALL.md \
+ packaging/installer/UPDATE.md \
+ netlify.toml \
+ $(NULL)
+
+
+# not yet integrated within the build;
+# these should eventually become proper scripts usable from init.d/openrc/systemd
+dist_noinst_SCRIPTS = \
+ coverity-scan.sh \
+ coverity-install.sh \
+ packaging/installer/netdata-updater.sh \
+ packaging/installer/netdata-uninstaller.sh \
+ packaging/installer/kickstart.sh \
+ packaging/installer/kickstart-static64.sh \
+ packaging/installer/functions.sh \
+ netdata-installer.sh \
+ docs/generator/buildhtml.sh \
+ docs/generator/buildyaml.sh \
+ docs/generator/checklinks.sh \
+ docs/generator/requirements.txt \
+ docs/generator/runtime.txt \
+ $(NULL)
+
+AM_CFLAGS = \
+ $(OPTIONAL_MATH_CFLAGS) \
+ $(OPTIONAL_NFACCT_CFLAGS) \
+ $(OPTIONAL_ZLIB_CFLAGS) \
+ $(OPTIONAL_UUID_CFLAGS) \
+	$(OPTIONAL_LIBCAP_CFLAGS) \
+ $(OPTIONAL_IPMIMONITORING_CFLAGS) \
+ $(OPTIONAL_CUPS_CFLAGS) \
+ $(OPTIONAL_XENSTAT_CFLAGS) \
+ $(NULL)
+
+dist_cache_DATA = packaging/installer/.keep
+dist_varlib_DATA = packaging/installer/.keep
+dist_registry_DATA = packaging/installer/.keep
+dist_log_DATA = packaging/installer/.keep
+LIBNETDATA_FILES = \
+ libnetdata/adaptive_resortable_list/adaptive_resortable_list.c \
+ libnetdata/adaptive_resortable_list/adaptive_resortable_list.h \
+ libnetdata/config/appconfig.c \
+ libnetdata/config/appconfig.h \
+ libnetdata/avl/avl.c \
+ libnetdata/avl/avl.h \
+ libnetdata/buffer/buffer.c \
+ libnetdata/buffer/buffer.h \
+ libnetdata/clocks/clocks.c \
+ libnetdata/clocks/clocks.h \
+ libnetdata/dictionary/dictionary.c \
+ libnetdata/dictionary/dictionary.h \
+ libnetdata/eval/eval.c \
+ libnetdata/eval/eval.h \
+ libnetdata/inlined.h \
+ libnetdata/libnetdata.c \
+ libnetdata/libnetdata.h \
+ libnetdata/locks/locks.c \
+ libnetdata/locks/locks.h \
+ libnetdata/log/log.c \
+ libnetdata/log/log.h \
+ libnetdata/popen/popen.c \
+ libnetdata/popen/popen.h \
+ libnetdata/procfile/procfile.c \
+ libnetdata/procfile/procfile.h \
+ libnetdata/os.c \
+ libnetdata/os.h \
+ libnetdata/simple_pattern/simple_pattern.c \
+ libnetdata/simple_pattern/simple_pattern.h \
+ libnetdata/socket/socket.c \
+ libnetdata/socket/socket.h \
+ libnetdata/socket/security.c \
+ libnetdata/socket/security.h \
+ libnetdata/statistical/statistical.c \
+ libnetdata/statistical/statistical.h \
+ libnetdata/storage_number/storage_number.c \
+ libnetdata/storage_number/storage_number.h \
+ libnetdata/threads/threads.c \
+ libnetdata/threads/threads.h \
+ libnetdata/url/url.c \
+ libnetdata/url/url.h \
+ libnetdata/json/json.c \
+ libnetdata/json/json.h \
+ libnetdata/json/jsmn.c \
+ libnetdata/json/jsmn.h \
+ libnetdata/health/health.c \
+ libnetdata/health/health.h \
+ libnetdata/string/utf8.h \
+ $(NULL)
+
+APPS_PLUGIN_FILES = \
+ collectors/apps.plugin/apps_plugin.c \
+ $(LIBNETDATA_FILES) \
+ $(NULL)
+
+CHECKS_PLUGIN_FILES = \
+ collectors/checks.plugin/plugin_checks.c \
+ collectors/checks.plugin/plugin_checks.h \
+ $(NULL)
+
+FREEBSD_PLUGIN_FILES = \
+ collectors/freebsd.plugin/plugin_freebsd.c \
+ collectors/freebsd.plugin/plugin_freebsd.h \
+ collectors/freebsd.plugin/freebsd_sysctl.c \
+ collectors/freebsd.plugin/freebsd_getmntinfo.c \
+ collectors/freebsd.plugin/freebsd_getifaddrs.c \
+ collectors/freebsd.plugin/freebsd_devstat.c \
+ collectors/freebsd.plugin/freebsd_kstat_zfs.c \
+ collectors/freebsd.plugin/freebsd_ipfw.c \
+ collectors/proc.plugin/zfs_common.c \
+ collectors/proc.plugin/zfs_common.h \
+ $(NULL)
+
+HEALTH_PLUGIN_FILES = \
+ health/health.c \
+ health/health.h \
+ health/health_config.c \
+ health/health_json.c \
+ health/health_log.c \
+ $(NULL)
+
+IDLEJITTER_PLUGIN_FILES = \
+ collectors/idlejitter.plugin/plugin_idlejitter.c \
+ collectors/idlejitter.plugin/plugin_idlejitter.h \
+ $(NULL)
+
+CGROUPS_PLUGIN_FILES = \
+ collectors/cgroups.plugin/sys_fs_cgroup.c \
+ collectors/cgroups.plugin/sys_fs_cgroup.h \
+ $(NULL)
+
+CGROUP_NETWORK_FILES = \
+ collectors/cgroups.plugin/cgroup-network.c \
+ $(LIBNETDATA_FILES) \
+ $(NULL)
+
+DISKSPACE_PLUGIN_FILES = \
+ collectors/diskspace.plugin/plugin_diskspace.h \
+ collectors/diskspace.plugin/plugin_diskspace.c \
+ $(NULL)
+
+FREEIPMI_PLUGIN_FILES = \
+ collectors/freeipmi.plugin/freeipmi_plugin.c \
+ $(LIBNETDATA_FILES) \
+ $(NULL)
+
+CUPS_PLUGIN_FILES = \
+ collectors/cups.plugin/cups_plugin.c \
+ $(LIBNETDATA_FILES) \
+ $(NULL)
+
+NFACCT_PLUGIN_FILES = \
+ collectors/nfacct.plugin/plugin_nfacct.c \
+ $(LIBNETDATA_FILES) \
+ $(NULL)
+
+XENSTAT_PLUGIN_FILES = \
+ collectors/xenstat.plugin/xenstat_plugin.c \
+ $(LIBNETDATA_FILES) \
+ $(NULL)
+
+PERF_PLUGIN_FILES = \
+ collectors/perf.plugin/perf_plugin.c \
+ $(LIBNETDATA_FILES) \
+ $(NULL)
+
+PROC_PLUGIN_FILES = \
+ collectors/proc.plugin/ipc.c \
+ collectors/proc.plugin/plugin_proc.c \
+ collectors/proc.plugin/plugin_proc.h \
+ collectors/proc.plugin/proc_diskstats.c \
+ collectors/proc.plugin/proc_mdstat.c \
+ collectors/proc.plugin/proc_interrupts.c \
+ collectors/proc.plugin/proc_softirqs.c \
+ collectors/proc.plugin/proc_loadavg.c \
+ collectors/proc.plugin/proc_meminfo.c \
+ collectors/proc.plugin/proc_net_dev.c \
+ collectors/proc.plugin/proc_net_ip_vs_stats.c \
+ collectors/proc.plugin/proc_net_netstat.c \
+ collectors/proc.plugin/proc_net_rpc_nfs.c \
+ collectors/proc.plugin/proc_net_rpc_nfsd.c \
+ collectors/proc.plugin/proc_net_snmp.c \
+ collectors/proc.plugin/proc_net_snmp6.c \
+ collectors/proc.plugin/proc_net_sctp_snmp.c \
+ collectors/proc.plugin/proc_net_sockstat.c \
+ collectors/proc.plugin/proc_net_sockstat6.c \
+ collectors/proc.plugin/proc_net_softnet_stat.c \
+ collectors/proc.plugin/proc_net_stat_conntrack.c \
+ collectors/proc.plugin/proc_net_stat_synproxy.c \
+ collectors/proc.plugin/proc_self_mountinfo.c \
+ collectors/proc.plugin/proc_self_mountinfo.h \
+ collectors/proc.plugin/zfs_common.c \
+ collectors/proc.plugin/zfs_common.h \
+ collectors/proc.plugin/proc_spl_kstat_zfs.c \
+ collectors/proc.plugin/proc_stat.c \
+ collectors/proc.plugin/proc_sys_kernel_random_entropy_avail.c \
+ collectors/proc.plugin/proc_vmstat.c \
+ collectors/proc.plugin/proc_uptime.c \
+ collectors/proc.plugin/sys_kernel_mm_ksm.c \
+ collectors/proc.plugin/sys_block_zram.c \
+ collectors/proc.plugin/sys_devices_system_edac_mc.c \
+ collectors/proc.plugin/sys_devices_system_node.c \
+ collectors/proc.plugin/sys_fs_btrfs.c \
+ collectors/proc.plugin/sys_class_power_supply.c \
+ $(NULL)
+
+TC_PLUGIN_FILES = \
+ collectors/tc.plugin/plugin_tc.c \
+ collectors/tc.plugin/plugin_tc.h \
+ $(NULL)
+
+MACOS_PLUGIN_FILES = \
+ collectors/macos.plugin/plugin_macos.c \
+ collectors/macos.plugin/plugin_macos.h \
+ collectors/macos.plugin/macos_sysctl.c \
+ collectors/macos.plugin/macos_mach_smi.c \
+ collectors/macos.plugin/macos_fw.c \
+ $(NULL)
+
+PLUGINSD_PLUGIN_FILES = \
+ collectors/plugins.d/plugins_d.c \
+ collectors/plugins.d/plugins_d.h \
+ $(NULL)
+
+RRD_PLUGIN_FILES = database/rrdcalc.c database/rrdcalc.h \
+ database/rrdcalctemplate.c database/rrdcalctemplate.h \
+ database/rrddim.c database/rrddimvar.c database/rrddimvar.h \
+ database/rrdfamily.c database/rrdhost.c database/rrd.c \
+ database/rrd.h database/rrdset.c database/rrdsetvar.c \
+ database/rrdsetvar.h database/rrdvar.c database/rrdvar.h \
+ $(NULL) $(am__append_1)
+API_PLUGIN_FILES = \
+ web/api/badges/web_buffer_svg.c \
+ web/api/badges/web_buffer_svg.h \
+ web/api/exporters/allmetrics.c \
+ web/api/exporters/allmetrics.h \
+ web/api/exporters/shell/allmetrics_shell.c \
+ web/api/exporters/shell/allmetrics_shell.h \
+ web/api/queries/average/average.c \
+ web/api/queries/average/average.h \
+ web/api/queries/des/des.c \
+ web/api/queries/des/des.h \
+ web/api/queries/incremental_sum/incremental_sum.c \
+ web/api/queries/incremental_sum/incremental_sum.h \
+ web/api/queries/max/max.c \
+ web/api/queries/max/max.h \
+ web/api/queries/median/median.c \
+ web/api/queries/median/median.h \
+ web/api/queries/min/min.c \
+ web/api/queries/min/min.h \
+ web/api/queries/query.c \
+ web/api/queries/query.h \
+ web/api/queries/rrdr.c \
+ web/api/queries/rrdr.h \
+ web/api/queries/ses/ses.c \
+ web/api/queries/ses/ses.h \
+ web/api/queries/stddev/stddev.c \
+ web/api/queries/stddev/stddev.h \
+ web/api/queries/sum/sum.c \
+ web/api/queries/sum/sum.h \
+ web/api/formatters/rrd2json.c \
+ web/api/formatters/rrd2json.h \
+ web/api/formatters/csv/csv.c \
+ web/api/formatters/csv/csv.h \
+ web/api/formatters/json/json.c \
+ web/api/formatters/json/json.h \
+ web/api/formatters/ssv/ssv.c \
+ web/api/formatters/ssv/ssv.h \
+ web/api/formatters/value/value.c \
+ web/api/formatters/value/value.h \
+ web/api/formatters/json_wrapper.c \
+ web/api/formatters/json_wrapper.h \
+ web/api/formatters/charts2json.c \
+ web/api/formatters/charts2json.h \
+ web/api/formatters/rrdset2json.c \
+ web/api/formatters/rrdset2json.h \
+ web/api/health/health_cmdapi.c \
+ web/api/health/health_cmdapi.h \
+ web/api/web_api_v1.c \
+ web/api/web_api_v1.h \
+ $(NULL)
+
+STREAMING_PLUGIN_FILES = \
+ streaming/rrdpush.c \
+ streaming/rrdpush.h \
+ $(NULL)
+
+REGISTRY_PLUGIN_FILES = \
+ registry/registry.c \
+ registry/registry.h \
+ registry/registry_db.c \
+ registry/registry_init.c \
+ registry/registry_internals.c \
+ registry/registry_internals.h \
+ registry/registry_log.c \
+ registry/registry_machine.c \
+ registry/registry_machine.h \
+ registry/registry_person.c \
+ registry/registry_person.h \
+ registry/registry_url.c \
+ registry/registry_url.h \
+ $(NULL)
+
+STATSD_PLUGIN_FILES = \
+ collectors/statsd.plugin/statsd.c \
+ collectors/statsd.plugin/statsd.h \
+ $(NULL)
+
+WEB_PLUGIN_FILES = \
+ web/server/web_client.c \
+ web/server/web_client.h \
+ web/server/web_server.c \
+ web/server/web_server.h \
+ web/server/web_client_cache.c \
+ web/server/web_client_cache.h \
+ web/server/static/static-threaded.c \
+ web/server/static/static-threaded.h \
+ $(NULL)
+
+BACKENDS_PLUGIN_FILES = \
+ backends/backends.c \
+ backends/backends.h \
+ backends/graphite/graphite.c \
+ backends/graphite/graphite.h \
+ backends/json/json.c \
+ backends/json/json.h \
+ backends/opentsdb/opentsdb.c \
+ backends/opentsdb/opentsdb.h \
+ backends/prometheus/backend_prometheus.c \
+ backends/prometheus/backend_prometheus.h \
+ $(NULL)
+
+KINESIS_BACKEND_FILES = \
+ backends/aws_kinesis/aws_kinesis.c \
+ backends/aws_kinesis/aws_kinesis.h \
+ backends/aws_kinesis/aws_kinesis_put_record.cc \
+ backends/aws_kinesis/aws_kinesis_put_record.h \
+ $(NULL)
+
+PROMETHEUS_REMOTE_WRITE_BACKEND_FILES = \
+ backends/prometheus/remote_write/remote_write.cc \
+ backends/prometheus/remote_write/remote_write.h \
+ backends/prometheus/remote_write/remote_write.proto \
+ $(NULL)
+
+MONGODB_BACKEND_FILES = \
+ backends/mongodb/mongodb.c \
+ backends/mongodb/mongodb.h \
+ $(NULL)
+
+DAEMON_FILES = \
+ daemon/common.c \
+ daemon/common.h \
+ daemon/daemon.c \
+ daemon/daemon.h \
+ daemon/global_statistics.c \
+ daemon/global_statistics.h \
+ daemon/main.c \
+ daemon/main.h \
+ daemon/signals.c \
+ daemon/signals.h \
+ daemon/unit_test.c \
+ daemon/unit_test.h \
+ $(NULL)
+
+NETDATA_FILES = collectors/all.h $(DAEMON_FILES) $(LIBNETDATA_FILES) \
+ $(API_PLUGIN_FILES) $(BACKENDS_PLUGIN_FILES) \
+ $(CHECKS_PLUGIN_FILES) $(HEALTH_PLUGIN_FILES) \
+ $(IDLEJITTER_PLUGIN_FILES) $(PLUGINSD_PLUGIN_FILES) \
+ $(REGISTRY_PLUGIN_FILES) $(RRD_PLUGIN_FILES) \
+ $(STREAMING_PLUGIN_FILES) $(STATSD_PLUGIN_FILES) \
+ $(WEB_PLUGIN_FILES) $(NULL) $(am__append_2) $(am__append_3) \
+ $(am__append_4)
+NETDATA_COMMON_LIBS = \
+ $(OPTIONAL_MATH_LIBS) \
+ $(OPTIONAL_ZLIB_LIBS) \
+ $(OPTIONAL_SSL_LIBS) \
+ $(OPTIONAL_UUID_LIBS) \
+ $(OPTIONAL_UV_LIBS) \
+ $(OPTIONAL_LZ4_LIBS) \
+ $(OPTIONAL_JUDY_LIBS) \
+ $(OPTIONAL_SSL_LIBS) \
+ $(OPTIONAL_JSONC_LIBS) \
+ $(NULL)
+
+netdata_SOURCES = $(NETDATA_FILES) $(am__append_12) $(am__append_14) \
+ $(am__append_16)
+netdata_LDADD = $(NETDATA_COMMON_LIBS) $(NULL) $(am__append_13) \
+ $(am__append_15) $(am__append_17)
+@ENABLE_CXX_LINKER_FALSE@netdata_LINK = $(CCLD) $(CFLAGS) $(LDFLAGS) -o $@
+@ENABLE_CXX_LINKER_TRUE@netdata_LINK = $(CXXLD) $(CXXFLAGS) $(LDFLAGS) -o $@
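+# When a C++ backend (Kinesis or the Prometheus remote write exporter) is
+# compiled in, configure defines ENABLE_CXX_LINKER so that netdata is linked
+# with the C++ driver; otherwise the plain C link line above is used.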
+@ENABLE_PLUGIN_APPS_TRUE@apps_plugin_SOURCES = $(APPS_PLUGIN_FILES)
+@ENABLE_PLUGIN_APPS_TRUE@apps_plugin_LDADD = \
+@ENABLE_PLUGIN_APPS_TRUE@ $(NETDATA_COMMON_LIBS) \
+@ENABLE_PLUGIN_APPS_TRUE@ $(OPTIONAL_LIBCAP_LIBS) \
+@ENABLE_PLUGIN_APPS_TRUE@ $(NULL)
+
+@ENABLE_PLUGIN_CGROUP_NETWORK_TRUE@cgroup_network_SOURCES = $(CGROUP_NETWORK_FILES)
+@ENABLE_PLUGIN_CGROUP_NETWORK_TRUE@cgroup_network_LDADD = \
+@ENABLE_PLUGIN_CGROUP_NETWORK_TRUE@ $(NETDATA_COMMON_LIBS) \
+@ENABLE_PLUGIN_CGROUP_NETWORK_TRUE@ $(NULL)
+
+@ENABLE_PLUGIN_FREEIPMI_TRUE@freeipmi_plugin_SOURCES = $(FREEIPMI_PLUGIN_FILES)
+@ENABLE_PLUGIN_FREEIPMI_TRUE@freeipmi_plugin_LDADD = \
+@ENABLE_PLUGIN_FREEIPMI_TRUE@ $(NETDATA_COMMON_LIBS) \
+@ENABLE_PLUGIN_FREEIPMI_TRUE@ $(OPTIONAL_IPMIMONITORING_LIBS) \
+@ENABLE_PLUGIN_FREEIPMI_TRUE@ $(NULL)
+
+@ENABLE_PLUGIN_CUPS_TRUE@cups_plugin_SOURCES = $(CUPS_PLUGIN_FILES)
+@ENABLE_PLUGIN_CUPS_TRUE@cups_plugin_LDADD = \
+@ENABLE_PLUGIN_CUPS_TRUE@ $(NETDATA_COMMON_LIBS) \
+@ENABLE_PLUGIN_CUPS_TRUE@ $(OPTIONAL_CUPS_LIBS) \
+@ENABLE_PLUGIN_CUPS_TRUE@ $(NULL)
+
+@ENABLE_PLUGIN_NFACCT_TRUE@nfacct_plugin_SOURCES = $(NFACCT_PLUGIN_FILES)
+@ENABLE_PLUGIN_NFACCT_TRUE@nfacct_plugin_LDADD = \
+@ENABLE_PLUGIN_NFACCT_TRUE@ $(NETDATA_COMMON_LIBS) \
+@ENABLE_PLUGIN_NFACCT_TRUE@ $(OPTIONAL_NFACCT_LIBS) \
+@ENABLE_PLUGIN_NFACCT_TRUE@ $(NULL)
+
+@ENABLE_PLUGIN_XENSTAT_TRUE@xenstat_plugin_SOURCES = $(XENSTAT_PLUGIN_FILES)
+@ENABLE_PLUGIN_XENSTAT_TRUE@xenstat_plugin_LDADD = \
+@ENABLE_PLUGIN_XENSTAT_TRUE@ $(NETDATA_COMMON_LIBS) \
+@ENABLE_PLUGIN_XENSTAT_TRUE@ $(OPTIONAL_XENSTAT_LIBS) \
+@ENABLE_PLUGIN_XENSTAT_TRUE@ $(NULL)
+
+@ENABLE_PLUGIN_PERF_TRUE@perf_plugin_SOURCES = $(PERF_PLUGIN_FILES)
+@ENABLE_PLUGIN_PERF_TRUE@perf_plugin_LDADD = \
+@ENABLE_PLUGIN_PERF_TRUE@ $(NETDATA_COMMON_LIBS) \
+@ENABLE_PLUGIN_PERF_TRUE@ $(NULL)
+
+@ENABLE_BACKEND_PROMETHEUS_REMOTE_WRITE_TRUE@BUILT_SOURCES = \
+@ENABLE_BACKEND_PROMETHEUS_REMOTE_WRITE_TRUE@ backends/prometheus/remote_write/remote_write.pb.cc \
+@ENABLE_BACKEND_PROMETHEUS_REMOTE_WRITE_TRUE@ backends/prometheus/remote_write/remote_write.pb.h \
+@ENABLE_BACKEND_PROMETHEUS_REMOTE_WRITE_TRUE@ $(NULL)
+
+@ENABLE_BACKEND_PROMETHEUS_REMOTE_WRITE_TRUE@nodist_netdata_SOURCES = $(BUILT_SOURCES)
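+# Listing the protoc-generated sources in BUILT_SOURCES makes "make all"
+# (see the next rule) create remote_write.pb.cc/.pb.h before any objects
+# that include them are compiled.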
+all: $(BUILT_SOURCES) config.h
+ $(MAKE) $(AM_MAKEFLAGS) all-recursive
+
+.SUFFIXES:
+.SUFFIXES: .c .cc .o .obj
+am--refresh: Makefile
+ @:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ echo ' cd $(srcdir) && $(AUTOMAKE) --foreign'; \
+ $(am__cd) $(srcdir) && $(AUTOMAKE) --foreign \
+ && exit 0; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --foreign Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ echo ' $(SHELL) ./config.status'; \
+ $(SHELL) ./config.status;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ $(SHELL) ./config.status --recheck
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ $(am__cd) $(srcdir) && $(AUTOCONF)
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ $(am__cd) $(srcdir) && $(ACLOCAL) $(ACLOCAL_AMFLAGS)
+$(am__aclocal_m4_deps):
+
+config.h: stamp-h1
+ @test -f $@ || rm -f stamp-h1
+ @test -f $@ || $(MAKE) $(AM_MAKEFLAGS) stamp-h1
+
+stamp-h1: $(srcdir)/config.h.in $(top_builddir)/config.status
+ @rm -f stamp-h1
+ cd $(top_builddir) && $(SHELL) ./config.status config.h
+$(srcdir)/config.h.in: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ ($(am__cd) $(top_srcdir) && $(AUTOHEADER))
+ rm -f stamp-h1
+ touch $@
+
+distclean-hdr:
+ -rm -f config.h stamp-h1
+netdata.spec: $(top_builddir)/config.status $(srcdir)/netdata.spec.in
+ cd $(top_builddir) && $(SHELL) ./config.status $@
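+# The sed/awk pipeline below is stock Automake: it applies $(transform) to
+# each program name, groups the results by destination subdirectory, and
+# installs each group into $(DESTDIR)$(pluginsdir) via $(INSTALL_PROGRAM);
+# install-sbinPROGRAMS further below follows the same pattern for $(sbindir).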
+install-pluginsPROGRAMS: $(plugins_PROGRAMS)
+ @$(NORMAL_INSTALL)
+ @list='$(plugins_PROGRAMS)'; test -n "$(pluginsdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(pluginsdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(pluginsdir)" || exit 1; \
+ fi; \
+ for p in $$list; do echo "$$p $$p"; done | \
+ sed 's/$(EXEEXT)$$//' | \
+ while read p p1; do if test -f $$p \
+ ; then echo "$$p"; echo "$$p"; else :; fi; \
+ done | \
+ sed -e 'p;s,.*/,,;n;h' \
+ -e 's|.*|.|' \
+ -e 'p;x;s,.*/,,;s/$(EXEEXT)$$//;$(transform);s/$$/$(EXEEXT)/' | \
+ sed 'N;N;N;s,\n, ,g' | \
+ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1 } \
+ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \
+ if ($$2 == $$4) files[d] = files[d] " " $$1; \
+ else { print "f", $$3 "/" $$4, $$1; } } \
+ END { for (d in files) print "f", d, files[d] }' | \
+ while read type dir files; do \
+ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \
+ test -z "$$files" || { \
+ echo " $(INSTALL_PROGRAM_ENV) $(INSTALL_PROGRAM) $$files '$(DESTDIR)$(pluginsdir)$$dir'"; \
+ $(INSTALL_PROGRAM_ENV) $(INSTALL_PROGRAM) $$files "$(DESTDIR)$(pluginsdir)$$dir" || exit $$?; \
+ } \
+ ; done
+
+uninstall-pluginsPROGRAMS:
+ @$(NORMAL_UNINSTALL)
+ @list='$(plugins_PROGRAMS)'; test -n "$(pluginsdir)" || list=; \
+ files=`for p in $$list; do echo "$$p"; done | \
+ sed -e 'h;s,^.*/,,;s/$(EXEEXT)$$//;$(transform)' \
+ -e 's/$$/$(EXEEXT)/' \
+ `; \
+ test -n "$$list" || exit 0; \
+ echo " ( cd '$(DESTDIR)$(pluginsdir)' && rm -f" $$files ")"; \
+ cd "$(DESTDIR)$(pluginsdir)" && rm -f $$files
+
+clean-pluginsPROGRAMS:
+ -test -z "$(plugins_PROGRAMS)" || rm -f $(plugins_PROGRAMS)
+install-sbinPROGRAMS: $(sbin_PROGRAMS)
+ @$(NORMAL_INSTALL)
+ @list='$(sbin_PROGRAMS)'; test -n "$(sbindir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(sbindir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(sbindir)" || exit 1; \
+ fi; \
+ for p in $$list; do echo "$$p $$p"; done | \
+ sed 's/$(EXEEXT)$$//' | \
+ while read p p1; do if test -f $$p \
+ ; then echo "$$p"; echo "$$p"; else :; fi; \
+ done | \
+ sed -e 'p;s,.*/,,;n;h' \
+ -e 's|.*|.|' \
+ -e 'p;x;s,.*/,,;s/$(EXEEXT)$$//;$(transform);s/$$/$(EXEEXT)/' | \
+ sed 'N;N;N;s,\n, ,g' | \
+ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1 } \
+ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \
+ if ($$2 == $$4) files[d] = files[d] " " $$1; \
+ else { print "f", $$3 "/" $$4, $$1; } } \
+ END { for (d in files) print "f", d, files[d] }' | \
+ while read type dir files; do \
+ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \
+ test -z "$$files" || { \
+ echo " $(INSTALL_PROGRAM_ENV) $(INSTALL_PROGRAM) $$files '$(DESTDIR)$(sbindir)$$dir'"; \
+ $(INSTALL_PROGRAM_ENV) $(INSTALL_PROGRAM) $$files "$(DESTDIR)$(sbindir)$$dir" || exit $$?; \
+ } \
+ ; done
+
+uninstall-sbinPROGRAMS:
+ @$(NORMAL_UNINSTALL)
+ @list='$(sbin_PROGRAMS)'; test -n "$(sbindir)" || list=; \
+ files=`for p in $$list; do echo "$$p"; done | \
+ sed -e 'h;s,^.*/,,;s/$(EXEEXT)$$//;$(transform)' \
+ -e 's/$$/$(EXEEXT)/' \
+ `; \
+ test -n "$$list" || exit 0; \
+ echo " ( cd '$(DESTDIR)$(sbindir)' && rm -f" $$files ")"; \
+ cd "$(DESTDIR)$(sbindir)" && rm -f $$files
+
+clean-sbinPROGRAMS:
+ -test -z "$(sbin_PROGRAMS)" || rm -f $(sbin_PROGRAMS)
+collectors/apps.plugin/$(am__dirstamp):
+ @$(MKDIR_P) collectors/apps.plugin
+ @: > collectors/apps.plugin/$(am__dirstamp)
+collectors/apps.plugin/$(DEPDIR)/$(am__dirstamp):
+ @$(MKDIR_P) collectors/apps.plugin/$(DEPDIR)
+ @: > collectors/apps.plugin/$(DEPDIR)/$(am__dirstamp)
+collectors/apps.plugin/apps_plugin.$(OBJEXT): \
+ collectors/apps.plugin/$(am__dirstamp) \
+ collectors/apps.plugin/$(DEPDIR)/$(am__dirstamp)
+libnetdata/adaptive_resortable_list/$(am__dirstamp):
+ @$(MKDIR_P) libnetdata/adaptive_resortable_list
+ @: > libnetdata/adaptive_resortable_list/$(am__dirstamp)
+libnetdata/adaptive_resortable_list/$(DEPDIR)/$(am__dirstamp):
+ @$(MKDIR_P) libnetdata/adaptive_resortable_list/$(DEPDIR)
+ @: > libnetdata/adaptive_resortable_list/$(DEPDIR)/$(am__dirstamp)
+libnetdata/adaptive_resortable_list/adaptive_resortable_list.$(OBJEXT): \
+ libnetdata/adaptive_resortable_list/$(am__dirstamp) \
+ libnetdata/adaptive_resortable_list/$(DEPDIR)/$(am__dirstamp)
+libnetdata/config/$(am__dirstamp):
+ @$(MKDIR_P) libnetdata/config
+ @: > libnetdata/config/$(am__dirstamp)
+libnetdata/config/$(DEPDIR)/$(am__dirstamp):
+ @$(MKDIR_P) libnetdata/config/$(DEPDIR)
+ @: > libnetdata/config/$(DEPDIR)/$(am__dirstamp)
+libnetdata/config/appconfig.$(OBJEXT): \
+ libnetdata/config/$(am__dirstamp) \
+ libnetdata/config/$(DEPDIR)/$(am__dirstamp)
+libnetdata/avl/$(am__dirstamp):
+ @$(MKDIR_P) libnetdata/avl
+ @: > libnetdata/avl/$(am__dirstamp)
+libnetdata/avl/$(DEPDIR)/$(am__dirstamp):
+ @$(MKDIR_P) libnetdata/avl/$(DEPDIR)
+ @: > libnetdata/avl/$(DEPDIR)/$(am__dirstamp)
+libnetdata/avl/avl.$(OBJEXT): libnetdata/avl/$(am__dirstamp) \
+ libnetdata/avl/$(DEPDIR)/$(am__dirstamp)
+libnetdata/buffer/$(am__dirstamp):
+ @$(MKDIR_P) libnetdata/buffer
+ @: > libnetdata/buffer/$(am__dirstamp)
+libnetdata/buffer/$(DEPDIR)/$(am__dirstamp):
+ @$(MKDIR_P) libnetdata/buffer/$(DEPDIR)
+ @: > libnetdata/buffer/$(DEPDIR)/$(am__dirstamp)
+libnetdata/buffer/buffer.$(OBJEXT): libnetdata/buffer/$(am__dirstamp) \
+ libnetdata/buffer/$(DEPDIR)/$(am__dirstamp)
+libnetdata/clocks/$(am__dirstamp):
+ @$(MKDIR_P) libnetdata/clocks
+ @: > libnetdata/clocks/$(am__dirstamp)
+libnetdata/clocks/$(DEPDIR)/$(am__dirstamp):
+ @$(MKDIR_P) libnetdata/clocks/$(DEPDIR)
+ @: > libnetdata/clocks/$(DEPDIR)/$(am__dirstamp)
+libnetdata/clocks/clocks.$(OBJEXT): libnetdata/clocks/$(am__dirstamp) \
+ libnetdata/clocks/$(DEPDIR)/$(am__dirstamp)
+libnetdata/dictionary/$(am__dirstamp):
+ @$(MKDIR_P) libnetdata/dictionary
+ @: > libnetdata/dictionary/$(am__dirstamp)
+libnetdata/dictionary/$(DEPDIR)/$(am__dirstamp):
+ @$(MKDIR_P) libnetdata/dictionary/$(DEPDIR)
+ @: > libnetdata/dictionary/$(DEPDIR)/$(am__dirstamp)
+libnetdata/dictionary/dictionary.$(OBJEXT): \
+ libnetdata/dictionary/$(am__dirstamp) \
+ libnetdata/dictionary/$(DEPDIR)/$(am__dirstamp)
+libnetdata/eval/$(am__dirstamp):
+ @$(MKDIR_P) libnetdata/eval
+ @: > libnetdata/eval/$(am__dirstamp)
+libnetdata/eval/$(DEPDIR)/$(am__dirstamp):
+ @$(MKDIR_P) libnetdata/eval/$(DEPDIR)
+ @: > libnetdata/eval/$(DEPDIR)/$(am__dirstamp)
+libnetdata/eval/eval.$(OBJEXT): libnetdata/eval/$(am__dirstamp) \
+ libnetdata/eval/$(DEPDIR)/$(am__dirstamp)
+libnetdata/$(am__dirstamp):
+ @$(MKDIR_P) libnetdata
+ @: > libnetdata/$(am__dirstamp)
+libnetdata/$(DEPDIR)/$(am__dirstamp):
+ @$(MKDIR_P) libnetdata/$(DEPDIR)
+ @: > libnetdata/$(DEPDIR)/$(am__dirstamp)
+libnetdata/libnetdata.$(OBJEXT): libnetdata/$(am__dirstamp) \
+ libnetdata/$(DEPDIR)/$(am__dirstamp)
+libnetdata/locks/$(am__dirstamp):
+ @$(MKDIR_P) libnetdata/locks
+ @: > libnetdata/locks/$(am__dirstamp)
+libnetdata/locks/$(DEPDIR)/$(am__dirstamp):
+ @$(MKDIR_P) libnetdata/locks/$(DEPDIR)
+ @: > libnetdata/locks/$(DEPDIR)/$(am__dirstamp)
+libnetdata/locks/locks.$(OBJEXT): libnetdata/locks/$(am__dirstamp) \
+ libnetdata/locks/$(DEPDIR)/$(am__dirstamp)
+libnetdata/log/$(am__dirstamp):
+ @$(MKDIR_P) libnetdata/log
+ @: > libnetdata/log/$(am__dirstamp)
+libnetdata/log/$(DEPDIR)/$(am__dirstamp):
+ @$(MKDIR_P) libnetdata/log/$(DEPDIR)
+ @: > libnetdata/log/$(DEPDIR)/$(am__dirstamp)
+libnetdata/log/log.$(OBJEXT): libnetdata/log/$(am__dirstamp) \
+ libnetdata/log/$(DEPDIR)/$(am__dirstamp)
+libnetdata/popen/$(am__dirstamp):
+ @$(MKDIR_P) libnetdata/popen
+ @: > libnetdata/popen/$(am__dirstamp)
+libnetdata/popen/$(DEPDIR)/$(am__dirstamp):
+ @$(MKDIR_P) libnetdata/popen/$(DEPDIR)
+ @: > libnetdata/popen/$(DEPDIR)/$(am__dirstamp)
+libnetdata/popen/popen.$(OBJEXT): libnetdata/popen/$(am__dirstamp) \
+ libnetdata/popen/$(DEPDIR)/$(am__dirstamp)
+libnetdata/procfile/$(am__dirstamp):
+ @$(MKDIR_P) libnetdata/procfile
+ @: > libnetdata/procfile/$(am__dirstamp)
+libnetdata/procfile/$(DEPDIR)/$(am__dirstamp):
+ @$(MKDIR_P) libnetdata/procfile/$(DEPDIR)
+ @: > libnetdata/procfile/$(DEPDIR)/$(am__dirstamp)
+libnetdata/procfile/procfile.$(OBJEXT): \
+ libnetdata/procfile/$(am__dirstamp) \
+ libnetdata/procfile/$(DEPDIR)/$(am__dirstamp)
+libnetdata/os.$(OBJEXT): libnetdata/$(am__dirstamp) \
+ libnetdata/$(DEPDIR)/$(am__dirstamp)
+libnetdata/simple_pattern/$(am__dirstamp):
+ @$(MKDIR_P) libnetdata/simple_pattern
+ @: > libnetdata/simple_pattern/$(am__dirstamp)
+libnetdata/simple_pattern/$(DEPDIR)/$(am__dirstamp):
+ @$(MKDIR_P) libnetdata/simple_pattern/$(DEPDIR)
+ @: > libnetdata/simple_pattern/$(DEPDIR)/$(am__dirstamp)
+libnetdata/simple_pattern/simple_pattern.$(OBJEXT): \
+ libnetdata/simple_pattern/$(am__dirstamp) \
+ libnetdata/simple_pattern/$(DEPDIR)/$(am__dirstamp)
+libnetdata/socket/$(am__dirstamp):
+ @$(MKDIR_P) libnetdata/socket
+ @: > libnetdata/socket/$(am__dirstamp)
+libnetdata/socket/$(DEPDIR)/$(am__dirstamp):
+ @$(MKDIR_P) libnetdata/socket/$(DEPDIR)
+ @: > libnetdata/socket/$(DEPDIR)/$(am__dirstamp)
+libnetdata/socket/socket.$(OBJEXT): libnetdata/socket/$(am__dirstamp) \
+ libnetdata/socket/$(DEPDIR)/$(am__dirstamp)
+libnetdata/socket/security.$(OBJEXT): \
+ libnetdata/socket/$(am__dirstamp) \
+ libnetdata/socket/$(DEPDIR)/$(am__dirstamp)
+libnetdata/statistical/$(am__dirstamp):
+ @$(MKDIR_P) libnetdata/statistical
+ @: > libnetdata/statistical/$(am__dirstamp)
+libnetdata/statistical/$(DEPDIR)/$(am__dirstamp):
+ @$(MKDIR_P) libnetdata/statistical/$(DEPDIR)
+ @: > libnetdata/statistical/$(DEPDIR)/$(am__dirstamp)
+libnetdata/statistical/statistical.$(OBJEXT): \
+ libnetdata/statistical/$(am__dirstamp) \
+ libnetdata/statistical/$(DEPDIR)/$(am__dirstamp)
+libnetdata/storage_number/$(am__dirstamp):
+ @$(MKDIR_P) libnetdata/storage_number
+ @: > libnetdata/storage_number/$(am__dirstamp)
+libnetdata/storage_number/$(DEPDIR)/$(am__dirstamp):
+ @$(MKDIR_P) libnetdata/storage_number/$(DEPDIR)
+ @: > libnetdata/storage_number/$(DEPDIR)/$(am__dirstamp)
+libnetdata/storage_number/storage_number.$(OBJEXT): \
+ libnetdata/storage_number/$(am__dirstamp) \
+ libnetdata/storage_number/$(DEPDIR)/$(am__dirstamp)
+libnetdata/threads/$(am__dirstamp):
+ @$(MKDIR_P) libnetdata/threads
+ @: > libnetdata/threads/$(am__dirstamp)
+libnetdata/threads/$(DEPDIR)/$(am__dirstamp):
+ @$(MKDIR_P) libnetdata/threads/$(DEPDIR)
+ @: > libnetdata/threads/$(DEPDIR)/$(am__dirstamp)
+libnetdata/threads/threads.$(OBJEXT): \
+ libnetdata/threads/$(am__dirstamp) \
+ libnetdata/threads/$(DEPDIR)/$(am__dirstamp)
+libnetdata/url/$(am__dirstamp):
+ @$(MKDIR_P) libnetdata/url
+ @: > libnetdata/url/$(am__dirstamp)
+libnetdata/url/$(DEPDIR)/$(am__dirstamp):
+ @$(MKDIR_P) libnetdata/url/$(DEPDIR)
+ @: > libnetdata/url/$(DEPDIR)/$(am__dirstamp)
+libnetdata/url/url.$(OBJEXT): libnetdata/url/$(am__dirstamp) \
+ libnetdata/url/$(DEPDIR)/$(am__dirstamp)
+libnetdata/json/$(am__dirstamp):
+ @$(MKDIR_P) libnetdata/json
+ @: > libnetdata/json/$(am__dirstamp)
+libnetdata/json/$(DEPDIR)/$(am__dirstamp):
+ @$(MKDIR_P) libnetdata/json/$(DEPDIR)
+ @: > libnetdata/json/$(DEPDIR)/$(am__dirstamp)
+libnetdata/json/json.$(OBJEXT): libnetdata/json/$(am__dirstamp) \
+ libnetdata/json/$(DEPDIR)/$(am__dirstamp)
+libnetdata/json/jsmn.$(OBJEXT): libnetdata/json/$(am__dirstamp) \
+ libnetdata/json/$(DEPDIR)/$(am__dirstamp)
+libnetdata/health/$(am__dirstamp):
+ @$(MKDIR_P) libnetdata/health
+ @: > libnetdata/health/$(am__dirstamp)
+libnetdata/health/$(DEPDIR)/$(am__dirstamp):
+ @$(MKDIR_P) libnetdata/health/$(DEPDIR)
+ @: > libnetdata/health/$(DEPDIR)/$(am__dirstamp)
+libnetdata/health/health.$(OBJEXT): libnetdata/health/$(am__dirstamp) \
+ libnetdata/health/$(DEPDIR)/$(am__dirstamp)
+
+apps.plugin$(EXEEXT): $(apps_plugin_OBJECTS) $(apps_plugin_DEPENDENCIES) $(EXTRA_apps_plugin_DEPENDENCIES)
+ @rm -f apps.plugin$(EXEEXT)
+ $(AM_V_CCLD)$(LINK) $(apps_plugin_OBJECTS) $(apps_plugin_LDADD) $(LIBS)
+collectors/cgroups.plugin/$(am__dirstamp):
+ @$(MKDIR_P) collectors/cgroups.plugin
+ @: > collectors/cgroups.plugin/$(am__dirstamp)
+collectors/cgroups.plugin/$(DEPDIR)/$(am__dirstamp):
+ @$(MKDIR_P) collectors/cgroups.plugin/$(DEPDIR)
+ @: > collectors/cgroups.plugin/$(DEPDIR)/$(am__dirstamp)
+collectors/cgroups.plugin/cgroup-network.$(OBJEXT): \
+ collectors/cgroups.plugin/$(am__dirstamp) \
+ collectors/cgroups.plugin/$(DEPDIR)/$(am__dirstamp)
+
+cgroup-network$(EXEEXT): $(cgroup_network_OBJECTS) $(cgroup_network_DEPENDENCIES) $(EXTRA_cgroup_network_DEPENDENCIES)
+ @rm -f cgroup-network$(EXEEXT)
+ $(AM_V_CCLD)$(LINK) $(cgroup_network_OBJECTS) $(cgroup_network_LDADD) $(LIBS)
+collectors/cups.plugin/$(am__dirstamp):
+ @$(MKDIR_P) collectors/cups.plugin
+ @: > collectors/cups.plugin/$(am__dirstamp)
+collectors/cups.plugin/$(DEPDIR)/$(am__dirstamp):
+ @$(MKDIR_P) collectors/cups.plugin/$(DEPDIR)
+ @: > collectors/cups.plugin/$(DEPDIR)/$(am__dirstamp)
+collectors/cups.plugin/cups_plugin.$(OBJEXT): \
+ collectors/cups.plugin/$(am__dirstamp) \
+ collectors/cups.plugin/$(DEPDIR)/$(am__dirstamp)
+
+cups.plugin$(EXEEXT): $(cups_plugin_OBJECTS) $(cups_plugin_DEPENDENCIES) $(EXTRA_cups_plugin_DEPENDENCIES)
+ @rm -f cups.plugin$(EXEEXT)
+ $(AM_V_CCLD)$(LINK) $(cups_plugin_OBJECTS) $(cups_plugin_LDADD) $(LIBS)
+collectors/freeipmi.plugin/$(am__dirstamp):
+ @$(MKDIR_P) collectors/freeipmi.plugin
+ @: > collectors/freeipmi.plugin/$(am__dirstamp)
+collectors/freeipmi.plugin/$(DEPDIR)/$(am__dirstamp):
+ @$(MKDIR_P) collectors/freeipmi.plugin/$(DEPDIR)
+ @: > collectors/freeipmi.plugin/$(DEPDIR)/$(am__dirstamp)
+collectors/freeipmi.plugin/freeipmi_plugin.$(OBJEXT): \
+ collectors/freeipmi.plugin/$(am__dirstamp) \
+ collectors/freeipmi.plugin/$(DEPDIR)/$(am__dirstamp)
+
+freeipmi.plugin$(EXEEXT): $(freeipmi_plugin_OBJECTS) $(freeipmi_plugin_DEPENDENCIES) $(EXTRA_freeipmi_plugin_DEPENDENCIES)
+ @rm -f freeipmi.plugin$(EXEEXT)
+ $(AM_V_CCLD)$(LINK) $(freeipmi_plugin_OBJECTS) $(freeipmi_plugin_LDADD) $(LIBS)
+daemon/$(am__dirstamp):
+ @$(MKDIR_P) daemon
+ @: > daemon/$(am__dirstamp)
+daemon/$(DEPDIR)/$(am__dirstamp):
+ @$(MKDIR_P) daemon/$(DEPDIR)
+ @: > daemon/$(DEPDIR)/$(am__dirstamp)
+daemon/common.$(OBJEXT): daemon/$(am__dirstamp) \
+ daemon/$(DEPDIR)/$(am__dirstamp)
+daemon/daemon.$(OBJEXT): daemon/$(am__dirstamp) \
+ daemon/$(DEPDIR)/$(am__dirstamp)
+daemon/global_statistics.$(OBJEXT): daemon/$(am__dirstamp) \
+ daemon/$(DEPDIR)/$(am__dirstamp)
+daemon/main.$(OBJEXT): daemon/$(am__dirstamp) \
+ daemon/$(DEPDIR)/$(am__dirstamp)
+daemon/signals.$(OBJEXT): daemon/$(am__dirstamp) \
+ daemon/$(DEPDIR)/$(am__dirstamp)
+daemon/unit_test.$(OBJEXT): daemon/$(am__dirstamp) \
+ daemon/$(DEPDIR)/$(am__dirstamp)
+web/api/badges/$(am__dirstamp):
+ @$(MKDIR_P) web/api/badges
+ @: > web/api/badges/$(am__dirstamp)
+web/api/badges/$(DEPDIR)/$(am__dirstamp):
+ @$(MKDIR_P) web/api/badges/$(DEPDIR)
+ @: > web/api/badges/$(DEPDIR)/$(am__dirstamp)
+web/api/badges/web_buffer_svg.$(OBJEXT): \
+ web/api/badges/$(am__dirstamp) \
+ web/api/badges/$(DEPDIR)/$(am__dirstamp)
+web/api/exporters/$(am__dirstamp):
+ @$(MKDIR_P) web/api/exporters
+ @: > web/api/exporters/$(am__dirstamp)
+web/api/exporters/$(DEPDIR)/$(am__dirstamp):
+ @$(MKDIR_P) web/api/exporters/$(DEPDIR)
+ @: > web/api/exporters/$(DEPDIR)/$(am__dirstamp)
+web/api/exporters/allmetrics.$(OBJEXT): \
+ web/api/exporters/$(am__dirstamp) \
+ web/api/exporters/$(DEPDIR)/$(am__dirstamp)
+web/api/exporters/shell/$(am__dirstamp):
+ @$(MKDIR_P) web/api/exporters/shell
+ @: > web/api/exporters/shell/$(am__dirstamp)
+web/api/exporters/shell/$(DEPDIR)/$(am__dirstamp):
+ @$(MKDIR_P) web/api/exporters/shell/$(DEPDIR)
+ @: > web/api/exporters/shell/$(DEPDIR)/$(am__dirstamp)
+web/api/exporters/shell/allmetrics_shell.$(OBJEXT): \
+ web/api/exporters/shell/$(am__dirstamp) \
+ web/api/exporters/shell/$(DEPDIR)/$(am__dirstamp)
+web/api/queries/average/$(am__dirstamp):
+ @$(MKDIR_P) web/api/queries/average
+ @: > web/api/queries/average/$(am__dirstamp)
+web/api/queries/average/$(DEPDIR)/$(am__dirstamp):
+ @$(MKDIR_P) web/api/queries/average/$(DEPDIR)
+ @: > web/api/queries/average/$(DEPDIR)/$(am__dirstamp)
+web/api/queries/average/average.$(OBJEXT): \
+ web/api/queries/average/$(am__dirstamp) \
+ web/api/queries/average/$(DEPDIR)/$(am__dirstamp)
+web/api/queries/des/$(am__dirstamp):
+ @$(MKDIR_P) web/api/queries/des
+ @: > web/api/queries/des/$(am__dirstamp)
+web/api/queries/des/$(DEPDIR)/$(am__dirstamp):
+ @$(MKDIR_P) web/api/queries/des/$(DEPDIR)
+ @: > web/api/queries/des/$(DEPDIR)/$(am__dirstamp)
+web/api/queries/des/des.$(OBJEXT): \
+ web/api/queries/des/$(am__dirstamp) \
+ web/api/queries/des/$(DEPDIR)/$(am__dirstamp)
+web/api/queries/incremental_sum/$(am__dirstamp):
+ @$(MKDIR_P) web/api/queries/incremental_sum
+ @: > web/api/queries/incremental_sum/$(am__dirstamp)
+web/api/queries/incremental_sum/$(DEPDIR)/$(am__dirstamp):
+ @$(MKDIR_P) web/api/queries/incremental_sum/$(DEPDIR)
+ @: > web/api/queries/incremental_sum/$(DEPDIR)/$(am__dirstamp)
+web/api/queries/incremental_sum/incremental_sum.$(OBJEXT): \
+ web/api/queries/incremental_sum/$(am__dirstamp) \
+ web/api/queries/incremental_sum/$(DEPDIR)/$(am__dirstamp)
+web/api/queries/max/$(am__dirstamp):
+ @$(MKDIR_P) web/api/queries/max
+ @: > web/api/queries/max/$(am__dirstamp)
+web/api/queries/max/$(DEPDIR)/$(am__dirstamp):
+ @$(MKDIR_P) web/api/queries/max/$(DEPDIR)
+ @: > web/api/queries/max/$(DEPDIR)/$(am__dirstamp)
+web/api/queries/max/max.$(OBJEXT): \
+ web/api/queries/max/$(am__dirstamp) \
+ web/api/queries/max/$(DEPDIR)/$(am__dirstamp)
+web/api/queries/median/$(am__dirstamp):
+ @$(MKDIR_P) web/api/queries/median
+ @: > web/api/queries/median/$(am__dirstamp)
+web/api/queries/median/$(DEPDIR)/$(am__dirstamp):
+ @$(MKDIR_P) web/api/queries/median/$(DEPDIR)
+ @: > web/api/queries/median/$(DEPDIR)/$(am__dirstamp)
+web/api/queries/median/median.$(OBJEXT): \
+ web/api/queries/median/$(am__dirstamp) \
+ web/api/queries/median/$(DEPDIR)/$(am__dirstamp)
+web/api/queries/min/$(am__dirstamp):
+ @$(MKDIR_P) web/api/queries/min
+ @: > web/api/queries/min/$(am__dirstamp)
+web/api/queries/min/$(DEPDIR)/$(am__dirstamp):
+ @$(MKDIR_P) web/api/queries/min/$(DEPDIR)
+ @: > web/api/queries/min/$(DEPDIR)/$(am__dirstamp)
+web/api/queries/min/min.$(OBJEXT): \
+ web/api/queries/min/$(am__dirstamp) \
+ web/api/queries/min/$(DEPDIR)/$(am__dirstamp)
+web/api/queries/$(am__dirstamp):
+ @$(MKDIR_P) web/api/queries
+ @: > web/api/queries/$(am__dirstamp)
+web/api/queries/$(DEPDIR)/$(am__dirstamp):
+ @$(MKDIR_P) web/api/queries/$(DEPDIR)
+ @: > web/api/queries/$(DEPDIR)/$(am__dirstamp)
+web/api/queries/query.$(OBJEXT): web/api/queries/$(am__dirstamp) \
+ web/api/queries/$(DEPDIR)/$(am__dirstamp)
+web/api/queries/rrdr.$(OBJEXT): web/api/queries/$(am__dirstamp) \
+ web/api/queries/$(DEPDIR)/$(am__dirstamp)
+web/api/queries/ses/$(am__dirstamp):
+ @$(MKDIR_P) web/api/queries/ses
+ @: > web/api/queries/ses/$(am__dirstamp)
+web/api/queries/ses/$(DEPDIR)/$(am__dirstamp):
+ @$(MKDIR_P) web/api/queries/ses/$(DEPDIR)
+ @: > web/api/queries/ses/$(DEPDIR)/$(am__dirstamp)
+web/api/queries/ses/ses.$(OBJEXT): \
+ web/api/queries/ses/$(am__dirstamp) \
+ web/api/queries/ses/$(DEPDIR)/$(am__dirstamp)
+web/api/queries/stddev/$(am__dirstamp):
+ @$(MKDIR_P) web/api/queries/stddev
+ @: > web/api/queries/stddev/$(am__dirstamp)
+web/api/queries/stddev/$(DEPDIR)/$(am__dirstamp):
+ @$(MKDIR_P) web/api/queries/stddev/$(DEPDIR)
+ @: > web/api/queries/stddev/$(DEPDIR)/$(am__dirstamp)
+web/api/queries/stddev/stddev.$(OBJEXT): \
+ web/api/queries/stddev/$(am__dirstamp) \
+ web/api/queries/stddev/$(DEPDIR)/$(am__dirstamp)
+web/api/queries/sum/$(am__dirstamp):
+ @$(MKDIR_P) web/api/queries/sum
+ @: > web/api/queries/sum/$(am__dirstamp)
+web/api/queries/sum/$(DEPDIR)/$(am__dirstamp):
+ @$(MKDIR_P) web/api/queries/sum/$(DEPDIR)
+ @: > web/api/queries/sum/$(DEPDIR)/$(am__dirstamp)
+web/api/queries/sum/sum.$(OBJEXT): \
+ web/api/queries/sum/$(am__dirstamp) \
+ web/api/queries/sum/$(DEPDIR)/$(am__dirstamp)
+web/api/formatters/$(am__dirstamp):
+ @$(MKDIR_P) web/api/formatters
+ @: > web/api/formatters/$(am__dirstamp)
+web/api/formatters/$(DEPDIR)/$(am__dirstamp):
+ @$(MKDIR_P) web/api/formatters/$(DEPDIR)
+ @: > web/api/formatters/$(DEPDIR)/$(am__dirstamp)
+web/api/formatters/rrd2json.$(OBJEXT): \
+ web/api/formatters/$(am__dirstamp) \
+ web/api/formatters/$(DEPDIR)/$(am__dirstamp)
+web/api/formatters/csv/$(am__dirstamp):
+ @$(MKDIR_P) web/api/formatters/csv
+ @: > web/api/formatters/csv/$(am__dirstamp)
+web/api/formatters/csv/$(DEPDIR)/$(am__dirstamp):
+ @$(MKDIR_P) web/api/formatters/csv/$(DEPDIR)
+ @: > web/api/formatters/csv/$(DEPDIR)/$(am__dirstamp)
+web/api/formatters/csv/csv.$(OBJEXT): \
+ web/api/formatters/csv/$(am__dirstamp) \
+ web/api/formatters/csv/$(DEPDIR)/$(am__dirstamp)
+web/api/formatters/json/$(am__dirstamp):
+ @$(MKDIR_P) web/api/formatters/json
+ @: > web/api/formatters/json/$(am__dirstamp)
+web/api/formatters/json/$(DEPDIR)/$(am__dirstamp):
+ @$(MKDIR_P) web/api/formatters/json/$(DEPDIR)
+ @: > web/api/formatters/json/$(DEPDIR)/$(am__dirstamp)
+web/api/formatters/json/json.$(OBJEXT): \
+ web/api/formatters/json/$(am__dirstamp) \
+ web/api/formatters/json/$(DEPDIR)/$(am__dirstamp)
+web/api/formatters/ssv/$(am__dirstamp):
+ @$(MKDIR_P) web/api/formatters/ssv
+ @: > web/api/formatters/ssv/$(am__dirstamp)
+web/api/formatters/ssv/$(DEPDIR)/$(am__dirstamp):
+ @$(MKDIR_P) web/api/formatters/ssv/$(DEPDIR)
+ @: > web/api/formatters/ssv/$(DEPDIR)/$(am__dirstamp)
+web/api/formatters/ssv/ssv.$(OBJEXT): \
+ web/api/formatters/ssv/$(am__dirstamp) \
+ web/api/formatters/ssv/$(DEPDIR)/$(am__dirstamp)
+web/api/formatters/value/$(am__dirstamp):
+ @$(MKDIR_P) web/api/formatters/value
+ @: > web/api/formatters/value/$(am__dirstamp)
+web/api/formatters/value/$(DEPDIR)/$(am__dirstamp):
+ @$(MKDIR_P) web/api/formatters/value/$(DEPDIR)
+ @: > web/api/formatters/value/$(DEPDIR)/$(am__dirstamp)
+web/api/formatters/value/value.$(OBJEXT): \
+ web/api/formatters/value/$(am__dirstamp) \
+ web/api/formatters/value/$(DEPDIR)/$(am__dirstamp)
+web/api/formatters/json_wrapper.$(OBJEXT): \
+ web/api/formatters/$(am__dirstamp) \
+ web/api/formatters/$(DEPDIR)/$(am__dirstamp)
+web/api/formatters/charts2json.$(OBJEXT): \
+ web/api/formatters/$(am__dirstamp) \
+ web/api/formatters/$(DEPDIR)/$(am__dirstamp)
+web/api/formatters/rrdset2json.$(OBJEXT): \
+ web/api/formatters/$(am__dirstamp) \
+ web/api/formatters/$(DEPDIR)/$(am__dirstamp)
+web/api/health/$(am__dirstamp):
+ @$(MKDIR_P) web/api/health
+ @: > web/api/health/$(am__dirstamp)
+web/api/health/$(DEPDIR)/$(am__dirstamp):
+ @$(MKDIR_P) web/api/health/$(DEPDIR)
+ @: > web/api/health/$(DEPDIR)/$(am__dirstamp)
+web/api/health/health_cmdapi.$(OBJEXT): \
+ web/api/health/$(am__dirstamp) \
+ web/api/health/$(DEPDIR)/$(am__dirstamp)
+web/api/$(am__dirstamp):
+ @$(MKDIR_P) web/api
+ @: > web/api/$(am__dirstamp)
+web/api/$(DEPDIR)/$(am__dirstamp):
+ @$(MKDIR_P) web/api/$(DEPDIR)
+ @: > web/api/$(DEPDIR)/$(am__dirstamp)
+web/api/web_api_v1.$(OBJEXT): web/api/$(am__dirstamp) \
+ web/api/$(DEPDIR)/$(am__dirstamp)
+backends/$(am__dirstamp):
+ @$(MKDIR_P) backends
+ @: > backends/$(am__dirstamp)
+backends/$(DEPDIR)/$(am__dirstamp):
+ @$(MKDIR_P) backends/$(DEPDIR)
+ @: > backends/$(DEPDIR)/$(am__dirstamp)
+backends/backends.$(OBJEXT): backends/$(am__dirstamp) \
+ backends/$(DEPDIR)/$(am__dirstamp)
+backends/graphite/$(am__dirstamp):
+ @$(MKDIR_P) backends/graphite
+ @: > backends/graphite/$(am__dirstamp)
+backends/graphite/$(DEPDIR)/$(am__dirstamp):
+ @$(MKDIR_P) backends/graphite/$(DEPDIR)
+ @: > backends/graphite/$(DEPDIR)/$(am__dirstamp)
+backends/graphite/graphite.$(OBJEXT): \
+ backends/graphite/$(am__dirstamp) \
+ backends/graphite/$(DEPDIR)/$(am__dirstamp)
+backends/json/$(am__dirstamp):
+ @$(MKDIR_P) backends/json
+ @: > backends/json/$(am__dirstamp)
+backends/json/$(DEPDIR)/$(am__dirstamp):
+ @$(MKDIR_P) backends/json/$(DEPDIR)
+ @: > backends/json/$(DEPDIR)/$(am__dirstamp)
+backends/json/json.$(OBJEXT): backends/json/$(am__dirstamp) \
+ backends/json/$(DEPDIR)/$(am__dirstamp)
+backends/opentsdb/$(am__dirstamp):
+ @$(MKDIR_P) backends/opentsdb
+ @: > backends/opentsdb/$(am__dirstamp)
+backends/opentsdb/$(DEPDIR)/$(am__dirstamp):
+ @$(MKDIR_P) backends/opentsdb/$(DEPDIR)
+ @: > backends/opentsdb/$(DEPDIR)/$(am__dirstamp)
+backends/opentsdb/opentsdb.$(OBJEXT): \
+ backends/opentsdb/$(am__dirstamp) \
+ backends/opentsdb/$(DEPDIR)/$(am__dirstamp)
+backends/prometheus/$(am__dirstamp):
+ @$(MKDIR_P) backends/prometheus
+ @: > backends/prometheus/$(am__dirstamp)
+backends/prometheus/$(DEPDIR)/$(am__dirstamp):
+ @$(MKDIR_P) backends/prometheus/$(DEPDIR)
+ @: > backends/prometheus/$(DEPDIR)/$(am__dirstamp)
+backends/prometheus/backend_prometheus.$(OBJEXT): \
+ backends/prometheus/$(am__dirstamp) \
+ backends/prometheus/$(DEPDIR)/$(am__dirstamp)
+collectors/checks.plugin/$(am__dirstamp):
+ @$(MKDIR_P) collectors/checks.plugin
+ @: > collectors/checks.plugin/$(am__dirstamp)
+collectors/checks.plugin/$(DEPDIR)/$(am__dirstamp):
+ @$(MKDIR_P) collectors/checks.plugin/$(DEPDIR)
+ @: > collectors/checks.plugin/$(DEPDIR)/$(am__dirstamp)
+collectors/checks.plugin/plugin_checks.$(OBJEXT): \
+ collectors/checks.plugin/$(am__dirstamp) \
+ collectors/checks.plugin/$(DEPDIR)/$(am__dirstamp)
+health/$(am__dirstamp):
+ @$(MKDIR_P) health
+ @: > health/$(am__dirstamp)
+health/$(DEPDIR)/$(am__dirstamp):
+ @$(MKDIR_P) health/$(DEPDIR)
+ @: > health/$(DEPDIR)/$(am__dirstamp)
+health/health.$(OBJEXT): health/$(am__dirstamp) \
+ health/$(DEPDIR)/$(am__dirstamp)
+health/health_config.$(OBJEXT): health/$(am__dirstamp) \
+ health/$(DEPDIR)/$(am__dirstamp)
+health/health_json.$(OBJEXT): health/$(am__dirstamp) \
+ health/$(DEPDIR)/$(am__dirstamp)
+health/health_log.$(OBJEXT): health/$(am__dirstamp) \
+ health/$(DEPDIR)/$(am__dirstamp)
+collectors/idlejitter.plugin/$(am__dirstamp):
+ @$(MKDIR_P) collectors/idlejitter.plugin
+ @: > collectors/idlejitter.plugin/$(am__dirstamp)
+collectors/idlejitter.plugin/$(DEPDIR)/$(am__dirstamp):
+ @$(MKDIR_P) collectors/idlejitter.plugin/$(DEPDIR)
+ @: > collectors/idlejitter.plugin/$(DEPDIR)/$(am__dirstamp)
+collectors/idlejitter.plugin/plugin_idlejitter.$(OBJEXT): \
+ collectors/idlejitter.plugin/$(am__dirstamp) \
+ collectors/idlejitter.plugin/$(DEPDIR)/$(am__dirstamp)
+collectors/plugins.d/$(am__dirstamp):
+ @$(MKDIR_P) collectors/plugins.d
+ @: > collectors/plugins.d/$(am__dirstamp)
+collectors/plugins.d/$(DEPDIR)/$(am__dirstamp):
+ @$(MKDIR_P) collectors/plugins.d/$(DEPDIR)
+ @: > collectors/plugins.d/$(DEPDIR)/$(am__dirstamp)
+collectors/plugins.d/plugins_d.$(OBJEXT): \
+ collectors/plugins.d/$(am__dirstamp) \
+ collectors/plugins.d/$(DEPDIR)/$(am__dirstamp)
+registry/$(am__dirstamp):
+ @$(MKDIR_P) registry
+ @: > registry/$(am__dirstamp)
+registry/$(DEPDIR)/$(am__dirstamp):
+ @$(MKDIR_P) registry/$(DEPDIR)
+ @: > registry/$(DEPDIR)/$(am__dirstamp)
+registry/registry.$(OBJEXT): registry/$(am__dirstamp) \
+ registry/$(DEPDIR)/$(am__dirstamp)
+registry/registry_db.$(OBJEXT): registry/$(am__dirstamp) \
+ registry/$(DEPDIR)/$(am__dirstamp)
+registry/registry_init.$(OBJEXT): registry/$(am__dirstamp) \
+ registry/$(DEPDIR)/$(am__dirstamp)
+registry/registry_internals.$(OBJEXT): registry/$(am__dirstamp) \
+ registry/$(DEPDIR)/$(am__dirstamp)
+registry/registry_log.$(OBJEXT): registry/$(am__dirstamp) \
+ registry/$(DEPDIR)/$(am__dirstamp)
+registry/registry_machine.$(OBJEXT): registry/$(am__dirstamp) \
+ registry/$(DEPDIR)/$(am__dirstamp)
+registry/registry_person.$(OBJEXT): registry/$(am__dirstamp) \
+ registry/$(DEPDIR)/$(am__dirstamp)
+registry/registry_url.$(OBJEXT): registry/$(am__dirstamp) \
+ registry/$(DEPDIR)/$(am__dirstamp)
+database/$(am__dirstamp):
+ @$(MKDIR_P) database
+ @: > database/$(am__dirstamp)
+database/$(DEPDIR)/$(am__dirstamp):
+ @$(MKDIR_P) database/$(DEPDIR)
+ @: > database/$(DEPDIR)/$(am__dirstamp)
+database/rrdcalc.$(OBJEXT): database/$(am__dirstamp) \
+ database/$(DEPDIR)/$(am__dirstamp)
+database/rrdcalctemplate.$(OBJEXT): database/$(am__dirstamp) \
+ database/$(DEPDIR)/$(am__dirstamp)
+database/rrddim.$(OBJEXT): database/$(am__dirstamp) \
+ database/$(DEPDIR)/$(am__dirstamp)
+database/rrddimvar.$(OBJEXT): database/$(am__dirstamp) \
+ database/$(DEPDIR)/$(am__dirstamp)
+database/rrdfamily.$(OBJEXT): database/$(am__dirstamp) \
+ database/$(DEPDIR)/$(am__dirstamp)
+database/rrdhost.$(OBJEXT): database/$(am__dirstamp) \
+ database/$(DEPDIR)/$(am__dirstamp)
+database/rrd.$(OBJEXT): database/$(am__dirstamp) \
+ database/$(DEPDIR)/$(am__dirstamp)
+database/rrdset.$(OBJEXT): database/$(am__dirstamp) \
+ database/$(DEPDIR)/$(am__dirstamp)
+database/rrdsetvar.$(OBJEXT): database/$(am__dirstamp) \
+ database/$(DEPDIR)/$(am__dirstamp)
+database/rrdvar.$(OBJEXT): database/$(am__dirstamp) \
+ database/$(DEPDIR)/$(am__dirstamp)
+database/engine/$(am__dirstamp):
+ @$(MKDIR_P) database/engine
+ @: > database/engine/$(am__dirstamp)
+database/engine/$(DEPDIR)/$(am__dirstamp):
+ @$(MKDIR_P) database/engine/$(DEPDIR)
+ @: > database/engine/$(DEPDIR)/$(am__dirstamp)
+database/engine/rrdengine.$(OBJEXT): database/engine/$(am__dirstamp) \
+ database/engine/$(DEPDIR)/$(am__dirstamp)
+database/engine/datafile.$(OBJEXT): database/engine/$(am__dirstamp) \
+ database/engine/$(DEPDIR)/$(am__dirstamp)
+database/engine/journalfile.$(OBJEXT): \
+ database/engine/$(am__dirstamp) \
+ database/engine/$(DEPDIR)/$(am__dirstamp)
+database/engine/rrdenginelib.$(OBJEXT): \
+ database/engine/$(am__dirstamp) \
+ database/engine/$(DEPDIR)/$(am__dirstamp)
+database/engine/rrdengineapi.$(OBJEXT): \
+ database/engine/$(am__dirstamp) \
+ database/engine/$(DEPDIR)/$(am__dirstamp)
+database/engine/pagecache.$(OBJEXT): database/engine/$(am__dirstamp) \
+ database/engine/$(DEPDIR)/$(am__dirstamp)
+database/engine/rrdenglocking.$(OBJEXT): \
+ database/engine/$(am__dirstamp) \
+ database/engine/$(DEPDIR)/$(am__dirstamp)
+streaming/$(am__dirstamp):
+ @$(MKDIR_P) streaming
+ @: > streaming/$(am__dirstamp)
+streaming/$(DEPDIR)/$(am__dirstamp):
+ @$(MKDIR_P) streaming/$(DEPDIR)
+ @: > streaming/$(DEPDIR)/$(am__dirstamp)
+streaming/rrdpush.$(OBJEXT): streaming/$(am__dirstamp) \
+ streaming/$(DEPDIR)/$(am__dirstamp)
+collectors/statsd.plugin/$(am__dirstamp):
+ @$(MKDIR_P) collectors/statsd.plugin
+ @: > collectors/statsd.plugin/$(am__dirstamp)
+collectors/statsd.plugin/$(DEPDIR)/$(am__dirstamp):
+ @$(MKDIR_P) collectors/statsd.plugin/$(DEPDIR)
+ @: > collectors/statsd.plugin/$(DEPDIR)/$(am__dirstamp)
+collectors/statsd.plugin/statsd.$(OBJEXT): \
+ collectors/statsd.plugin/$(am__dirstamp) \
+ collectors/statsd.plugin/$(DEPDIR)/$(am__dirstamp)
+web/server/$(am__dirstamp):
+ @$(MKDIR_P) web/server
+ @: > web/server/$(am__dirstamp)
+web/server/$(DEPDIR)/$(am__dirstamp):
+ @$(MKDIR_P) web/server/$(DEPDIR)
+ @: > web/server/$(DEPDIR)/$(am__dirstamp)
+web/server/web_client.$(OBJEXT): web/server/$(am__dirstamp) \
+ web/server/$(DEPDIR)/$(am__dirstamp)
+web/server/web_server.$(OBJEXT): web/server/$(am__dirstamp) \
+ web/server/$(DEPDIR)/$(am__dirstamp)
+web/server/web_client_cache.$(OBJEXT): web/server/$(am__dirstamp) \
+ web/server/$(DEPDIR)/$(am__dirstamp)
+web/server/static/$(am__dirstamp):
+ @$(MKDIR_P) web/server/static
+ @: > web/server/static/$(am__dirstamp)
+web/server/static/$(DEPDIR)/$(am__dirstamp):
+ @$(MKDIR_P) web/server/static/$(DEPDIR)
+ @: > web/server/static/$(DEPDIR)/$(am__dirstamp)
+web/server/static/static-threaded.$(OBJEXT): \
+ web/server/static/$(am__dirstamp) \
+ web/server/static/$(DEPDIR)/$(am__dirstamp)
+collectors/freebsd.plugin/$(am__dirstamp):
+ @$(MKDIR_P) collectors/freebsd.plugin
+ @: > collectors/freebsd.plugin/$(am__dirstamp)
+collectors/freebsd.plugin/$(DEPDIR)/$(am__dirstamp):
+ @$(MKDIR_P) collectors/freebsd.plugin/$(DEPDIR)
+ @: > collectors/freebsd.plugin/$(DEPDIR)/$(am__dirstamp)
+collectors/freebsd.plugin/plugin_freebsd.$(OBJEXT): \
+ collectors/freebsd.plugin/$(am__dirstamp) \
+ collectors/freebsd.plugin/$(DEPDIR)/$(am__dirstamp)
+collectors/freebsd.plugin/freebsd_sysctl.$(OBJEXT): \
+ collectors/freebsd.plugin/$(am__dirstamp) \
+ collectors/freebsd.plugin/$(DEPDIR)/$(am__dirstamp)
+collectors/freebsd.plugin/freebsd_getmntinfo.$(OBJEXT): \
+ collectors/freebsd.plugin/$(am__dirstamp) \
+ collectors/freebsd.plugin/$(DEPDIR)/$(am__dirstamp)
+collectors/freebsd.plugin/freebsd_getifaddrs.$(OBJEXT): \
+ collectors/freebsd.plugin/$(am__dirstamp) \
+ collectors/freebsd.plugin/$(DEPDIR)/$(am__dirstamp)
+collectors/freebsd.plugin/freebsd_devstat.$(OBJEXT): \
+ collectors/freebsd.plugin/$(am__dirstamp) \
+ collectors/freebsd.plugin/$(DEPDIR)/$(am__dirstamp)
+collectors/freebsd.plugin/freebsd_kstat_zfs.$(OBJEXT): \
+ collectors/freebsd.plugin/$(am__dirstamp) \
+ collectors/freebsd.plugin/$(DEPDIR)/$(am__dirstamp)
+collectors/freebsd.plugin/freebsd_ipfw.$(OBJEXT): \
+ collectors/freebsd.plugin/$(am__dirstamp) \
+ collectors/freebsd.plugin/$(DEPDIR)/$(am__dirstamp)
+collectors/proc.plugin/$(am__dirstamp):
+ @$(MKDIR_P) collectors/proc.plugin
+ @: > collectors/proc.plugin/$(am__dirstamp)
+collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp):
+ @$(MKDIR_P) collectors/proc.plugin/$(DEPDIR)
+ @: > collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp)
+collectors/proc.plugin/zfs_common.$(OBJEXT): \
+ collectors/proc.plugin/$(am__dirstamp) \
+ collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp)
+collectors/macos.plugin/$(am__dirstamp):
+ @$(MKDIR_P) collectors/macos.plugin
+ @: > collectors/macos.plugin/$(am__dirstamp)
+collectors/macos.plugin/$(DEPDIR)/$(am__dirstamp):
+ @$(MKDIR_P) collectors/macos.plugin/$(DEPDIR)
+ @: > collectors/macos.plugin/$(DEPDIR)/$(am__dirstamp)
+collectors/macos.plugin/plugin_macos.$(OBJEXT): \
+ collectors/macos.plugin/$(am__dirstamp) \
+ collectors/macos.plugin/$(DEPDIR)/$(am__dirstamp)
+collectors/macos.plugin/macos_sysctl.$(OBJEXT): \
+ collectors/macos.plugin/$(am__dirstamp) \
+ collectors/macos.plugin/$(DEPDIR)/$(am__dirstamp)
+collectors/macos.plugin/macos_mach_smi.$(OBJEXT): \
+ collectors/macos.plugin/$(am__dirstamp) \
+ collectors/macos.plugin/$(DEPDIR)/$(am__dirstamp)
+collectors/macos.plugin/macos_fw.$(OBJEXT): \
+ collectors/macos.plugin/$(am__dirstamp) \
+ collectors/macos.plugin/$(DEPDIR)/$(am__dirstamp)
+collectors/cgroups.plugin/sys_fs_cgroup.$(OBJEXT): \
+ collectors/cgroups.plugin/$(am__dirstamp) \
+ collectors/cgroups.plugin/$(DEPDIR)/$(am__dirstamp)
+collectors/diskspace.plugin/$(am__dirstamp):
+ @$(MKDIR_P) collectors/diskspace.plugin
+ @: > collectors/diskspace.plugin/$(am__dirstamp)
+collectors/diskspace.plugin/$(DEPDIR)/$(am__dirstamp):
+ @$(MKDIR_P) collectors/diskspace.plugin/$(DEPDIR)
+ @: > collectors/diskspace.plugin/$(DEPDIR)/$(am__dirstamp)
+collectors/diskspace.plugin/plugin_diskspace.$(OBJEXT): \
+ collectors/diskspace.plugin/$(am__dirstamp) \
+ collectors/diskspace.plugin/$(DEPDIR)/$(am__dirstamp)
+collectors/proc.plugin/ipc.$(OBJEXT): \
+ collectors/proc.plugin/$(am__dirstamp) \
+ collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp)
+collectors/proc.plugin/plugin_proc.$(OBJEXT): \
+ collectors/proc.plugin/$(am__dirstamp) \
+ collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp)
+collectors/proc.plugin/proc_diskstats.$(OBJEXT): \
+ collectors/proc.plugin/$(am__dirstamp) \
+ collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp)
+collectors/proc.plugin/proc_mdstat.$(OBJEXT): \
+ collectors/proc.plugin/$(am__dirstamp) \
+ collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp)
+collectors/proc.plugin/proc_interrupts.$(OBJEXT): \
+ collectors/proc.plugin/$(am__dirstamp) \
+ collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp)
+collectors/proc.plugin/proc_softirqs.$(OBJEXT): \
+ collectors/proc.plugin/$(am__dirstamp) \
+ collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp)
+collectors/proc.plugin/proc_loadavg.$(OBJEXT): \
+ collectors/proc.plugin/$(am__dirstamp) \
+ collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp)
+collectors/proc.plugin/proc_meminfo.$(OBJEXT): \
+ collectors/proc.plugin/$(am__dirstamp) \
+ collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp)
+collectors/proc.plugin/proc_net_dev.$(OBJEXT): \
+ collectors/proc.plugin/$(am__dirstamp) \
+ collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp)
+collectors/proc.plugin/proc_net_ip_vs_stats.$(OBJEXT): \
+ collectors/proc.plugin/$(am__dirstamp) \
+ collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp)
+collectors/proc.plugin/proc_net_netstat.$(OBJEXT): \
+ collectors/proc.plugin/$(am__dirstamp) \
+ collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp)
+collectors/proc.plugin/proc_net_rpc_nfs.$(OBJEXT): \
+ collectors/proc.plugin/$(am__dirstamp) \
+ collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp)
+collectors/proc.plugin/proc_net_rpc_nfsd.$(OBJEXT): \
+ collectors/proc.plugin/$(am__dirstamp) \
+ collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp)
+collectors/proc.plugin/proc_net_snmp.$(OBJEXT): \
+ collectors/proc.plugin/$(am__dirstamp) \
+ collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp)
+collectors/proc.plugin/proc_net_snmp6.$(OBJEXT): \
+ collectors/proc.plugin/$(am__dirstamp) \
+ collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp)
+collectors/proc.plugin/proc_net_sctp_snmp.$(OBJEXT): \
+ collectors/proc.plugin/$(am__dirstamp) \
+ collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp)
+collectors/proc.plugin/proc_net_sockstat.$(OBJEXT): \
+ collectors/proc.plugin/$(am__dirstamp) \
+ collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp)
+collectors/proc.plugin/proc_net_sockstat6.$(OBJEXT): \
+ collectors/proc.plugin/$(am__dirstamp) \
+ collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp)
+collectors/proc.plugin/proc_net_softnet_stat.$(OBJEXT): \
+ collectors/proc.plugin/$(am__dirstamp) \
+ collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp)
+collectors/proc.plugin/proc_net_stat_conntrack.$(OBJEXT): \
+ collectors/proc.plugin/$(am__dirstamp) \
+ collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp)
+collectors/proc.plugin/proc_net_stat_synproxy.$(OBJEXT): \
+ collectors/proc.plugin/$(am__dirstamp) \
+ collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp)
+collectors/proc.plugin/proc_self_mountinfo.$(OBJEXT): \
+ collectors/proc.plugin/$(am__dirstamp) \
+ collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp)
+collectors/proc.plugin/proc_spl_kstat_zfs.$(OBJEXT): \
+ collectors/proc.plugin/$(am__dirstamp) \
+ collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp)
+collectors/proc.plugin/proc_stat.$(OBJEXT): \
+ collectors/proc.plugin/$(am__dirstamp) \
+ collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp)
+collectors/proc.plugin/proc_sys_kernel_random_entropy_avail.$(OBJEXT): \
+ collectors/proc.plugin/$(am__dirstamp) \
+ collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp)
+collectors/proc.plugin/proc_vmstat.$(OBJEXT): \
+ collectors/proc.plugin/$(am__dirstamp) \
+ collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp)
+collectors/proc.plugin/proc_uptime.$(OBJEXT): \
+ collectors/proc.plugin/$(am__dirstamp) \
+ collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp)
+collectors/proc.plugin/sys_kernel_mm_ksm.$(OBJEXT): \
+ collectors/proc.plugin/$(am__dirstamp) \
+ collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp)
+collectors/proc.plugin/sys_block_zram.$(OBJEXT): \
+ collectors/proc.plugin/$(am__dirstamp) \
+ collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp)
+collectors/proc.plugin/sys_devices_system_edac_mc.$(OBJEXT): \
+ collectors/proc.plugin/$(am__dirstamp) \
+ collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp)
+collectors/proc.plugin/sys_devices_system_node.$(OBJEXT): \
+ collectors/proc.plugin/$(am__dirstamp) \
+ collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp)
+collectors/proc.plugin/sys_fs_btrfs.$(OBJEXT): \
+ collectors/proc.plugin/$(am__dirstamp) \
+ collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp)
+collectors/proc.plugin/sys_class_power_supply.$(OBJEXT): \
+ collectors/proc.plugin/$(am__dirstamp) \
+ collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp)
+collectors/tc.plugin/$(am__dirstamp):
+ @$(MKDIR_P) collectors/tc.plugin
+ @: > collectors/tc.plugin/$(am__dirstamp)
+collectors/tc.plugin/$(DEPDIR)/$(am__dirstamp):
+ @$(MKDIR_P) collectors/tc.plugin/$(DEPDIR)
+ @: > collectors/tc.plugin/$(DEPDIR)/$(am__dirstamp)
+collectors/tc.plugin/plugin_tc.$(OBJEXT): \
+ collectors/tc.plugin/$(am__dirstamp) \
+ collectors/tc.plugin/$(DEPDIR)/$(am__dirstamp)
+backends/aws_kinesis/$(am__dirstamp):
+ @$(MKDIR_P) backends/aws_kinesis
+ @: > backends/aws_kinesis/$(am__dirstamp)
+backends/aws_kinesis/$(DEPDIR)/$(am__dirstamp):
+ @$(MKDIR_P) backends/aws_kinesis/$(DEPDIR)
+ @: > backends/aws_kinesis/$(DEPDIR)/$(am__dirstamp)
+backends/aws_kinesis/aws_kinesis.$(OBJEXT): \
+ backends/aws_kinesis/$(am__dirstamp) \
+ backends/aws_kinesis/$(DEPDIR)/$(am__dirstamp)
+backends/aws_kinesis/aws_kinesis_put_record.$(OBJEXT): \
+ backends/aws_kinesis/$(am__dirstamp) \
+ backends/aws_kinesis/$(DEPDIR)/$(am__dirstamp)
+backends/prometheus/remote_write/$(am__dirstamp):
+ @$(MKDIR_P) backends/prometheus/remote_write
+ @: > backends/prometheus/remote_write/$(am__dirstamp)
+backends/prometheus/remote_write/$(DEPDIR)/$(am__dirstamp):
+ @$(MKDIR_P) backends/prometheus/remote_write/$(DEPDIR)
+ @: > backends/prometheus/remote_write/$(DEPDIR)/$(am__dirstamp)
+backends/prometheus/remote_write/remote_write.$(OBJEXT): \
+ backends/prometheus/remote_write/$(am__dirstamp) \
+ backends/prometheus/remote_write/$(DEPDIR)/$(am__dirstamp)
+backends/mongodb/$(am__dirstamp):
+ @$(MKDIR_P) backends/mongodb
+ @: > backends/mongodb/$(am__dirstamp)
+backends/mongodb/$(DEPDIR)/$(am__dirstamp):
+ @$(MKDIR_P) backends/mongodb/$(DEPDIR)
+ @: > backends/mongodb/$(DEPDIR)/$(am__dirstamp)
+backends/mongodb/mongodb.$(OBJEXT): backends/mongodb/$(am__dirstamp) \
+ backends/mongodb/$(DEPDIR)/$(am__dirstamp)
+backends/prometheus/remote_write/remote_write.pb.$(OBJEXT): \
+ backends/prometheus/remote_write/$(am__dirstamp) \
+ backends/prometheus/remote_write/$(DEPDIR)/$(am__dirstamp)
+
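+# The three-line pattern below is Automake's standard program link step:
+# the stale binary is removed first so a failed link cannot leave an old
+# executable behind.  Roughly, and only as a sketch (the real flags come
+# from configure via $(netdata_LINK)), the command is:
+#   $(CC) $(CFLAGS) $(LDFLAGS) -o netdata <all listed .o files> $(netdata_LDADD) $(LIBS)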
+netdata$(EXEEXT): $(netdata_OBJECTS) $(netdata_DEPENDENCIES) $(EXTRA_netdata_DEPENDENCIES)
+ @rm -f netdata$(EXEEXT)
+ $(AM_V_GEN)$(netdata_LINK) $(netdata_OBJECTS) $(netdata_LDADD) $(LIBS)
+collectors/nfacct.plugin/$(am__dirstamp):
+ @$(MKDIR_P) collectors/nfacct.plugin
+ @: > collectors/nfacct.plugin/$(am__dirstamp)
+collectors/nfacct.plugin/$(DEPDIR)/$(am__dirstamp):
+ @$(MKDIR_P) collectors/nfacct.plugin/$(DEPDIR)
+ @: > collectors/nfacct.plugin/$(DEPDIR)/$(am__dirstamp)
+collectors/nfacct.plugin/plugin_nfacct.$(OBJEXT): \
+ collectors/nfacct.plugin/$(am__dirstamp) \
+ collectors/nfacct.plugin/$(DEPDIR)/$(am__dirstamp)
+
+nfacct.plugin$(EXEEXT): $(nfacct_plugin_OBJECTS) $(nfacct_plugin_DEPENDENCIES) $(EXTRA_nfacct_plugin_DEPENDENCIES)
+ @rm -f nfacct.plugin$(EXEEXT)
+ $(AM_V_CCLD)$(LINK) $(nfacct_plugin_OBJECTS) $(nfacct_plugin_LDADD) $(LIBS)
+collectors/perf.plugin/$(am__dirstamp):
+ @$(MKDIR_P) collectors/perf.plugin
+ @: > collectors/perf.plugin/$(am__dirstamp)
+collectors/perf.plugin/$(DEPDIR)/$(am__dirstamp):
+ @$(MKDIR_P) collectors/perf.plugin/$(DEPDIR)
+ @: > collectors/perf.plugin/$(DEPDIR)/$(am__dirstamp)
+collectors/perf.plugin/perf_plugin.$(OBJEXT): \
+ collectors/perf.plugin/$(am__dirstamp) \
+ collectors/perf.plugin/$(DEPDIR)/$(am__dirstamp)
+
+perf.plugin$(EXEEXT): $(perf_plugin_OBJECTS) $(perf_plugin_DEPENDENCIES) $(EXTRA_perf_plugin_DEPENDENCIES)
+ @rm -f perf.plugin$(EXEEXT)
+ $(AM_V_CCLD)$(LINK) $(perf_plugin_OBJECTS) $(perf_plugin_LDADD) $(LIBS)
+collectors/xenstat.plugin/$(am__dirstamp):
+ @$(MKDIR_P) collectors/xenstat.plugin
+ @: > collectors/xenstat.plugin/$(am__dirstamp)
+collectors/xenstat.plugin/$(DEPDIR)/$(am__dirstamp):
+ @$(MKDIR_P) collectors/xenstat.plugin/$(DEPDIR)
+ @: > collectors/xenstat.plugin/$(DEPDIR)/$(am__dirstamp)
+collectors/xenstat.plugin/xenstat_plugin.$(OBJEXT): \
+ collectors/xenstat.plugin/$(am__dirstamp) \
+ collectors/xenstat.plugin/$(DEPDIR)/$(am__dirstamp)
+
+xenstat.plugin$(EXEEXT): $(xenstat_plugin_OBJECTS) $(xenstat_plugin_DEPENDENCIES) $(EXTRA_xenstat_plugin_DEPENDENCIES)
+ @rm -f xenstat.plugin$(EXEEXT)
+ $(AM_V_CCLD)$(LINK) $(xenstat_plugin_OBJECTS) $(xenstat_plugin_LDADD) $(LIBS)
+
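+# mostlyclean-compile enumerates every object directory explicitly,
+# because plain 'make' does not recurse on its own; Automake derives the
+# list below from the sources declared in Makefile.am.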
+mostlyclean-compile:
+ -rm -f *.$(OBJEXT)
+ -rm -f backends/*.$(OBJEXT)
+ -rm -f backends/aws_kinesis/*.$(OBJEXT)
+ -rm -f backends/graphite/*.$(OBJEXT)
+ -rm -f backends/json/*.$(OBJEXT)
+ -rm -f backends/mongodb/*.$(OBJEXT)
+ -rm -f backends/opentsdb/*.$(OBJEXT)
+ -rm -f backends/prometheus/*.$(OBJEXT)
+ -rm -f backends/prometheus/remote_write/*.$(OBJEXT)
+ -rm -f collectors/apps.plugin/*.$(OBJEXT)
+ -rm -f collectors/cgroups.plugin/*.$(OBJEXT)
+ -rm -f collectors/checks.plugin/*.$(OBJEXT)
+ -rm -f collectors/cups.plugin/*.$(OBJEXT)
+ -rm -f collectors/diskspace.plugin/*.$(OBJEXT)
+ -rm -f collectors/freebsd.plugin/*.$(OBJEXT)
+ -rm -f collectors/freeipmi.plugin/*.$(OBJEXT)
+ -rm -f collectors/idlejitter.plugin/*.$(OBJEXT)
+ -rm -f collectors/macos.plugin/*.$(OBJEXT)
+ -rm -f collectors/nfacct.plugin/*.$(OBJEXT)
+ -rm -f collectors/perf.plugin/*.$(OBJEXT)
+ -rm -f collectors/plugins.d/*.$(OBJEXT)
+ -rm -f collectors/proc.plugin/*.$(OBJEXT)
+ -rm -f collectors/statsd.plugin/*.$(OBJEXT)
+ -rm -f collectors/tc.plugin/*.$(OBJEXT)
+ -rm -f collectors/xenstat.plugin/*.$(OBJEXT)
+ -rm -f daemon/*.$(OBJEXT)
+ -rm -f database/*.$(OBJEXT)
+ -rm -f database/engine/*.$(OBJEXT)
+ -rm -f health/*.$(OBJEXT)
+ -rm -f libnetdata/*.$(OBJEXT)
+ -rm -f libnetdata/adaptive_resortable_list/*.$(OBJEXT)
+ -rm -f libnetdata/avl/*.$(OBJEXT)
+ -rm -f libnetdata/buffer/*.$(OBJEXT)
+ -rm -f libnetdata/clocks/*.$(OBJEXT)
+ -rm -f libnetdata/config/*.$(OBJEXT)
+ -rm -f libnetdata/dictionary/*.$(OBJEXT)
+ -rm -f libnetdata/eval/*.$(OBJEXT)
+ -rm -f libnetdata/health/*.$(OBJEXT)
+ -rm -f libnetdata/json/*.$(OBJEXT)
+ -rm -f libnetdata/locks/*.$(OBJEXT)
+ -rm -f libnetdata/log/*.$(OBJEXT)
+ -rm -f libnetdata/popen/*.$(OBJEXT)
+ -rm -f libnetdata/procfile/*.$(OBJEXT)
+ -rm -f libnetdata/simple_pattern/*.$(OBJEXT)
+ -rm -f libnetdata/socket/*.$(OBJEXT)
+ -rm -f libnetdata/statistical/*.$(OBJEXT)
+ -rm -f libnetdata/storage_number/*.$(OBJEXT)
+ -rm -f libnetdata/threads/*.$(OBJEXT)
+ -rm -f libnetdata/url/*.$(OBJEXT)
+ -rm -f registry/*.$(OBJEXT)
+ -rm -f streaming/*.$(OBJEXT)
+ -rm -f web/api/*.$(OBJEXT)
+ -rm -f web/api/badges/*.$(OBJEXT)
+ -rm -f web/api/exporters/*.$(OBJEXT)
+ -rm -f web/api/exporters/shell/*.$(OBJEXT)
+ -rm -f web/api/formatters/*.$(OBJEXT)
+ -rm -f web/api/formatters/csv/*.$(OBJEXT)
+ -rm -f web/api/formatters/json/*.$(OBJEXT)
+ -rm -f web/api/formatters/ssv/*.$(OBJEXT)
+ -rm -f web/api/formatters/value/*.$(OBJEXT)
+ -rm -f web/api/health/*.$(OBJEXT)
+ -rm -f web/api/queries/*.$(OBJEXT)
+ -rm -f web/api/queries/average/*.$(OBJEXT)
+ -rm -f web/api/queries/des/*.$(OBJEXT)
+ -rm -f web/api/queries/incremental_sum/*.$(OBJEXT)
+ -rm -f web/api/queries/max/*.$(OBJEXT)
+ -rm -f web/api/queries/median/*.$(OBJEXT)
+ -rm -f web/api/queries/min/*.$(OBJEXT)
+ -rm -f web/api/queries/ses/*.$(OBJEXT)
+ -rm -f web/api/queries/stddev/*.$(OBJEXT)
+ -rm -f web/api/queries/sum/*.$(OBJEXT)
+ -rm -f web/server/*.$(OBJEXT)
+ -rm -f web/server/static/*.$(OBJEXT)
+
+distclean-compile:
+ -rm -f *.tab.c
+
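+# When dependency tracking is enabled, configure substitutes @AMDEP_TRUE@
+# and @am__include@ so each per-object .Po fragment below gets included;
+# the fragments themselves are written by the suffix rules further down.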
+@AMDEP_TRUE@@am__include@ @am__quote@backends/$(DEPDIR)/backends.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@backends/aws_kinesis/$(DEPDIR)/aws_kinesis.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@backends/aws_kinesis/$(DEPDIR)/aws_kinesis_put_record.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@backends/graphite/$(DEPDIR)/graphite.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@backends/json/$(DEPDIR)/json.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@backends/mongodb/$(DEPDIR)/mongodb.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@backends/opentsdb/$(DEPDIR)/opentsdb.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@backends/prometheus/$(DEPDIR)/backend_prometheus.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@backends/prometheus/remote_write/$(DEPDIR)/remote_write.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@backends/prometheus/remote_write/$(DEPDIR)/remote_write.pb.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@collectors/apps.plugin/$(DEPDIR)/apps_plugin.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@collectors/cgroups.plugin/$(DEPDIR)/cgroup-network.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@collectors/cgroups.plugin/$(DEPDIR)/sys_fs_cgroup.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@collectors/checks.plugin/$(DEPDIR)/plugin_checks.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@collectors/cups.plugin/$(DEPDIR)/cups_plugin.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@collectors/diskspace.plugin/$(DEPDIR)/plugin_diskspace.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@collectors/freebsd.plugin/$(DEPDIR)/freebsd_devstat.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@collectors/freebsd.plugin/$(DEPDIR)/freebsd_getifaddrs.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@collectors/freebsd.plugin/$(DEPDIR)/freebsd_getmntinfo.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@collectors/freebsd.plugin/$(DEPDIR)/freebsd_ipfw.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@collectors/freebsd.plugin/$(DEPDIR)/freebsd_kstat_zfs.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@collectors/freebsd.plugin/$(DEPDIR)/freebsd_sysctl.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@collectors/freebsd.plugin/$(DEPDIR)/plugin_freebsd.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@collectors/freeipmi.plugin/$(DEPDIR)/freeipmi_plugin.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@collectors/idlejitter.plugin/$(DEPDIR)/plugin_idlejitter.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@collectors/macos.plugin/$(DEPDIR)/macos_fw.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@collectors/macos.plugin/$(DEPDIR)/macos_mach_smi.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@collectors/macos.plugin/$(DEPDIR)/macos_sysctl.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@collectors/macos.plugin/$(DEPDIR)/plugin_macos.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@collectors/nfacct.plugin/$(DEPDIR)/plugin_nfacct.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@collectors/perf.plugin/$(DEPDIR)/perf_plugin.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@collectors/plugins.d/$(DEPDIR)/plugins_d.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@collectors/proc.plugin/$(DEPDIR)/ipc.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@collectors/proc.plugin/$(DEPDIR)/plugin_proc.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@collectors/proc.plugin/$(DEPDIR)/proc_diskstats.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@collectors/proc.plugin/$(DEPDIR)/proc_interrupts.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@collectors/proc.plugin/$(DEPDIR)/proc_loadavg.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@collectors/proc.plugin/$(DEPDIR)/proc_mdstat.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@collectors/proc.plugin/$(DEPDIR)/proc_meminfo.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@collectors/proc.plugin/$(DEPDIR)/proc_net_dev.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@collectors/proc.plugin/$(DEPDIR)/proc_net_ip_vs_stats.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@collectors/proc.plugin/$(DEPDIR)/proc_net_netstat.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@collectors/proc.plugin/$(DEPDIR)/proc_net_rpc_nfs.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@collectors/proc.plugin/$(DEPDIR)/proc_net_rpc_nfsd.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@collectors/proc.plugin/$(DEPDIR)/proc_net_sctp_snmp.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@collectors/proc.plugin/$(DEPDIR)/proc_net_snmp.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@collectors/proc.plugin/$(DEPDIR)/proc_net_snmp6.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@collectors/proc.plugin/$(DEPDIR)/proc_net_sockstat.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@collectors/proc.plugin/$(DEPDIR)/proc_net_sockstat6.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@collectors/proc.plugin/$(DEPDIR)/proc_net_softnet_stat.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@collectors/proc.plugin/$(DEPDIR)/proc_net_stat_conntrack.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@collectors/proc.plugin/$(DEPDIR)/proc_net_stat_synproxy.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@collectors/proc.plugin/$(DEPDIR)/proc_self_mountinfo.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@collectors/proc.plugin/$(DEPDIR)/proc_softirqs.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@collectors/proc.plugin/$(DEPDIR)/proc_spl_kstat_zfs.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@collectors/proc.plugin/$(DEPDIR)/proc_stat.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@collectors/proc.plugin/$(DEPDIR)/proc_sys_kernel_random_entropy_avail.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@collectors/proc.plugin/$(DEPDIR)/proc_uptime.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@collectors/proc.plugin/$(DEPDIR)/proc_vmstat.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@collectors/proc.plugin/$(DEPDIR)/sys_block_zram.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@collectors/proc.plugin/$(DEPDIR)/sys_class_power_supply.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@collectors/proc.plugin/$(DEPDIR)/sys_devices_system_edac_mc.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@collectors/proc.plugin/$(DEPDIR)/sys_devices_system_node.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@collectors/proc.plugin/$(DEPDIR)/sys_fs_btrfs.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@collectors/proc.plugin/$(DEPDIR)/sys_kernel_mm_ksm.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@collectors/proc.plugin/$(DEPDIR)/zfs_common.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@collectors/statsd.plugin/$(DEPDIR)/statsd.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@collectors/tc.plugin/$(DEPDIR)/plugin_tc.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@collectors/xenstat.plugin/$(DEPDIR)/xenstat_plugin.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@daemon/$(DEPDIR)/common.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@daemon/$(DEPDIR)/daemon.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@daemon/$(DEPDIR)/global_statistics.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@daemon/$(DEPDIR)/main.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@daemon/$(DEPDIR)/signals.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@daemon/$(DEPDIR)/unit_test.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@database/$(DEPDIR)/rrd.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@database/$(DEPDIR)/rrdcalc.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@database/$(DEPDIR)/rrdcalctemplate.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@database/$(DEPDIR)/rrddim.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@database/$(DEPDIR)/rrddimvar.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@database/$(DEPDIR)/rrdfamily.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@database/$(DEPDIR)/rrdhost.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@database/$(DEPDIR)/rrdset.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@database/$(DEPDIR)/rrdsetvar.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@database/$(DEPDIR)/rrdvar.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@database/engine/$(DEPDIR)/datafile.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@database/engine/$(DEPDIR)/journalfile.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@database/engine/$(DEPDIR)/pagecache.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@database/engine/$(DEPDIR)/rrdengine.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@database/engine/$(DEPDIR)/rrdengineapi.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@database/engine/$(DEPDIR)/rrdenginelib.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@database/engine/$(DEPDIR)/rrdenglocking.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@health/$(DEPDIR)/health.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@health/$(DEPDIR)/health_config.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@health/$(DEPDIR)/health_json.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@health/$(DEPDIR)/health_log.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@libnetdata/$(DEPDIR)/libnetdata.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@libnetdata/$(DEPDIR)/os.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@libnetdata/adaptive_resortable_list/$(DEPDIR)/adaptive_resortable_list.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@libnetdata/avl/$(DEPDIR)/avl.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@libnetdata/buffer/$(DEPDIR)/buffer.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@libnetdata/clocks/$(DEPDIR)/clocks.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@libnetdata/config/$(DEPDIR)/appconfig.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@libnetdata/dictionary/$(DEPDIR)/dictionary.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@libnetdata/eval/$(DEPDIR)/eval.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@libnetdata/health/$(DEPDIR)/health.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@libnetdata/json/$(DEPDIR)/jsmn.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@libnetdata/json/$(DEPDIR)/json.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@libnetdata/locks/$(DEPDIR)/locks.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@libnetdata/log/$(DEPDIR)/log.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@libnetdata/popen/$(DEPDIR)/popen.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@libnetdata/procfile/$(DEPDIR)/procfile.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@libnetdata/simple_pattern/$(DEPDIR)/simple_pattern.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@libnetdata/socket/$(DEPDIR)/security.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@libnetdata/socket/$(DEPDIR)/socket.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@libnetdata/statistical/$(DEPDIR)/statistical.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@libnetdata/storage_number/$(DEPDIR)/storage_number.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@libnetdata/threads/$(DEPDIR)/threads.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@libnetdata/url/$(DEPDIR)/url.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@registry/$(DEPDIR)/registry.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@registry/$(DEPDIR)/registry_db.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@registry/$(DEPDIR)/registry_init.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@registry/$(DEPDIR)/registry_internals.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@registry/$(DEPDIR)/registry_log.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@registry/$(DEPDIR)/registry_machine.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@registry/$(DEPDIR)/registry_person.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@registry/$(DEPDIR)/registry_url.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@streaming/$(DEPDIR)/rrdpush.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@web/api/$(DEPDIR)/web_api_v1.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@web/api/badges/$(DEPDIR)/web_buffer_svg.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@web/api/exporters/$(DEPDIR)/allmetrics.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@web/api/exporters/shell/$(DEPDIR)/allmetrics_shell.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@web/api/formatters/$(DEPDIR)/charts2json.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@web/api/formatters/$(DEPDIR)/json_wrapper.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@web/api/formatters/$(DEPDIR)/rrd2json.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@web/api/formatters/$(DEPDIR)/rrdset2json.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@web/api/formatters/csv/$(DEPDIR)/csv.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@web/api/formatters/json/$(DEPDIR)/json.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@web/api/formatters/ssv/$(DEPDIR)/ssv.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@web/api/formatters/value/$(DEPDIR)/value.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@web/api/health/$(DEPDIR)/health_cmdapi.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@web/api/queries/$(DEPDIR)/query.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@web/api/queries/$(DEPDIR)/rrdr.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@web/api/queries/average/$(DEPDIR)/average.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@web/api/queries/des/$(DEPDIR)/des.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@web/api/queries/incremental_sum/$(DEPDIR)/incremental_sum.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@web/api/queries/max/$(DEPDIR)/max.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@web/api/queries/median/$(DEPDIR)/median.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@web/api/queries/min/$(DEPDIR)/min.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@web/api/queries/ses/$(DEPDIR)/ses.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@web/api/queries/stddev/$(DEPDIR)/stddev.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@web/api/queries/sum/$(DEPDIR)/sum.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@web/server/$(DEPDIR)/web_client.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@web/server/$(DEPDIR)/web_client_cache.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@web/server/$(DEPDIR)/web_server.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@web/server/static/$(DEPDIR)/static-threaded.Po@am__quote@
+
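+# The .c.o rule compiles and records dependencies in one pass.  For a
+# hypothetical source daemon/main.c the fast-dep branch expands roughly to:
+#   gcc -MT daemon/main.o -MD -MP -MF daemon/.deps/main.Tpo -c -o daemon/main.o daemon/main.c
+#   mv -f daemon/.deps/main.Tpo daemon/.deps/main.Po
+# Writing to .Tpo and renaming keeps a half-written .Po fragment from
+# being included after an interrupted compile.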
+.c.o:
+@am__fastdepCC_TRUE@ $(AM_V_CC)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.o$$||'`;\
+@am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ $< &&\
+@am__fastdepCC_TRUE@ $(am__mv) $$depbase.Tpo $$depbase.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(COMPILE) -c -o $@ $<
+
+.c.obj:
+@am__fastdepCC_TRUE@ $(AM_V_CC)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.obj$$||'`;\
+@am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ `$(CYGPATH_W) '$<'` &&\
+@am__fastdepCC_TRUE@ $(am__mv) $$depbase.Tpo $$depbase.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(COMPILE) -c -o $@ `$(CYGPATH_W) '$<'`
+
+.cc.o:
+@am__fastdepCXX_TRUE@ $(AM_V_CXX)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.o$$||'`;\
+@am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ $< &&\
+@am__fastdepCXX_TRUE@ $(am__mv) $$depbase.Tpo $$depbase.Po
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ $<
+
+.cc.obj:
+@am__fastdepCXX_TRUE@ $(AM_V_CXX)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.obj$$||'`;\
+@am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ `$(CYGPATH_W) '$<'` &&\
+@am__fastdepCXX_TRUE@ $(am__mv) $$depbase.Tpo $$depbase.Po
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'`
+install-dist_cacheDATA: $(dist_cache_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_cache_DATA)'; test -n "$(cachedir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(cachedir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(cachedir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(cachedir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(cachedir)" || exit $$?; \
+ done
+
+uninstall-dist_cacheDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_cache_DATA)'; test -n "$(cachedir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(cachedir)'; $(am__uninstall_files_from_dir)
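+# The same install/uninstall pattern repeats for the log, registry and
+# varlib DATA lists below: create $(DESTDIR)$(dir) on demand, then
+# $(INSTALL_DATA) each file, taking it from the build tree when present
+# and falling back to $(srcdir) otherwise.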
+install-dist_logDATA: $(dist_log_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_log_DATA)'; test -n "$(logdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(logdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(logdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(logdir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(logdir)" || exit $$?; \
+ done
+
+uninstall-dist_logDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_log_DATA)'; test -n "$(logdir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(logdir)'; $(am__uninstall_files_from_dir)
+install-dist_registryDATA: $(dist_registry_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_registry_DATA)'; test -n "$(registrydir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(registrydir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(registrydir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(registrydir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(registrydir)" || exit $$?; \
+ done
+
+uninstall-dist_registryDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_registry_DATA)'; test -n "$(registrydir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(registrydir)'; $(am__uninstall_files_from_dir)
+install-dist_varlibDATA: $(dist_varlib_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_varlib_DATA)'; test -n "$(varlibdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(varlibdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(varlibdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(varlibdir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(varlibdir)" || exit $$?; \
+ done
+
+uninstall-dist_varlibDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_varlib_DATA)'; test -n "$(varlibdir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(varlibdir)'; $(am__uninstall_files_from_dir)
+
+# This directory's subdirectories are mostly independent; you can cd
+# into them and run 'make' without going through this Makefile.
+# To change the values of 'make' variables: instead of editing Makefiles,
+# (1) if the variable is set in 'config.status', edit 'config.status'
+# (which will cause the Makefiles to be regenerated when you run 'make');
+# (2) otherwise, pass the desired values on the 'make' command line.
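+# For example, a one-off override on the command line (values shown are
+# illustrative only):
+#   make CFLAGS='-O2 -g' V=1
+# applies for that invocation alone, without touching config.status or
+# regenerating any Makefile.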
+$(am__recursive_targets):
+ @fail=; \
+ if $(am__make_keepgoing); then \
+ failcom='fail=yes'; \
+ else \
+ failcom='exit 1'; \
+ fi; \
+ dot_seen=no; \
+ target=`echo $@ | sed s/-recursive//`; \
+ case "$@" in \
+ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \
+ *) list='$(SUBDIRS)' ;; \
+ esac; \
+ for subdir in $$list; do \
+ echo "Making $$target in $$subdir"; \
+ if test "$$subdir" = "."; then \
+ dot_seen=yes; \
+ local_target="$$target-am"; \
+ else \
+ local_target="$$target"; \
+ fi; \
+ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \
+ || eval $$failcom; \
+ done; \
+ if test "$$dot_seen" = "no"; then \
+ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \
+ fi; test -z "$$fail"
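+# e.g. 'make install-recursive' prints "Making install in <subdir>" for
+# each member of $(SUBDIRS) and finally runs 'install-am' in '.', stopping
+# at the first failure unless make was invoked with -k.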
+
+ID: $(am__tagged_files)
+ $(am__define_uniq_tagged_files); mkid -fID $$unique
+tags: tags-recursive
+TAGS: tags
+
+tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+ set x; \
+ here=`pwd`; \
+ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \
+ include_option=--etags-include; \
+ empty_fix=.; \
+ else \
+ include_option=--include; \
+ empty_fix=; \
+ fi; \
+ list='$(SUBDIRS)'; for subdir in $$list; do \
+ if test "$$subdir" = .; then :; else \
+ test ! -f $$subdir/TAGS || \
+ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \
+ fi; \
+ done; \
+ $(am__define_uniq_tagged_files); \
+ shift; \
+ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
+ test -n "$$unique" || unique=$$empty_fix; \
+ if test $$# -gt 0; then \
+ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+ "$$@" $$unique; \
+ else \
+ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+ $$unique; \
+ fi; \
+ fi
+ctags: ctags-recursive
+
+CTAGS: ctags
+ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+ $(am__define_uniq_tagged_files); \
+ test -z "$(CTAGS_ARGS)$$unique" \
+ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
+ $$unique
+
+GTAGS:
+ here=`$(am__cd) $(top_builddir) && pwd` \
+ && $(am__cd) $(top_srcdir) \
+ && gtags -i $(GTAGS_ARGS) "$$here"
+cscope: cscope.files
+ test ! -s cscope.files \
+ || $(CSCOPE) -b -q $(AM_CSCOPEFLAGS) $(CSCOPEFLAGS) -i cscope.files $(CSCOPE_ARGS)
+clean-cscope:
+ -rm -f cscope.files
+cscope.files: clean-cscope cscopelist
+cscopelist: cscopelist-recursive
+
+cscopelist-am: $(am__tagged_files)
+ list='$(am__tagged_files)'; \
+ case "$(srcdir)" in \
+ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \
+ *) sdir=$(subdir)/$(srcdir) ;; \
+ esac; \
+ for i in $$list; do \
+ if test -f "$$i"; then \
+ echo "$(subdir)/$$i"; \
+ else \
+ echo "$$sdir/$$i"; \
+ fi; \
+ done >> $(top_builddir)/cscope.files
+
+distclean-tags:
+ -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
+ -rm -f cscope.out cscope.in.out cscope.po.out cscope.files
+
+distdir: $(DISTFILES)
+ $(am__remove_distdir)
+ test -d "$(distdir)" || mkdir "$(distdir)"
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+ @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \
+ if test "$$subdir" = .; then :; else \
+ $(am__make_dryrun) \
+ || test -d "$(distdir)/$$subdir" \
+ || $(MKDIR_P) "$(distdir)/$$subdir" \
+ || exit 1; \
+ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \
+ $(am__relativize); \
+ new_distdir=$$reldir; \
+ dir1=$$subdir; dir2="$(top_distdir)"; \
+ $(am__relativize); \
+ new_top_distdir=$$reldir; \
+ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \
+ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \
+ ($(am__cd) $$subdir && \
+ $(MAKE) $(AM_MAKEFLAGS) \
+ top_distdir="$$new_top_distdir" \
+ distdir="$$new_distdir" \
+ am__remove_distdir=: \
+ am__skip_length_check=: \
+ am__skip_mode_fix=: \
+ distdir) \
+ || exit 1; \
+ fi; \
+ done
+ -test -n "$(am__skip_mode_fix)" \
+ || find "$(distdir)" -type d ! -perm -755 \
+ -exec chmod u+rwx,go+rx {} \; -o \
+ ! -type d ! -perm -444 -links 1 -exec chmod a+r {} \; -o \
+ ! -type d ! -perm -400 -exec chmod a+r {} \; -o \
+ ! -type d ! -perm -444 -exec $(install_sh) -c -m a+r {} {} \; \
+ || chmod -R a+r "$(distdir)"
+dist-gzip: distdir
+ tardir=$(distdir) && $(am__tar) | eval GZIP= gzip $(GZIP_ENV) -c >$(distdir).tar.gz
+ $(am__post_remove_distdir)
+
+dist-bzip2: distdir
+ tardir=$(distdir) && $(am__tar) | BZIP2=$${BZIP2--9} bzip2 -c >$(distdir).tar.bz2
+ $(am__post_remove_distdir)
+
+dist-lzip: distdir
+ tardir=$(distdir) && $(am__tar) | lzip -c $${LZIP_OPT--9} >$(distdir).tar.lz
+ $(am__post_remove_distdir)
+
+dist-xz: distdir
+ tardir=$(distdir) && $(am__tar) | XZ_OPT=$${XZ_OPT--e} xz -c >$(distdir).tar.xz
+ $(am__post_remove_distdir)
+
+dist-tarZ: distdir
+ @echo WARNING: "Support for distribution archives compressed with" \
+ "legacy program 'compress' is deprecated." >&2
+ @echo WARNING: "It will be removed altogether in Automake 2.0" >&2
+ tardir=$(distdir) && $(am__tar) | compress -c >$(distdir).tar.Z
+ $(am__post_remove_distdir)
+
+dist-shar: distdir
+ @echo WARNING: "Support for shar distribution archives is" \
+ "deprecated." >&2
+ @echo WARNING: "It will be removed altogether in Automake 2.0" >&2
+ shar $(distdir) | eval GZIP= gzip $(GZIP_ENV) -c >$(distdir).shar.gz
+ $(am__post_remove_distdir)
+
+dist-zip: distdir
+ -rm -f $(distdir).zip
+ zip -rq $(distdir).zip $(distdir)
+ $(am__post_remove_distdir)
+
+dist dist-all:
+ $(MAKE) $(AM_MAKEFLAGS) $(DIST_TARGETS) am__post_remove_distdir='@:'
+ $(am__post_remove_distdir)
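+# 'make dist' runs every target listed in $(DIST_TARGETS) (whichever of
+# the dist-* rules above configure selected) and defers removal of the
+# staging directory until all requested archives exist.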
+
+# This target untars the dist file and tries a VPATH configuration. Then
+# it guarantees that the distribution is self-contained by making another
+# tarfile.
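+# A typical invocation (the flag shown is a hypothetical example):
+#   make distcheck DISTCHECK_CONFIGURE_FLAGS='--disable-plugin-xenstat'
+# The target fails if the VPATH build, check, install, uninstall, or the
+# second 'make dist' leaves stray files behind.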
+distcheck: dist
+ case '$(DIST_ARCHIVES)' in \
+ *.tar.gz*) \
+ eval GZIP= gzip $(GZIP_ENV) -dc $(distdir).tar.gz | $(am__untar) ;;\
+ *.tar.bz2*) \
+ bzip2 -dc $(distdir).tar.bz2 | $(am__untar) ;;\
+ *.tar.lz*) \
+ lzip -dc $(distdir).tar.lz | $(am__untar) ;;\
+ *.tar.xz*) \
+ xz -dc $(distdir).tar.xz | $(am__untar) ;;\
+ *.tar.Z*) \
+ uncompress -c $(distdir).tar.Z | $(am__untar) ;;\
+ *.shar.gz*) \
+ eval GZIP= gzip $(GZIP_ENV) -dc $(distdir).shar.gz | unshar ;;\
+ *.zip*) \
+ unzip $(distdir).zip ;;\
+ esac
+ chmod -R a-w $(distdir)
+ chmod u+w $(distdir)
+ mkdir $(distdir)/_build $(distdir)/_build/sub $(distdir)/_inst
+ chmod a-w $(distdir)
+ test -d $(distdir)/_build || exit 0; \
+ dc_install_base=`$(am__cd) $(distdir)/_inst && pwd | sed -e 's,^[^:\\/]:[\\/],/,'` \
+ && dc_destdir="$${TMPDIR-/tmp}/am-dc-$$$$/" \
+ && am__cwd=`pwd` \
+ && $(am__cd) $(distdir)/_build/sub \
+ && ../../configure \
+ $(AM_DISTCHECK_CONFIGURE_FLAGS) \
+ $(DISTCHECK_CONFIGURE_FLAGS) \
+ --srcdir=../.. --prefix="$$dc_install_base" \
+ && $(MAKE) $(AM_MAKEFLAGS) \
+ && $(MAKE) $(AM_MAKEFLAGS) dvi \
+ && $(MAKE) $(AM_MAKEFLAGS) check \
+ && $(MAKE) $(AM_MAKEFLAGS) install \
+ && $(MAKE) $(AM_MAKEFLAGS) installcheck \
+ && $(MAKE) $(AM_MAKEFLAGS) uninstall \
+ && $(MAKE) $(AM_MAKEFLAGS) distuninstallcheck_dir="$$dc_install_base" \
+ distuninstallcheck \
+ && chmod -R a-w "$$dc_install_base" \
+ && ({ \
+ (cd ../.. && umask 077 && mkdir "$$dc_destdir") \
+ && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" install \
+ && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" uninstall \
+ && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" \
+ distuninstallcheck_dir="$$dc_destdir" distuninstallcheck; \
+ } || { rm -rf "$$dc_destdir"; exit 1; }) \
+ && rm -rf "$$dc_destdir" \
+ && $(MAKE) $(AM_MAKEFLAGS) dist \
+ && rm -rf $(DIST_ARCHIVES) \
+ && $(MAKE) $(AM_MAKEFLAGS) distcleancheck \
+ && cd "$$am__cwd" \
+ || exit 1
+ $(am__post_remove_distdir)
+ @(echo "$(distdir) archives ready for distribution: "; \
+ list='$(DIST_ARCHIVES)'; for i in $$list; do echo $$i; done) | \
+ sed -e 1h -e 1s/./=/g -e 1p -e 1x -e '$$p' -e '$$x'
+distuninstallcheck:
+ @test -n '$(distuninstallcheck_dir)' || { \
+ echo 'ERROR: trying to run $@ with an empty' \
+ '$$(distuninstallcheck_dir)' >&2; \
+ exit 1; \
+ }; \
+ $(am__cd) '$(distuninstallcheck_dir)' || { \
+ echo 'ERROR: cannot chdir into $(distuninstallcheck_dir)' >&2; \
+ exit 1; \
+ }; \
+ test `$(am__distuninstallcheck_listfiles) | wc -l` -eq 0 \
+ || { echo "ERROR: files left after uninstall:" ; \
+ if test -n "$(DESTDIR)"; then \
+ echo " (check DESTDIR support)"; \
+ fi ; \
+ $(distuninstallcheck_listfiles) ; \
+ exit 1; } >&2
+distcleancheck: distclean
+ @if test '$(srcdir)' = . ; then \
+ echo "ERROR: distcleancheck can only run from a VPATH build" ; \
+ exit 1 ; \
+ fi
+ @test `$(distcleancheck_listfiles) | wc -l` -eq 0 \
+ || { echo "ERROR: files left in build directory after distclean:" ; \
+ $(distcleancheck_listfiles) ; \
+ exit 1; } >&2
+check-am: all-am
+check: $(BUILT_SOURCES)
+ $(MAKE) $(AM_MAKEFLAGS) check-recursive
+all-am: Makefile $(PROGRAMS) $(SCRIPTS) $(DATA) config.h
+installdirs: installdirs-recursive
+installdirs-am:
+ for dir in "$(DESTDIR)$(pluginsdir)" "$(DESTDIR)$(sbindir)" "$(DESTDIR)$(cachedir)" "$(DESTDIR)$(logdir)" "$(DESTDIR)$(registrydir)" "$(DESTDIR)$(varlibdir)"; do \
+ test -z "$$dir" || $(MKDIR_P) "$$dir"; \
+ done
+install: $(BUILT_SOURCES)
+ $(MAKE) $(AM_MAKEFLAGS) install-recursive
+install-exec: install-exec-recursive
+install-data: install-data-recursive
+uninstall: uninstall-recursive
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-recursive
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+ -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES)
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+ -rm -f backends/$(DEPDIR)/$(am__dirstamp)
+ -rm -f backends/$(am__dirstamp)
+ -rm -f backends/aws_kinesis/$(DEPDIR)/$(am__dirstamp)
+ -rm -f backends/aws_kinesis/$(am__dirstamp)
+ -rm -f backends/graphite/$(DEPDIR)/$(am__dirstamp)
+ -rm -f backends/graphite/$(am__dirstamp)
+ -rm -f backends/json/$(DEPDIR)/$(am__dirstamp)
+ -rm -f backends/json/$(am__dirstamp)
+ -rm -f backends/mongodb/$(DEPDIR)/$(am__dirstamp)
+ -rm -f backends/mongodb/$(am__dirstamp)
+ -rm -f backends/opentsdb/$(DEPDIR)/$(am__dirstamp)
+ -rm -f backends/opentsdb/$(am__dirstamp)
+ -rm -f backends/prometheus/$(DEPDIR)/$(am__dirstamp)
+ -rm -f backends/prometheus/$(am__dirstamp)
+ -rm -f backends/prometheus/remote_write/$(DEPDIR)/$(am__dirstamp)
+ -rm -f backends/prometheus/remote_write/$(am__dirstamp)
+ -rm -f collectors/apps.plugin/$(DEPDIR)/$(am__dirstamp)
+ -rm -f collectors/apps.plugin/$(am__dirstamp)
+ -rm -f collectors/cgroups.plugin/$(DEPDIR)/$(am__dirstamp)
+ -rm -f collectors/cgroups.plugin/$(am__dirstamp)
+ -rm -f collectors/checks.plugin/$(DEPDIR)/$(am__dirstamp)
+ -rm -f collectors/checks.plugin/$(am__dirstamp)
+ -rm -f collectors/cups.plugin/$(DEPDIR)/$(am__dirstamp)
+ -rm -f collectors/cups.plugin/$(am__dirstamp)
+ -rm -f collectors/diskspace.plugin/$(DEPDIR)/$(am__dirstamp)
+ -rm -f collectors/diskspace.plugin/$(am__dirstamp)
+ -rm -f collectors/freebsd.plugin/$(DEPDIR)/$(am__dirstamp)
+ -rm -f collectors/freebsd.plugin/$(am__dirstamp)
+ -rm -f collectors/freeipmi.plugin/$(DEPDIR)/$(am__dirstamp)
+ -rm -f collectors/freeipmi.plugin/$(am__dirstamp)
+ -rm -f collectors/idlejitter.plugin/$(DEPDIR)/$(am__dirstamp)
+ -rm -f collectors/idlejitter.plugin/$(am__dirstamp)
+ -rm -f collectors/macos.plugin/$(DEPDIR)/$(am__dirstamp)
+ -rm -f collectors/macos.plugin/$(am__dirstamp)
+ -rm -f collectors/nfacct.plugin/$(DEPDIR)/$(am__dirstamp)
+ -rm -f collectors/nfacct.plugin/$(am__dirstamp)
+ -rm -f collectors/perf.plugin/$(DEPDIR)/$(am__dirstamp)
+ -rm -f collectors/perf.plugin/$(am__dirstamp)
+ -rm -f collectors/plugins.d/$(DEPDIR)/$(am__dirstamp)
+ -rm -f collectors/plugins.d/$(am__dirstamp)
+ -rm -f collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp)
+ -rm -f collectors/proc.plugin/$(am__dirstamp)
+ -rm -f collectors/statsd.plugin/$(DEPDIR)/$(am__dirstamp)
+ -rm -f collectors/statsd.plugin/$(am__dirstamp)
+ -rm -f collectors/tc.plugin/$(DEPDIR)/$(am__dirstamp)
+ -rm -f collectors/tc.plugin/$(am__dirstamp)
+ -rm -f collectors/xenstat.plugin/$(DEPDIR)/$(am__dirstamp)
+ -rm -f collectors/xenstat.plugin/$(am__dirstamp)
+ -rm -f daemon/$(DEPDIR)/$(am__dirstamp)
+ -rm -f daemon/$(am__dirstamp)
+ -rm -f database/$(DEPDIR)/$(am__dirstamp)
+ -rm -f database/$(am__dirstamp)
+ -rm -f database/engine/$(DEPDIR)/$(am__dirstamp)
+ -rm -f database/engine/$(am__dirstamp)
+ -rm -f health/$(DEPDIR)/$(am__dirstamp)
+ -rm -f health/$(am__dirstamp)
+ -rm -f libnetdata/$(DEPDIR)/$(am__dirstamp)
+ -rm -f libnetdata/$(am__dirstamp)
+ -rm -f libnetdata/adaptive_resortable_list/$(DEPDIR)/$(am__dirstamp)
+ -rm -f libnetdata/adaptive_resortable_list/$(am__dirstamp)
+ -rm -f libnetdata/avl/$(DEPDIR)/$(am__dirstamp)
+ -rm -f libnetdata/avl/$(am__dirstamp)
+ -rm -f libnetdata/buffer/$(DEPDIR)/$(am__dirstamp)
+ -rm -f libnetdata/buffer/$(am__dirstamp)
+ -rm -f libnetdata/clocks/$(DEPDIR)/$(am__dirstamp)
+ -rm -f libnetdata/clocks/$(am__dirstamp)
+ -rm -f libnetdata/config/$(DEPDIR)/$(am__dirstamp)
+ -rm -f libnetdata/config/$(am__dirstamp)
+ -rm -f libnetdata/dictionary/$(DEPDIR)/$(am__dirstamp)
+ -rm -f libnetdata/dictionary/$(am__dirstamp)
+ -rm -f libnetdata/eval/$(DEPDIR)/$(am__dirstamp)
+ -rm -f libnetdata/eval/$(am__dirstamp)
+ -rm -f libnetdata/health/$(DEPDIR)/$(am__dirstamp)
+ -rm -f libnetdata/health/$(am__dirstamp)
+ -rm -f libnetdata/json/$(DEPDIR)/$(am__dirstamp)
+ -rm -f libnetdata/json/$(am__dirstamp)
+ -rm -f libnetdata/locks/$(DEPDIR)/$(am__dirstamp)
+ -rm -f libnetdata/locks/$(am__dirstamp)
+ -rm -f libnetdata/log/$(DEPDIR)/$(am__dirstamp)
+ -rm -f libnetdata/log/$(am__dirstamp)
+ -rm -f libnetdata/popen/$(DEPDIR)/$(am__dirstamp)
+ -rm -f libnetdata/popen/$(am__dirstamp)
+ -rm -f libnetdata/procfile/$(DEPDIR)/$(am__dirstamp)
+ -rm -f libnetdata/procfile/$(am__dirstamp)
+ -rm -f libnetdata/simple_pattern/$(DEPDIR)/$(am__dirstamp)
+ -rm -f libnetdata/simple_pattern/$(am__dirstamp)
+ -rm -f libnetdata/socket/$(DEPDIR)/$(am__dirstamp)
+ -rm -f libnetdata/socket/$(am__dirstamp)
+ -rm -f libnetdata/statistical/$(DEPDIR)/$(am__dirstamp)
+ -rm -f libnetdata/statistical/$(am__dirstamp)
+ -rm -f libnetdata/storage_number/$(DEPDIR)/$(am__dirstamp)
+ -rm -f libnetdata/storage_number/$(am__dirstamp)
+ -rm -f libnetdata/threads/$(DEPDIR)/$(am__dirstamp)
+ -rm -f libnetdata/threads/$(am__dirstamp)
+ -rm -f libnetdata/url/$(DEPDIR)/$(am__dirstamp)
+ -rm -f libnetdata/url/$(am__dirstamp)
+ -rm -f registry/$(DEPDIR)/$(am__dirstamp)
+ -rm -f registry/$(am__dirstamp)
+ -rm -f streaming/$(DEPDIR)/$(am__dirstamp)
+ -rm -f streaming/$(am__dirstamp)
+ -rm -f web/api/$(DEPDIR)/$(am__dirstamp)
+ -rm -f web/api/$(am__dirstamp)
+ -rm -f web/api/badges/$(DEPDIR)/$(am__dirstamp)
+ -rm -f web/api/badges/$(am__dirstamp)
+ -rm -f web/api/exporters/$(DEPDIR)/$(am__dirstamp)
+ -rm -f web/api/exporters/$(am__dirstamp)
+ -rm -f web/api/exporters/shell/$(DEPDIR)/$(am__dirstamp)
+ -rm -f web/api/exporters/shell/$(am__dirstamp)
+ -rm -f web/api/formatters/$(DEPDIR)/$(am__dirstamp)
+ -rm -f web/api/formatters/$(am__dirstamp)
+ -rm -f web/api/formatters/csv/$(DEPDIR)/$(am__dirstamp)
+ -rm -f web/api/formatters/csv/$(am__dirstamp)
+ -rm -f web/api/formatters/json/$(DEPDIR)/$(am__dirstamp)
+ -rm -f web/api/formatters/json/$(am__dirstamp)
+ -rm -f web/api/formatters/ssv/$(DEPDIR)/$(am__dirstamp)
+ -rm -f web/api/formatters/ssv/$(am__dirstamp)
+ -rm -f web/api/formatters/value/$(DEPDIR)/$(am__dirstamp)
+ -rm -f web/api/formatters/value/$(am__dirstamp)
+ -rm -f web/api/health/$(DEPDIR)/$(am__dirstamp)
+ -rm -f web/api/health/$(am__dirstamp)
+ -rm -f web/api/queries/$(DEPDIR)/$(am__dirstamp)
+ -rm -f web/api/queries/$(am__dirstamp)
+ -rm -f web/api/queries/average/$(DEPDIR)/$(am__dirstamp)
+ -rm -f web/api/queries/average/$(am__dirstamp)
+ -rm -f web/api/queries/des/$(DEPDIR)/$(am__dirstamp)
+ -rm -f web/api/queries/des/$(am__dirstamp)
+ -rm -f web/api/queries/incremental_sum/$(DEPDIR)/$(am__dirstamp)
+ -rm -f web/api/queries/incremental_sum/$(am__dirstamp)
+ -rm -f web/api/queries/max/$(DEPDIR)/$(am__dirstamp)
+ -rm -f web/api/queries/max/$(am__dirstamp)
+ -rm -f web/api/queries/median/$(DEPDIR)/$(am__dirstamp)
+ -rm -f web/api/queries/median/$(am__dirstamp)
+ -rm -f web/api/queries/min/$(DEPDIR)/$(am__dirstamp)
+ -rm -f web/api/queries/min/$(am__dirstamp)
+ -rm -f web/api/queries/ses/$(DEPDIR)/$(am__dirstamp)
+ -rm -f web/api/queries/ses/$(am__dirstamp)
+ -rm -f web/api/queries/stddev/$(DEPDIR)/$(am__dirstamp)
+ -rm -f web/api/queries/stddev/$(am__dirstamp)
+ -rm -f web/api/queries/sum/$(DEPDIR)/$(am__dirstamp)
+ -rm -f web/api/queries/sum/$(am__dirstamp)
+ -rm -f web/server/$(DEPDIR)/$(am__dirstamp)
+ -rm -f web/server/$(am__dirstamp)
+ -rm -f web/server/static/$(DEPDIR)/$(am__dirstamp)
+ -rm -f web/server/static/$(am__dirstamp)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(BUILT_SOURCES)" || rm -f $(BUILT_SOURCES)
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-recursive
+
+clean-am: clean-generic clean-pluginsPROGRAMS clean-sbinPROGRAMS \
+ mostlyclean-am
+
+distclean: distclean-recursive
+ -rm -f $(am__CONFIG_DISTCLEAN_FILES)
+ -rm -rf backends/$(DEPDIR) backends/aws_kinesis/$(DEPDIR) backends/graphite/$(DEPDIR) backends/json/$(DEPDIR) backends/mongodb/$(DEPDIR) backends/opentsdb/$(DEPDIR) backends/prometheus/$(DEPDIR) backends/prometheus/remote_write/$(DEPDIR) collectors/apps.plugin/$(DEPDIR) collectors/cgroups.plugin/$(DEPDIR) collectors/checks.plugin/$(DEPDIR) collectors/cups.plugin/$(DEPDIR) collectors/diskspace.plugin/$(DEPDIR) collectors/freebsd.plugin/$(DEPDIR) collectors/freeipmi.plugin/$(DEPDIR) collectors/idlejitter.plugin/$(DEPDIR) collectors/macos.plugin/$(DEPDIR) collectors/nfacct.plugin/$(DEPDIR) collectors/perf.plugin/$(DEPDIR) collectors/plugins.d/$(DEPDIR) collectors/proc.plugin/$(DEPDIR) collectors/statsd.plugin/$(DEPDIR) collectors/tc.plugin/$(DEPDIR) collectors/xenstat.plugin/$(DEPDIR) daemon/$(DEPDIR) database/$(DEPDIR) database/engine/$(DEPDIR) health/$(DEPDIR) libnetdata/$(DEPDIR) libnetdata/adaptive_resortable_list/$(DEPDIR) libnetdata/avl/$(DEPDIR) libnetdata/buffer/$(DEPDIR) libnetdata/clocks/$(DEPDIR) libnetdata/config/$(DEPDIR) libnetdata/dictionary/$(DEPDIR) libnetdata/eval/$(DEPDIR) libnetdata/health/$(DEPDIR) libnetdata/json/$(DEPDIR) libnetdata/locks/$(DEPDIR) libnetdata/log/$(DEPDIR) libnetdata/popen/$(DEPDIR) libnetdata/procfile/$(DEPDIR) libnetdata/simple_pattern/$(DEPDIR) libnetdata/socket/$(DEPDIR) libnetdata/statistical/$(DEPDIR) libnetdata/storage_number/$(DEPDIR) libnetdata/threads/$(DEPDIR) libnetdata/url/$(DEPDIR) registry/$(DEPDIR) streaming/$(DEPDIR) web/api/$(DEPDIR) web/api/badges/$(DEPDIR) web/api/exporters/$(DEPDIR) web/api/exporters/shell/$(DEPDIR) web/api/formatters/$(DEPDIR) web/api/formatters/csv/$(DEPDIR) web/api/formatters/json/$(DEPDIR) web/api/formatters/ssv/$(DEPDIR) web/api/formatters/value/$(DEPDIR) web/api/health/$(DEPDIR) web/api/queries/$(DEPDIR) web/api/queries/average/$(DEPDIR) web/api/queries/des/$(DEPDIR) web/api/queries/incremental_sum/$(DEPDIR) web/api/queries/max/$(DEPDIR) web/api/queries/median/$(DEPDIR) web/api/queries/min/$(DEPDIR) web/api/queries/ses/$(DEPDIR) web/api/queries/stddev/$(DEPDIR) web/api/queries/sum/$(DEPDIR) web/server/$(DEPDIR) web/server/static/$(DEPDIR)
+ -rm -f Makefile
+distclean-am: clean-am distclean-compile distclean-generic \
+ distclean-hdr distclean-tags
+
+dvi: dvi-recursive
+
+dvi-am:
+
+html: html-recursive
+
+html-am:
+
+info: info-recursive
+
+info-am:
+
+install-data-am: install-dist_cacheDATA install-dist_logDATA \
+ install-dist_registryDATA install-dist_varlibDATA \
+ install-pluginsPROGRAMS
+
+install-dvi: install-dvi-recursive
+
+install-dvi-am:
+
+install-exec-am: install-sbinPROGRAMS
+
+install-html: install-html-recursive
+
+install-html-am:
+
+install-info: install-info-recursive
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-recursive
+
+install-pdf-am:
+
+install-ps: install-ps-recursive
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-recursive
+ -rm -f $(am__CONFIG_DISTCLEAN_FILES)
+ -rm -rf $(top_srcdir)/autom4te.cache
+ -rm -rf backends/$(DEPDIR) backends/aws_kinesis/$(DEPDIR) backends/graphite/$(DEPDIR) backends/json/$(DEPDIR) backends/mongodb/$(DEPDIR) backends/opentsdb/$(DEPDIR) backends/prometheus/$(DEPDIR) backends/prometheus/remote_write/$(DEPDIR) collectors/apps.plugin/$(DEPDIR) collectors/cgroups.plugin/$(DEPDIR) collectors/checks.plugin/$(DEPDIR) collectors/cups.plugin/$(DEPDIR) collectors/diskspace.plugin/$(DEPDIR) collectors/freebsd.plugin/$(DEPDIR) collectors/freeipmi.plugin/$(DEPDIR) collectors/idlejitter.plugin/$(DEPDIR) collectors/macos.plugin/$(DEPDIR) collectors/nfacct.plugin/$(DEPDIR) collectors/perf.plugin/$(DEPDIR) collectors/plugins.d/$(DEPDIR) collectors/proc.plugin/$(DEPDIR) collectors/statsd.plugin/$(DEPDIR) collectors/tc.plugin/$(DEPDIR) collectors/xenstat.plugin/$(DEPDIR) daemon/$(DEPDIR) database/$(DEPDIR) database/engine/$(DEPDIR) health/$(DEPDIR) libnetdata/$(DEPDIR) libnetdata/adaptive_resortable_list/$(DEPDIR) libnetdata/avl/$(DEPDIR) libnetdata/buffer/$(DEPDIR) libnetdata/clocks/$(DEPDIR) libnetdata/config/$(DEPDIR) libnetdata/dictionary/$(DEPDIR) libnetdata/eval/$(DEPDIR) libnetdata/health/$(DEPDIR) libnetdata/json/$(DEPDIR) libnetdata/locks/$(DEPDIR) libnetdata/log/$(DEPDIR) libnetdata/popen/$(DEPDIR) libnetdata/procfile/$(DEPDIR) libnetdata/simple_pattern/$(DEPDIR) libnetdata/socket/$(DEPDIR) libnetdata/statistical/$(DEPDIR) libnetdata/storage_number/$(DEPDIR) libnetdata/threads/$(DEPDIR) libnetdata/url/$(DEPDIR) registry/$(DEPDIR) streaming/$(DEPDIR) web/api/$(DEPDIR) web/api/badges/$(DEPDIR) web/api/exporters/$(DEPDIR) web/api/exporters/shell/$(DEPDIR) web/api/formatters/$(DEPDIR) web/api/formatters/csv/$(DEPDIR) web/api/formatters/json/$(DEPDIR) web/api/formatters/ssv/$(DEPDIR) web/api/formatters/value/$(DEPDIR) web/api/health/$(DEPDIR) web/api/queries/$(DEPDIR) web/api/queries/average/$(DEPDIR) web/api/queries/des/$(DEPDIR) web/api/queries/incremental_sum/$(DEPDIR) web/api/queries/max/$(DEPDIR) web/api/queries/median/$(DEPDIR) web/api/queries/min/$(DEPDIR) web/api/queries/ses/$(DEPDIR) web/api/queries/stddev/$(DEPDIR) web/api/queries/sum/$(DEPDIR) web/server/$(DEPDIR) web/server/static/$(DEPDIR)
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-recursive
+
+mostlyclean-am: mostlyclean-compile mostlyclean-generic
+
+pdf: pdf-recursive
+
+pdf-am:
+
+ps: ps-recursive
+
+ps-am:
+
+uninstall-am: uninstall-dist_cacheDATA uninstall-dist_logDATA \
+ uninstall-dist_registryDATA uninstall-dist_varlibDATA \
+ uninstall-pluginsPROGRAMS uninstall-sbinPROGRAMS
+
+.MAKE: $(am__recursive_targets) all check install install-am \
+ install-strip
+
+.PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am \
+ am--refresh check check-am clean clean-cscope clean-generic \
+ clean-pluginsPROGRAMS clean-sbinPROGRAMS cscope cscopelist-am \
+ ctags ctags-am dist dist-all dist-bzip2 dist-gzip dist-lzip \
+ dist-shar dist-tarZ dist-xz dist-zip distcheck distclean \
+ distclean-compile distclean-generic distclean-hdr \
+ distclean-tags distcleancheck distdir distuninstallcheck dvi \
+ dvi-am html html-am info info-am install install-am \
+ install-data install-data-am install-dist_cacheDATA \
+ install-dist_logDATA install-dist_registryDATA \
+ install-dist_varlibDATA install-dvi install-dvi-am \
+ install-exec install-exec-am install-html install-html-am \
+ install-info install-info-am install-man install-pdf \
+ install-pdf-am install-pluginsPROGRAMS install-ps \
+ install-ps-am install-sbinPROGRAMS install-strip installcheck \
+ installcheck-am installdirs installdirs-am maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-compile \
+ mostlyclean-generic pdf pdf-am ps ps-am tags tags-am uninstall \
+ uninstall-am uninstall-dist_cacheDATA uninstall-dist_logDATA \
+ uninstall-dist_registryDATA uninstall-dist_varlibDATA \
+ uninstall-pluginsPROGRAMS uninstall-sbinPROGRAMS
+
+.PRECIOUS: Makefile
+
+
+@ENABLE_BACKEND_PROMETHEUS_REMOTE_WRITE_TRUE@backends/prometheus/remote_write/remote_write.pb.cc \
+@ENABLE_BACKEND_PROMETHEUS_REMOTE_WRITE_TRUE@backends/prometheus/remote_write/remote_write.pb.h: backends/prometheus/remote_write/remote_write.proto
+@ENABLE_BACKEND_PROMETHEUS_REMOTE_WRITE_TRUE@ $(PROTOC) --proto_path=$(srcdir) --cpp_out=$(builddir) $^
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
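The conditional rule above regenerates the Prometheus remote-write protobuf sources whenever `remote_write.proto` changes. A minimal sketch of the equivalent manual invocation, assuming `protoc` is installed and run from the top of the source tree:

```sh
protoc --proto_path=. --cpp_out=. \
    backends/prometheus/remote_write/remote_write.proto
```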
diff --git a/README.md b/README.md
index eb62a575..867fcd10 100644
--- a/README.md
+++ b/README.md
@@ -1,14 +1,14 @@
-# Netdata [![Build Status](https://travis-ci.com/netdata/netdata.svg?branch=master)](https://travis-ci.com/netdata/netdata) [![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/2231/badge)](https://bestpractices.coreinfrastructure.org/projects/2231) [![License: GPL v3+](https://img.shields.io/badge/License-GPL%20v3%2B-blue.svg)](https://www.gnu.org/licenses/gpl-3.0) [![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Freadme&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+# Netdata [![Build Status](https://travis-ci.com/netdata/netdata.svg?branch=master)](https://travis-ci.com/netdata/netdata) [![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/2231/badge)](https://bestpractices.coreinfrastructure.org/projects/2231) [![License: GPL v3+](https://img.shields.io/badge/License-GPL%20v3%2B-blue.svg)](https://www.gnu.org/licenses/gpl-3.0) [![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Freadme&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
-[![Code Climate](https://codeclimate.com/github/netdata/netdata/badges/gpa.svg)](https://codeclimate.com/github/netdata/netdata) [![Codacy Badge](https://api.codacy.com/project/badge/Grade/a994873f30d045b9b4b83606c3eb3498)](https://www.codacy.com/app/netdata/netdata?utm_source=github.com&amp;utm_medium=referral&amp;utm_content=netdata/netdata&amp;utm_campaign=Badge_Grade) [![LGTM C](https://img.shields.io/lgtm/grade/cpp/g/netdata/netdata.svg?logo=lgtm)](https://lgtm.com/projects/g/netdata/netdata/context:cpp) [![LGTM JS](https://img.shields.io/lgtm/grade/javascript/g/netdata/netdata.svg?logo=lgtm)](https://lgtm.com/projects/g/netdata/netdata/context:javascript) [![LGTM PYTHON](https://img.shields.io/lgtm/grade/python/g/netdata/netdata.svg?logo=lgtm)](https://lgtm.com/projects/g/netdata/netdata/context:python)
+[![Code Climate](https://codeclimate.com/github/netdata/netdata/badges/gpa.svg)](https://codeclimate.com/github/netdata/netdata) [![Codacy Badge](https://api.codacy.com/project/badge/Grade/a994873f30d045b9b4b83606c3eb3498)](https://www.codacy.com/app/netdata/netdata?utm_source=github.com&utm_medium=referral&utm_content=netdata/netdata&utm_campaign=Badge_Grade) [![LGTM C](https://img.shields.io/lgtm/grade/cpp/g/netdata/netdata.svg?logo=lgtm)](https://lgtm.com/projects/g/netdata/netdata/context:cpp) [![LGTM JS](https://img.shields.io/lgtm/grade/javascript/g/netdata/netdata.svg?logo=lgtm)](https://lgtm.com/projects/g/netdata/netdata/context:javascript) [![LGTM PYTHON](https://img.shields.io/lgtm/grade/python/g/netdata/netdata.svg?logo=lgtm)](https://lgtm.com/projects/g/netdata/netdata/context:python)
---
**Netdata** is **distributed, real-time, performance and health monitoring for systems and applications**. It is a highly optimized monitoring agent you install on all your systems and containers.
-Netdata provides **unparalleled insights**, **in real-time**, of everything happening on the systems it runs (including web servers, databases, applications), using **highly interactive web dashboards**. It can run autonomously, without any third party components, or it can be integrated to existing monitoring tool chains (Prometheus, Graphite, OpenTSDB, Kafka, Grafana, etc).
+Netdata provides **unparalleled insights**, **in real-time**, of everything happening on the systems it runs (including web servers, databases, applications), using **highly interactive web dashboards**. It can run autonomously, without any third party components, or it can be integrated to existing monitoring tool chains (Prometheus, Graphite, OpenTSDB, Kafka, Grafana, etc).
-_Netdata is **fast** and **efficient**, designed to permanently run on all systems (**physical** & **virtual** servers, **containers**, **IoT** devices), without disrupting their core function._
+*Netdata is **fast** and **efficient**, designed to permanently run on all systems (**physical** & **virtual** servers, **containers**, **IoT** devices), without disrupting their core function.*
Netdata is **free, open-source software** and it currently runs on **Linux**, **FreeBSD**, and **MacOS**.
@@ -23,18 +23,17 @@ Once you use it on your systems, **there is no going back**! *You have been warn
[![Tweet about Netdata!](https://img.shields.io/twitter/url/http/shields.io.svg?style=social&label=Tweet%20about%20netdata)](https://twitter.com/intent/tweet?text=Netdata,%20real-time%20performance%20and%20health%20monitoring,%20done%20right!&url=https://my-netdata.io/&via=linuxnetdata&hashtags=netdata,monitoring)
-
## Contents
-1. [How it looks](#how-it-looks) - have a quick look at it
-2. [User base](#user-base) - who uses Netdata?
-3. [Quick Start](#quick-start) - try it now on your systems
-4. [Why Netdata](#why-netdata) - why people love Netdata, how it compares with other solutions
-5. [News](#news) - latest news about Netdata
-6. [How it works](#how-it-works) - high level diagram of how Netdata works
-7. [infographic](#infographic) - everything about Netdata, in a page
-8. [Features](#features) - what features does it have
-9. [Visualization](#visualization) - unique visualization features
+1. [How it looks](#how-it-looks) - have a quick look at it
+2. [User base](#user-base) - who uses Netdata?
+3. [Quick Start](#quick-start) - try it now on your systems
+4. [Why Netdata](#why-netdata) - why people love Netdata, how it compares with other solutions
+5. [News](#news) - latest news about Netdata
+6. [How it works](#how-it-works) - high level diagram of how Netdata works
+7. [infographic](#infographic) - everything about Netdata, in a page
+8. [Features](#features) - what features does it have
+9. [Visualization](#visualization) - unique visualization features
10. [What does it monitor](#what-does-it-monitor) - which metrics it collects
11. [Documentation](#documentation) - read the docs
12. [Community](#community) - discuss with others and get support
@@ -62,11 +61,13 @@ You will find people working for **Amazon**, **Atos**, **Baidu**, **Cisco System
**Vimeo**, and many more!
### Docker pulls
+
We provide Docker images for the most common architectures. These are statistics reported by Docker Hub:
[![netdata/netdata (official)](https://img.shields.io/docker/pulls/netdata/netdata.svg?label=netdata/netdata+%28official%29)](https://hub.docker.com/r/netdata/netdata/) [![firehol/netdata (deprecated)](https://img.shields.io/docker/pulls/firehol/netdata.svg?label=firehol/netdata+%28deprecated%29)](https://hub.docker.com/r/firehol/netdata/) [![titpetric/netdata (donated)](https://img.shields.io/docker/pulls/titpetric/netdata.svg?label=titpetric/netdata+%28third+party%29)](https://hub.docker.com/r/titpetric/netdata/)
### Registry
+
When you install multiple Netdata agents, they are integrated into **one distributed application**, via a [Netdata registry](registry/#registry). This is a web browser feature and it allows us to count the number of unique users and unique Netdata servers installed. The following information comes from the global public Netdata registry we run:
[![User Base](https://registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_entries&dimensions=persons&label=user%20base&units=M&value_color=blue&precision=2&divide=1000000&v43)](https://registry.my-netdata.io/#menu_netdata_submenu_registry) [![Monitored Servers](https://registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_entries&dimensions=machines&label=servers%20monitored&units=k&divide=1000&value_color=orange&precision=2&v43)](https://registry.my-netdata.io/#menu_netdata_submenu_registry) [![Sessions Served](https://registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_sessions&label=sessions%20served&units=M&value_color=yellowgreen&precision=2&divide=1000000&v43)](https://registry.my-netdata.io/#menu_netdata_submenu_registry)
@@ -74,6 +75,7 @@ When you install multiple Netdata, they are integrated into **one distributed ap
*in the last 24 hours:*<br/> [![New Users Today](https://registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_entries&dimensions=persons&after=-86400&options=unaligned&group=incremental-sum&label=new%20users%20today&units=null&value_color=blue&precision=0&v42)](https://registry.my-netdata.io/#menu_netdata_submenu_registry) [![New Machines Today](https://registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_entries&dimensions=machines&group=incremental-sum&after=-86400&options=unaligned&label=servers%20added%20today&units=null&value_color=orange&precision=0&v42)](https://registry.my-netdata.io/#menu_netdata_submenu_registry) [![Sessions Today](https://registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_sessions&after=-86400&group=incremental-sum&options=unaligned&label=sessions%20served%20today&units=null&value_color=yellowgreen&precision=0&v42)](https://registry.my-netdata.io/#menu_netdata_submenu_registry)
## Quick Start
+
![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.requests_per_url&options=unaligned&dimensions=kickstart&group=sum&after=-3600&label=last+hour&units=installations&value_color=orange&precision=0) ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.requests_per_url&options=unaligned&dimensions=kickstart&group=sum&after=-86400&label=today&units=installations&precision=0)
To install Netdata from source on any Linux system (physical, virtual, container, IoT, edge) and keep it up to date with our **nightly releases** automatically, run the following:
@@ -90,14 +92,14 @@ To learn more about the pros and cons of using *nightly* vs. *stable* releases,
The above command will:
-- Install any required packages on your system (it will ask you to confirm before doing so)
-- Compile it, install it, and start it.
+- Install any required packages on your system (it will ask you to confirm before doing so)
+- Compile it, install it, and start it.
More installation methods and additional options can be found at the [installation page](packaging/installer/#installation).
To try Netdata in a Docker container, run this:
-```
+```sh
docker run -d --name=netdata \
-p 19999:19999 \
-v /etc/passwd:/host/etc/passwd:ro \
@@ -122,25 +124,25 @@ Netdata has a quite different approach to monitoring.
Netdata is a monitoring agent you install on all your systems. It is:
-- a **metrics collector** - for system and application metrics (including web servers, databases, containers, etc)
-- a **time-series database** - all stored in memory (does not touch the disks while it runs)
-- a **metrics visualizer** - super fast, interactive, modern, optimized for anomaly detection
-- an **alarms notification engine** - an advanced watchdog for detecting performance and availability issues
+- a **metrics collector** - for system and application metrics (including web servers, databases, containers, etc)
+- a **time-series database** - all stored in memory (does not touch the disks while it runs)
+- a **metrics visualizer** - super fast, interactive, modern, optimized for anomaly detection
+- an **alarms notification engine** - an advanced watchdog for detecting performance and availability issues
All the above are packaged together in a very flexible, extremely modular, distributed application.
This is how Netdata compares to other monitoring solutions:
-Netdata|others (open-source and commercial)
-:---:|:---:
-**High resolution metrics** (1s granularity)|Low resolution metrics (10s granularity at best)
-Monitors everything, **thousands of metrics per node**|Monitor just a few metrics
-UI is super fast, optimized for **anomaly detection**|UI is good for just an abstract view
-**Meaningful presentation**, to help you understand the metrics|You have to know the metrics before you start
-Install and get results **immediately**|Long preparation is required to get any useful results
-Use it for **troubleshooting** performance problems|Use them to get *statistics of past performance*
-**Kills the console** for tracing performance issues|The console is always required for troubleshooting
-Requires **zero dedicated resources**|Require large dedicated resources
+|Netdata|others (open-source and commercial)|
+|:-----:|:---------------------------------:|
+|**High resolution metrics** (1s granularity)|Low resolution metrics (10s granularity at best)|
+|Monitors everything, **thousands of metrics per node**|Monitor just a few metrics|
+|UI is super fast, optimized for **anomaly detection**|UI is good for just an abstract view|
+|**Meaningful presentation**, to help you understand the metrics|You have to know the metrics before you start|
+|Install and get results **immediately**|Long preparation is required to get any useful results|
+|Use it for **troubleshooting** performance problems|Use them to get *statistics of past performance*|
+|**Kills the console** for tracing performance issues|The console is always required for troubleshooting|
+|Requires **zero dedicated resources**|Require large dedicated resources|
Netdata is **open-source**, **free**, super **fast**, very **easy**, completely **open**, extremely **efficient**,
**flexible** and integrate-able.
@@ -154,23 +156,23 @@ not just visualize metrics.
Release v1.16.0 contains 40 bug fixes, 31 improvements and 20 documentation updates.
-**Binary distributions.** To improve the security, speed and reliability of new netdata installations, we are delivering our own, industry standard installation method, with binary package distributions. The RPM binaries for the most common OSs are already available on packagecloud and we’ll have the DEB ones available very soon. All distributions are considered in Beta and, as always, we depend on our amazing community for feedback on improvements.
+**Binary distributions.** To improve the security, speed and reliability of new Netdata installations, we are delivering our own industry-standard installation method, with binary package distributions. The RPM binaries for the most common OSs are already available on packagecloud and we’ll have the DEB ones available very soon. All distributions are considered in Beta and, as always, we depend on our amazing community for feedback on improvements.
- - Our stable distributions are at [netdata/netdata @ packagecloud.io](https://packagecloud.io/netdata/netdata)
- - The nightly builds are at [netdata/netdata-edge @ packagecloud.io](https://packagecloud.io/netdata/netdata-edge)
+- Our stable distributions are at [netdata/netdata @ packagecloud.io](https://packagecloud.io/netdata/netdata)
+- The nightly builds are at [netdata/netdata-edge @ packagecloud.io](https://packagecloud.io/netdata/netdata-edge)
**Netdata now supports TLS encryption!** You can secure the communication to the [web server](https://docs.netdata.cloud/web/server/#enabling-tls-support), the [streaming connections from slaves to the master](https://docs.netdata.cloud/streaming/#securing-the-communication) and the connection to an [openTSDB backend](https://docs.netdata.cloud/backends/opentsdb/#https).
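Enabling TLS for the web server amounts to pointing the agent at a key/certificate pair. A hedged sketch using the `[web]` option names from the web server docs linked above; the paths are placeholders:

```sh
# add the TLS key/certificate options (requires root; restart the agent after)
sudo tee -a /etc/netdata/netdata.conf <<'EOF' >/dev/null
[web]
    ssl key = /etc/netdata/ssl/key.pem
    ssl certificate = /etc/netdata/ssl/cert.pem
EOF
```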
-**This version also brings two long-awaited features to netdata’s health monitoring:**
+**This version also brings two long-awaited features to Netdata’s health monitoring:**
- - The [health management API](https://docs.netdata.cloud/web/api/health/#health-management-api) introduced in v1.12 allowed you to easily disable alarms and/or notifications while netdata was running. However, those changes were not persisted across netdata restarts. Since part of routine maintenance activities may involve completely restarting a monitoring node, netdata now saves these configurations to disk, every time you issue a command to change the silencer settings. The new [LIST command](https://docs.netdata.cloud/web/api/health/#list-silencers) of the API allows you to view at any time which alarms are currently disabled or silenced.
- - A way for netdata to [repeatedly send alarm notifications](https://docs.netdata.cloud/health/#alarm-line-repeat) for some, or all active alarms, at a frequency of your choosing. As a result, you will no longer have to worry about missing a notification, forgetting about a raised alarm. The default is still to only send a single notification, so that existing users are not surprised by a different behavior.
+- The [health management API](https://docs.netdata.cloud/web/api/health/#health-management-api) introduced in v1.12 allowed you to easily disable alarms and/or notifications while Netdata was running. However, those changes were not persisted across Netdata restarts. Since routine maintenance may involve completely restarting a monitoring node, Netdata now saves these configurations to disk every time you issue a command to change the silencer settings. The new [LIST command](https://docs.netdata.cloud/web/api/health/#list-silencers) of the API lets you view at any time which alarms are currently disabled or silenced (see the sketch after this list).
+- A way for Netdata to [repeatedly send alarm notifications](https://docs.netdata.cloud/health/#alarm-line-repeat) for some or all active alarms, at a frequency of your choosing. As a result, you will no longer have to worry about missing a notification or forgetting about a raised alarm. The default is still to send only a single notification, so that existing users are not surprised by a change in behavior.
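The LIST command is an ordinary HTTP call. A minimal sketch, assuming a local agent on the default port and the `Mytoken` placeholder token used in the health management API docs:

```sh
# show which alarms are currently disabled or silenced
curl "http://localhost:19999/api/v1/manage/health?cmd=LIST" \
     -H "X-Auth-Token: Mytoken"
```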
As always, we’ve introduced new collectors, 5 of them this time:
- - Of special interest to people with Windows servers in their infrastructure is the [WMI collector](https://docs.netdata.cloud/collectors/go.d.plugin/modules/wmi/), though we are fully aware that we need to continue our efforts to do a proper port to Windows.
- - The new `perf` plugin collects system-wide CPU performance statistics from Performance Monitoring Units (PMU) using the `perf_event_open()` system call. You can read a wonderful article on why this is useful [here](http://www.brendangregg.com/blog/2017-05-09/cpu-utilization-is-wrong.html).
- - The other three are collectors to monitor [Dnsmasq DHCP leases](https://docs.netdata.cloud/collectors/go.d.plugin/modules/dnsmasq_dhcp/), [Riak KV servers](https://docs.netdata.cloud/collectors/python.d.plugin/riakkv/) and [Pihole instances](https://docs.netdata.cloud/collectors/go.d.plugin/modules/pihole/).
+- Of special interest to people with Windows servers in their infrastructure is the [WMI collector](https://docs.netdata.cloud/collectors/go.d.plugin/modules/wmi/), though we are fully aware that we need to continue our efforts to do a proper port to Windows.
+- The new `perf` plugin collects system-wide CPU performance statistics from Performance Monitoring Units (PMUs) using the `perf_event_open()` system call; a sketch for enabling it follows this list. You can read a wonderful article on why this is useful [here](http://www.brendangregg.com/blog/2017-05-09/cpu-utilization-is-wrong.html).
+- The other three are collectors to monitor [Dnsmasq DHCP leases](https://docs.netdata.cloud/collectors/go.d.plugin/modules/dnsmasq_dhcp/), [Riak KV servers](https://docs.netdata.cloud/collectors/python.d.plugin/riakkv/) and [Pihole instances](https://docs.netdata.cloud/collectors/go.d.plugin/modules/pihole/).
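Enabling the new `perf` plugin is a one-line configuration change. A hedged sketch; the `[plugins]` toggle is the usual mechanism for external plugins, but verify the option name against your `netdata.conf` before relying on it:

```sh
# enable perf.plugin (requires root; the service name may differ per distro)
sudo tee -a /etc/netdata/netdata.conf <<'EOF' >/dev/null
[plugins]
    perf = yes
EOF
sudo systemctl restart netdata
```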
Finally, the DB Engine introduced in v1.15.0 now uses much less memory and is more robust than before.
@@ -182,15 +184,15 @@ Release v1.15.0 contains 11 bug fixes and 30 improvements.
We are very happy and proud to be able to include two major improvements in this release: The aggregated node view and the [new database engine](https://docs.netdata.cloud/database/engine/).
-*Aggregated node view*
+_Aggregated node view_
The No. 1 request from our community has been a better way to view and manage their Netdata installations, via an aggregated view. The node menu with the simple list of hosts on the agent UI just didn't do it for people with hundreds or thousands of instances. This release introduces the node view, which uses the power of [Netdata Cloud](https://blog.netdata.cloud/posts/netdata-cloud-announcement/) to deliver powerful views of a Netdata-based monitoring infrastructure. You can read more about Netdata Cloud and the future of Netdata [here](https://blog.netdata.cloud/posts/netdata-cloud-announcement/).
-*New database engine*
+_New database engine_
Historically, Netdata has required a lot of memory for long-term metrics storage. To mitigate this we've been building a new DB engine for several months and will continue improving until it can become the default `memory mode` for new Netdata installations. The version included in release v1.15.0 already permits longer-term storage of compressed data and we'll continue reducing the required memory in following releases.
-*Other major additions*
+_Other major additions_
We have added support for the [AWS Kinesis backend](https://docs.netdata.cloud/backends/aws_kinesis/) and new collectors for [OpenVPN](https://docs.netdata.cloud/collectors/go.d.plugin/modules/openvpn/), the [Tengine web server](https://docs.netdata.cloud/collectors/go.d.plugin/modules/tengine/), [ScaleIO (VxFlex OS)](https://docs.netdata.cloud/collectors/go.d.plugin/modules/scaleio/), [ioping-like latency metrics](https://docs.netdata.cloud/collectors/ioping.plugin/) and [Energi Core node instances](https://docs.netdata.cloud/collectors/python.d.plugin/energid/).
@@ -207,7 +209,7 @@ Finally, we built a process to quickly replace any problematic nightly builds an
Release 1.14 contains 14 bug fixes and 24 improvements.
The release introduces major additions to Kubernetes monitoring, with tens of new charts for [Kubelet](https://docs.netdata.cloud/collectors/go.d.plugin/modules/k8s_kubelet/), [kube-proxy](https://docs.netdata.cloud/collectors/go.d.plugin/modules/k8s_kubeproxy/) and [coredns](https://github.com/netdata/go.d.plugin/tree/master/modules/coredns) metrics, as well as significant improvements to the Netdata [helm chart](https://github.com/netdata/helmchart/).
-
+
Two new collectors were added, to monitor [Docker hub](https://docs.netdata.cloud/collectors/go.d.plugin/modules/dockerhub/) and [Docker engine](https://docs.netdata.cloud/collectors/go.d.plugin/modules/docker_engine/) metrics.
Finally, v1.14 adds support for [version 2 cgroups](https://github.com/netdata/netdata/pull/5407), [OpenLDAP over TLS](https://github.com/netdata/netdata/pull/5859), [NVIDIA SMI free and per process memory](https://github.com/netdata/netdata/pull/5796/files) and [configurable syslog facilities](https://github.com/netdata/netdata/pull/5792).
@@ -248,23 +250,23 @@ Patch release 1.12.1 contains 22 bug fixes and 8 improvements.
Release 1.12 is made out of 211 pull requests and 22 bug fixes.
The key improvements are:
-- Introducing `netdata.cloud`, the free Netdata service for all Netdata users
-- High performance plugins with go.d.plugin (data collection orchestrator written in Go)
-- 7 new data collectors and 11 rewrites of existing data collectors for improved performance
-- A new management API for all Netdata servers
-- Bind different functions of the Netdata APIs to different ports
-- Improved installation and updates
+- Introducing `netdata.cloud`, the free Netdata service for all Netdata users
+- High performance plugins with go.d.plugin (data collection orchestrator written in Go)
+- 7 new data collectors and 11 rewrites of existing data collectors for improved performance
+- A new management API for all Netdata servers
+- Bind different functions of the Netdata APIs to different ports
+- Improved installation and updates
---
`Nov 22nd, 2018` - **[Netdata v1.11.1 released!](https://github.com/netdata/netdata/releases)**
-- Improved internal database to support values above 64bit.
-- New data collection plugins: [`openldap`](collectors/python.d.plugin/openldap/), [`tor`](collectors/python.d.plugin/tor/), [`nvidia_smi`](collectors/python.d.plugin/nvidia_smi/).
-- Improved data collection plugins: Netdata now supports monitoring network interface aliases, [`smartd_log`](collectors/python.d.plugin/smartd_log/), [`cpufreq`](collectors/proc.plugin/README.md#cpu-frequency), [`sensors`](collectors/python.d.plugin/sensors/).
-- Health monitoring improvements: network interface congestion alarm restored, [`alerta.io`](health/notifications/alerta/), `conntrack_max`.
-- `my-netdata`menu has been refactored.
-- Packaging: `openrc` service definition got a few improvements.
+- Improved internal database to support values above 64bit.
+- New data collection plugins: [`openldap`](collectors/python.d.plugin/openldap/), [`tor`](collectors/python.d.plugin/tor/), [`nvidia_smi`](collectors/python.d.plugin/nvidia_smi/).
+- Improved data collection plugins: Netdata now supports monitoring network interface aliases, [`smartd_log`](collectors/python.d.plugin/smartd_log/), [`cpufreq`](collectors/proc.plugin/README.md#cpu-frequency), [`sensors`](collectors/python.d.plugin/sensors/).
+- Health monitoring improvements: network interface congestion alarm restored, [`alerta.io`](health/notifications/alerta/), `conntrack_max`.
+- `my-netdata` menu has been refactored.
+- Packaging: `openrc` service definition got a few improvements.
---
@@ -282,14 +284,14 @@ Netdata is a highly efficient, highly modular, metrics management engine. Its lo
This is how it works:
-Function|Description|Documentation
-:---:|:---|:---:
-**Collect**|Multiple independent data collection workers are collecting metrics from their sources using the optimal protocol for each application and push the metrics to the database. Each data collection worker has lockless write access to the metrics it collects.|[`collectors`](collectors/#data-collection-plugins)
-**Store**|Metrics are stored in RAM in a round robin database (ring buffer), using a custom made floating point number for minimal footprint.|[`database`](database/#database)
-**Check**|A lockless independent watchdog is evaluating **health checks** on the collected metrics, triggers alarms, maintains a health transaction log and dispatches alarm notifications.|[`health`](health/#health-monitoring)
-**Stream**|An lockless independent worker is streaming metrics, in full detail and in real-time, to remote Netdata servers, as soon as they are collected.|[`streaming`](streaming/#streaming-and-replication)
-**Archive**|A lockless independent worker is down-sampling the metrics and pushes them to **backend** time-series databases.|[`backends`](backends/)
-**Query**|Multiple independent workers are attached to the [internal web server](web/server/#web-server), servicing API requests, including [data queries](web/api/queries/#database-queries).|[`web/api`](web/api/#api)
+|Function|Description|Documentation|
+|:------:|:----------|:-----------:|
+|**Collect**|Multiple independent data collection workers are collecting metrics from their sources using the optimal protocol for each application and push the metrics to the database. Each data collection worker has lockless write access to the metrics it collects.|[`collectors`](collectors/#data-collection-plugins)|
+|**Store**|Metrics are stored in RAM in a round robin database (ring buffer), using a custom made floating point number for minimal footprint.|[`database`](database/#database)|
+|**Check**|A lockless independent watchdog is evaluating **health checks** on the collected metrics, triggers alarms, maintains a health transaction log and dispatches alarm notifications.|[`health`](health/#health-monitoring)|
+|**Stream**|A lockless independent worker is streaming metrics, in full detail and in real-time, to remote Netdata servers, as soon as they are collected.|[`streaming`](streaming/#streaming-and-replication)|
+|**Archive**|A lockless independent worker is down-sampling the metrics and pushes them to **backend** time-series databases.|[`backends`](backends/)|
+|**Query**|Multiple independent workers are attached to the [internal web server](web/server/#web-server), servicing API requests, including [data queries](web/api/queries/#database-queries).|[`web/api`](web/api/#api)|
The result is a highly efficient, low latency system, supporting multiple readers and one writer on each metric.
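The **Query** row above is easy to exercise directly. A minimal sketch, assuming a local agent and the stock `system.cpu` chart:

```sh
# fetch the last 60 seconds of system.cpu as JSON from the internal web server
curl "http://localhost:19999/api/v1/data?chart=system.cpu&after=-60&format=json"
```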
@@ -300,7 +302,6 @@ Click it to interact with it (it has direct links to documentation).
[![image](https://user-images.githubusercontent.com/43294513/60951037-8ba5d180-a2f8-11e9-906e-e27356f168bc.png)](https://my-netdata.io/infographic.html)
-
## Features
![finger-video](https://user-images.githubusercontent.com/2662304/48346998-96cf3180-e685-11e8-9f4e-059d23aa3aa5.gif)
@@ -308,31 +309,34 @@ Click it to interact with it (it has direct links to documentation).
This is what you should expect from Netdata:
### General
-- **1s granularity** - the highest possible resolution for all metrics.
-- **Unlimited metrics** - collects all the available metrics, the more the better.
-- **1% CPU utilization of a single core** - it is super fast, unbelievably optimized.
-- **A few MB of RAM** - by default it uses 25MB RAM. [You size it](database).
-- **Zero disk I/O** - while it runs, it does not load or save anything (except `error` and `access` logs).
-- **Zero configuration** - auto-detects everything, it can collect up to 10000 metrics per server out of the box.
-- **Zero maintenance** - You just run it, it does the rest.
-- **Zero dependencies** - it is even its own web server, for its static web files and its web API (though its plugins may require additional libraries, depending on the applications monitored).
-- **Scales to infinity** - you can install it on all your servers, containers, VMs and IoTs. Metrics are not centralized by default, so there is no limit.
-- **Several operating modes** - Autonomous host monitoring (the default), headless data collector, forwarding proxy, store and forward proxy, central multi-host monitoring, in all possible configurations. Each node may have different metrics retention policy and run with or without health monitoring.
+
+- **1s granularity** - the highest possible resolution for all metrics.
+- **Unlimited metrics** - collects all the available metrics, the more the better.
+- **1% CPU utilization of a single core** - it is super fast, unbelievably optimized.
+- **A few MB of RAM** - by default it uses 25MB RAM. [You size it](database).
+- **Zero disk I/O** - while it runs, it does not load or save anything (except `error` and `access` logs).
+- **Zero configuration** - auto-detects everything, it can collect up to 10000 metrics per server out of the box.
+- **Zero maintenance** - You just run it, it does the rest.
+- **Zero dependencies** - it is even its own web server, for its static web files and its web API (though its plugins may require additional libraries, depending on the applications monitored).
+- **Scales to infinity** - you can install it on all your servers, containers, VMs and IoT devices. Metrics are not centralized by default, so there is no limit.
+- **Several operating modes** - Autonomous host monitoring (the default), headless data collector, forwarding proxy, store and forward proxy, central multi-host monitoring, in all possible configurations. Each node may have a different metrics retention policy and run with or without health monitoring.
### Health Monitoring & Alarms
-- **Sophisticated alerting** - comes with hundreds of alarms, **out of the box**! Supports dynamic thresholds, hysteresis, alarm templates, multiple role-based notification methods.
-- **Notifications**: [alerta.io](health/notifications/alerta/), [amazon sns](health/notifications/awssns/), [discordapp.com](health/notifications/discord/), [email](health/notifications/email/), [flock.com](health/notifications/flock/), [irc](health/notifications/irc/), [kavenegar.com](health/notifications/kavenegar/), [messagebird.com](health/notifications/messagebird/), [pagerduty.com](health/notifications/pagerduty/), [prowl](health/notifications/prowl/), [pushbullet.com](health/notifications/pushbullet/), [pushover.net](health/notifications/pushover/), [rocket.chat](health/notifications/rocketchat/), [slack.com](health/notifications/slack/), [smstools3](health/notifications/smstools3/), [syslog](health/notifications/syslog/), [telegram.org](health/notifications/telegram/), [twilio.com](health/notifications/twilio/), [web](health/notifications/web/) and [custom notifications](health/notifications/custom/).
+
+- **Sophisticated alerting** - comes with hundreds of alarms, **out of the box**! Supports dynamic thresholds, hysteresis, alarm templates, multiple role-based notification methods.
+- **Notifications**: [alerta.io](health/notifications/alerta/), [amazon sns](health/notifications/awssns/), [discordapp.com](health/notifications/discord/), [email](health/notifications/email/), [flock.com](health/notifications/flock/), [irc](health/notifications/irc/), [kavenegar.com](health/notifications/kavenegar/), [messagebird.com](health/notifications/messagebird/), [pagerduty.com](health/notifications/pagerduty/), [prowl](health/notifications/prowl/), [pushbullet.com](health/notifications/pushbullet/), [pushover.net](health/notifications/pushover/), [rocket.chat](health/notifications/rocketchat/), [slack.com](health/notifications/slack/), [smstools3](health/notifications/smstools3/), [syslog](health/notifications/syslog/), [telegram.org](health/notifications/telegram/), [twilio.com](health/notifications/twilio/), [web](health/notifications/web/) and [custom notifications](health/notifications/custom/).
### Integrations
-- **time-series dbs** - can archive its metrics to **Graphite**, **OpenTSDB**, **Prometheus**, **AWS Kinesis**, **JSON document DBs**, in the same or lower resolution (lower: to prevent it from congesting these servers due to the amount of data collected). Netdata also supports **Prometheus remote write API** which allows storing metrics to **Elasticsearch**, **Gnocchi**, **InfluxDB**, **Kafka**, **PostgreSQL/TimescaleDB**, **Splunk**, **VictoriaMetrics** and a lot of other [storage providers](https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage).
+
+- **time-series dbs** - can archive its metrics to **Graphite**, **OpenTSDB**, **Prometheus**, **AWS Kinesis**, **MongoDB**, **JSON document DBs**, in the same or lower resolution (lower: to prevent it from congesting these servers due to the amount of data collected). Netdata also supports **Prometheus remote write API** which allows storing metrics to **Elasticsearch**, **Gnocchi**, **InfluxDB**, **Kafka**, **PostgreSQL/TimescaleDB**, **Splunk**, **VictoriaMetrics** and a lot of other [storage providers](https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage).
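Archiving is configured in one place. A hedged sketch for Graphite, using the `[backend]` option names documented in `backends/README.md`; the destination host is an example:

```sh
# send metrics to a Graphite server, down-sampled to 10-second resolution
sudo tee -a /etc/netdata/netdata.conf <<'EOF' >/dev/null
[backend]
    enabled = yes
    type = graphite
    destination = graphite.example.com:2003
    update every = 10
EOF
```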
## Visualization
-- **Stunning interactive dashboards** - mouse, touchpad and touch-screen friendly in 2 themes: `slate` (dark) and `white`.
-- **Amazingly fast visualization** - responds to all queries in less than 1 ms per metric, even on low-end hardware.
-- **Visual anomaly detection** - the dashboards are optimized for detecting anomalies visually.
-- **Embeddable** - its charts can be embedded on your web pages, wikis and blogs. You can even use [Atlassian's Confluence as a monitoring dashboard](web/gui/confluence/).
-- **Customizable** - custom dashboards can be built using simple HTML (no javascript necessary).
+- **Stunning interactive dashboards** - mouse, touchpad and touch-screen friendly in 2 themes: `slate` (dark) and `white`.
+- **Amazingly fast visualization** - responds to all queries in less than 1 ms per metric, even on low-end hardware.
+- **Visual anomaly detection** - the dashboards are optimized for detecting anomalies visually.
+- **Embeddable** - its charts can be embedded on your web pages, wikis and blogs. You can even use [Atlassian's Confluence as a monitoring dashboard](web/gui/confluence/).
+- **Customizable** - custom dashboards can be built using simple HTML (no javascript necessary); see the sketch after this list.
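A custom dashboard is just an HTML page that loads `dashboard.js` from any agent. A minimal sketch, assuming an agent at `localhost:19999` and the stock `system.cpu` chart:

```sh
# write a one-chart dashboard page; open my-dashboard.html in a browser
cat > my-dashboard.html <<'EOF'
<!DOCTYPE html>
<html>
  <head>
    <script type="text/javascript" src="http://localhost:19999/dashboard.js"></script>
  </head>
  <body>
    <div data-netdata="system.cpu" data-height="200px"></div>
  </body>
</html>
EOF
```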
### Positive and negative values
@@ -340,7 +344,7 @@ To improve clarity on charts, Netdata dashboards present **positive** values for
![positive-and-negative-values](https://user-images.githubusercontent.com/2662304/48309090-7c5c6180-e57a-11e8-8e03-3a7538c14223.gif)
-*Netdata charts showing the bandwidth and packets of a network interface. `received` is positive and `sent` is negative.*
+_Netdata charts showing the bandwidth and packets of a network interface. `received` is positive and `sent` is negative._
### Autoscaled y-axis
@@ -348,7 +352,7 @@ Netdata charts automatically zoom vertically, to visualize the variation of each
![non-zero-based](https://user-images.githubusercontent.com/2662304/48309139-3d2f1000-e57c-11e8-9a44-b91758134b00.gif)
-*A zero based `stacked` chart, automatically switches to an auto-scaled `area` chart when a single dimension is selected.*
+_A zero based `stacked` chart, automatically switches to an auto-scaled `area` chart when a single dimension is selected._
### Charts are synchronized
@@ -356,7 +360,7 @@ Charts on Netdata dashboards are synchronized to each other. There is no master
![charts-are-synchronized](https://user-images.githubusercontent.com/2662304/48309003-b4fb3b80-e578-11e8-86f6-f505c7059c15.gif)
-*Charts are panned by dragging them with the mouse. Charts can be zoomed in/out with`SHIFT` + `mouse wheel` while the mouse pointer is over a chart.*
+_Charts are panned by dragging them with the mouse. Charts can be zoomed in/out with `SHIFT` + `mouse wheel` while the mouse pointer is over a chart._
> The visible time-frame (pan and zoom) is propagated from Netdata server to Netdata server, when navigating via the [node menu](registry#registry).
@@ -366,196 +370,225 @@ To improve visual anomaly detection across charts, the user can highlight a time
![highlighted-timeframe](https://user-images.githubusercontent.com/2662304/48311876-f9093300-e5ae-11e8-9c74-e3e291741990.gif)
-*A highlighted time-frame can be given by pressing `ALT` + `mouse selection` on any chart. Netdata will highlight the same range on all charts.*
+_A highlighted time-frame can be given by pressing `ALT` + `mouse selection` on any chart. Netdata will highlight the same range on all charts._
> Highlighted ranges are propagated from Netdata server to Netdata server, when navigating via the [node menu](registry#registry).
-
## What does it monitor
Netdata data collection is **extensible** - you can monitor anything you can get a metric for.
Its [Plugin API](collectors/plugins.d/) supports all programming languages (anything can be a Netdata plugin: BASH, python, perl, node.js, java, Go, ruby, etc).
-- For better performance, most system related plugins (cpu, memory, disks, filesystems, networking, etc) have been written in `C`.
-- For faster development and easier contributions, most application related plugins (databases, web servers, etc) have been written in `python`.
+- For better performance, most system related plugins (cpu, memory, disks, filesystems, networking, etc) have been written in `C`.
+- For faster development and easier contributions, most application related plugins (databases, web servers, etc) have been written in `python`.
#### APM (Application Performance Monitoring)
-- **[statsd](collectors/statsd.plugin/)** - Netdata is a fully featured statsd server.
-- **[Go expvar](collectors/python.d.plugin/go_expvar/)** - collects metrics exposed by applications written in the Go programming language using the expvar package.
-- **[Spring Boot](collectors/python.d.plugin/springboot/)** - monitors running Java Spring Boot applications that expose their metrics with the use of the Spring Boot Actuator included in Spring Boot library.
-- **[uWSGI](collectors/python.d.plugin/uwsgi/)** - collects performance metrics from uWSGI applications.
+
+- **[statsd](collectors/statsd.plugin/)** - Netdata is a fully featured statsd server.
+- **[Go expvar](collectors/python.d.plugin/go_expvar/)** - collects metrics exposed by applications written in the Go programming language using the expvar package.
+- **[Spring Boot](collectors/python.d.plugin/springboot/)** - monitors running Java Spring Boot applications that expose their metrics with the use of the Spring Boot Actuator included in Spring Boot library.
+- **[uWSGI](collectors/python.d.plugin/uwsgi/)** - collects performance metrics from uWSGI applications.
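For example, the statsd collector above accepts the standard `name:value|type` wire format, so (assuming the default UDP port 8125 and a local agent) a shell one-liner can feed it a counter:

```sh
# Increment a hypothetical counter on the local statsd listener (UDP 8125 by default).
echo "myapp.requests:1|c" | nc -u -w 1 localhost 8125
```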
#### System Resources
-- **[CPU Utilization](collectors/proc.plugin/)** - total and per core CPU usage.
-- **[Interrupts](collectors/proc.plugin/)** - total and per core CPU interrupts.
-- **[SoftIRQs](collectors/proc.plugin/)** - total and per core SoftIRQs.
-- **[SoftNet](collectors/proc.plugin/)** - total and per core SoftIRQs related to network activity.
-- **[CPU Throttling](collectors/proc.plugin/)** - collects per core CPU throttling.
-- **[CPU Frequency](collectors/proc.plugin/)** - collects the current CPU frequency.
-- **[CPU Idle](collectors/proc.plugin/)** - collects the time spent per processor state.
-- **[IdleJitter](collectors/idlejitter.plugin/)** - measures CPU latency.
-- **[Entropy](collectors/proc.plugin/)** - random numbers pool, using in cryptography.
-- **[Interprocess Communication - IPC](collectors/proc.plugin/)** - such as semaphores and semaphores arrays.
+
+- **[CPU Utilization](collectors/proc.plugin/)** - total and per core CPU usage.
+- **[Interrupts](collectors/proc.plugin/)** - total and per core CPU interrupts.
+- **[SoftIRQs](collectors/proc.plugin/)** - total and per core SoftIRQs.
+- **[SoftNet](collectors/proc.plugin/)** - total and per core SoftIRQs related to network activity.
+- **[CPU Throttling](collectors/proc.plugin/)** - collects per core CPU throttling.
+- **[CPU Frequency](collectors/proc.plugin/)** - collects the current CPU frequency.
+- **[CPU Idle](collectors/proc.plugin/)** - collects the time spent per processor state.
+- **[IdleJitter](collectors/idlejitter.plugin/)** - measures CPU latency.
+- **[Entropy](collectors/proc.plugin/)** - the random numbers pool, used in cryptography.
+- **[Interprocess Communication - IPC](collectors/proc.plugin/)** - such as semaphores and semaphores arrays.
#### Memory
-- **[ram](collectors/proc.plugin/)** - collects info about RAM usage.
-- **[swap](collectors/proc.plugin/)** - collects info about swap memory usage.
-- **[available memory](collectors/proc.plugin/)** - collects the amount of RAM available for userspace processes.
-- **[committed memory](collectors/proc.plugin/)** - collects the amount of RAM committed to userspace processes.
-- **[Page Faults](collectors/proc.plugin/)** - collects the system page faults (major and minor).
-- **[writeback memory](collectors/proc.plugin/)** - collects the system dirty memory and writeback activity.
-- **[huge pages](collectors/proc.plugin/)** - collects the amount of RAM used for huge pages.
-- **[KSM](collectors/proc.plugin/)** - collects info about Kernel Same Merging (memory dedupper).
-- **[Numa](collectors/proc.plugin/)** - collects Numa info on systems that support it.
-- **[slab](collectors/proc.plugin/)** - collects info about the Linux kernel memory usage.
+
+- **[ram](collectors/proc.plugin/)** - collects info about RAM usage.
+- **[swap](collectors/proc.plugin/)** - collects info about swap memory usage.
+- **[available memory](collectors/proc.plugin/)** - collects the amount of RAM available for userspace processes.
+- **[committed memory](collectors/proc.plugin/)** - collects the amount of RAM committed to userspace processes.
+- **[Page Faults](collectors/proc.plugin/)** - collects the system page faults (major and minor).
+- **[writeback memory](collectors/proc.plugin/)** - collects the system dirty memory and writeback activity.
+- **[huge pages](collectors/proc.plugin/)** - collects the amount of RAM used for huge pages.
+- **[KSM](collectors/proc.plugin/)** - collects info about Kernel Samepage Merging (the kernel memory deduplicator).
+- **[Numa](collectors/proc.plugin/)** - collects Numa info on systems that support it.
+- **[slab](collectors/proc.plugin/)** - collects info about the Linux kernel memory usage.
#### Disks
-- **[block devices](collectors/proc.plugin/)** - per disk: I/O, operations, backlog, utilization, space, etc.
-- **[BCACHE](collectors/proc.plugin/)** - detailed performance of SSD caching devices.
-- **[DiskSpace](collectors/proc.plugin/)** - monitors disk space usage.
-- **[mdstat](collectors/proc.plugin/)** - software RAID.
-- **[hddtemp](collectors/python.d.plugin/hddtemp/)** - disk temperatures.
-- **[smartd](collectors/python.d.plugin/smartd_log/)** - disk S.M.A.R.T. values.
-- **[device mapper](collectors/proc.plugin/)** - naming disks.
-- **[Veritas Volume Manager](collectors/proc.plugin/)** - naming disks.
-- **[megacli](collectors/python.d.plugin/megacli/)** - adapter, physical drives and battery stats.
-- **[adaptec_raid](collectors/python.d.plugin/adaptec_raid/)** - logical and physical devices health metrics.
-- **[ioping](collectors/ioping.plugin/)** - to measure disk read/write latency.
+
+- **[block devices](collectors/proc.plugin/)** - per disk: I/O, operations, backlog, utilization, space, etc.
+- **[BCACHE](collectors/proc.plugin/)** - detailed performance of SSD caching devices.
+- **[DiskSpace](collectors/proc.plugin/)** - monitors disk space usage.
+- **[mdstat](collectors/proc.plugin/)** - software RAID.
+- **[hddtemp](collectors/python.d.plugin/hddtemp/)** - disk temperatures.
+- **[smartd](collectors/python.d.plugin/smartd_log/)** - disk S.M.A.R.T. values.
+- **[device mapper](collectors/proc.plugin/)** - naming disks.
+- **[Veritas Volume Manager](collectors/proc.plugin/)** - naming disks.
+- **[megacli](collectors/python.d.plugin/megacli/)** - adapter, physical drives and battery stats.
+- **[adaptec_raid](collectors/python.d.plugin/adaptec_raid/)** - logical and physical devices health metrics.
+- **[ioping](collectors/ioping.plugin/)** - to measure disk read/write latency.
#### Filesystems
-- **[BTRFS](collectors/proc.plugin/)** - detailed disk space allocation and usage.
-- **[Ceph](collectors/python.d.plugin/ceph/)** - OSD usage, Pool usage, number of objects, etc.
-- **[NFS file servers and clients](collectors/proc.plugin/)** - NFS v2, v3, v4: I/O, cache, read ahead, RPC calls
-- **[Samba](collectors/python.d.plugin/samba/)** - performance metrics of Samba SMB2 file sharing.
-- **[ZFS](collectors/proc.plugin/)** - detailed performance and resource usage.
+
+- **[BTRFS](collectors/proc.plugin/)** - detailed disk space allocation and usage.
+- **[Ceph](collectors/python.d.plugin/ceph/)** - OSD usage, Pool usage, number of objects, etc.
+- **[NFS file servers and clients](collectors/proc.plugin/)** - NFS v2, v3, v4: I/O, cache, read ahead, RPC calls.
+- **[Samba](collectors/python.d.plugin/samba/)** - performance metrics of Samba SMB2 file sharing.
+- **[ZFS](collectors/proc.plugin/)** - detailed performance and resource usage.
#### Networking
-- **[Network Stack](collectors/proc.plugin/)** - everything about the networking stack (both IPv4 and IPv6 for all protocols: TCP, UDP, SCTP, UDPLite, ICMP, Multicast, Broadcast, etc), and all network interfaces (per interface: bandwidth, packets, errors, drops).
-- **[Netfilter](collectors/proc.plugin/)** - everything about the netfilter connection tracker.
-- **[SynProxy](collectors/proc.plugin/)** - collects performance data about the linux SYNPROXY (DDoS).
-- **[NFacct](collectors/nfacct.plugin/)** - collects accounting data from iptables.
-- **[Network QoS](collectors/tc.plugin/)** - the only tool that visualizes network `tc` classes in real-time
-- **[FPing](collectors/fping.plugin/)** - to measure latency and packet loss between any number of hosts.
-- **[ISC dhcpd](collectors/python.d.plugin/isc_dhcpd/)** - pools utilization, leases, etc.
-- **[AP](collectors/charts.d.plugin/ap/)** - collects Linux access point performance data (`hostapd`).
-- **[SNMP](collectors/node.d.plugin/snmp/)** - SNMP devices can be monitored too (although you will need to configure these).
-- **[port_check](collectors/python.d.plugin/portcheck/)** - checks TCP ports for availability and response time.
+
+- **[Network Stack](collectors/proc.plugin/)** - everything about the networking stack (both IPv4 and IPv6 for all protocols: TCP, UDP, SCTP, UDPLite, ICMP, Multicast, Broadcast, etc), and all network interfaces (per interface: bandwidth, packets, errors, drops).
+- **[Netfilter](collectors/proc.plugin/)** - everything about the netfilter connection tracker.
+- **[SynProxy](collectors/proc.plugin/)** - collects performance data about the linux SYNPROXY (DDoS).
+- **[NFacct](collectors/nfacct.plugin/)** - collects accounting data from iptables.
+- **[Network QoS](collectors/tc.plugin/)** - the only tool that visualizes network `tc` classes in real-time.
+- **[FPing](collectors/fping.plugin/)** - measures latency and packet loss between any number of hosts (a sample configuration follows this list).
+- **[ISC dhcpd](collectors/python.d.plugin/isc_dhcpd/)** - pools utilization, leases, etc.
+- **[AP](collectors/charts.d.plugin/ap/)** - collects Linux access point performance data (`hostapd`).
+- **[SNMP](collectors/node.d.plugin/snmp/)** - SNMP devices can be monitored too (although you will need to configure these).
+- **[port_check](collectors/python.d.plugin/portcheck/)** - checks TCP ports for availability and response time.
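As referenced in the FPing entry above, that plugin is driven by a small shell-style configuration file. A hedged sketch, with the file path and host names being illustrative placeholders:

```sh
# /etc/netdata/fping.conf (illustrative sketch - adjust for your installation)
hosts="192.0.2.1 gateway.example.com"  # space-separated targets to ping
update_every=1                         # seconds between chart updates
```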
#### Virtual Private Networks
-- **[OpenVPN](collectors/python.d.plugin/ovpn_status_log/)** - collects status per tunnel.
-- **[LibreSwan](collectors/charts.d.plugin/libreswan/)** - collects metrics per IPSEC tunnel.
-- **[Tor](collectors/python.d.plugin/tor/)** - collects Tor traffic statistics.
+
+- **[OpenVPN](collectors/python.d.plugin/ovpn_status_log/)** - collects status per tunnel.
+- **[LibreSwan](collectors/charts.d.plugin/libreswan/)** - collects metrics per IPSEC tunnel.
+- **[Tor](collectors/python.d.plugin/tor/)** - collects Tor traffic statistics.
#### Processes
-- **[System Processes](collectors/proc.plugin/)** - running, blocked, forks, active.
-- **[Applications](collectors/apps.plugin/)** - by grouping the process tree and reporting CPU, memory, disk reads, disk writes, swap, threads, pipes, sockets - per process group.
-- **[systemd](collectors/cgroups.plugin/)** - monitors systemd services using CGROUPS.
+
+- **[System Processes](collectors/proc.plugin/)** - running, blocked, forks, active.
+- **[Applications](collectors/apps.plugin/)** - by grouping the process tree and reporting CPU, memory, disk reads, disk writes, swap, threads, pipes, sockets - per process group.
+- **[systemd](collectors/cgroups.plugin/)** - monitors systemd services using CGROUPS.
#### Users
-- **[Users and User Groups resource usage](collectors/apps.plugin/)** - by summarizing the process tree per user and group, reporting: CPU, memory, disk reads, disk writes, swap, threads, pipes, sockets
-- **[logind](collectors/python.d.plugin/logind/)** - collects sessions, users and seats connected.
+
+- **[Users and User Groups resource usage](collectors/apps.plugin/)** - by summarizing the process tree per user and group, reporting: CPU, memory, disk reads, disk writes, swap, threads, pipes, sockets.
+- **[logind](collectors/python.d.plugin/logind/)** - collects sessions, users and seats connected.
#### Containers and VMs
-- **[Containers](collectors/cgroups.plugin/)** - collects resource usage for all kinds of containers, using CGROUPS (systemd-nspawn, lxc, lxd, docker, kubernetes, etc).
-- **[libvirt VMs](collectors/cgroups.plugin/)** - collects resource usage for all kinds of VMs, using CGROUPS.
-- **[dockerd](collectors/python.d.plugin/dockerd/)** - collects docker health metrics.
+
+- **[Containers](collectors/cgroups.plugin/)** - collects resource usage for all kinds of containers, using CGROUPS (systemd-nspawn, lxc, lxd, docker, kubernetes, etc).
+- **[libvirt VMs](collectors/cgroups.plugin/)** - collects resource usage for all kinds of VMs, using CGROUPS.
+- **[dockerd](collectors/python.d.plugin/dockerd/)** - collects docker health metrics.
#### Web Servers
-- **[Apache and lighttpd](collectors/python.d.plugin/apache/)** - `mod-status` (v2.2, v2.4) and cache log statistics, for multiple servers.
-- **[IPFS](collectors/python.d.plugin/ipfs/)** - bandwidth, peers.
-- **[LiteSpeed](collectors/python.d.plugin/litespeed/)** - reads the litespeed rtreport files to collect metrics.
-- **[Nginx](collectors/python.d.plugin/nginx/)** - `stub-status`, for multiple servers.
-- **[Nginx+](collectors/python.d.plugin/nginx_plus/)** - connects to multiple nginx_plus servers (local or remote) to collect real-time performance metrics.
-- **[PHP-FPM](collectors/python.d.plugin/phpfpm/)** - multiple instances, each reporting connections, requests, performance, etc.
-- **[Tomcat](collectors/python.d.plugin/tomcat/)** - accesses, threads, free memory, volume, etc.
-- **[web server `access.log` files](collectors/python.d.plugin/web_log/)** - extracting in real-time, web server and proxy performance metrics and applying several health checks, etc.
-- **[HTTP check](collectors/python.d.plugin/httpcheck/)** - checks one or more web servers for HTTP status code and returned content.
+
+- **[Apache and lighttpd](collectors/python.d.plugin/apache/)** - `mod-status` (v2.2, v2.4) and cache log statistics, for multiple servers.
+- **[IPFS](collectors/python.d.plugin/ipfs/)** - bandwidth, peers.
+- **[LiteSpeed](collectors/python.d.plugin/litespeed/)** - reads the litespeed rtreport files to collect metrics.
+- **[Nginx](collectors/python.d.plugin/nginx/)** - `stub_status`, for multiple servers (see the snippet after this list).
+- **[Nginx+](collectors/python.d.plugin/nginx_plus/)** - connects to multiple nginx_plus servers (local or remote) to collect real-time performance metrics.
+- **[PHP-FPM](collectors/python.d.plugin/phpfpm/)** - multiple instances, each reporting connections, requests, performance, etc.
+- **[Tomcat](collectors/python.d.plugin/tomcat/)** - accesses, threads, free memory, volume, etc.
+- **[web server `access.log` files](collectors/python.d.plugin/web_log/)** - extracts web server and proxy performance metrics in real time and applies several health checks, etc.
+- **[HTTP check](collectors/python.d.plugin/httpcheck/)** - checks one or more web servers for HTTP status code and returned content.
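As an example of what these collectors expect, the Nginx entry above reads the `stub_status` endpoint, which you enable in the nginx configuration. A minimal illustrative snippet (the location path and access rules are assumptions to adapt):

```nginx
# Expose stub_status to the local Netdata agent only (illustrative).
location /stub_status {
    stub_status;
    allow 127.0.0.1;
    deny all;
}
```

The collector's job then typically points its `url` at `http://localhost/stub_status`.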
#### Proxies, Balancers, Accelerators
-- **[HAproxy](collectors/python.d.plugin/haproxy/)** - bandwidth, sessions, backends, etc.
-- **[Squid](collectors/python.d.plugin/squid/)** - multiple servers, each showing: clients bandwidth and requests, servers bandwidth and requests.
-- **[Traefik](collectors/python.d.plugin/traefik/)** - connects to multiple traefik instances (local or remote) to collect API metrics (response status code, response time, average response time and server uptime).
-- **[Varnish](collectors/python.d.plugin/varnish/)** - threads, sessions, hits, objects, backends, etc.
-- **[IPVS](collectors/proc.plugin/)** - collects metrics from the Linux IPVS load balancer.
+
+- **[HAproxy](collectors/python.d.plugin/haproxy/)** - bandwidth, sessions, backends, etc.
+- **[Squid](collectors/python.d.plugin/squid/)** - multiple servers, each showing: client bandwidth and requests, server bandwidth and requests.
+- **[Traefik](collectors/python.d.plugin/traefik/)** - connects to multiple traefik instances (local or remote) to collect API metrics (response status code, response time, average response time and server uptime).
+- **[Varnish](collectors/python.d.plugin/varnish/)** - threads, sessions, hits, objects, backends, etc.
+- **[IPVS](collectors/proc.plugin/)** - collects metrics from the Linux IPVS load balancer.
#### Database Servers
-- **[CouchDB](collectors/python.d.plugin/couchdb/)** - reads/writes, request methods, status codes, tasks, replication, per-db, etc.
-- **[MemCached](collectors/python.d.plugin/memcached/)** - multiple servers, each showing: bandwidth, connections, items, etc.
-- **[MongoDB](collectors/python.d.plugin/mongodb/)** - operations, clients, transactions, cursors, connections, asserts, locks, etc.
-- **[MySQL and mariadb](collectors/python.d.plugin/mysql/)** - multiple servers, each showing: bandwidth, queries/s, handlers, locks, issues, tmp operations, connections, binlog metrics, threads, innodb metrics, and more.
-- **[PostgreSQL](collectors/python.d.plugin/postgres/)** - multiple servers, each showing: per database statistics (connections, tuples read - written - returned, transactions, locks), backend processes, indexes, tables, write ahead, background writer and more.
-- **[Proxy SQL](collectors/python.d.plugin/proxysql/)** - collects Proxy SQL backend and frontend performance metrics.
-- **[Redis](collectors/python.d.plugin/redis/)** - multiple servers, each showing: operations, hit rate, memory, keys, clients, slaves.
-- **[RethinkDB](collectors/python.d.plugin/rethinkdbs/)** - connects to multiple rethinkdb servers (local or remote) to collect real-time metrics.
+
+- **[CouchDB](collectors/python.d.plugin/couchdb/)** - reads/writes, request methods, status codes, tasks, replication, per-db, etc.
+- **[MemCached](collectors/python.d.plugin/memcached/)** - multiple servers, each showing: bandwidth, connections, items, etc.
+- **[MongoDB](collectors/python.d.plugin/mongodb/)** - operations, clients, transactions, cursors, connections, asserts, locks, etc.
+- **[MySQL and MariaDB](collectors/python.d.plugin/mysql/)** - multiple servers, each showing: bandwidth, queries/s, handlers, locks, issues, tmp operations, connections, binlog metrics, threads, innodb metrics, and more (see the access sketch after this list).
+- **[PostgreSQL](collectors/python.d.plugin/postgres/)** - multiple servers, each showing: per database statistics (connections, tuples read - written - returned, transactions, locks), backend processes, indexes, tables, write ahead, background writer and more.
+- **[Proxy SQL](collectors/python.d.plugin/proxysql/)** - collects Proxy SQL backend and frontend performance metrics.
+- **[Redis](collectors/python.d.plugin/redis/)** - multiple servers, each showing: operations, hit rate, memory, keys, clients, slaves.
+- **[RethinkDB](collectors/python.d.plugin/rethinkdbs/)** - connects to multiple rethinkdb servers (local or remote) to collect real-time metrics.
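Several of these collectors need read-only database credentials. For the MySQL entry above, a dedicated low-privilege user is the usual approach; a hedged sketch (the `netdata` user name is a convention, not a requirement):

```sh
# Create a minimal-privilege local user for the collector (illustrative).
mysql -u root -p <<'SQL'
CREATE USER 'netdata'@'localhost';
GRANT USAGE ON *.* TO 'netdata'@'localhost';
FLUSH PRIVILEGES;
SQL
```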
#### Message Brokers
-- **[beanstalkd](collectors/python.d.plugin/beanstalk/)** - global and per tube monitoring.
-- **[RabbitMQ](collectors/python.d.plugin/rabbitmq/)** - performance and health metrics.
+
+- **[beanstalkd](collectors/python.d.plugin/beanstalk/)** - global and per tube monitoring.
+- **[RabbitMQ](collectors/python.d.plugin/rabbitmq/)** - performance and health metrics.
#### Search and Indexing
-- **[ElasticSearch](collectors/python.d.plugin/elasticsearch/)** - search and index performance, latency, timings, cluster statistics, threads statistics, etc.
+
+- **[ElasticSearch](collectors/python.d.plugin/elasticsearch/)** - search and index performance, latency, timings, cluster statistics, threads statistics, etc.
#### DNS Servers
-- **[bind_rndc](collectors/python.d.plugin/bind_rndc/)** - parses `named.stats` dump file to collect real-time performance metrics. All versions of bind after 9.6 are supported.
-- **[dnsdist](collectors/python.d.plugin/dnsdist/)** - performance and health metrics.
-- **[ISC Bind (named)](collectors/node.d.plugin/named/)** - multiple servers, each showing: clients, requests, queries, updates, failures and several per view metrics. All versions of bind after 9.9.10 are supported.
-- **[NSD](collectors/python.d.plugin/nsd/)** - queries, zones, protocols, query types, transfers, etc.
-- **[PowerDNS](collectors/python.d.plugin/powerdns/)** - queries, answers, cache, latency, etc.
-- **[unbound](collectors/python.d.plugin/unbound/)** - performance and resource usage metrics.
-- **[dns_query_time](collectors/python.d.plugin/dns_query_time/)** - DNS query time statistics.
+
+- **[bind_rndc](collectors/python.d.plugin/bind_rndc/)** - parses `named.stats` dump file to collect real-time performance metrics. All versions of bind after 9.6 are supported.
+- **[dnsdist](collectors/python.d.plugin/dnsdist/)** - performance and health metrics.
+- **[ISC Bind (named)](collectors/node.d.plugin/named/)** - multiple servers, each showing: clients, requests, queries, updates, failures and several per view metrics. All versions of bind after 9.9.10 are supported.
+- **[NSD](collectors/python.d.plugin/nsd/)** - queries, zones, protocols, query types, transfers, etc.
+- **[PowerDNS](collectors/python.d.plugin/powerdns/)** - queries, answers, cache, latency, etc.
+- **[unbound](collectors/python.d.plugin/unbound/)** - performance and resource usage metrics.
+- **[dns_query_time](collectors/python.d.plugin/dns_query_time/)** - DNS query time statistics.
#### Time Servers
-- **[chrony](collectors/python.d.plugin/chrony/)** - uses the `chronyc` command to collect chrony statistics (Frequency, Last offset, RMS offset, Residual freq, Root delay, Root dispersion, Skew, System time).
-- **[ntpd](collectors/python.d.plugin/ntpd/)** - connects to multiple ntpd servers (local or remote) to provide statistics of system variables and optional also peer variables.
+
+- **[chrony](collectors/python.d.plugin/chrony/)** - uses the `chronyc` command to collect chrony statistics (Frequency, Last offset, RMS offset, Residual freq, Root delay, Root dispersion, Skew, System time).
+- **[ntpd](collectors/python.d.plugin/ntpd/)** - connects to multiple ntpd servers (local or remote) to provide statistics of system variables and, optionally, also peer variables.
#### Mail Servers
-- **[Dovecot](collectors/python.d.plugin/dovecot/)** - POP3/IMAP servers.
-- **[Exim](collectors/python.d.plugin/exim/)** - message queue (emails queued).
-- **[Postfix](collectors/python.d.plugin/postfix/)** - message queue (entries, size).
+
+- **[Dovecot](collectors/python.d.plugin/dovecot/)** - POP3/IMAP servers.
+- **[Exim](collectors/python.d.plugin/exim/)** - message queue (emails queued).
+- **[Postfix](collectors/python.d.plugin/postfix/)** - message queue (entries, size).
#### Hardware Sensors
-- **[IPMI](collectors/freeipmi.plugin/)** - enterprise hardware sensors and events.
-- **[lm-sensors](collectors/python.d.plugin/sensors/)** - temperature, voltage, fans, power, humidity, etc.
-- **[Nvidia](collectors/python.d.plugin/nvidia_smi/)** - collects information for Nvidia GPUs.
-- **[RPi](collectors/charts.d.plugin/sensors/)** - Raspberry Pi temperature sensors.
-- **[w1sensor](collectors/python.d.plugin/w1sensor/)** - collects data from connected 1-Wire sensors.
+
+- **[IPMI](collectors/freeipmi.plugin/)** - enterprise hardware sensors and events.
+- **[lm-sensors](collectors/python.d.plugin/sensors/)** - temperature, voltage, fans, power, humidity, etc.
+- **[Nvidia](collectors/python.d.plugin/nvidia_smi/)** - collects information for Nvidia GPUs.
+- **[RPi](collectors/charts.d.plugin/sensors/)** - Raspberry Pi temperature sensors.
+- **[w1sensor](collectors/python.d.plugin/w1sensor/)** - collects data from connected 1-Wire sensors.
#### UPSes
-- **[apcupsd](collectors/charts.d.plugin/apcupsd/)** - load, charge, battery voltage, temperature, utility metrics, output metrics
-- **[NUT](collectors/charts.d.plugin/nut/)** - load, charge, battery voltage, temperature, utility metrics, output metrics
-- **[Linux Power Supply](collectors/proc.plugin/)** - collects metrics reported by power supply drivers on Linux.
+
+- **[apcupsd](collectors/charts.d.plugin/apcupsd/)** - load, charge, battery voltage, temperature, utility metrics, output metrics.
+- **[NUT](collectors/charts.d.plugin/nut/)** - load, charge, battery voltage, temperature, utility metrics, output metrics.
+- **[Linux Power Supply](collectors/proc.plugin/)** - collects metrics reported by power supply drivers on Linux.
#### Social Sharing Servers
-- **[RetroShare](collectors/python.d.plugin/retroshare/)** - connects to multiple retroshare servers (local or remote) to collect real-time performance metrics.
+
+- **[RetroShare](collectors/python.d.plugin/retroshare/)** - connects to multiple retroshare servers (local or remote) to collect real-time performance metrics.
#### Security
-- **[Fail2Ban](collectors/python.d.plugin/fail2ban/)** - monitors the fail2ban log file to check all bans for all active jails.
+
+- **[Fail2Ban](collectors/python.d.plugin/fail2ban/)** - monitors the fail2ban log file to check all bans for all active jails.
#### Authentication, Authorization, Accounting (AAA, RADIUS, LDAP) Servers
-- **[FreeRadius](collectors/python.d.plugin/freeradius/)** - uses the `radclient` command to provide freeradius statistics (authentication, accounting, proxy-authentication, proxy-accounting).
+
+- **[FreeRadius](collectors/python.d.plugin/freeradius/)** - uses the `radclient` command to provide freeradius statistics (authentication, accounting, proxy-authentication, proxy-accounting).
#### Telephony Servers
-- **[opensips](collectors/charts.d.plugin/opensips/)** - connects to an opensips server (localhost only) to collect real-time performance metrics.
+
+- **[opensips](collectors/charts.d.plugin/opensips/)** - connects to an opensips server (localhost only) to collect real-time performance metrics.
#### Household Appliances
-- **[SMA webbox](collectors/node.d.plugin/sma_webbox/)** - connects to multiple remote SMA webboxes to collect real-time performance metrics of the photovoltaic (solar) power generation.
-- **[Fronius](collectors/node.d.plugin/fronius/)** - connects to multiple remote Fronius Symo servers to collect real-time performance metrics of the photovoltaic (solar) power generation.
-- **[StiebelEltron](collectors/node.d.plugin/stiebeleltron/)** - collects the temperatures and other metrics from your Stiebel Eltron heating system using their Internet Service Gateway (ISG web).
+
+- **[SMA webbox](collectors/node.d.plugin/sma_webbox/)** - connects to multiple remote SMA webboxes to collect real-time performance metrics of the photovoltaic (solar) power generation.
+- **[Fronius](collectors/node.d.plugin/fronius/)** - connects to multiple remote Fronius Symo servers to collect real-time performance metrics of the photovoltaic (solar) power generation.
+- **[StiebelEltron](collectors/node.d.plugin/stiebeleltron/)** - collects the temperatures and other metrics from your Stiebel Eltron heating system using their Internet Service Gateway (ISG web).
#### Game Servers
-- **[SpigotMC](collectors/python.d.plugin/spigotmc/)** - monitors Spigot Minecraft server ticks per second and number of online players using the Minecraft remote console.
+
+- **[SpigotMC](collectors/python.d.plugin/spigotmc/)** - monitors Spigot Minecraft server ticks per second and number of online players using the Minecraft remote console.
#### Distributed Computing
-- **[BOINC](collectors/python.d.plugin/boinc/)** - monitors task states for local and remote BOINC client software using the remote GUI RPC interface. Also provides alarms for a handful of error conditions.
+
+- **[BOINC](collectors/python.d.plugin/boinc/)** - monitors task states for local and remote BOINC client software using the remote GUI RPC interface. Also provides alarms for a handful of error conditions.
#### Media Streaming Servers
-- **[IceCast](collectors/python.d.plugin/icecast/)** - collects the number of listeners for active sources.
+
+- **[IceCast](collectors/python.d.plugin/icecast/)** - collects the number of listeners for active sources.
#### Monitoring Systems
-- **[Monit](collectors/python.d.plugin/monit/)** - collects metrics about monit targets (filesystems, applications, networks).
+
+- **[Monit](collectors/python.d.plugin/monit/)** - collects metrics about monit targets (filesystems, applications, networks).
#### Provisioning Systems
-- **[Puppet](collectors/python.d.plugin/puppet/)** - connects to multiple Puppet Server and Puppet DB instances (local or remote) to collect real-time status metrics.
+
+- **[Puppet](collectors/python.d.plugin/puppet/)** - connects to multiple Puppet Server and Puppet DB instances (local or remote) to collect real-time status metrics.
You can easily extend Netdata by writing plugins that collect data from any source, using any programming language.
@@ -563,23 +596,23 @@ You can easily extend Netdata, by writing plugins that collect data from any sou
## Documentation
-The Netdata documentation is at [https://docs.netdata.cloud](https://docs.netdata.cloud). But you can also find it inside the repo, so by just navigating the repo on github you can find all the documentation.
+The Netdata documentation is at <https://docs.netdata.cloud>. It is also included in the repo, so you can browse all of it by simply navigating the repository on GitHub.
Here is a quick list:
-Directory|Description
-:---|:---
-[`installer`](packaging/installer/)|Instructions to install Netdata on your systems.
-[`docker`](packaging/docker/)|Instructions to install Netdata using docker.
-[`daemon`](daemon/)|Information about the Netdata daemon and its configuration.
-[`collectors`](collectors/)|Information about data collection plugins.
-[`health`](health/)|How Netdata's health monitoring works, how to create your own alarms and how to configure alarm notification methods.
-[`streaming`](streaming/)|How to build hierarchies of Netdata servers, by streaming metrics between them.
-[`backends`](backends/)|Long term archiving of metrics to industry standard time-series databases, like `prometheus`, `graphite`, `opentsdb`.
-[`web/api`](web/api/)|Learn how to query the Netdata API and the queries it supports.
-[`web/api/badges`](web/api/badges/)|Learn how to generate badges (SVG images) from live data.
-[`web/gui/custom`](web/gui/custom/)|Learn how to create custom Netdata dashboards.
-[`web/gui/confluence`](web/gui/confluence/)|Learn how to create Netdata dashboards on Atlassian's Confluence.
+|Directory|Description|
+|:--------|:----------|
+|[`installer`](packaging/installer/)|Instructions to install Netdata on your systems.|
+|[`docker`](packaging/docker/)|Instructions to install Netdata using docker.|
+|[`daemon`](daemon/)|Information about the Netdata daemon and its configuration.|
+|[`collectors`](collectors/)|Information about data collection plugins.|
+|[`health`](health/)|How Netdata's health monitoring works, how to create your own alarms and how to configure alarm notification methods.|
+|[`streaming`](streaming/)|How to build hierarchies of Netdata servers, by streaming metrics between them.|
+|[`backends`](backends/)|Long-term archiving of metrics to industry standard time-series databases, like `prometheus`, `graphite`, `opentsdb` (sample configuration below).|
+|[`web/api`](web/api/)|Learn how to query the Netdata API and which queries it supports (see the example below the table).|
+|[`web/api/badges`](web/api/badges/)|Learn how to generate badges (SVG images) from live data.|
+|[`web/gui/custom`](web/gui/custom/)|Learn how to create custom Netdata dashboards.|
+|[`web/gui/confluence`](web/gui/confluence/)|Learn how to create Netdata dashboards on Atlassian's Confluence.|
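For instance, the `web/api` directory documents data queries like the one below (host and chart name are illustrative; `system.cpu` exists on most installs):

```sh
# Fetch the last 60 seconds of CPU utilization from a local agent as JSON.
curl 'http://localhost:19999/api/v1/data?chart=system.cpu&after=-60&format=json'
```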
You can also check all the other directories. Most of them have plenty of documentation.
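Similarly, the `backends` entry in the table maps to a short block in `netdata.conf`; a hedged sketch for archiving to graphite (the destination is a placeholder):

```
[backend]
    enabled = yes
    type = graphite
    destination = localhost:2003
    update every = 10
```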
@@ -591,11 +624,11 @@ To report bugs, or get help, use [GitHub Issues](https://github.com/netdata/netd
You can also find Netdata on:
-- [Facebook](https://www.facebook.com/linuxnetdata/)
-- [Twitter](https://twitter.com/linuxnetdata)
-- [OpenHub](https://www.openhub.net/p/netdata)
-- [Repology](https://repology.org/metapackage/netdata/versions)
-- [StackShare](https://stackshare.io/netdata)
+- [Facebook](https://www.facebook.com/linuxnetdata/)
+- [Twitter](https://twitter.com/linuxnetdata)
+- [OpenHub](https://www.openhub.net/p/netdata)
+- [Repology](https://repology.org/metapackage/netdata/versions)
+- [StackShare](https://stackshare.io/netdata)
## License
@@ -607,7 +640,7 @@ Netdata re-distributes other open-source tools and libraries. Please check the [
Yes.
-*When people first hear about a new product, they frequently ask if it is any good. A Hacker News user [remarked](https://news.ycombinator.com/item?id=3067434):*
+_When people first hear about a new product, they frequently ask if it is any good. A Hacker News user [remarked](https://news.ycombinator.com/item?id=3067434):_
> Note to self: Starting immediately, all raganwald projects will have a “Is it any good?” section in the readme, and the answer shall be “yes.”
diff --git a/REDISTRIBUTED.md b/REDISTRIBUTED.md
index b0fac2e7..24c6d6c6 100644
--- a/REDISTRIBUTED.md
+++ b/REDISTRIBUTED.md
@@ -1,203 +1,174 @@
# Redistributed software
-netdata copyright info:
+Netdata copyright info:
Copyright 2016-2018, Costa Tsaousis.
Copyright 2018, Netdata Inc.
Released under [GPL v3 or later](LICENSE).
-netdata uses SPDX license tags to identify the license for its files.
+Netdata uses SPDX license tags to identify the license for its files.
Individual licenses referenced in the tags are available on the [SPDX project site](http://spdx.org/licenses/).
-netdata redistributes the following third-party software.
+Netdata redistributes the following third-party software.
We have decided to redistribute all these, instead of using them
-through a CDN, to allow netdata to work in cases where Internet
+through a CDN, to allow Netdata to work in cases where Internet
connectivity is not available.
-- [Dygraphs](http://dygraphs.com/)
+- [Dygraphs](http://dygraphs.com/)
- Copyright 2009, Dan Vanderkam
- [MIT License](http://dygraphs.com/legal.html)
+ Copyright 2009, Dan Vanderkam
+ [MIT License](http://dygraphs.com/legal.html)
+- [Easy Pie Chart](https://rendro.github.io/easy-pie-chart/)
-- [Easy Pie Chart](https://rendro.github.io/easy-pie-chart/)
+ Copyright 2013, Robert Fleischmann
+ [MIT License](https://github.com/rendro/easy-pie-chart/blob/master/LICENSE)
- Copyright 2013, Robert Fleischmann
- [MIT License](https://github.com/rendro/easy-pie-chart/blob/master/LICENSE)
+- [Gauge.js](http://bernii.github.io/gauge.js/)
+ Copyright, Bernard Kobos
+ [MIT License](https://github.com/getgauge/gauge-js/blob/master/LICENSE)
-- [Gauge.js](http://bernii.github.io/gauge.js/)
+- [d3pie](https://github.com/benkeen/d3pie)
- Copyright, Bernard Kobos
- [MIT License](https://github.com/getgauge/gauge-js/blob/master/LICENSE)
+ Copyright (c) 2014-2015 Benjamin Keen
+ [MIT License](https://github.com/benkeen/d3pie/blob/master/LICENSE)
+- [jQuery Sparklines](http://omnipotent.net/jquery.sparkline/)
-- [d3pie](https://github.com/benkeen/d3pie)
+ Copyright 2009-2012, Splunk Inc.
+ [New BSD License](http://opensource.org/licenses/BSD-3-Clause)
- Copyright (c) 2014-2015 Benjamin Keen
- [MIT License](https://github.com/benkeen/d3pie/blob/master/LICENSE)
+- [Peity](http://benpickles.github.io/peity/)
+ Copyright 2009-2015, Ben Pickles
+ [MIT License](https://github.com/benpickles/peity/blob/master/LICENCE)
-- [jQuery Sparklines](http://omnipotent.net/jquery.sparkline/)
+- [morris.js](http://morrisjs.github.io/morris.js/)
- Copyright 2009-2012, Splunk Inc.
- [New BSD License](http://opensource.org/licenses/BSD-3-Clause)
+ Copyright 2013, Olly Smith
+ [Simplified BSD License](http://morrisjs.github.io/morris.js/)
+- [Raphaël](http://dmitrybaranovskiy.github.io/raphael/)
-- [Peity](http://benpickles.github.io/peity/)
+ Copyright 2008, Dmitry Baranovskiy
+ [MIT License](http://dmitrybaranovskiy.github.io/raphael/license.html)
- Copyright 2009-2015, Ben Pickles
- [MIT License](https://github.com/benpickles/peity/blob/master/LICENCE)
+- [C3](http://c3js.org/)
+ Copyright 2013, Masayuki Tanaka
+ [MIT License](https://github.com/masayuki0812/c3/blob/master/LICENSE)
-- [morris.js](http://morrisjs.github.io/morris.js/)
+- [D3](http://d3js.org/)
- Copyright 2013, Olly Smith
- [Simplified BSD License](http://morrisjs.github.io/morris.js/)
+ Copyright 2015, Mike Bostock
+ [BSD License](http://opensource.org/licenses/BSD-3-Clause)
+- [jQuery](https://jquery.org/)
-- [Raphaël](http://dmitrybaranovskiy.github.io/raphael/)
+ Copyright 2015, jQuery Foundation
+ [MIT License](https://jquery.org/license/)
- Copyright 2008, Dmitry Baranovskiy
- [MIT License](http://dmitrybaranovskiy.github.io/raphael/license.html)
+- [Bootstrap](http://getbootstrap.com/getting-started/)
+ Copyright 2015, Twitter
+ [MIT License](https://github.com/twbs/bootstrap/blob/v4-dev/LICENSE)
-- [C3](http://c3js.org/)
+- [Bootstrap Toggle](http://www.bootstraptoggle.com/)
- Copyright 2013, Masayuki Tanaka
- [MIT License](https://github.com/masayuki0812/c3/blob/master/LICENSE)
+ Copyright (c) 2011-2014 Min Hur, The New York Times Company
+ [MIT License](https://github.com/minhur/bootstrap-toggle/blob/master/LICENSE)
+- [Bootstrap-slider](http://seiyria.com/bootstrap-slider/)
-- [D3](http://d3js.org/)
+ Copyright 2017 Kyle Kemp, Rohit Kalkur, and contributors
+ [MIT License](https://github.com/seiyria/bootstrap-slider/blob/master/LICENSE.md)
- Copyright 2015, Mike Bostock
- [BSD License](http://opensource.org/licenses/BSD-3-Clause)
+- [bootstrap-table](http://bootstrap-table.wenzhixin.net.cn/)
+ Copyright (c) 2012-2016 Zhixin Wen [wenzhixin2010@gmail.com](mailto:wenzhixin2010@gmail.com)
+ [MIT License](https://github.com/wenzhixin/bootstrap-table/blob/master/LICENSE)
-- [jQuery](https://jquery.org/)
+- [tableExport.jquery.plugin](https://github.com/hhurz/tableExport.jquery.plugin)
- Copyright 2015, jQuery Foundation
- [MIT License](https://jquery.org/license/)
+ Copyright (c) 2015,2016 hhurz
+ [MIT License](https://github.com/hhurz/tableExport.jquery.plugin/blob/master/LICENSE)
+- [perfect-scrollbar](https://github.com/noraesae/perfect-scrollbar)
-- [Bootstrap](http://getbootstrap.com/getting-started/)
+ Copyright 2016, Hyunje Alex Jun and other contributors
+ [MIT License](https://github.com/noraesae/perfect-scrollbar/blob/master/LICENSE)
- Copyright 2015, Twitter
- [MIT License](https://github.com/twbs/bootstrap/blob/v4-dev/LICENSE)
+- [FontAwesome](https://fortawesome.github.io/Font-Awesome/)
+ Created by Dave Gandy
+ Font license: [SIL OFL 1.1](http://scripts.sil.org/OFL)
+ Icon license [Creative Commons Attribution 4.0 (CC-BY 4.0)](https://creativecommons.org/licenses/by/4.0/)
+ Code license: [MIT License](http://opensource.org/licenses/mit-license.html)
-- [Bootstrap Toggle](http://www.bootstraptoggle.com/)
+- [node-extend](https://github.com/justmoon/node-extend)
- Copyright (c) 2011-2014 Min Hur, The New York Times Company
- [MIT License](https://github.com/minhur/bootstrap-toggle/blob/master/LICENSE)
+ Copyright 2014, Stefan Thomas
+ [MIT License](https://github.com/justmoon/node-extend/blob/master/LICENSE)
+- [node-net-snmp](https://github.com/stephenwvickers/node-net-snmp)
-- [Bootstrap-slider](http://seiyria.com/bootstrap-slider/)
+ Copyright 2013, Stephen Vickers
+ [MIT License](https://github.com/nospaceships/node-net-snmp#license)
- Copyright 2017 Kyle Kemp, Rohit Kalkur, and contributors
- [MIT License](https://github.com/seiyria/bootstrap-slider/blob/master/LICENSE.md)
+- [node-asn1-ber](https://github.com/stephenwvickers/node-asn1-ber)
+ Copyright 2017, Stephen Vickers
+ Copyright 2011, Mark Cavage
+ [MIT License](https://github.com/nospaceships/node-asn1-ber#license)
-- [bootstrap-table](http://bootstrap-table.wenzhixin.net.cn/)
+- [pixl-xml](https://github.com/jhuckaby/pixl-xml)
- Copyright (c) 2012-2016 Zhixin Wen <wenzhixin2010@gmail.com>
- [MIT License](https://github.com/wenzhixin/bootstrap-table/blob/master/LICENSE)
+ Copyright 2015, Joseph Huckaby
+ [MIT License](https://github.com/jhuckaby/pixl-xml#license)
+- [sensors](https://github.com/paroj/sensors.py)
-- [tableExport.jquery.plugin](https://github.com/hhurz/tableExport.jquery.plugin)
+ Copyright 2014, Pavel Rojtberg
+ [LGPL 2.1 License](http://opensource.org/licenses/LGPL-2.1)
- Copyright (c) 2015,2016 hhurz
- [MIT License](https://github.com/hhurz/tableExport.jquery.plugin/blob/master/LICENSE)
+- [PyYAML](https://github.com/yaml/pyyaml)
+ Copyright 2006, Kirill Simonov
+ [MIT License](https://github.com/yaml/pyyaml/blob/master/LICENSE)
-- [perfect-scrollbar](https://jamesflorentino.github.io/nanoScrollerJS/)
+- [urllib3](https://github.com/shazow/urllib3)
- Copyright 2016, Hyunje Alex Jun and other contributors
- [MIT License](https://github.com/noraesae/perfect-scrollbar/blob/master/LICENSE)
+ Copyright 2008-2016 Andrey Petrov and [contributors](https://github.com/shazow/urllib3/blob/master/CONTRIBUTORS.txt)
+ [MIT License](https://github.com/shazow/urllib3/blob/master/LICENSE.txt)
+- [lz-string](http://pieroxy.net/blog/pages/lz-string/index.html)
-- [FontAwesome](https://fortawesome.github.io/Font-Awesome/)
+ Copyright 2013 Pieroxy
+ [WTFPL License](http://pieroxy.net/blog/pages/lz-string/index.html#inline_menu_10)
- Created by Dave Gandy
- Font license: [SIL OFL 1.1](http://scripts.sil.org/OFL)
- Icon license [Creative Commons Attribution 4.0 (CC-BY 4.0)](https://creativecommons.org/licenses/by/4.0/)
- Code license: [MIT License](http://opensource.org/licenses/mit-license.html)
+- [pako](http://nodeca.github.io/pako/)
+ Copyright 2014-2017 Vitaly Puzrin and Andrei Tuputcyn
+ [MIT License](https://github.com/nodeca/pako/blob/master/LICENSE)
-- [node-extend](https://github.com/justmoon/node-extend)
+- [clipboard-polyfill](https://github.com/lgarron/clipboard-polyfill)
- Copyright 2014, Stefan Thomas
- [MIT License](https://github.com/justmoon/node-extend/blob/master/LICENSE)
+ Copyright (c) 2014 Lucas Garron
+ [MIT License](https://github.com/lgarron/clipboard-polyfill/blob/master/LICENSE.md)
+- [Utilities for writing code that runs on Python 2 and 3](collectors/python.d.plugin/python_modules/urllib3/packages/six.py)
-- [node-net-snmp](https://github.com/stephenwvickers/node-net-snmp)
+ Copyright (c) 2010-2015 Benjamin Peterson
+ [MIT License](https://github.com/benjaminp/six/blob/master/LICENSE)
- Copyright 2013, Stephen Vickers
- [MIT License](https://github.com/nospaceships/node-net-snmp#license)
+- [mcrcon](https://github.com/barneygale/MCRcon)
+ Copyright (C) 2015 Barnaby Gale
+ [MIT License](https://raw.githubusercontent.com/barneygale/MCRcon/master/COPYING.txt)
-- [node-asn1-ber](https://github.com/stephenwvickers/node-asn1-ber)
+- [monotonic](https://github.com/atdt/monotonic)
- Copyright 2017, Stephen Vickers
- Copyright 2011, Mark Cavage
- [MIT License](https://github.com/nospaceships/node-asn1-ber#license)
+ Copyright 2014, 2015, 2016 Ori Livneh [ori@wikimedia.org](mailto:ori@wikimedia.org)
+ [Apache-2.0](http://www.apache.org/licenses/LICENSE-2.0)
-
-- [pixl-xml](https://github.com/jhuckaby/pixl-xml)
-
- Copyright 2015, Joseph Huckaby
- [MIT License](https://github.com/jhuckaby/pixl-xml#license)
-
-
-- [sensors](https://github.com/paroj/sensors.py)
-
- Copyright 2014, Pavel Rojtberg
- [LGPL 2.1 License](http://opensource.org/licenses/LGPL-2.1)
-
-
-- [PyYAML](https://bitbucket.org/blackjack/pysensors)
-
- Copyright 2006, Kirill Simonov
- [MIT License](https://github.com/yaml/pyyaml/blob/master/LICENSE)
-
-
-- [urllib3](https://github.com/shazow/urllib3)
-
- Copyright 2008-2016 Andrey Petrov and [contributors](https://github.com/shazow/urllib3/blob/master/CONTRIBUTORS.txt)
- [MIT License](https://github.com/shazow/urllib3/blob/master/LICENSE.txt)
-
-
-- [lz-string](http://pieroxy.net/blog/pages/lz-string/index.html)
-
- Copyright 2013 Pieroxy
- [WTFPL License](http://pieroxy.net/blog/pages/lz-string/index.html#inline_menu_10)
-
-
-- [pako](http://nodeca.github.io/pako/)
-
- Copyright 2014-2017 Vitaly Puzrin and Andrei Tuputcyn
- [MIT License](https://github.com/nodeca/pako/blob/master/LICENSE)
-
-
-- [clipboard-polyfill](https://github.com/lgarron/clipboard-polyfill)
-
- Copyright (c) 2014 Lucas Garron
- [MIT License](https://github.com/lgarron/clipboard-polyfill/blob/master/LICENSE.md)
-
-
-- [Utilities for writing code that runs on Python 2 and 3](collectors/python.d.plugin/python_modules/urllib3/packages/six.py)
-
- Copyright (c) 2010-2015 Benjamin Peterson
- [MIT License](https://github.com/benjaminp/six/blob/master/LICENSE)
-
-
-- [mcrcon](https://github.com/barneygale/MCRcon)
-
- Copyright (C) 2015 Barnaby Gale
- [MIT License](https://raw.githubusercontent.com/barneygale/MCRcon/master/COPYING.txt)
-
-- [monotonic](https://github.com/atdt/monotonic)
-
- Copyright 2014, 2015, 2016 Ori Livneh <ori@wikimedia.org>
- [Apache-2.0](http://www.apache.org/licenses/LICENSE-2.0)
-
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2FREDISTRIBUTED&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2FREDISTRIBUTED&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/SECURITY.md b/SECURITY.md
deleted file mode 100644
index f0296893..00000000
--- a/SECURITY.md
+++ /dev/null
@@ -1,43 +0,0 @@
-# Security Policy
-
-## Supported Versions
-
-| Version | Supported |
-| ------- | ------------------ |
-| Latest | Yes |
-
-## Reporting a Vulnerability
-
-We’re extremely grateful for security researchers and users that report vulnerabilities to Netdata Open Source Community. All reports are thoroughly investigated by a set of community volunteers.
-
-To make a report, please create a post [here](https://groups.google.com/a/netdata.cloud/forum/#!newtopic/security) with the vulnerability details and the details expected for [all Netdata bug reports](.github/ISSUE_TEMPLATE/bug_report.md).
-
-### When Should I Report a Vulnerability?
-
-- You think you discovered a potential security vulnerability in Netdata
-- You are unsure how a vulnerability affects Netdata
-- You think you discovered a vulnerability in another project that Netdata depends on (e.g. python, node, etc)
-
-### When Should I NOT Report a Vulnerability?
-
-- You need help tuning Netdata for security
-- You need help applying security related updates
-- Your issue is not security related
-
-### Security Vulnerability Response
-
-Each report is acknowledged and analyzed by Netdata Team members within 3 working days. This will set off a Security Release Process.
-
-Any vulnerability information shared with Netdata Team stays within Netdata project and will not be disseminated to other projects unless it is necessary to get the issue fixed.
-
-As the security issue moves from triage, to identified fix, to release planning we will keep the reporter updated.
-
-### Public Disclosure Timing
-
-A public disclosure date is negotiated by the Netdata team and the bug submitter. We prefer to fully disclose the bug as soon as possible once a user mitigation is available. It is reasonable to delay disclosure when the bug or the fix is not yet fully understood, the solution is not well-tested, or for vendor coordination. The timeframe for disclosure is from immediate (especially if it's already publicly known) to a few weeks. As a basic default, we expect report date to disclosure date to be on the order of 7 days. The Netdata team holds the final say when setting a disclosure date.
-
-### Security Announcements
-
-Every time a security issue is fixed in Netdata, we immediately release a new version of it. So, to get notified of all security incidents, please subscribe to our releases on github.
-
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2FSECURITY&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/aclocal.m4 b/aclocal.m4
new file mode 100644
index 00000000..dbfec278
--- /dev/null
+++ b/aclocal.m4
@@ -0,0 +1,1474 @@
+# generated automatically by aclocal 1.15.1 -*- Autoconf -*-
+
+# Copyright (C) 1996-2017 Free Software Foundation, Inc.
+
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+m4_ifndef([AC_CONFIG_MACRO_DIRS], [m4_defun([_AM_CONFIG_MACRO_DIRS], [])m4_defun([AC_CONFIG_MACRO_DIRS], [_AM_CONFIG_MACRO_DIRS($@)])])
+m4_ifndef([AC_AUTOCONF_VERSION],
+ [m4_copy([m4_PACKAGE_VERSION], [AC_AUTOCONF_VERSION])])dnl
+m4_if(m4_defn([AC_AUTOCONF_VERSION]), [2.69],,
+[m4_warning([this file was generated for autoconf 2.69.
+You have another version of autoconf. It may work, but is not guaranteed to.
+If you have problems, you may need to regenerate the build system entirely.
+To do so, use the procedure documented by the package, typically 'autoreconf'.])])
+
+dnl pkg.m4 - Macros to locate and utilise pkg-config. -*- Autoconf -*-
+dnl serial 11 (pkg-config-0.29.1)
+dnl
+dnl Copyright © 2004 Scott James Remnant <scott@netsplit.com>.
+dnl Copyright © 2012-2015 Dan Nicholson <dbn.lists@gmail.com>
+dnl
+dnl This program is free software; you can redistribute it and/or modify
+dnl it under the terms of the GNU General Public License as published by
+dnl the Free Software Foundation; either version 2 of the License, or
+dnl (at your option) any later version.
+dnl
+dnl This program is distributed in the hope that it will be useful, but
+dnl WITHOUT ANY WARRANTY; without even the implied warranty of
+dnl MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+dnl General Public License for more details.
+dnl
+dnl You should have received a copy of the GNU General Public License
+dnl along with this program; if not, write to the Free Software
+dnl Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+dnl 02111-1307, USA.
+dnl
+dnl As a special exception to the GNU General Public License, if you
+dnl distribute this file as part of a program that contains a
+dnl configuration script generated by Autoconf, you may include it under
+dnl the same distribution terms that you use for the rest of that
+dnl program.
+
+dnl PKG_PREREQ(MIN-VERSION)
+dnl -----------------------
+dnl Since: 0.29
+dnl
+dnl Verify that the version of the pkg-config macros are at least
+dnl MIN-VERSION. Unlike PKG_PROG_PKG_CONFIG, which checks the user's
+dnl installed version of pkg-config, this checks the developer's version
+dnl of pkg.m4 when generating configure.
+dnl
+dnl To ensure that this macro is defined, also add:
+dnl m4_ifndef([PKG_PREREQ],
+dnl [m4_fatal([must install pkg-config 0.29 or later before running autoconf/autogen])])
+dnl
+dnl See the "Since" comment for each macro you use to see what version
+dnl of the macros you require.
+m4_defun([PKG_PREREQ],
+[m4_define([PKG_MACROS_VERSION], [0.29.1])
+m4_if(m4_version_compare(PKG_MACROS_VERSION, [$1]), -1,
+ [m4_fatal([pkg.m4 version $1 or higher is required but ]PKG_MACROS_VERSION[ found])])
+])dnl PKG_PREREQ
+
+dnl PKG_PROG_PKG_CONFIG([MIN-VERSION])
+dnl ----------------------------------
+dnl Since: 0.16
+dnl
+dnl Search for the pkg-config tool and set the PKG_CONFIG variable to
+dnl first found in the path. Checks that the version of pkg-config found
+dnl is at least MIN-VERSION. If MIN-VERSION is not specified, 0.9.0 is
+dnl used since that's the first version where most current features of
+dnl pkg-config existed.
+AC_DEFUN([PKG_PROG_PKG_CONFIG],
+[m4_pattern_forbid([^_?PKG_[A-Z_]+$])
+m4_pattern_allow([^PKG_CONFIG(_(PATH|LIBDIR|SYSROOT_DIR|ALLOW_SYSTEM_(CFLAGS|LIBS)))?$])
+m4_pattern_allow([^PKG_CONFIG_(DISABLE_UNINSTALLED|TOP_BUILD_DIR|DEBUG_SPEW)$])
+AC_ARG_VAR([PKG_CONFIG], [path to pkg-config utility])
+AC_ARG_VAR([PKG_CONFIG_PATH], [directories to add to pkg-config's search path])
+AC_ARG_VAR([PKG_CONFIG_LIBDIR], [path overriding pkg-config's built-in search path])
+
+if test "x$ac_cv_env_PKG_CONFIG_set" != "xset"; then
+ AC_PATH_TOOL([PKG_CONFIG], [pkg-config])
+fi
+if test -n "$PKG_CONFIG"; then
+ _pkg_min_version=m4_default([$1], [0.9.0])
+ AC_MSG_CHECKING([pkg-config is at least version $_pkg_min_version])
+ if $PKG_CONFIG --atleast-pkgconfig-version $_pkg_min_version; then
+ AC_MSG_RESULT([yes])
+ else
+ AC_MSG_RESULT([no])
+ PKG_CONFIG=""
+ fi
+fi[]dnl
+])dnl PKG_PROG_PKG_CONFIG
+
+dnl PKG_CHECK_EXISTS(MODULES, [ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND])
+dnl -------------------------------------------------------------------
+dnl Since: 0.18
+dnl
+dnl Check to see whether a particular set of modules exists. Similar to
+dnl PKG_CHECK_MODULES(), but does not set variables or print errors.
+dnl
+dnl Please remember that m4 expands AC_REQUIRE([PKG_PROG_PKG_CONFIG])
+dnl only at the first occurrence in configure.ac, so if the first place
+dnl it's called might be skipped (such as if it is within an "if"), you
+dnl have to call PKG_CHECK_EXISTS manually.
+AC_DEFUN([PKG_CHECK_EXISTS],
+[AC_REQUIRE([PKG_PROG_PKG_CONFIG])dnl
+if test -n "$PKG_CONFIG" && \
+ AC_RUN_LOG([$PKG_CONFIG --exists --print-errors "$1"]); then
+ m4_default([$2], [:])
+m4_ifvaln([$3], [else
+ $3])dnl
+fi])
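+dnl
+dnl Illustrative example (not part of upstream pkg.m4; a hypothetical
+dnl configure.ac fragment, kept as comments so the macros are unchanged):
+dnl
+dnl   PKG_CHECK_EXISTS([libcurl >= 7.20.0],
+dnl                    [have_libcurl=yes],
+dnl                    [have_libcurl=no])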
+
+dnl _PKG_CONFIG([VARIABLE], [COMMAND], [MODULES])
+dnl ---------------------------------------------
+dnl Internal wrapper calling pkg-config via PKG_CONFIG and setting
+dnl pkg_failed based on the result.
+m4_define([_PKG_CONFIG],
+[if test -n "$$1"; then
+ pkg_cv_[]$1="$$1"
+ elif test -n "$PKG_CONFIG"; then
+ PKG_CHECK_EXISTS([$3],
+ [pkg_cv_[]$1=`$PKG_CONFIG --[]$2 "$3" 2>/dev/null`
+ test "x$?" != "x0" && pkg_failed=yes ],
+ [pkg_failed=yes])
+ else
+ pkg_failed=untried
+fi[]dnl
+])dnl _PKG_CONFIG
+
+dnl _PKG_SHORT_ERRORS_SUPPORTED
+dnl ---------------------------
+dnl Internal check to see if pkg-config supports short errors.
+AC_DEFUN([_PKG_SHORT_ERRORS_SUPPORTED],
+[AC_REQUIRE([PKG_PROG_PKG_CONFIG])
+if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then
+ _pkg_short_errors_supported=yes
+else
+ _pkg_short_errors_supported=no
+fi[]dnl
+])dnl _PKG_SHORT_ERRORS_SUPPORTED
+
+
+dnl PKG_CHECK_MODULES(VARIABLE-PREFIX, MODULES, [ACTION-IF-FOUND],
+dnl [ACTION-IF-NOT-FOUND])
+dnl --------------------------------------------------------------
+dnl Since: 0.4.0
+dnl
+dnl Note that if there is a possibility the first call to
+dnl PKG_CHECK_MODULES might not happen, you should be sure to include an
+dnl explicit call to PKG_PROG_PKG_CONFIG in your configure.ac
+AC_DEFUN([PKG_CHECK_MODULES],
+[AC_REQUIRE([PKG_PROG_PKG_CONFIG])dnl
+AC_ARG_VAR([$1][_CFLAGS], [C compiler flags for $1, overriding pkg-config])dnl
+AC_ARG_VAR([$1][_LIBS], [linker flags for $1, overriding pkg-config])dnl
+
+pkg_failed=no
+AC_MSG_CHECKING([for $1])
+
+_PKG_CONFIG([$1][_CFLAGS], [cflags], [$2])
+_PKG_CONFIG([$1][_LIBS], [libs], [$2])
+
+m4_define([_PKG_TEXT], [Alternatively, you may set the environment variables $1[]_CFLAGS
+and $1[]_LIBS to avoid the need to call pkg-config.
+See the pkg-config man page for more details.])
+
+if test $pkg_failed = yes; then
+ AC_MSG_RESULT([no])
+ _PKG_SHORT_ERRORS_SUPPORTED
+ if test $_pkg_short_errors_supported = yes; then
+ $1[]_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "$2" 2>&1`
+ else
+ $1[]_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "$2" 2>&1`
+ fi
+ # Put the nasty error message in config.log where it belongs
+ echo "$$1[]_PKG_ERRORS" >&AS_MESSAGE_LOG_FD
+
+ m4_default([$4], [AC_MSG_ERROR(
+[Package requirements ($2) were not met:
+
+$$1_PKG_ERRORS
+
+Consider adjusting the PKG_CONFIG_PATH environment variable if you
+installed software in a non-standard prefix.
+
+_PKG_TEXT])[]dnl
+ ])
+elif test $pkg_failed = untried; then
+ AC_MSG_RESULT([no])
+ m4_default([$4], [AC_MSG_FAILURE(
+[The pkg-config script could not be found or is too old. Make sure it
+is in your PATH or set the PKG_CONFIG environment variable to the full
+path to pkg-config.
+
+_PKG_TEXT
+
+To get pkg-config, see <http://pkg-config.freedesktop.org/>.])[]dnl
+ ])
+else
+ $1[]_CFLAGS=$pkg_cv_[]$1[]_CFLAGS
+ $1[]_LIBS=$pkg_cv_[]$1[]_LIBS
+ AC_MSG_RESULT([yes])
+ $3
+fi[]dnl
+])dnl PKG_CHECK_MODULES
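+dnl
+dnl Illustrative example (not part of upstream pkg.m4; a hypothetical
+dnl configure.ac fragment shown as comments). On success it sets
+dnl UUID_CFLAGS and UUID_LIBS for use in Makefile.am:
+dnl
+dnl   PKG_PROG_PKG_CONFIG
+dnl   PKG_CHECK_MODULES([UUID], [uuid >= 2.0],
+dnl                     [AC_DEFINE([HAVE_UUID], [1], [libuuid is available])],
+dnl                     [AC_MSG_WARN([libuuid not found])])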
+
+
+dnl PKG_CHECK_MODULES_STATIC(VARIABLE-PREFIX, MODULES, [ACTION-IF-FOUND],
+dnl [ACTION-IF-NOT-FOUND])
+dnl ---------------------------------------------------------------------
+dnl Since: 0.29
+dnl
+dnl Checks for existence of MODULES and gathers its build flags with
+dnl static libraries enabled. Sets VARIABLE-PREFIX_CFLAGS from --cflags
+dnl and VARIABLE-PREFIX_LIBS from --libs.
+dnl
+dnl Note that if there is a possibility the first call to
+dnl PKG_CHECK_MODULES_STATIC might not happen, you should be sure to
+dnl include an explicit call to PKG_PROG_PKG_CONFIG in your
+dnl configure.ac.
+AC_DEFUN([PKG_CHECK_MODULES_STATIC],
+[AC_REQUIRE([PKG_PROG_PKG_CONFIG])dnl
+_save_PKG_CONFIG=$PKG_CONFIG
+PKG_CONFIG="$PKG_CONFIG --static"
+PKG_CHECK_MODULES($@)
+PKG_CONFIG=$_save_PKG_CONFIG[]dnl
+])dnl PKG_CHECK_MODULES_STATIC
+
+
+dnl PKG_INSTALLDIR([DIRECTORY])
+dnl -------------------------
+dnl Since: 0.27
+dnl
+dnl Substitutes the variable pkgconfigdir as the location where a module
+dnl should install pkg-config .pc files. By default the directory is
+dnl $libdir/pkgconfig, but the default can be changed by passing
+dnl DIRECTORY. The user can override through the --with-pkgconfigdir
+dnl parameter.
+AC_DEFUN([PKG_INSTALLDIR],
+[m4_pushdef([pkg_default], [m4_default([$1], ['${libdir}/pkgconfig'])])
+m4_pushdef([pkg_description],
+ [pkg-config installation directory @<:@]pkg_default[@:>@])
+AC_ARG_WITH([pkgconfigdir],
+ [AS_HELP_STRING([--with-pkgconfigdir], pkg_description)],,
+ [with_pkgconfigdir=]pkg_default)
+AC_SUBST([pkgconfigdir], [$with_pkgconfigdir])
+m4_popdef([pkg_default])
+m4_popdef([pkg_description])
+])dnl PKG_INSTALLDIR
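+
+dnl Illustrative pairing (the foo.pc filename is hypothetical): call
+dnl   PKG_INSTALLDIR
+dnl in configure.ac, then install the metadata file from Makefile.am with
+dnl   pkgconfig_DATA = foo.pc
+dnl which lands in ${libdir}/pkgconfig unless --with-pkgconfigdir overrides it.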
+
+
+dnl PKG_NOARCH_INSTALLDIR([DIRECTORY])
+dnl --------------------------------
+dnl Since: 0.27
+dnl
+dnl Substitutes the variable noarch_pkgconfigdir as the location where a
+dnl module should install arch-independent pkg-config .pc files. By
+dnl default the directory is $datadir/pkgconfig, but the default can be
+dnl changed by passing DIRECTORY. The user can override through the
+dnl --with-noarch-pkgconfigdir parameter.
+AC_DEFUN([PKG_NOARCH_INSTALLDIR],
+[m4_pushdef([pkg_default], [m4_default([$1], ['${datadir}/pkgconfig'])])
+m4_pushdef([pkg_description],
+ [pkg-config arch-independent installation directory @<:@]pkg_default[@:>@])
+AC_ARG_WITH([noarch-pkgconfigdir],
+ [AS_HELP_STRING([--with-noarch-pkgconfigdir], pkg_description)],,
+ [with_noarch_pkgconfigdir=]pkg_default)
+AC_SUBST([noarch_pkgconfigdir], [$with_noarch_pkgconfigdir])
+m4_popdef([pkg_default])
+m4_popdef([pkg_description])
+])dnl PKG_NOARCH_INSTALLDIR
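+
+dnl The arch-independent analogue of the sketch above: after calling
+dnl PKG_NOARCH_INSTALLDIR, "noarch_pkgconfig_DATA = foo.pc" (foo.pc again
+dnl hypothetical) installs into ${datadir}/pkgconfig by default.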
+
+
+dnl PKG_CHECK_VAR(VARIABLE, MODULE, CONFIG-VARIABLE,
+dnl [ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND])
+dnl -------------------------------------------
+dnl Since: 0.28
+dnl
+dnl Retrieves the value of the pkg-config variable for the given module.
+AC_DEFUN([PKG_CHECK_VAR],
+[AC_REQUIRE([PKG_PROG_PKG_CONFIG])dnl
+AC_ARG_VAR([$1], [value of $3 for $2, overriding pkg-config])dnl
+
+_PKG_CONFIG([$1], [variable="][$3]["], [$2])
+AS_VAR_COPY([$1], [pkg_cv_][$1])
+
+AS_VAR_IF([$1], [""], [$5], [$4])dnl
+])dnl PKG_CHECK_VAR
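+
+dnl A hedged example of the API (bash-completion here is only an
+dnl illustration of querying a module's .pc variable):
+dnl   PKG_CHECK_VAR([BASH_COMPLETION_DIR], [bash-completion], [completionsdir],
+dnl     [AC_MSG_NOTICE([completions directory is $BASH_COMPLETION_DIR])],
+dnl     [AC_MSG_WARN([could not determine completionsdir])])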
+
+# Copyright (C) 2002-2017 Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# AM_AUTOMAKE_VERSION(VERSION)
+# ----------------------------
+# Automake X.Y traces this macro to ensure aclocal.m4 has been
+# generated from the m4 files accompanying Automake X.Y.
+# (This private macro should not be called outside this file.)
+AC_DEFUN([AM_AUTOMAKE_VERSION],
+[am__api_version='1.15'
+dnl Some users find AM_AUTOMAKE_VERSION and mistake it for a way to
+dnl require some minimum version. Point them to the right macro.
+m4_if([$1], [1.15.1], [],
+ [AC_FATAL([Do not call $0, use AM_INIT_AUTOMAKE([$1]).])])dnl
+])
+
+# _AM_AUTOCONF_VERSION(VERSION)
+# -----------------------------
+# aclocal traces this macro to find the Autoconf version.
+# This is a private macro too. Using m4_define simplifies
+# the logic in aclocal, which can simply ignore this definition.
+m4_define([_AM_AUTOCONF_VERSION], [])
+
+# AM_SET_CURRENT_AUTOMAKE_VERSION
+# -------------------------------
+# Call AM_AUTOMAKE_VERSION and _AM_AUTOCONF_VERSION so they can be traced.
+# This function is AC_REQUIREd by AM_INIT_AUTOMAKE.
+AC_DEFUN([AM_SET_CURRENT_AUTOMAKE_VERSION],
+[AM_AUTOMAKE_VERSION([1.15.1])dnl
+m4_ifndef([AC_AUTOCONF_VERSION],
+ [m4_copy([m4_PACKAGE_VERSION], [AC_AUTOCONF_VERSION])])dnl
+_AM_AUTOCONF_VERSION(m4_defn([AC_AUTOCONF_VERSION]))])
+
+# AM_AUX_DIR_EXPAND -*- Autoconf -*-
+
+# Copyright (C) 2001-2017 Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# For projects using AC_CONFIG_AUX_DIR([foo]), Autoconf sets
+# $ac_aux_dir to '$srcdir/foo'. In other projects, it is set to
+# '$srcdir', '$srcdir/..', or '$srcdir/../..'.
+#
+# Of course, Automake must honor this variable whenever it calls a
+# tool from the auxiliary directory. The problem is that $srcdir (and
+# therefore $ac_aux_dir as well) can be either absolute or relative,
+# depending on how configure is run. This is pretty annoying, since
+# it makes $ac_aux_dir quite unusable in subdirectories: in the top
+# source directory, any form will work fine, but in subdirectories a
+# relative path needs to be adjusted first.
+#
+# $ac_aux_dir/missing
+# fails when called from a subdirectory if $ac_aux_dir is relative
+# $top_srcdir/$ac_aux_dir/missing
+# fails if $ac_aux_dir is absolute,
+# fails when called from a subdirectory in a VPATH build with
+# a relative $ac_aux_dir
+#
+# The reason for the latter failure is that $top_srcdir and $ac_aux_dir
+# are both prefixed by $srcdir. In an in-source build this is usually
+# harmless because $srcdir is '.', but things will break when you
+# start a VPATH build or use an absolute $srcdir.
+#
+# So we could use something similar to $top_srcdir/$ac_aux_dir/missing,
+# iff we strip the leading $srcdir from $ac_aux_dir. That would be:
+# am_aux_dir='\$(top_srcdir)/'`expr "$ac_aux_dir" : "$srcdir//*\(.*\)"`
+# and then we would define $MISSING as
+# MISSING="\${SHELL} $am_aux_dir/missing"
+# This will work as long as MISSING is not called from configure, because
+# unfortunately $(top_srcdir) has no meaning in configure.
+# However there are other variables, like CC, which are often used in
+# configure, and could therefore not use this "fixed" $ac_aux_dir.
+#
+# Another solution, used here, is to always expand $ac_aux_dir to an
+# absolute PATH. The drawback is that using absolute paths prevents a
+# configured tree from being moved without reconfiguration.
+
+AC_DEFUN([AM_AUX_DIR_EXPAND],
+[AC_REQUIRE([AC_CONFIG_AUX_DIR_DEFAULT])dnl
+# Expand $ac_aux_dir to an absolute path.
+am_aux_dir=`cd "$ac_aux_dir" && pwd`
+])
+
+# AM_CONDITIONAL -*- Autoconf -*-
+
+# Copyright (C) 1997-2017 Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# AM_CONDITIONAL(NAME, SHELL-CONDITION)
+# -------------------------------------
+# Define a conditional.
+AC_DEFUN([AM_CONDITIONAL],
+[AC_PREREQ([2.52])dnl
+ m4_if([$1], [TRUE], [AC_FATAL([$0: invalid condition: $1])],
+ [$1], [FALSE], [AC_FATAL([$0: invalid condition: $1])])dnl
+AC_SUBST([$1_TRUE])dnl
+AC_SUBST([$1_FALSE])dnl
+_AM_SUBST_NOTMAKE([$1_TRUE])dnl
+_AM_SUBST_NOTMAKE([$1_FALSE])dnl
+m4_define([_AM_COND_VALUE_$1], [$2])dnl
+if $2; then
+ $1_TRUE=
+ $1_FALSE='#'
+else
+ $1_TRUE='#'
+ $1_FALSE=
+fi
+AC_CONFIG_COMMANDS_PRE(
+[if test -z "${$1_TRUE}" && test -z "${$1_FALSE}"; then
+ AC_MSG_ERROR([[conditional "$1" was never defined.
+Usually this means the macro was only invoked conditionally.]])
+fi])])
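+
+# Hypothetical usage sketch (ENABLE_FOO and enable_foo are invented names):
+#   AM_CONDITIONAL([ENABLE_FOO], [test "x$enable_foo" = xyes])
+# then in Makefile.am:
+#   if ENABLE_FOO
+#   bin_PROGRAMS += foo
+#   endif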
+
+# Copyright (C) 1999-2017 Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+
+# There are a few dirty hacks below to avoid letting 'AC_PROG_CC' be
+# written out literally, in which case automake, when reading aclocal.m4,
+# will think it sees a *use*, and therefore will trigger all its
+# C support machinery. Also note that it means that autoscan, seeing
+# CC etc. in the Makefile, will ask for an AC_PROG_CC use...
+
+
+# _AM_DEPENDENCIES(NAME)
+# ----------------------
+# See how the compiler implements dependency checking.
+# NAME is "CC", "CXX", "OBJC", "OBJCXX", "UPC", or "GCJ".
+# We try a few techniques and use the results to set a single cache variable.
+#
+# We don't AC_REQUIRE the corresponding AC_PROG_CC since the latter was
+# modified to invoke _AM_DEPENDENCIES(CC); we would have a circular
+# dependency, and given that the user is not expected to run this macro,
+# just rely on AC_PROG_CC.
+AC_DEFUN([_AM_DEPENDENCIES],
+[AC_REQUIRE([AM_SET_DEPDIR])dnl
+AC_REQUIRE([AM_OUTPUT_DEPENDENCY_COMMANDS])dnl
+AC_REQUIRE([AM_MAKE_INCLUDE])dnl
+AC_REQUIRE([AM_DEP_TRACK])dnl
+
+m4_if([$1], [CC], [depcc="$CC" am_compiler_list=],
+ [$1], [CXX], [depcc="$CXX" am_compiler_list=],
+ [$1], [OBJC], [depcc="$OBJC" am_compiler_list='gcc3 gcc'],
+ [$1], [OBJCXX], [depcc="$OBJCXX" am_compiler_list='gcc3 gcc'],
+ [$1], [UPC], [depcc="$UPC" am_compiler_list=],
+ [$1], [GCJ], [depcc="$GCJ" am_compiler_list='gcc3 gcc'],
+ [depcc="$$1" am_compiler_list=])
+
+AC_CACHE_CHECK([dependency style of $depcc],
+ [am_cv_$1_dependencies_compiler_type],
+[if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then
+ # We make a subdir and do the tests there. Otherwise we can end up
+ # making bogus files that we don't know about and never remove. For
+ # instance it was reported that on HP-UX the gcc test will end up
+ # making a dummy file named 'D' -- because '-MD' means "put the output
+ # in D".
+ rm -rf conftest.dir
+ mkdir conftest.dir
+ # Copy depcomp to subdir because otherwise we won't find it if we're
+ # using a relative directory.
+ cp "$am_depcomp" conftest.dir
+ cd conftest.dir
+ # We will build objects and dependencies in a subdirectory because
+ # it helps to detect inapplicable dependency modes. For instance
+ # both Tru64's cc and ICC support -MD to output dependencies as a
+ # side effect of compilation, but ICC will put the dependencies in
+ # the current directory while Tru64 will put them in the object
+ # directory.
+ mkdir sub
+
+ am_cv_$1_dependencies_compiler_type=none
+ if test "$am_compiler_list" = ""; then
+ am_compiler_list=`sed -n ['s/^#*\([a-zA-Z0-9]*\))$/\1/p'] < ./depcomp`
+ fi
+ am__universal=false
+ m4_case([$1], [CC],
+ [case " $depcc " in #(
+ *\ -arch\ *\ -arch\ *) am__universal=true ;;
+ esac],
+ [CXX],
+ [case " $depcc " in #(
+ *\ -arch\ *\ -arch\ *) am__universal=true ;;
+ esac])
+
+ for depmode in $am_compiler_list; do
+    # Set up a source with many dependencies, because some compilers
+ # like to wrap large dependency lists on column 80 (with \), and
+ # we should not choose a depcomp mode which is confused by this.
+ #
+ # We need to recreate these files for each test, as the compiler may
+ # overwrite some of them when testing with obscure command lines.
+ # This happens at least with the AIX C compiler.
+ : > sub/conftest.c
+ for i in 1 2 3 4 5 6; do
+ echo '#include "conftst'$i'.h"' >> sub/conftest.c
+ # Using ": > sub/conftst$i.h" creates only sub/conftst1.h with
+ # Solaris 10 /bin/sh.
+ echo '/* dummy */' > sub/conftst$i.h
+ done
+ echo "${am__include} ${am__quote}sub/conftest.Po${am__quote}" > confmf
+
+ # We check with '-c' and '-o' for the sake of the "dashmstdout"
+ # mode. It turns out that the SunPro C++ compiler does not properly
+ # handle '-M -o', and we need to detect this. Also, some Intel
+ # versions had trouble with output in subdirs.
+ am__obj=sub/conftest.${OBJEXT-o}
+ am__minus_obj="-o $am__obj"
+ case $depmode in
+ gcc)
+ # This depmode causes a compiler race in universal mode.
+ test "$am__universal" = false || continue
+ ;;
+ nosideeffect)
+ # After this tag, mechanisms are not by side-effect, so they'll
+ # only be used when explicitly requested.
+ if test "x$enable_dependency_tracking" = xyes; then
+ continue
+ else
+ break
+ fi
+ ;;
+ msvc7 | msvc7msys | msvisualcpp | msvcmsys)
+ # This compiler won't grok '-c -o', but also, the minuso test has
+ # not run yet. These depmodes are late enough in the game, and
+ # so weak that their functioning should not be impacted.
+ am__obj=conftest.${OBJEXT-o}
+ am__minus_obj=
+ ;;
+ none) break ;;
+ esac
+ if depmode=$depmode \
+ source=sub/conftest.c object=$am__obj \
+ depfile=sub/conftest.Po tmpdepfile=sub/conftest.TPo \
+ $SHELL ./depcomp $depcc -c $am__minus_obj sub/conftest.c \
+ >/dev/null 2>conftest.err &&
+ grep sub/conftst1.h sub/conftest.Po > /dev/null 2>&1 &&
+ grep sub/conftst6.h sub/conftest.Po > /dev/null 2>&1 &&
+ grep $am__obj sub/conftest.Po > /dev/null 2>&1 &&
+ ${MAKE-make} -s -f confmf > /dev/null 2>&1; then
+ # icc doesn't choke on unknown options, it will just issue warnings
+ # or remarks (even with -Werror). So we grep stderr for any message
+ # that says an option was ignored or not supported.
+ # When given -MP, icc 7.0 and 7.1 complain thusly:
+ # icc: Command line warning: ignoring option '-M'; no argument required
+ # The diagnosis changed in icc 8.0:
+ # icc: Command line remark: option '-MP' not supported
+ if (grep 'ignoring option' conftest.err ||
+ grep 'not supported' conftest.err) >/dev/null 2>&1; then :; else
+ am_cv_$1_dependencies_compiler_type=$depmode
+ break
+ fi
+ fi
+ done
+
+ cd ..
+ rm -rf conftest.dir
+else
+ am_cv_$1_dependencies_compiler_type=none
+fi
+])
+AC_SUBST([$1DEPMODE], [depmode=$am_cv_$1_dependencies_compiler_type])
+AM_CONDITIONAL([am__fastdep$1], [
+ test "x$enable_dependency_tracking" != xno \
+ && test "$am_cv_$1_dependencies_compiler_type" = gcc3])
+])
+
+
+# AM_SET_DEPDIR
+# -------------
+# Choose a directory name for dependency files.
+# This macro is AC_REQUIREd in _AM_DEPENDENCIES.
+AC_DEFUN([AM_SET_DEPDIR],
+[AC_REQUIRE([AM_SET_LEADING_DOT])dnl
+AC_SUBST([DEPDIR], ["${am__leading_dot}deps"])dnl
+])
+
+
+# AM_DEP_TRACK
+# ------------
+AC_DEFUN([AM_DEP_TRACK],
+[AC_ARG_ENABLE([dependency-tracking], [dnl
+AS_HELP_STRING(
+ [--enable-dependency-tracking],
+ [do not reject slow dependency extractors])
+AS_HELP_STRING(
+ [--disable-dependency-tracking],
+ [speeds up one-time build])])
+if test "x$enable_dependency_tracking" != xno; then
+ am_depcomp="$ac_aux_dir/depcomp"
+ AMDEPBACKSLASH='\'
+ am__nodep='_no'
+fi
+AM_CONDITIONAL([AMDEP], [test "x$enable_dependency_tracking" != xno])
+AC_SUBST([AMDEPBACKSLASH])dnl
+_AM_SUBST_NOTMAKE([AMDEPBACKSLASH])dnl
+AC_SUBST([am__nodep])dnl
+_AM_SUBST_NOTMAKE([am__nodep])dnl
+])
+
+# Generate code to set up dependency tracking. -*- Autoconf -*-
+
+# Copyright (C) 1999-2017 Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+
+# _AM_OUTPUT_DEPENDENCY_COMMANDS
+# ------------------------------
+AC_DEFUN([_AM_OUTPUT_DEPENDENCY_COMMANDS],
+[{
+ # Older Autoconf quotes --file arguments for eval, but not when files
+ # are listed without --file. Let's play safe and only enable the eval
+ # if we detect the quoting.
+ case $CONFIG_FILES in
+ *\'*) eval set x "$CONFIG_FILES" ;;
+ *) set x $CONFIG_FILES ;;
+ esac
+ shift
+ for mf
+ do
+ # Strip MF so we end up with the name of the file.
+ mf=`echo "$mf" | sed -e 's/:.*$//'`
+ # Check whether this is an Automake generated Makefile or not.
+ # We used to match only the files named 'Makefile.in', but
+ # some people rename them; so instead we look at the file content.
+ # Grep'ing the first line is not enough: some people post-process
+ # each Makefile.in and add a new line on top of each file to say so.
+ # Grep'ing the whole file is not good either: AIX grep has a line
+    # limit of 2048, but every sed we know of handles at least 4000.
+ if sed -n 's,^#.*generated by automake.*,X,p' "$mf" | grep X >/dev/null 2>&1; then
+ dirpart=`AS_DIRNAME("$mf")`
+ else
+ continue
+ fi
+ # Extract the definition of DEPDIR, am__include, and am__quote
+ # from the Makefile without running 'make'.
+ DEPDIR=`sed -n 's/^DEPDIR = //p' < "$mf"`
+ test -z "$DEPDIR" && continue
+ am__include=`sed -n 's/^am__include = //p' < "$mf"`
+ test -z "$am__include" && continue
+ am__quote=`sed -n 's/^am__quote = //p' < "$mf"`
+ # Find all dependency output files, they are included files with
+ # $(DEPDIR) in their names. We invoke sed twice because it is the
+ # simplest approach to changing $(DEPDIR) to its actual value in the
+ # expansion.
+ for file in `sed -n "
+ s/^$am__include $am__quote\(.*(DEPDIR).*\)$am__quote"'$/\1/p' <"$mf" | \
+ sed -e 's/\$(DEPDIR)/'"$DEPDIR"'/g'`; do
+ # Make sure the directory exists.
+ test -f "$dirpart/$file" && continue
+ fdir=`AS_DIRNAME(["$file"])`
+ AS_MKDIR_P([$dirpart/$fdir])
+ # echo "creating $dirpart/$file"
+ echo '# dummy' > "$dirpart/$file"
+ done
+ done
+}
+])# _AM_OUTPUT_DEPENDENCY_COMMANDS
+
+
+# AM_OUTPUT_DEPENDENCY_COMMANDS
+# -----------------------------
+# This macro should only be invoked once -- use via AC_REQUIRE.
+#
+# This code is only required when automatic dependency tracking
+# is enabled. FIXME. This creates each '.P' file that we will
+# need in order to bootstrap the dependency handling code.
+AC_DEFUN([AM_OUTPUT_DEPENDENCY_COMMANDS],
+[AC_CONFIG_COMMANDS([depfiles],
+ [test x"$AMDEP_TRUE" != x"" || _AM_OUTPUT_DEPENDENCY_COMMANDS],
+ [AMDEP_TRUE="$AMDEP_TRUE" ac_aux_dir="$ac_aux_dir"])
+])
+
+# Do all the work for Automake. -*- Autoconf -*-
+
+# Copyright (C) 1996-2017 Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This macro actually does too much. Some checks are only needed if
+# your package does certain things. But this isn't really a big deal.
+
+dnl Redefine AC_PROG_CC to automatically invoke _AM_PROG_CC_C_O.
+m4_define([AC_PROG_CC],
+m4_defn([AC_PROG_CC])
+[_AM_PROG_CC_C_O
+])
+
+# AM_INIT_AUTOMAKE(PACKAGE, VERSION, [NO-DEFINE])
+# AM_INIT_AUTOMAKE([OPTIONS])
+# -----------------------------------------------
+# The call with PACKAGE and VERSION arguments is the old style
+# call (pre autoconf-2.50), which is being phased out. PACKAGE
+# and VERSION should now be passed to AC_INIT and removed from
+# the call to AM_INIT_AUTOMAKE.
+# We support both call styles for the transition. After
+# the next Automake release, Autoconf can make the AC_INIT
+# arguments mandatory, and then we can depend on a new Autoconf
+# release and drop the old call support.
+AC_DEFUN([AM_INIT_AUTOMAKE],
+[AC_PREREQ([2.65])dnl
+dnl Autoconf wants to disallow AM_ names. We explicitly allow
+dnl the ones we care about.
+m4_pattern_allow([^AM_[A-Z]+FLAGS$])dnl
+AC_REQUIRE([AM_SET_CURRENT_AUTOMAKE_VERSION])dnl
+AC_REQUIRE([AC_PROG_INSTALL])dnl
+if test "`cd $srcdir && pwd`" != "`pwd`"; then
+ # Use -I$(srcdir) only when $(srcdir) != ., so that make's output
+ # is not polluted with repeated "-I."
+ AC_SUBST([am__isrc], [' -I$(srcdir)'])_AM_SUBST_NOTMAKE([am__isrc])dnl
+ # test to see if srcdir already configured
+ if test -f $srcdir/config.status; then
+ AC_MSG_ERROR([source directory already configured; run "make distclean" there first])
+ fi
+fi
+
+# test whether we have cygpath
+if test -z "$CYGPATH_W"; then
+ if (cygpath --version) >/dev/null 2>/dev/null; then
+ CYGPATH_W='cygpath -w'
+ else
+ CYGPATH_W=echo
+ fi
+fi
+AC_SUBST([CYGPATH_W])
+
+# Define the identity of the package.
+dnl Distinguish between old-style and new-style calls.
+m4_ifval([$2],
+[AC_DIAGNOSE([obsolete],
+ [$0: two- and three-arguments forms are deprecated.])
+m4_ifval([$3], [_AM_SET_OPTION([no-define])])dnl
+ AC_SUBST([PACKAGE], [$1])dnl
+ AC_SUBST([VERSION], [$2])],
+[_AM_SET_OPTIONS([$1])dnl
+dnl Diagnose old-style AC_INIT with new-style AM_INIT_AUTOMAKE.
+m4_if(
+ m4_ifdef([AC_PACKAGE_NAME], [ok]):m4_ifdef([AC_PACKAGE_VERSION], [ok]),
+ [ok:ok],,
+ [m4_fatal([AC_INIT should be called with package and version arguments])])dnl
+ AC_SUBST([PACKAGE], ['AC_PACKAGE_TARNAME'])dnl
+ AC_SUBST([VERSION], ['AC_PACKAGE_VERSION'])])dnl
+
+_AM_IF_OPTION([no-define],,
+[AC_DEFINE_UNQUOTED([PACKAGE], ["$PACKAGE"], [Name of package])
+ AC_DEFINE_UNQUOTED([VERSION], ["$VERSION"], [Version number of package])])dnl
+
+# Some tools Automake needs.
+AC_REQUIRE([AM_SANITY_CHECK])dnl
+AC_REQUIRE([AC_ARG_PROGRAM])dnl
+AM_MISSING_PROG([ACLOCAL], [aclocal-${am__api_version}])
+AM_MISSING_PROG([AUTOCONF], [autoconf])
+AM_MISSING_PROG([AUTOMAKE], [automake-${am__api_version}])
+AM_MISSING_PROG([AUTOHEADER], [autoheader])
+AM_MISSING_PROG([MAKEINFO], [makeinfo])
+AC_REQUIRE([AM_PROG_INSTALL_SH])dnl
+AC_REQUIRE([AM_PROG_INSTALL_STRIP])dnl
+AC_REQUIRE([AC_PROG_MKDIR_P])dnl
+# For better backward compatibility. To be removed once Automake 1.9.x
+# dies out for good. For more background, see:
+# <http://lists.gnu.org/archive/html/automake/2012-07/msg00001.html>
+# <http://lists.gnu.org/archive/html/automake/2012-07/msg00014.html>
+AC_SUBST([mkdir_p], ['$(MKDIR_P)'])
+# We need awk for the "check" target (and possibly the TAP driver). The
+# system "awk" is bad on some platforms.
+AC_REQUIRE([AC_PROG_AWK])dnl
+AC_REQUIRE([AC_PROG_MAKE_SET])dnl
+AC_REQUIRE([AM_SET_LEADING_DOT])dnl
+_AM_IF_OPTION([tar-ustar], [_AM_PROG_TAR([ustar])],
+ [_AM_IF_OPTION([tar-pax], [_AM_PROG_TAR([pax])],
+ [_AM_PROG_TAR([v7])])])
+_AM_IF_OPTION([no-dependencies],,
+[AC_PROVIDE_IFELSE([AC_PROG_CC],
+ [_AM_DEPENDENCIES([CC])],
+ [m4_define([AC_PROG_CC],
+ m4_defn([AC_PROG_CC])[_AM_DEPENDENCIES([CC])])])dnl
+AC_PROVIDE_IFELSE([AC_PROG_CXX],
+ [_AM_DEPENDENCIES([CXX])],
+ [m4_define([AC_PROG_CXX],
+ m4_defn([AC_PROG_CXX])[_AM_DEPENDENCIES([CXX])])])dnl
+AC_PROVIDE_IFELSE([AC_PROG_OBJC],
+ [_AM_DEPENDENCIES([OBJC])],
+ [m4_define([AC_PROG_OBJC],
+ m4_defn([AC_PROG_OBJC])[_AM_DEPENDENCIES([OBJC])])])dnl
+AC_PROVIDE_IFELSE([AC_PROG_OBJCXX],
+ [_AM_DEPENDENCIES([OBJCXX])],
+ [m4_define([AC_PROG_OBJCXX],
+ m4_defn([AC_PROG_OBJCXX])[_AM_DEPENDENCIES([OBJCXX])])])dnl
+])
+AC_REQUIRE([AM_SILENT_RULES])dnl
+dnl The testsuite driver may need to know about EXEEXT, so add the
+dnl 'am__EXEEXT' conditional if _AM_COMPILER_EXEEXT was seen. This
+dnl macro is hooked onto _AC_COMPILER_EXEEXT early, see below.
+AC_CONFIG_COMMANDS_PRE(dnl
+[m4_provide_if([_AM_COMPILER_EXEEXT],
+ [AM_CONDITIONAL([am__EXEEXT], [test -n "$EXEEXT"])])])dnl
+
+# POSIX will say in a future version that running "rm -f" with no argument
+# is OK; and we want to be able to make that assumption in our Makefile
+# recipes. So use an aggressive probe to check that the usage we want is
+# actually supported "in the wild" to an acceptable degree.
+# See automake bug#10828.
+# To make any issue more visible, cause the running configure to be aborted
+# by default if the 'rm' program in use doesn't match our expectations; the
+# user can still override this though.
+if rm -f && rm -fr && rm -rf; then : OK; else
+ cat >&2 <<'END'
+Oops!
+
+Your 'rm' program seems unable to run without file operands specified
+on the command line, even when the '-f' option is present. This is contrary
+to the behaviour of most rm programs out there, and not conforming with
+the upcoming POSIX standard: <http://austingroupbugs.net/view.php?id=542>
+
+Please tell bug-automake@gnu.org about your system, including the value
+of your $PATH and any error possibly output before this message. This
+can help us improve future automake versions.
+
+END
+ if test x"$ACCEPT_INFERIOR_RM_PROGRAM" = x"yes"; then
+ echo 'Configuration will proceed anyway, since you have set the' >&2
+ echo 'ACCEPT_INFERIOR_RM_PROGRAM variable to "yes"' >&2
+ echo >&2
+ else
+ cat >&2 <<'END'
+Aborting the configuration process, to ensure you take notice of the issue.
+
+You can download and install GNU coreutils to get an 'rm' implementation
+that behaves properly: <http://www.gnu.org/software/coreutils/>.
+
+If you want to complete the configuration process using your problematic
+'rm' anyway, export the environment variable ACCEPT_INFERIOR_RM_PROGRAM
+to "yes", and re-run configure.
+
+END
+ AC_MSG_ERROR([Your 'rm' program is bad, sorry.])
+ fi
+fi
+dnl The trailing newline in this macro's definition is deliberate, for
+dnl backward compatibility and to allow trailing 'dnl'-style comments
+dnl after the AM_INIT_AUTOMAKE invocation. See automake bug#16841.
+])
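+
+# New-style invocation sketch (the project name, version and address are
+# placeholders):
+#   AC_INIT([myproject], [1.0], [bugs@example.org])
+#   AM_INIT_AUTOMAKE([foreign subdir-objects])
+# Passing PACKAGE/VERSION to AM_INIT_AUTOMAKE itself is the deprecated
+# two-/three-argument form diagnosed above.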
+
+dnl Hook into '_AC_COMPILER_EXEEXT' early to learn its expansion. Do not
+dnl add the conditional right here, as _AC_COMPILER_EXEEXT may be further
+dnl mangled by Autoconf and run in a shell conditional statement.
+m4_define([_AC_COMPILER_EXEEXT],
+m4_defn([_AC_COMPILER_EXEEXT])[m4_provide([_AM_COMPILER_EXEEXT])])
+
+# When config.status generates a header, we must update the stamp-h file.
+# This file resides in the same directory as the config header
+# that is generated. The stamp files are numbered to have different names.
+
+# Autoconf calls _AC_AM_CONFIG_HEADER_HOOK (when defined) in the
+# loop where config.status creates the headers, so we can generate
+# our stamp files there.
+AC_DEFUN([_AC_AM_CONFIG_HEADER_HOOK],
+[# Compute $1's index in $config_headers.
+_am_arg=$1
+_am_stamp_count=1
+for _am_header in $config_headers :; do
+ case $_am_header in
+ $_am_arg | $_am_arg:* )
+ break ;;
+ * )
+ _am_stamp_count=`expr $_am_stamp_count + 1` ;;
+ esac
+done
+echo "timestamp for $_am_arg" >`AS_DIRNAME(["$_am_arg"])`/stamp-h[]$_am_stamp_count])
+
+# Copyright (C) 2001-2017 Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# AM_PROG_INSTALL_SH
+# ------------------
+# Define $install_sh.
+AC_DEFUN([AM_PROG_INSTALL_SH],
+[AC_REQUIRE([AM_AUX_DIR_EXPAND])dnl
+if test x"${install_sh+set}" != xset; then
+ case $am_aux_dir in
+  *\ * | *\	*)
+ install_sh="\${SHELL} '$am_aux_dir/install-sh'" ;;
+ *)
+ install_sh="\${SHELL} $am_aux_dir/install-sh"
+ esac
+fi
+AC_SUBST([install_sh])])
+
+# Copyright (C) 2003-2017 Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# Check whether the underlying file-system supports filenames
+# with a leading dot. For instance MS-DOS doesn't.
+AC_DEFUN([AM_SET_LEADING_DOT],
+[rm -rf .tst 2>/dev/null
+mkdir .tst 2>/dev/null
+if test -d .tst; then
+ am__leading_dot=.
+else
+ am__leading_dot=_
+fi
+rmdir .tst 2>/dev/null
+AC_SUBST([am__leading_dot])])
+
+# Add --enable-maintainer-mode option to configure. -*- Autoconf -*-
+# From Jim Meyering
+
+# Copyright (C) 1996-2017 Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# AM_MAINTAINER_MODE([DEFAULT-MODE])
+# ----------------------------------
+# Control maintainer-specific portions of Makefiles.
+# Default is to disable them, unless 'enable' is passed literally.
+# For symmetry, 'disable' may be passed as well. Anyway, the user
+# can override the default with the --enable/--disable switch.
+AC_DEFUN([AM_MAINTAINER_MODE],
+[m4_case(m4_default([$1], [disable]),
+ [enable], [m4_define([am_maintainer_other], [disable])],
+ [disable], [m4_define([am_maintainer_other], [enable])],
+ [m4_define([am_maintainer_other], [enable])
+ m4_warn([syntax], [unexpected argument to AM@&t@_MAINTAINER_MODE: $1])])
+AC_MSG_CHECKING([whether to enable maintainer-specific portions of Makefiles])
+ dnl maintainer-mode's default is 'disable' unless 'enable' is passed
+ AC_ARG_ENABLE([maintainer-mode],
+ [AS_HELP_STRING([--]am_maintainer_other[-maintainer-mode],
+ am_maintainer_other[ make rules and dependencies not useful
+ (and sometimes confusing) to the casual installer])],
+ [USE_MAINTAINER_MODE=$enableval],
+ [USE_MAINTAINER_MODE=]m4_if(am_maintainer_other, [enable], [no], [yes]))
+ AC_MSG_RESULT([$USE_MAINTAINER_MODE])
+ AM_CONDITIONAL([MAINTAINER_MODE], [test $USE_MAINTAINER_MODE = yes])
+ MAINT=$MAINTAINER_MODE_TRUE
+ AC_SUBST([MAINT])dnl
+]
+)
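+
+# Usage sketch: with no argument, maintainer-only rebuild rules default to
+# off; AM_MAINTAINER_MODE([enable]) flips the default so they run unless
+# the user passes --disable-maintainer-mode to configure.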
+
+# Check to see how 'make' treats includes. -*- Autoconf -*-
+
+# Copyright (C) 2001-2017 Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# AM_MAKE_INCLUDE()
+# -----------------
+# Check to see how make treats includes.
+AC_DEFUN([AM_MAKE_INCLUDE],
+[am_make=${MAKE-make}
+cat > confinc << 'END'
+am__doit:
+ @echo this is the am__doit target
+.PHONY: am__doit
+END
+# If we don't find an include directive, just comment out the code.
+AC_MSG_CHECKING([for style of include used by $am_make])
+am__include="#"
+am__quote=
+_am_result=none
+# First try GNU make style include.
+echo "include confinc" > confmf
+# Ignore all kinds of additional output from 'make'.
+case `$am_make -s -f confmf 2> /dev/null` in #(
+*the\ am__doit\ target*)
+ am__include=include
+ am__quote=
+ _am_result=GNU
+ ;;
+esac
+# Now try BSD make style include.
+if test "$am__include" = "#"; then
+ echo '.include "confinc"' > confmf
+ case `$am_make -s -f confmf 2> /dev/null` in #(
+ *the\ am__doit\ target*)
+ am__include=.include
+ am__quote="\""
+ _am_result=BSD
+ ;;
+ esac
+fi
+AC_SUBST([am__include])
+AC_SUBST([am__quote])
+AC_MSG_RESULT([$_am_result])
+rm -f confinc confmf
+])
+
+# Fake the existence of programs that GNU maintainers use. -*- Autoconf -*-
+
+# Copyright (C) 1997-2017 Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# AM_MISSING_PROG(NAME, PROGRAM)
+# ------------------------------
+AC_DEFUN([AM_MISSING_PROG],
+[AC_REQUIRE([AM_MISSING_HAS_RUN])
+$1=${$1-"${am_missing_run}$2"}
+AC_SUBST($1)])
+
+# AM_MISSING_HAS_RUN
+# ------------------
+# Define MISSING if not defined so far and test if it is modern enough.
+# If it is, set am_missing_run to use it, otherwise, to nothing.
+AC_DEFUN([AM_MISSING_HAS_RUN],
+[AC_REQUIRE([AM_AUX_DIR_EXPAND])dnl
+AC_REQUIRE_AUX_FILE([missing])dnl
+if test x"${MISSING+set}" != xset; then
+ case $am_aux_dir in
+  *\ * | *\	*)
+ MISSING="\${SHELL} \"$am_aux_dir/missing\"" ;;
+ *)
+ MISSING="\${SHELL} $am_aux_dir/missing" ;;
+ esac
+fi
+# Use eval to expand $SHELL
+if eval "$MISSING --is-lightweight"; then
+ am_missing_run="$MISSING "
+else
+ am_missing_run=
+ AC_MSG_WARN(['missing' script is too old or missing])
+fi
+])
+
+# Helper functions for option handling. -*- Autoconf -*-
+
+# Copyright (C) 2001-2017 Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# _AM_MANGLE_OPTION(NAME)
+# -----------------------
+AC_DEFUN([_AM_MANGLE_OPTION],
+[[_AM_OPTION_]m4_bpatsubst($1, [[^a-zA-Z0-9_]], [_])])
+
+# _AM_SET_OPTION(NAME)
+# --------------------
+# Set option NAME. Presently that only means defining a flag for this option.
+AC_DEFUN([_AM_SET_OPTION],
+[m4_define(_AM_MANGLE_OPTION([$1]), [1])])
+
+# _AM_SET_OPTIONS(OPTIONS)
+# ------------------------
+# OPTIONS is a space-separated list of Automake options.
+AC_DEFUN([_AM_SET_OPTIONS],
+[m4_foreach_w([_AM_Option], [$1], [_AM_SET_OPTION(_AM_Option)])])
+
+# _AM_IF_OPTION(OPTION, IF-SET, [IF-NOT-SET])
+# -------------------------------------------
+# Execute IF-SET if OPTION is set, IF-NOT-SET otherwise.
+AC_DEFUN([_AM_IF_OPTION],
+[m4_ifset(_AM_MANGLE_OPTION([$1]), [$2], [$3])])
+
+# Copyright (C) 1999-2017 Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# _AM_PROG_CC_C_O
+# ---------------
+# Like AC_PROG_CC_C_O, but changed for automake. We rewrite AC_PROG_CC
+# to automatically call this.
+AC_DEFUN([_AM_PROG_CC_C_O],
+[AC_REQUIRE([AM_AUX_DIR_EXPAND])dnl
+AC_REQUIRE_AUX_FILE([compile])dnl
+AC_LANG_PUSH([C])dnl
+AC_CACHE_CHECK(
+ [whether $CC understands -c and -o together],
+ [am_cv_prog_cc_c_o],
+ [AC_LANG_CONFTEST([AC_LANG_PROGRAM([])])
+ # Make sure it works both with $CC and with simple cc.
+ # Following AC_PROG_CC_C_O, we do the test twice because some
+ # compilers refuse to overwrite an existing .o file with -o,
+ # though they will create one.
+ am_cv_prog_cc_c_o=yes
+ for am_i in 1 2; do
+ if AM_RUN_LOG([$CC -c conftest.$ac_ext -o conftest2.$ac_objext]) \
+ && test -f conftest2.$ac_objext; then
+ : OK
+ else
+ am_cv_prog_cc_c_o=no
+ break
+ fi
+ done
+ rm -f core conftest*
+ unset am_i])
+if test "$am_cv_prog_cc_c_o" != yes; then
+ # Losing compiler, so override with the script.
+ # FIXME: It is wrong to rewrite CC.
+ # But if we don't then we get into trouble of one sort or another.
+ # A longer-term fix would be to have automake use am__CC in this case,
+ # and then we could set am__CC="\$(top_srcdir)/compile \$(CC)"
+ CC="$am_aux_dir/compile $CC"
+fi
+AC_LANG_POP([C])])
+
+# For backward compatibility.
+AC_DEFUN_ONCE([AM_PROG_CC_C_O], [AC_REQUIRE([AC_PROG_CC])])
+
+# Copyright (C) 2001-2017 Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# AM_RUN_LOG(COMMAND)
+# -------------------
+# Run COMMAND, save the exit status in ac_status, and log it.
+# (This has been adapted from Autoconf's _AC_RUN_LOG macro.)
+AC_DEFUN([AM_RUN_LOG],
+[{ echo "$as_me:$LINENO: $1" >&AS_MESSAGE_LOG_FD
+ ($1) >&AS_MESSAGE_LOG_FD 2>&AS_MESSAGE_LOG_FD
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&AS_MESSAGE_LOG_FD
+ (exit $ac_status); }])
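+
+# Illustrative call (this is how _AM_PROG_TAR below uses it):
+#   AM_RUN_LOG([$_am_tar --version])
+# runs the command, logs it and its status to config.log, and propagates
+# the command's exit status to the caller.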
+
+# Check to make sure that the build environment is sane. -*- Autoconf -*-
+
+# Copyright (C) 1996-2017 Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# AM_SANITY_CHECK
+# ---------------
+AC_DEFUN([AM_SANITY_CHECK],
+[AC_MSG_CHECKING([whether build environment is sane])
+# Reject unsafe characters in $srcdir or the absolute working directory
+# name. Accept space and tab only in the latter.
+am_lf='
+'
+case `pwd` in
+ *[[\\\"\#\$\&\'\`$am_lf]]*)
+ AC_MSG_ERROR([unsafe absolute working directory name]);;
+esac
+case $srcdir in
+  *[[\\\"\#\$\&\'\`$am_lf\ \	]]*)
+ AC_MSG_ERROR([unsafe srcdir value: '$srcdir']);;
+esac
+
+# Do 'set' in a subshell so we don't clobber the current shell's
+# arguments. Must try -L first in case configure is actually a
+# symlink; some systems play weird games with the mod time of symlinks
+# (eg FreeBSD returns the mod time of the symlink's containing
+# directory).
+if (
+ am_has_slept=no
+ for am_try in 1 2; do
+ echo "timestamp, slept: $am_has_slept" > conftest.file
+ set X `ls -Lt "$srcdir/configure" conftest.file 2> /dev/null`
+ if test "$[*]" = "X"; then
+ # -L didn't work.
+ set X `ls -t "$srcdir/configure" conftest.file`
+ fi
+ if test "$[*]" != "X $srcdir/configure conftest.file" \
+ && test "$[*]" != "X conftest.file $srcdir/configure"; then
+
+ # If neither matched, then we have a broken ls. This can happen
+ # if, for instance, CONFIG_SHELL is bash and it inherits a
+ # broken ls alias from the environment. This has actually
+ # happened. Such a system could not be considered "sane".
+ AC_MSG_ERROR([ls -t appears to fail. Make sure there is not a broken
+ alias in your environment])
+ fi
+ if test "$[2]" = conftest.file || test $am_try -eq 2; then
+ break
+ fi
+ # Just in case.
+ sleep 1
+ am_has_slept=yes
+ done
+ test "$[2]" = conftest.file
+ )
+then
+ # Ok.
+ :
+else
+ AC_MSG_ERROR([newly created file is older than distributed files!
+Check your system clock])
+fi
+AC_MSG_RESULT([yes])
+# If we didn't sleep, we still need to ensure time stamps of config.status and
+# generated files are strictly newer.
+am_sleep_pid=
+if grep 'slept: no' conftest.file >/dev/null 2>&1; then
+ ( sleep 1 ) &
+ am_sleep_pid=$!
+fi
+AC_CONFIG_COMMANDS_PRE(
+ [AC_MSG_CHECKING([that generated files are newer than configure])
+ if test -n "$am_sleep_pid"; then
+ # Hide warnings about reused PIDs.
+ wait $am_sleep_pid 2>/dev/null
+ fi
+ AC_MSG_RESULT([done])])
+rm -f conftest.file
+])
+
+# Copyright (C) 2009-2017 Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# AM_SILENT_RULES([DEFAULT])
+# --------------------------
+# Enable less verbose build rules, with the default set to DEFAULT
+# ("yes" being less verbose, "no" or empty being verbose).
+AC_DEFUN([AM_SILENT_RULES],
+[AC_ARG_ENABLE([silent-rules], [dnl
+AS_HELP_STRING(
+ [--enable-silent-rules],
+ [less verbose build output (undo: "make V=1")])
+AS_HELP_STRING(
+ [--disable-silent-rules],
+ [verbose build output (undo: "make V=0")])dnl
+])
+case $enable_silent_rules in @%:@ (((
+ yes) AM_DEFAULT_VERBOSITY=0;;
+ no) AM_DEFAULT_VERBOSITY=1;;
+ *) AM_DEFAULT_VERBOSITY=m4_if([$1], [yes], [0], [1]);;
+esac
+dnl
+dnl A few 'make' implementations (e.g., NonStop OS and NextStep)
+dnl do not support nested variable expansions.
+dnl See automake bug#9928 and bug#10237.
+am_make=${MAKE-make}
+AC_CACHE_CHECK([whether $am_make supports nested variables],
+ [am_cv_make_support_nested_variables],
+ [if AS_ECHO([['TRUE=$(BAR$(V))
+BAR0=false
+BAR1=true
+V=1
+am__doit:
+ @$(TRUE)
+.PHONY: am__doit']]) | $am_make -f - >/dev/null 2>&1; then
+ am_cv_make_support_nested_variables=yes
+else
+ am_cv_make_support_nested_variables=no
+fi])
+if test $am_cv_make_support_nested_variables = yes; then
+ dnl Using '$V' instead of '$(V)' breaks IRIX make.
+ AM_V='$(V)'
+ AM_DEFAULT_V='$(AM_DEFAULT_VERBOSITY)'
+else
+ AM_V=$AM_DEFAULT_VERBOSITY
+ AM_DEFAULT_V=$AM_DEFAULT_VERBOSITY
+fi
+AC_SUBST([AM_V])dnl
+AM_SUBST_NOTMAKE([AM_V])dnl
+AC_SUBST([AM_DEFAULT_V])dnl
+AM_SUBST_NOTMAKE([AM_DEFAULT_V])dnl
+AC_SUBST([AM_DEFAULT_VERBOSITY])dnl
+AM_BACKSLASH='\'
+AC_SUBST([AM_BACKSLASH])dnl
+_AM_SUBST_NOTMAKE([AM_BACKSLASH])dnl
+])
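+
+# Usage sketch: AM_SILENT_RULES([yes]) makes terse output the default; a
+# user can still see full command lines for a single run with "make V=1",
+# or silence a verbose default with "make V=0".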
+
+# Copyright (C) 2001-2017 Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# AM_PROG_INSTALL_STRIP
+# ---------------------
+# One issue with vendor 'install' (even GNU) is that you can't
+# specify the program used to strip binaries. This is especially
+# annoying in cross-compiling environments, where the build's strip
+# is unlikely to handle the host's binaries.
+# Fortunately install-sh will honor a STRIPPROG variable, so we
+# always use install-sh in "make install-strip", and initialize
+# STRIPPROG with the value of the STRIP variable (set by the user).
+AC_DEFUN([AM_PROG_INSTALL_STRIP],
+[AC_REQUIRE([AM_PROG_INSTALL_SH])dnl
+# Installed binaries are usually stripped using 'strip' when the user
+# run "make install-strip". However 'strip' might not be the right
+# tool to use in cross-compilation environments, therefore Automake
+# will honor the 'STRIP' environment variable to overrule this program.
+dnl Don't test for $cross_compiling = yes, because it might be 'maybe'.
+if test "$cross_compiling" != no; then
+ AC_CHECK_TOOL([STRIP], [strip], :)
+fi
+INSTALL_STRIP_PROGRAM="\$(install_sh) -c -s"
+AC_SUBST([INSTALL_STRIP_PROGRAM])])
+
+# Copyright (C) 2006-2017 Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# _AM_SUBST_NOTMAKE(VARIABLE)
+# ---------------------------
+# Prevent Automake from outputting VARIABLE = @VARIABLE@ in Makefile.in.
+# This macro is traced by Automake.
+AC_DEFUN([_AM_SUBST_NOTMAKE])
+
+# AM_SUBST_NOTMAKE(VARIABLE)
+# --------------------------
+# Public sister of _AM_SUBST_NOTMAKE.
+AC_DEFUN([AM_SUBST_NOTMAKE], [_AM_SUBST_NOTMAKE($@)])
+
+# Check how to create a tarball. -*- Autoconf -*-
+
+# Copyright (C) 2004-2017 Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# _AM_PROG_TAR(FORMAT)
+# --------------------
+# Check how to create a tarball in format FORMAT.
+# FORMAT should be one of 'v7', 'ustar', or 'pax'.
+#
+# Substitute a variable $(am__tar) that is a command
+# writing to stdout a FORMAT-tarball containing the directory
+# $tardir.
+# tardir=directory && $(am__tar) > result.tar
+#
+# Substitute a variable $(am__untar) that extracts such
+# a tarball read from stdin.
+# $(am__untar) < result.tar
+#
+AC_DEFUN([_AM_PROG_TAR],
+[# Always define AMTAR for backward compatibility. Yes, it's still used
+# in the wild :-( We should find a proper way to deprecate it ...
+AC_SUBST([AMTAR], ['$${TAR-tar}'])
+
+# We'll loop over all known methods to create a tar archive until one works.
+_am_tools='gnutar m4_if([$1], [ustar], [plaintar]) pax cpio none'
+
+m4_if([$1], [v7],
+ [am__tar='$${TAR-tar} chof - "$$tardir"' am__untar='$${TAR-tar} xf -'],
+
+ [m4_case([$1],
+ [ustar],
+ [# The POSIX 1988 'ustar' format is defined with fixed-size fields.
+  # There is notably a 21-bit limit for the UID and the GID. In fact,
+ # the 'pax' utility can hang on bigger UID/GID (see automake bug#8343
+ # and bug#13588).
+ am_max_uid=2097151 # 2^21 - 1
+ am_max_gid=$am_max_uid
+ # The $UID and $GID variables are not portable, so we need to resort
+ # to the POSIX-mandated id(1) utility. Errors in the 'id' calls
+ # below are definitely unexpected, so allow the users to see them
+ # (that is, avoid stderr redirection).
+ am_uid=`id -u || echo unknown`
+ am_gid=`id -g || echo unknown`
+ AC_MSG_CHECKING([whether UID '$am_uid' is supported by ustar format])
+ if test $am_uid -le $am_max_uid; then
+ AC_MSG_RESULT([yes])
+ else
+ AC_MSG_RESULT([no])
+ _am_tools=none
+ fi
+ AC_MSG_CHECKING([whether GID '$am_gid' is supported by ustar format])
+ if test $am_gid -le $am_max_gid; then
+ AC_MSG_RESULT([yes])
+ else
+ AC_MSG_RESULT([no])
+ _am_tools=none
+ fi],
+
+ [pax],
+ [],
+
+ [m4_fatal([Unknown tar format])])
+
+ AC_MSG_CHECKING([how to create a $1 tar archive])
+
+ # Go ahead even if we have the value already cached. We do so because we
+ # need to set the values for the 'am__tar' and 'am__untar' variables.
+ _am_tools=${am_cv_prog_tar_$1-$_am_tools}
+
+ for _am_tool in $_am_tools; do
+ case $_am_tool in
+ gnutar)
+ for _am_tar in tar gnutar gtar; do
+ AM_RUN_LOG([$_am_tar --version]) && break
+ done
+ am__tar="$_am_tar --format=m4_if([$1], [pax], [posix], [$1]) -chf - "'"$$tardir"'
+ am__tar_="$_am_tar --format=m4_if([$1], [pax], [posix], [$1]) -chf - "'"$tardir"'
+ am__untar="$_am_tar -xf -"
+ ;;
+ plaintar)
+ # Must skip GNU tar: if it does not support --format= it doesn't create
+      # ustar tarballs either.
+ (tar --version) >/dev/null 2>&1 && continue
+ am__tar='tar chf - "$$tardir"'
+ am__tar_='tar chf - "$tardir"'
+ am__untar='tar xf -'
+ ;;
+ pax)
+ am__tar='pax -L -x $1 -w "$$tardir"'
+ am__tar_='pax -L -x $1 -w "$tardir"'
+ am__untar='pax -r'
+ ;;
+ cpio)
+ am__tar='find "$$tardir" -print | cpio -o -H $1 -L'
+ am__tar_='find "$tardir" -print | cpio -o -H $1 -L'
+ am__untar='cpio -i -H $1 -d'
+ ;;
+ none)
+ am__tar=false
+ am__tar_=false
+ am__untar=false
+ ;;
+ esac
+
+ # If the value was cached, stop now. We just wanted to have am__tar
+ # and am__untar set.
+ test -n "${am_cv_prog_tar_$1}" && break
+
+ # tar/untar a dummy directory, and stop if the command works.
+ rm -rf conftest.dir
+ mkdir conftest.dir
+ echo GrepMe > conftest.dir/file
+ AM_RUN_LOG([tardir=conftest.dir && eval $am__tar_ >conftest.tar])
+ rm -rf conftest.dir
+ if test -s conftest.tar; then
+ AM_RUN_LOG([$am__untar <conftest.tar])
+ AM_RUN_LOG([cat conftest.dir/file])
+ grep GrepMe conftest.dir/file >/dev/null 2>&1 && break
+ fi
+ done
+ rm -rf conftest.dir
+
+ AC_CACHE_VAL([am_cv_prog_tar_$1], [am_cv_prog_tar_$1=$_am_tool])
+ AC_MSG_RESULT([$am_cv_prog_tar_$1])])
+
+AC_SUBST([am__tar])
+AC_SUBST([am__untar])
+]) # _AM_PROG_TAR
+
+m4_include([build/m4/ax_c___atomic.m4])
+m4_include([build/m4/ax_c__generic.m4])
+m4_include([build/m4/ax_c_lto.m4])
+m4_include([build/m4/ax_c_mallinfo.m4])
+m4_include([build/m4/ax_c_mallopt.m4])
+m4_include([build/m4/ax_check_compile_flag.m4])
+m4_include([build/m4/ax_gcc_func_attribute.m4])
+m4_include([build/m4/ax_pthread.m4])
+m4_include([build/m4/jemalloc.m4])
+m4_include([build/m4/tcmalloc.m4])
diff --git a/backends/Makefile.am b/backends/Makefile.am
index 278f057a..ea0d77e0 100644
--- a/backends/Makefile.am
+++ b/backends/Makefile.am
@@ -9,6 +9,7 @@ SUBDIRS = \
opentsdb \
prometheus \
aws_kinesis \
+ mongodb \
$(NULL)
dist_noinst_DATA = \
diff --git a/backends/Makefile.in b/backends/Makefile.in
new file mode 100644
index 00000000..3bab1510
--- /dev/null
+++ b/backends/Makefile.in
@@ -0,0 +1,710 @@
+# Makefile.in generated by automake 1.15.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2017 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+
+VPATH = @srcdir@
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = backends
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(dist_noinst_SCRIPTS) \
+ $(dist_noinst_DATA) $(am__DIST_COMMON)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+SCRIPTS = $(dist_noinst_SCRIPTS)
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \
+ ctags-recursive dvi-recursive html-recursive info-recursive \
+ install-data-recursive install-dvi-recursive \
+ install-exec-recursive install-html-recursive \
+ install-info-recursive install-pdf-recursive \
+ install-ps-recursive install-recursive installcheck-recursive \
+ installdirs-recursive pdf-recursive ps-recursive \
+ tags-recursive uninstall-recursive
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_noinst_DATA)
+RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \
+ distclean-recursive maintainer-clean-recursive
+am__recursive_targets = \
+ $(RECURSIVE_TARGETS) \
+ $(RECURSIVE_CLEAN_TARGETS) \
+ $(am__extra_recursive_targets)
+AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \
+ distdir
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+# Read a list of newline-separated strings from the standard input,
+# and print each of them once, without duplicates. Input order is
+# *not* preserved.
+am__uniquify_input = $(AWK) '\
+ BEGIN { nonempty = 0; } \
+ { items[$$0] = 1; nonempty = 1; } \
+ END { if (nonempty) { for (i in items) print i; }; } \
+'
+# Make sure the list of sources is unique. This is necessary because,
+# e.g., the same source file might be shared among _SOURCES variables
+# for different programs/libraries.
+am__define_uniq_tagged_files = \
+ list='$(am__tagged_files)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | $(am__uniquify_input)`
+ETAGS = etags
+CTAGS = ctags
+DIST_SUBDIRS = $(SUBDIRS)
+am__DIST_COMMON = $(srcdir)/Makefile.in
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+am__relativize = \
+ dir0=`pwd`; \
+ sed_first='s,^\([^/]*\)/.*$$,\1,'; \
+ sed_rest='s,^[^/]*/*,,'; \
+ sed_last='s,^.*/\([^/]*\)$$,\1,'; \
+ sed_butlast='s,/*[^/]*$$,,'; \
+ while test -n "$$dir1"; do \
+ first=`echo "$$dir1" | sed -e "$$sed_first"`; \
+ if test "$$first" != "."; then \
+ if test "$$first" = ".."; then \
+ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \
+ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \
+ else \
+ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \
+ if test "$$first2" = "$$first"; then \
+ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \
+ else \
+ dir2="../$$dir2"; \
+ fi; \
+ dir0="$$dir0"/"$$first"; \
+ fi; \
+ fi; \
+ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \
+ done; \
+ reldir="$$dir2"
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CUPSCONFIG = @CUPSCONFIG@
+CXX = @CXX@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CXX_BINARY = @CXX_BINARY@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+JSON_CFLAGS = @JSON_CFLAGS@
+JSON_LIBS = @JSON_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
+LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
+LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
+LIBCURL_LIBS = @LIBCURL_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
+LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
+LIBSSL_LIBS = @LIBSSL_LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
+OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
+OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
+OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
+OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
+OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
+OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
+OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
+OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
+OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
+OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
+OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
+OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
+PROTOBUF_LIBS = @PROTOBUF_LIBS@
+PROTOC = @PROTOC@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
+XENLIGHT_LIBS = @XENLIGHT_LIBS@
+YAJL_CFLAGS = @YAJL_CFLAGS@
+YAJL_LIBS = @YAJL_LIBS@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+SUBDIRS = \
+ graphite \
+ json \
+ opentsdb \
+ prometheus \
+ aws_kinesis \
+ mongodb \
+ $(NULL)
+
+dist_noinst_DATA = \
+ README.md \
+ WALKTHROUGH.md \
+ $(NULL)
+
+dist_noinst_SCRIPTS = \
+ nc-backend.sh \
+ $(NULL)
+
+all: all-recursive
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu backends/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu backends/Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+
+# This directory's subdirectories are mostly independent; you can cd
+# into them and run 'make' without going through this Makefile.
+# To change the values of 'make' variables: instead of editing Makefiles,
+# (1) if the variable is set in 'config.status', edit 'config.status'
+# (which will cause the Makefiles to be regenerated when you run 'make');
+# (2) otherwise, pass the desired values on the 'make' command line.
+$(am__recursive_targets):
+ @fail=; \
+ if $(am__make_keepgoing); then \
+ failcom='fail=yes'; \
+ else \
+ failcom='exit 1'; \
+ fi; \
+ dot_seen=no; \
+ target=`echo $@ | sed s/-recursive//`; \
+ case "$@" in \
+ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \
+ *) list='$(SUBDIRS)' ;; \
+ esac; \
+ for subdir in $$list; do \
+ echo "Making $$target in $$subdir"; \
+ if test "$$subdir" = "."; then \
+ dot_seen=yes; \
+ local_target="$$target-am"; \
+ else \
+ local_target="$$target"; \
+ fi; \
+ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \
+ || eval $$failcom; \
+ done; \
+ if test "$$dot_seen" = "no"; then \
+ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \
+ fi; test -z "$$fail"
+
+ID: $(am__tagged_files)
+ $(am__define_uniq_tagged_files); mkid -fID $$unique
+tags: tags-recursive
+TAGS: tags
+
+tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+ set x; \
+ here=`pwd`; \
+ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \
+ include_option=--etags-include; \
+ empty_fix=.; \
+ else \
+ include_option=--include; \
+ empty_fix=; \
+ fi; \
+ list='$(SUBDIRS)'; for subdir in $$list; do \
+ if test "$$subdir" = .; then :; else \
+ test ! -f $$subdir/TAGS || \
+ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \
+ fi; \
+ done; \
+ $(am__define_uniq_tagged_files); \
+ shift; \
+ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
+ test -n "$$unique" || unique=$$empty_fix; \
+ if test $$# -gt 0; then \
+ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+ "$$@" $$unique; \
+ else \
+ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+ $$unique; \
+ fi; \
+ fi
+ctags: ctags-recursive
+
+CTAGS: ctags
+ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+ $(am__define_uniq_tagged_files); \
+ test -z "$(CTAGS_ARGS)$$unique" \
+ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
+ $$unique
+
+GTAGS:
+ here=`$(am__cd) $(top_builddir) && pwd` \
+ && $(am__cd) $(top_srcdir) \
+ && gtags -i $(GTAGS_ARGS) "$$here"
+cscopelist: cscopelist-recursive
+
+cscopelist-am: $(am__tagged_files)
+ list='$(am__tagged_files)'; \
+ case "$(srcdir)" in \
+ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \
+ *) sdir=$(subdir)/$(srcdir) ;; \
+ esac; \
+ for i in $$list; do \
+ if test -f "$$i"; then \
+ echo "$(subdir)/$$i"; \
+ else \
+ echo "$$sdir/$$i"; \
+ fi; \
+ done >> $(top_builddir)/cscope.files
+
+distclean-tags:
+ -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+ @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \
+ if test "$$subdir" = .; then :; else \
+ $(am__make_dryrun) \
+ || test -d "$(distdir)/$$subdir" \
+ || $(MKDIR_P) "$(distdir)/$$subdir" \
+ || exit 1; \
+ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \
+ $(am__relativize); \
+ new_distdir=$$reldir; \
+ dir1=$$subdir; dir2="$(top_distdir)"; \
+ $(am__relativize); \
+ new_top_distdir=$$reldir; \
+ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \
+ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \
+ ($(am__cd) $$subdir && \
+ $(MAKE) $(AM_MAKEFLAGS) \
+ top_distdir="$$new_top_distdir" \
+ distdir="$$new_distdir" \
+ am__remove_distdir=: \
+ am__skip_length_check=: \
+ am__skip_mode_fix=: \
+ distdir) \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-recursive
+all-am: Makefile $(SCRIPTS) $(DATA)
+installdirs: installdirs-recursive
+installdirs-am:
+install: install-recursive
+install-exec: install-exec-recursive
+install-data: install-data-recursive
+uninstall: uninstall-recursive
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-recursive
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-recursive
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-recursive
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic distclean-tags
+
+dvi: dvi-recursive
+
+dvi-am:
+
+html: html-recursive
+
+html-am:
+
+info: info-recursive
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-recursive
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-recursive
+
+install-html-am:
+
+install-info: install-info-recursive
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-recursive
+
+install-pdf-am:
+
+install-ps: install-ps-recursive
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-recursive
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-recursive
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-recursive
+
+pdf-am:
+
+ps: ps-recursive
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: $(am__recursive_targets) install-am install-strip
+
+.PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am check \
+ check-am clean clean-generic cscopelist-am ctags ctags-am \
+ distclean distclean-generic distclean-tags distdir dvi dvi-am \
+ html html-am info info-am install install-am install-data \
+ install-data-am install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs installdirs-am maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags tags-am uninstall uninstall-am
+
+.PRECIOUS: Makefile
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/backends/README.md b/backends/README.md
index ef5baa1b..f93e60f5 100644
--- a/backends/README.md
+++ b/backends/README.md
@@ -1,88 +1,92 @@
# Metrics long term archiving
-netdata supports backends for archiving the metrics, or providing long term dashboards,
+Netdata supports backends for archiving the metrics or providing long-term dashboards
using Grafana or other tools, like this:
![image](https://cloud.githubusercontent.com/assets/2662304/20649711/29f182ba-b4ce-11e6-97c8-ab2c0ab59833.png)
-Since netdata collects thousands of metrics per server per second, which would easily congest any backend
-server when several netdata servers are sending data to it, netdata allows sending metrics at a lower
+Since Netdata collects thousands of metrics per server per second, a volume that would easily congest any backend
+server when several Netdata servers are sending data to it, Netdata allows sending metrics at a lower
frequency, by resampling them.
-So, although netdata collects metrics every second, it can send to the backend servers averages or sums every
+So, although Netdata collects metrics every second, it can send to the backend servers averages or sums every
X seconds (though it can send them per second if you need it to).
## features
-1. Supported backends
+1. Supported backends
- - **graphite** (`plaintext interface`, used by **Graphite**, **InfluxDB**, **KairosDB**,
- **Blueflood**, **ElasticSearch** via logstash tcp input and the graphite codec, etc)
+ - **graphite** (`plaintext interface`, used by **Graphite**, **InfluxDB**, **KairosDB**,
+ **Blueflood**, **ElasticSearch** via logstash tcp input and the graphite codec, etc)
- metrics are sent to the backend server as `prefix.hostname.chart.dimension`. `prefix` is
- configured below, `hostname` is the hostname of the machine (can also be configured).
+ metrics are sent to the backend server as `prefix.hostname.chart.dimension`. `prefix` is
+ configured below, `hostname` is the hostname of the machine (can also be configured).
- - **opentsdb** (`telnet or HTTP interfaces`, used by **OpenTSDB**, **InfluxDB**, **KairosDB**, etc)
+ - **opentsdb** (`telnet or HTTP interfaces`, used by **OpenTSDB**, **InfluxDB**, **KairosDB**, etc)
- metrics are sent to opentsdb as `prefix.chart.dimension` with tag `host=hostname`.
+ metrics are sent to opentsdb as `prefix.chart.dimension` with tag `host=hostname`.
- - **json** document DBs
+ - **json** document DBs
- metrics are sent to a document db, `JSON` formatted.
+ metrics are sent to a document db, `JSON` formatted.
- - **prometheus** is described at [prometheus page](prometheus/) since it pulls data from netdata.
+ - **prometheus** is described at [prometheus page](prometheus/) since it pulls data from Netdata.
- - **prometheus remote write** (a binary snappy-compressed protocol buffer encoding over HTTP used by
- **Elasticsearch**, **Gnocchi**, **Graphite**, **InfluxDB**, **Kafka**, **OpenTSDB**,
- **PostgreSQL/TimescaleDB**, **Splunk**, **VictoriaMetrics**,
- and a lot of other [storage providers](https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage))
+ - **prometheus remote write** (a binary snappy-compressed protocol buffer encoding over HTTP used by
+ **Elasticsearch**, **Gnocchi**, **Graphite**, **InfluxDB**, **Kafka**, **OpenTSDB**,
+ **PostgreSQL/TimescaleDB**, **Splunk**, **VictoriaMetrics**,
+ and a lot of other [storage providers](https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage))
- metrics are labeled in the format, which is used by Netdata for the [plaintext prometheus protocol](prometheus/).
- Notes on using the remote write backend are [here](prometheus/remote_write/).
+    metrics are labeled in the format used by Netdata for the [plaintext prometheus protocol](prometheus/).
+ Notes on using the remote write backend are [here](prometheus/remote_write/).
- - **AWS Kinesis Data Streams**
+ - **AWS Kinesis Data Streams**
- metrics are sent to the service in `JSON` format.
+ metrics are sent to the service in `JSON` format.
-2. Only one backend may be active at a time.
+ - **MongoDB**
-3. Netdata can filter metrics (at the chart level), to send only a subset of the collected metrics.
+ metrics are sent to the database in `JSON` format.
-4. Netdata supports three modes of operation for all backends:
+2. Only one backend may be active at a time.
- - `as-collected` sends to backends the metrics as they are collected, in the units they are collected.
- So, counters are sent as counters and gauges are sent as gauges, much like all data collectors do.
- For example, to calculate CPU utilization in this format, you need to know how to convert kernel ticks to percentage.
+3. Netdata can filter metrics (at the chart level), to send only a subset of the collected metrics.
- - `average` sends to backends normalized metrics from the netdata database.
- In this mode, all metrics are sent as gauges, in the units netdata uses. This abstracts data collection
- and simplifies visualization, but you will not be able to copy and paste queries from other sources to convert units.
- For example, CPU utilization percentage is calculated by netdata, so netdata will convert ticks to percentage and
- send the average percentage to the backend.
+4. Netdata supports three modes of operation for all backends (a short sketch of the three modes follows this list):
- - `sum` or `volume`: the sum of the interpolated values shown on the netdata graphs is sent to the backend.
- So, if netdata is configured to send data to the backend every 10 seconds, the sum of the 10 values shown on the
- netdata charts will be used.
+ - `as-collected` sends to backends the metrics as they are collected, in the units they are collected.
+ So, counters are sent as counters and gauges are sent as gauges, much like all data collectors do.
+ For example, to calculate CPU utilization in this format, you need to know how to convert kernel ticks to percentage.
+
+ - `average` sends to backends normalized metrics from the Netdata database.
+ In this mode, all metrics are sent as gauges, in the units Netdata uses. This abstracts data collection
+ and simplifies visualization, but you will not be able to copy and paste queries from other sources to convert units.
+ For example, CPU utilization percentage is calculated by Netdata, so Netdata will convert ticks to percentage and
+ send the average percentage to the backend.
+
+ - `sum` or `volume`: the sum of the interpolated values shown on the Netdata graphs is sent to the backend.
+ So, if Netdata is configured to send data to the backend every 10 seconds, the sum of the 10 values shown on the
+ Netdata charts will be used.
Time-series databases suggest collecting the raw values (`as-collected`). If you plan to invest in building your monitoring around a time-series database and you already know (or will invest in learning) how to convert units and normalize the metrics in Grafana or other visualization tools, we suggest using `as-collected`.
-If, on the other hand, you just need long term archiving of netdata metrics and you plan to mainly work with netdata, we suggest to use `average`. It decouples visualization from data collection, so it will generally be a lot simpler. Furthermore, if you use `average`, the charts shown in the back-end will match exactly what you see in Netdata, which is not necessarily true for the other modes of operation.
+If, on the other hand, you just need long-term archiving of Netdata metrics and you plan to mainly work with Netdata, we suggest using `average`. It decouples visualization from data collection, so it will generally be a lot simpler. Furthermore, if you use `average`, the charts shown in the back-end will match exactly what you see in Netdata, which is not necessarily true for the other modes of operation.
-5. This code is smart enough, not to slow down netdata, independently of the speed of the backend server.
+5. This code is smart enough not to slow down Netdata, regardless of the speed of the backend server.
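+
+To make the three modes concrete, here is an illustrative sketch of the same dimension sent with the graphite backend (`prefix.hostname.chart.dimension value timestamp`); the hostname `myhost`, the values and the timestamp are hypothetical:
+
+```
+# as-collected: the raw counter, in the units the kernel exposes
+netdata.myhost.system.cpu.user 123456 1567500000
+# average: the normalized gauge, as shown on the Netdata dashboard
+netdata.myhost.system.cpu.user 3.4 1567500000
+```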
## configuration
In `/etc/netdata/netdata.conf` you should have something like this (if not, download the latest version
-of `netdata.conf` from your netdata):
+of `netdata.conf` from your Netdata):
```
[backend]
enabled = yes | no
- type = graphite | opentsdb:telnet | opentsdb:http | opentsdb:https | prometheus_remote_write | json | kinesis
+ type = graphite | opentsdb:telnet | opentsdb:http | opentsdb:https | prometheus_remote_write | json | kinesis | mongodb
host tags = list of TAG=VALUE
destination = space separated list of [PROTOCOL:]HOST[:PORT] - the first working will be used, or a region for kinesis
data source = average | sum | as collected
    prefix = netdata
hostname = my-name
update every = 10
buffer on failures = 10
@@ -92,25 +96,25 @@ of `netdata.conf` from your netdata):
send names instead of ids = yes
```
-- `enabled = yes | no`, enables or disables sending data to a backend
+- `enabled = yes | no`, enables or disables sending data to a backend
-- `type = graphite | opentsdb:telnet | opentsdb:http | opentsdb:https | json | kinesis`, selects the backend type
+- `type = graphite | opentsdb:telnet | opentsdb:http | opentsdb:https | json | kinesis | mongodb`, selects the backend type
-- `destination = host1 host2 host3 ...`, accepts **a space separated list** of hostnames,
- IPs (IPv4 and IPv6) and ports to connect to.
- Netdata will use the **first available** to send the metrics.
+- `destination = host1 host2 host3 ...`, accepts **a space separated list** of hostnames,
+ IPs (IPv4 and IPv6) and ports to connect to.
+ Netdata will use the **first available** to send the metrics.
- The format of each item in this list, is: `[PROTOCOL:]IP[:PORT]`.
+ The format of each item in this list, is: `[PROTOCOL:]IP[:PORT]`.
- `PROTOCOL` can be `udp` or `tcp`. `tcp` is the default and only supported by the current backends.
+ `PROTOCOL` can be `udp` or `tcp`. `tcp` is the default and only supported by the current backends.
- `IP` can be `XX.XX.XX.XX` (IPv4), or `[XX:XX...XX:XX]` (IPv6).
- For IPv6 you can to enclose the IP in `[]` to separate it from the port.
+ `IP` can be `XX.XX.XX.XX` (IPv4), or `[XX:XX...XX:XX]` (IPv6).
+   For IPv6 you can enclose the IP in `[]` to separate it from the port.
- `PORT` can be a number of a service name. If omitted, the default port for the backend will be used
- (graphite = 2003, opentsdb = 4242).
+   `PORT` can be a number or a service name. If omitted, the default port for the backend will be used
+ (graphite = 2003, opentsdb = 4242).
- Example IPv4:
+ Example IPv4:
```
destination = 10.11.14.2:4242 10.11.14.3:4242 10.11.14.4:4242
@@ -122,81 +126,84 @@ of `netdata.conf` from your netdata):
destination = [ffff:...:0001]:2003 10.11.12.1:2003
```
- When multiple servers are defined, netdata will try the next one when the first one fails. This allows
- you to load-balance different servers: give your backend servers in different order on each netdata.
+ When multiple servers are defined, Netdata will try the next one when the first one fails. This allows
+ you to load-balance different servers: give your backend servers in different order on each Netdata.
- netdata also ships [`nc-backend.sh`](nc-backend.sh),
+ Netdata also ships [`nc-backend.sh`](nc-backend.sh),
a script that can be used as a fallback backend to save the metrics to disk and push them to the
time-series database when it becomes available again. It can also be used to monitor / trace / debug
- the metrics netdata generates.
+ the metrics Netdata generates.
For kinesis backend `destination` should be set to an AWS region (for example, `us-east-1`).
-- `data source = as collected`, or `data source = average`, or `data source = sum`, selects the kind of
- data that will be sent to the backend.
+ The MongoDB backend doesn't use the `destination` option for its configuration. It uses the `mongodb.conf`
+ [configuration file](mongodb/README.md) instead.
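+
+  As an illustrative sketch (the authoritative key names are documented there), `mongodb.conf` holds the connection parameters, e.g.:
+
+  ```
+  # hypothetical values
+  uri = mongodb://localhost:27017
+  database = netdata
+  collection = netdata_metrics
+  ```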
-- `hostname = my-name`, is the hostname to be used for sending data to the backend server. By default
- this is `[global].hostname`.
+- `data source = as collected`, or `data source = average`, or `data source = sum`, selects the kind of
+ data that will be sent to the backend.
-- `prefix = netdata`, is the prefix to add to all metrics.
+- `hostname = my-name`, is the hostname to be used for sending data to the backend server. By default
+ this is `[global].hostname`.
-- `update every = 10`, is the number of seconds between sending data to the backend. netdata will add
- some randomness to this number, to prevent stressing the backend server when many netdata servers send
- data to the same backend. This randomness does not affect the quality of the data, only the time they
- are sent.
+- `prefix = netdata`, is the prefix to add to all metrics.
-- `buffer on failures = 10`, is the number of iterations (each iteration is `[backend].update every` seconds)
- to buffer data, when the backend is not available. If the backend fails to receive the data after that
- many failures, data loss on the backend is expected (netdata will also log it).
+- `update every = 10`, is the number of seconds between sending data to the backend. Netdata will add
+ some randomness to this number, to prevent stressing the backend server when many Netdata servers send
+ data to the same backend. This randomness does not affect the quality of the data, only the time they
+ are sent.
-- `timeout ms = 20000`, is the timeout in milliseconds to wait for the backend server to process the data.
- By default this is `2 * update_every * 1000`.
+- `buffer on failures = 10`, is the number of iterations (each iteration is `[backend].update every` seconds)
+ to buffer data, when the backend is not available. If the backend fails to receive the data after that
+ many failures, data loss on the backend is expected (Netdata will also log it).
-- `send hosts matching = localhost *` includes one or more space separated patterns, using ` * ` as wildcard
- (any number of times within each pattern). The patterns are checked against the hostname (the localhost
- is always checked as `localhost`), allowing us to filter which hosts will be sent to the backend when
- this netdata is a central netdata aggregating multiple hosts. A pattern starting with ` ! ` gives a
- negative match. So to match all hosts named `*db*` except hosts containing `*slave*`, use
- `!*slave* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive
- or negative).
+- `timeout ms = 20000`, is the timeout in milliseconds to wait for the backend server to process the data.
+ By default this is `2 * update_every * 1000`.
-- `send charts matching = *` includes one or more space separated patterns, using ` * ` as wildcard (any
- number of times within each pattern). The patterns are checked against both chart id and chart name.
- A pattern starting with ` ! ` gives a negative match. So to match all charts named `apps.*`
- except charts ending in `*reads`, use `!*reads apps.*` (so, the order is important: the first pattern
- matching the chart id or the chart name will be used - positive or negative).
+- `send hosts matching = localhost *` includes one or more space separated patterns, using `*` as wildcard
+ (any number of times within each pattern). The patterns are checked against the hostname (the localhost
+ is always checked as `localhost`), allowing us to filter which hosts will be sent to the backend when
+ this Netdata is a central Netdata aggregating multiple hosts. A pattern starting with `!` gives a
+ negative match. So to match all hosts named `*db*` except hosts containing `*slave*`, use
+ `!*slave* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive
+ or negative).
-- `send names instead of ids = yes | no` controls the metric names netdata should send to backend.
- netdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read
- by the system and names are human friendly labels (also unique). Most charts and metrics have the same
- ID and name, but in several cases they are different: disks with device-mapper, interrupts, QoS classes,
- statsd synthetic charts, etc.
+- `send charts matching = *` includes one or more space separated patterns, using `*` as wildcard (any
+ number of times within each pattern). The patterns are checked against both chart id and chart name.
+ A pattern starting with `!` gives a negative match. So to match all charts named `apps.*`
+ except charts ending in `*reads`, use `!*reads apps.*` (so, the order is important: the first pattern
+ matching the chart id or the chart name will be used - positive or negative).
-- `host tags = list of TAG=VALUE` defines tags that should be appended on all metrics for the given host.
- These are currently only sent to opentsdb and prometheus. Please use the appropriate format for each
- time-series db. For example opentsdb likes them like `TAG1=VALUE1 TAG2=VALUE2`, but prometheus like
- `tag1="value1",tag2="value2"`. Host tags are mirrored with database replication (streaming of metrics
- between netdata servers).
+- `send names instead of ids = yes | no` controls the metric names Netdata should send to the backend.
+ Netdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read
+ by the system and names are human friendly labels (also unique). Most charts and metrics have the same
+ ID and name, but in several cases they are different: disks with device-mapper, interrupts, QoS classes,
+ statsd synthetic charts, etc.
+
+- `host tags = list of TAG=VALUE` defines tags that should be appended on all metrics for the given host.
+ These are currently only sent to opentsdb and prometheus. Please use the appropriate format for each
+  time-series db. For example, opentsdb likes them like `TAG1=VALUE1 TAG2=VALUE2`, but prometheus likes
+ `tag1="value1",tag2="value2"`. Host tags are mirrored with database replication (streaming of metrics
+ between Netdata servers).
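+
+  For example (hypothetical tag names and values, in the opentsdb-style format):
+
+  ```
+  [backend]
+      host tags = datacenter=dc1 environment=production
+  ```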
## monitoring operation
-netdata provides 5 charts:
+Netdata provides 5 charts:
-1. **Buffered metrics**, the number of metrics netdata added to the buffer for dispatching them to the
- backend server.
+1. **Buffered metrics**, the number of metrics Netdata added to the buffer for dispatching them to the
+ backend server.
-2. **Buffered data size**, the amount of data (in KB) netdata added the buffer.
+2. **Buffered data size**, the amount of data (in KB) Netdata added to the buffer.
-3. ~~**Backend latency**, the time the backend server needed to process the data netdata sent.
- If there was a re-connection involved, this includes the connection time.~~
- (this chart has been removed, because it only measures the time netdata needs to give the data
- to the O/S - since the backend servers do not ack the reception, netdata does not have any means
- to measure this properly).
+3. ~~**Backend latency**, the time the backend server needed to process the data Netdata sent.
+ If there was a re-connection involved, this includes the connection time.~~
+ (this chart has been removed, because it only measures the time Netdata needs to give the data
+ to the O/S - since the backend servers do not ack the reception, Netdata does not have any means
+ to measure this properly).
-4. **Backend operations**, the number of operations performed by netdata.
+4. **Backend operations**, the number of operations performed by Netdata.
-5. **Backend thread CPU usage**, the CPU resources consumed by the netdata thread, that is responsible
- for sending the metrics to the backend server.
+5. **Backend thread CPU usage**, the CPU resources consumed by the Netdata thread, that is responsible
+ for sending the metrics to the backend server.
![image](https://cloud.githubusercontent.com/assets/2662304/20463536/eb196084-af3d-11e6-8ee5-ddbd3b4d8449.png)
@@ -204,14 +211,13 @@ netdata provides 5 charts:
The latest version of the alarms configuration for monitoring the backend is [here](../health/health.d/backend.conf)
-netdata adds 4 alarms:
+Netdata adds 4 alarms:
-1. `backend_last_buffering`, number of seconds since the last successful buffering of backend data
-2. `backend_metrics_sent`, percentage of metrics sent to the backend server
-3. `backend_metrics_lost`, number of metrics lost due to repeating failures to contact the backend server
-4. ~~`backend_slow`, the percentage of time between iterations needed by the backend time to process the data sent by netdata~~ (this was misleading and has been removed).
+1. `backend_last_buffering`, number of seconds since the last successful buffering of backend data
+2. `backend_metrics_sent`, percentage of metrics sent to the backend server
+3. `backend_metrics_lost`, number of metrics lost due to repeating failures to contact the backend server
+4. ~~`backend_slow`, the percentage of time between iterations needed by the backend to process the data sent by Netdata~~ (this was misleading and has been removed).
![image](https://cloud.githubusercontent.com/assets/2662304/20463779/a46ed1c2-af43-11e6-91a5-07ca4533cac3.png)
-
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fbackends%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fbackends%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/backends/WALKTHROUGH.md b/backends/WALKTHROUGH.md
index d3666ef5..19f4ac0e 100644
--- a/backends/WALKTHROUGH.md
+++ b/backends/WALKTHROUGH.md
@@ -1,6 +1,7 @@
# Netdata, Prometheus, Grafana stack
## Intro
+
In this article I will walk you through the basics of getting Netdata,
Prometheus and Grafana all working together and monitoring your application
servers. This article will be using docker on your local workstation. We will be
@@ -11,6 +12,7 @@ without cloud accounts or access to VMs can try this out and for it’s speed of
deployment.
## Why Netdata, Prometheus, and Grafana
+
Some time ago I was introduced to Netdata by a coworker. We were attempting to
troubleshoot python code which seemed to be bottlenecked. I was instantly
impressed by the amount of metrics Netdata exposes to you. I quickly added
@@ -40,21 +42,22 @@ together to create a modern monitoring stack. This stack will offer you
visibility into your application and systems performance.
## Getting Started - Netdata
+
To begin let’s create our container which we will install Netdata on. We need
-to run a container, forward the necessary port that netdata listens on, and
+to run a container, forward the necessary port that Netdata listens on, and
attach a tty so we can interact with the bash shell on the container. But
before we do this we want name resolution between the two containers to work.
In order to accomplish this we will create a user-defined network and attach
both containers to this network. The first command we should run is:
-```
+```sh
docker network create --driver bridge netdata-tutorial
```
With this user-defined network created we can now launch our container we will
install Netdata on and point it to this network.
-```
+```sh
docker run -it --name netdata --hostname netdata --network=netdata-tutorial -p 19999:19999 centos:latest '/bin/bash'
```
@@ -68,23 +71,23 @@ be sitting inside the shell of the container.
After we have entered the shell we can install Netdata. This process could not
be easier. If you take a look at [this link](../packaging/installer/#installation), the Netdata devs give us
-several one-liners to install netdata. I have not had any issues with these one
+several one-liners to install Netdata. I have not had any issues with these
one-liners and their bootstrapping scripts so far (if you guys run into anything, do
share). Run the following command in your container.
-```
+```sh
bash <(curl -Ss https://my-netdata.io/kickstart.sh) --dont-wait
```
After the install completes you should be able to hit the Netdata dashboard at
-http://localhost:19999/ (replace localhost if you’re doing this on a VM or have
+<http://localhost:19999/> (replace localhost if you’re doing this on a VM or have
the docker container hosted on a machine not on your local system). If this is
your first time using Netdata I suggest you take a look around. The amount of
time I’ve spent digging through /proc and calculating my own metrics has been
greatly reduced by this tool. Take it all in.
Next I want to draw your attention to a particular endpoint. Navigate to
-http://localhost:19999/api/v1/allmetrics?format=prometheus&help=yes In your
+<http://localhost:19999/api/v1/allmetrics?format=prometheus&help=yes> in your
browser. This is the endpoint which publishes all the metrics in a format which
Prometheus understands. Let’s take a look at one of these metrics.
`netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="system"}
@@ -97,53 +100,54 @@ Netdata dashboard.
![](https://github.com/ldelossa/NetdataTutorial/raw/master/Screen%20Shot%202017-07-28%20at%204.00.45%20PM.png)
This CHART is called ‘system.cpu’, the FAMILY is cpu, and the DIMENSION we are
-observing is “system”. You can begin to draw links between the charts in netdata
+observing is “system”. You can begin to draw links between the charts in Netdata
to the prometheus metrics format in this manner.
## Prometheus
+
We will be installing prometheus in a container for the purpose of demonstration.
While prometheus does have an official container, I would like to walk through
the install process and setup on a fresh container. This will allow anyone
reading to migrate this tutorial to a VM or server of any sort.
Let’s start another container in the same fashion as we did the Netdata
-container. `docker run -it --name prometheus --hostname prometheus
---network=netdata-tutorial -p 9090:9090 centos:latest '/bin/bash'` This should
-drop you into a shell once again. Once there quickly install your favorite
-editor as we will be editing files later in this tutorial. `yum install vim -y`
+container.
+
+```sh
+docker run -it --name prometheus --hostname prometheus \
+--network=netdata-tutorial -p 9090:9090 centos:latest '/bin/bash'
+```
-Prometheus provides a tarball of their latest stable versions here:
-https://prometheus.io/download/. Let’s download the latest version and install
-into your container.
+This should drop you into a shell once again. Once there, quickly install your favorite editor, as we will be editing files later in this tutorial.
+```sh
+yum install vim -y
```
-curl -L 'https://github.com/prometheus/prometheus/releases/download/v1.7.1/prometheus-1.7.1.linux-amd64.tar.gz' -o /tmp/prometheus.tar.gz
+
+Prometheus provides a tarball of their latest stable versions [here](https://prometheus.io/download/).
+
+Let’s download the latest version and install into your container.
+
+```sh
+cd /tmp && curl -s https://api.github.com/repos/prometheus/prometheus/releases/latest \
+| grep "browser_download_url.*linux-amd64.tar.gz" \
+| cut -d '"' -f 4 \
+| wget -qi -
mkdir /opt/prometheus
-tar -xf /tmp/prometheus.tar.gz -C /opt/prometheus/ --strip-components 1
+tar -xvf /tmp/prometheus-*linux-amd64.tar.gz -C /opt/prometheus --strip-components=1
```
-This should get prometheus installed into the container. Let’s test that we can run
-prometheus and connect to it’s web interface. This will look similar to what
-follows:
+This should get prometheus installed into the container. Let’s test that we can run prometheus and connect to its web interface.
-```
-[root@prometheus prometheus]# /opt/prometheus/prometheus
-INFO[0000] Starting prometheus (version=1.7.1, branch=master, revision=3afb3fffa3a29c3de865e1172fb740442e9d0133)
- source="main.go:88"
-INFO[0000] Build context (go=go1.8.3, user=root@0aa1b7fc430d, date=20170612-11:44:05) source="main.go:89"
-INFO[0000] Host details (Linux 4.9.36-moby #1 SMP Wed Jul 12 15:29:07 UTC 2017 x86_64 prometheus (none)) source="main.go:90"
-INFO[0000] Loading configuration file prometheus.yml source="main.go:252"
-INFO[0000] Loading series map and head chunks... source="storage.go:428"
-INFO[0000] 0 series loaded. source="storage.go:439"
-INFO[0000] Starting target manager... source="targetmanager.go:63"
-INFO[0000] Listening on :9090 source="web.go:259"
+```sh
+/opt/prometheus/prometheus
```
-Now attempt to go to http://localhost:9090/. You should be presented with the
+Now attempt to go to <http://localhost:9090/>. You should be presented with the
prometheus homepage. This is a good point to talk about Prometheus’s data model
-which can be viewed here: https://prometheus.io/docs/concepts/data_model/ As
+which can be viewed here: <https://prometheus.io/docs/concepts/data_model/>. As
explained, we have two key elements in Prometheus metrics. We have the ‘metric’
and its ‘labels’. Labels allow for granularity between metrics. Let’s use our
previous example to further explain.
@@ -162,7 +166,7 @@ Let’s move our attention to Prometheus’s configuration. Prometheus gets it
config from the file located (in our example) at
`/opt/prometheus/prometheus.yml`. I won’t spend an extensive amount of time
going over the configuration values documented here:
-https://prometheus.io/docs/operating/configuration/. We will be adding a new
+<https://prometheus.io/docs/operating/configuration/>. We will be adding a new
“job” under the “scrape_configs”. Let’s make the “scrape_configs” section look
like this (we can use the dns name Netdata due to the custom user-defined
network we created in docker beforehand).
@@ -189,9 +193,11 @@ scrape_configs:
```
Let’s start prometheus once again by running `/opt/prometheus/prometheus`. If we
-now navigate to prometheus at ‘http://localhost:9090/targets’ we should see our
+now navigate to prometheus at <http://localhost:9090/targets>, we should see our
target being successfully scraped. If we now go back to the Prometheus
-homepage and begin to type ‘netdata_’ Prometheus should auto complete metrics
+homepage and begin to type ‘netdata\_’, Prometheus should autocomplete metrics
it is now scraping.
![](https://github.com/ldelossa/NetdataTutorial/raw/master/Screen%20Shot%202017-07-28%20at%205.13.43%20PM.png)
@@ -206,7 +212,7 @@ the following:
Our Netdata CPU graph should be showing some activity. Let’s represent this in
Prometheus. In order to do this let’s keep our metrics page open for reference:
-http://localhost:19999/api/v1/allmetrics?format=prometheus&help=yes We are
+<http://localhost:19999/api/v1/allmetrics?format=prometheus&help=yes> We are
setting out to graph the data in the CPU chart so let’s search for “system.cpu”
in the metrics page above. We come across a section of metrics with the first
comments `# COMMENT homogeneous chart "system.cpu", context "system.cpu", family
@@ -247,9 +253,9 @@ this point to read [this page](../backends/prometheus/#using-netdata-with-promet
The key point here is that Netdata can export metrics from its internal DB or
can send metrics “as-collected” by specifying the ‘source=as-collected’ url
parameter like so.
-http://localhost:19999/api/v1/allmetrics?format=prometheus&help=yes&types=yes&source=as-collected
+<http://localhost:19999/api/v1/allmetrics?format=prometheus&help=yes&types=yes&source=as-collected>
If you choose to use this method, you will need to use Prometheus's set of
-functions here: https://prometheus.io/docs/querying/functions/ to obtain useful
+functions here: <https://prometheus.io/docs/querying/functions/> to obtain useful
metrics as you are now dealing with raw counters from the system. For example
you will have to use the `irate()` function over a counter to get that metric's
rate per second. If your graphing needs are met by using the metrics returned by
@@ -258,6 +264,7 @@ that. If you find limitations then consider re-writing your queries using the
raw data and using Prometheus functions to get the desired chart.
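
For example, a minimal sketch of such a query (the metric name and label are hypothetical, patterned after the as-collected names on the endpoint above; the 1m window is an arbitrary choice):

```
irate(netdata_system_cpu_total{dimension="user"}[1m])
```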
## Grafana
+
Finally we make it to grafana. This is the easiest part in my opinion. This time
we will actually run the official grafana docker container as all configuration
we need to do is done via the GUI. Let’s run the following command:
@@ -266,7 +273,8 @@ we need to do is done via the GUI. Let’s run the following command:
docker run -i -p 3000:3000 --network=netdata-tutorial grafana/grafana
```
-This will get grafana running at ‘http://localhost:3000/’ Let’s go there and
+This will get grafana running at <http://localhost:3000/>. Let’s go there and
login using the credentials Admin:Admin.
The first thing we want to do is click ‘Add data source’. Let’s make it look
@@ -291,4 +299,4 @@ about the monitoring system until Prometheus cannot keep up with your scale.
Once this happens there are options presented in the Prometheus documentation
for solving this. Hope this was helpful, happy monitoring.
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fbackends%2FWALKTHROUGH&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fbackends%2FWALKTHROUGH&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/backends/aws_kinesis/Makefile.in b/backends/aws_kinesis/Makefile.in
new file mode 100644
index 00000000..e41b87ef
--- /dev/null
+++ b/backends/aws_kinesis/Makefile.in
@@ -0,0 +1,571 @@
+# Makefile.in generated by automake 1.15.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2017 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+VPATH = @srcdir@
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = backends/aws_kinesis
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(dist_libconfig_DATA) \
+ $(dist_noinst_DATA) $(am__DIST_COMMON)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
+am__vpath_adj = case $$p in \
+ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
+ *) f=$$p;; \
+ esac;
+am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
+am__install_max = 40
+am__nobase_strip_setup = \
+ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
+am__nobase_strip = \
+ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
+am__nobase_list = $(am__nobase_strip_setup); \
+ for p in $$list; do echo "$$p $$p"; done | \
+ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
+ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
+ if (++n[$$2] == $(am__install_max)) \
+ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
+ END { for (dir in files) print dir, files[dir] }'
+am__base_list = \
+ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
+ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
+am__uninstall_files_from_dir = { \
+ test -z "$$files" \
+ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \
+ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \
+ $(am__cd) "$$dir" && rm -f $$files; }; \
+ }
+am__installdirs = "$(DESTDIR)$(libconfigdir)"
+DATA = $(dist_libconfig_DATA) $(dist_noinst_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+am__DIST_COMMON = $(srcdir)/Makefile.in
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CUPSCONFIG = @CUPSCONFIG@
+CXX = @CXX@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CXX_BINARY = @CXX_BINARY@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+JSON_CFLAGS = @JSON_CFLAGS@
+JSON_LIBS = @JSON_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
+LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
+LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
+LIBCURL_LIBS = @LIBCURL_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
+LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
+LIBSSL_LIBS = @LIBSSL_LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
+OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
+OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
+OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
+OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
+OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
+OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
+OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
+OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
+OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
+OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
+OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
+OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
+PROTOBUF_LIBS = @PROTOBUF_LIBS@
+PROTOC = @PROTOC@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
+XENLIGHT_LIBS = @XENLIGHT_LIBS@
+YAJL_CFLAGS = @YAJL_CFLAGS@
+YAJL_LIBS = @YAJL_LIBS@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
+
+dist_libconfig_DATA = \
+ aws_kinesis.conf \
+ $(NULL)
+
+all: all-am
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu backends/aws_kinesis/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu backends/aws_kinesis/Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+install-dist_libconfigDATA: $(dist_libconfig_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_libconfig_DATA)'; test -n "$(libconfigdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(libconfigdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(libconfigdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(libconfigdir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(libconfigdir)" || exit $$?; \
+ done
+
+uninstall-dist_libconfigDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_libconfig_DATA)'; test -n "$(libconfigdir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(libconfigdir)'; $(am__uninstall_files_from_dir)
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(DATA)
+installdirs:
+ for dir in "$(DESTDIR)$(libconfigdir)"; do \
+ test -z "$$dir" || $(MKDIR_P) "$$dir"; \
+ done
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am: install-dist_libconfigDATA
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am: uninstall-dist_libconfigDATA
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dist_libconfigDATA install-dvi \
+ install-dvi-am install-exec install-exec-am install-html \
+ install-html-am install-info install-info-am install-man \
+ install-pdf install-pdf-am install-ps install-ps-am \
+ install-strip installcheck installcheck-am installdirs \
+ maintainer-clean maintainer-clean-generic mostlyclean \
+ mostlyclean-generic pdf pdf-am ps ps-am tags-am uninstall \
+ uninstall-am uninstall-dist_libconfigDATA
+
+.PRECIOUS: Makefile
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/backends/aws_kinesis/README.md b/backends/aws_kinesis/README.md
index 42497909..a6529f23 100644
--- a/backends/aws_kinesis/README.md
+++ b/backends/aws_kinesis/README.md
@@ -1,8 +1,8 @@
-# Using netdata with AWS Kinesis Data Streams
+# Using Netdata with AWS Kinesis Data Streams
## Prerequisites
-To use AWS Kinesis as a backend AWS SDK for C++ should be [installed](https://docs.aws.amazon.com/en_us/sdk-for-cpp/v1/developer-guide/setup.html) first. `libcrypto`, `libssl`, and `libcurl` are also required to compile netdata with Kinesis support enabled. Next, netdata should be re-installed from the source. The installer will detect that the required libraries are now available.
+To use AWS Kinesis as a backend, the AWS SDK for C++ should be [installed](https://docs.aws.amazon.com/en_us/sdk-for-cpp/v1/developer-guide/setup.html) first. `libcrypto`, `libssl`, and `libcurl` are also required to compile Netdata with Kinesis support enabled. Next, Netdata should be re-installed from source. The installer will detect that the required libraries are now available.
If the AWS SDK for C++ is being installed from source, it is useful to set `-DBUILD_ONLY="kinesis"`; otherwise, the build could take a very long time. Note that the default installation path for the libraries is `/usr/local/lib64`. Many Linux distributions don't include this path in the default library search path, so it is advisable to pass the following options to `cmake` when building the AWS SDK:
@@ -13,15 +13,18 @@ cmake -DCMAKE_INSTALL_LIBDIR=/usr/lib -DCMAKE_INSTALL_INCLUDEDIR=/usr/include -D
## Configuration
To enable sending data to the Kinesis backend, set the following options in `netdata.conf`:
+
```
[backend]
enabled = yes
type = kinesis
destination = us-east-1
```
+
Set the `destination` option to an AWS region.
-In the netdata configuration directory run `./edit-config aws_kinesis.conf` and set AWS credentials and stream name:
+In the Netdata configuration directory run `./edit-config aws_kinesis.conf` and set AWS credentials and stream name:
+
```
# AWS credentials
aws_access_key_id = your_access_key_id
@@ -30,9 +33,9 @@ aws_secret_access_key = your_secret_access_key
# destination stream
stream name = your_stream_name
```
-Alternatively, AWS credentials can be set for the *netdata* user using AWS SDK for C++ [standard methods](https://docs.aws.amazon.com/sdk-for-cpp/v1/developer-guide/credentials.html).
-A partition key for every record is computed automatically by the netdata with the purpose to distribute records across available shards evenly.
+Alternatively, AWS credentials can be set for the *netdata* user using AWS SDK for C++ [standard methods](https://docs.aws.amazon.com/sdk-for-cpp/v1/developer-guide/credentials.html).
+A partition key for every record is computed automatically by Netdata to distribute records evenly across the available shards.
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fbackends%2Faws_kinesis%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fbackends%2Faws_kinesis%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/backends/aws_kinesis/aws_kinesis.c b/backends/aws_kinesis/aws_kinesis.c
index d8b79364..b1ea4786 100644
--- a/backends/aws_kinesis/aws_kinesis.c
+++ b/backends/aws_kinesis/aws_kinesis.c
@@ -63,18 +63,11 @@ int read_kinesis_conf(const char *path, char **access_key_id_p, char **secret_ac
continue;
}
- if(!value) value = "";
+ if(!value)
+ value = "";
+ else
+ value = strip_quotes(value);
- // strip quotes
- if(*value == '"' || *value == '\'') {
- value++;
-
- s = value;
- while(*s) s++;
- if(s != value) s--;
-
- if(*s == '"' || *s == '\'') *s = '\0';
- }
if(name[0] == 'a' && name[4] == 'a' && !strcmp(name, "aws_access_key_id")) {
access_key_id = strdupz(value);
}
diff --git a/backends/backends.c b/backends/backends.c
index 15a0cb41..120c6e70 100644
--- a/backends/backends.c
+++ b/backends/backends.c
@@ -131,7 +131,8 @@ calculated_number backend_calculate_value_from_stored_data(
}
*/
for(rd->state->query_ops.init(rd, &handle, after, before) ; !rd->state->query_ops.is_finished(&handle) ; ) {
- n = rd->state->query_ops.next_metric(&handle);
+ time_t curr_t;
+ n = rd->state->query_ops.next_metric(&handle, &curr_t);
if(unlikely(!does_storage_number_exist(n))) {
// not collected
@@ -249,12 +250,11 @@ static void backends_main_cleanup(void *ptr) {
/**
* Set Kinesis variables
*
- * Set the variables necessaries to work with this specific backend.
+ * Set the variables necessary to work with this specific backend.
*
- * @param default_port the default port of the backend
+ * @param default_port the default port of the backend
* @param brc function called to check the result.
- * @param brf function called to format the msessage to the backend
- * @param type the backend string selector.
+ * @param brf function called to format the message to the backend
*/
void backend_set_kinesis_variables(int *default_port,
backend_response_checker_t brc,
@@ -278,12 +278,11 @@ void backend_set_kinesis_variables(int *default_port,
/**
* Set Prometheus variables
*
- * Set the variables necessaries to work with this specific backend.
+ * Set the variables necessary to work with this specific backend.
*
- * @param default_port the default port of the backend
+ * @param default_port the default port of the backend
* @param brc function called to check the result.
- * @param brf function called to format the msessage to the backend
- * @param type the backend string selector.
+ * @param brf function called to format the message to the backend
*/
void backend_set_prometheus_variables(int *default_port,
backend_response_checker_t brc,
@@ -301,14 +300,41 @@ void backend_set_prometheus_variables(int *default_port,
}
/**
+ * Set MongoDB variables
+ *
+ * Set the variables necessary to work with this specific backend.
+ *
+ * @param default_port the default port of the backend
+ * @param brc function called to check the result.
+ * @param brf function called to format the message to the backend
+ */
+void backend_set_mongodb_variables(int *default_port,
+ backend_response_checker_t brc,
+ backend_request_formatter_t brf)
+{
+ (void)default_port;
+#ifndef HAVE_MONGOC
+ (void)brc;
+ (void)brf;
+#endif
+
+#if HAVE_MONGOC
+ *brc = process_json_response;
+ if(BACKEND_OPTIONS_DATA_SOURCE(global_backend_options) == BACKEND_SOURCE_DATA_AS_COLLECTED)
+ *brf = format_dimension_collected_json_plaintext;
+ else
+ *brf = format_dimension_stored_json_plaintext;
+#endif
+}
+
+/**
* Set JSON variables
*
- * Set the variables necessaries to work with this specific backend.
+ * Set the variables necessary to work with this specific backend.
*
- * @param default_port the default port of the backend
+ * @param default_port the default port of the backend
* @param brc function called to check the result.
- * @param brf function called to format the msessage to the backend
- * @param type the backend string selector.
+ * @param brf function called to format the message to the backend
*/
void backend_set_json_variables(int *default_port,
backend_response_checker_t brc,
@@ -326,12 +352,11 @@ void backend_set_json_variables(int *default_port,
/**
* Set OpenTSDB HTTP variables
*
- * Set the variables necessaries to work with this specific backend.
+ * Set the variables necessary to work with this specific backend.
*
- * @param default_port the default port of the backend
+ * @param default_port the default port of the backend
* @param brc function called to check the result.
- * @param brf function called to format the msessage to the backend
- * @param type the backend string selector.
+ * @param brf function called to format the message to the backend
*/
void backend_set_opentsdb_http_variables(int *default_port,
backend_response_checker_t brc,
@@ -350,12 +375,11 @@ void backend_set_opentsdb_http_variables(int *default_port,
/**
* Set OpenTSDB Telnet variables
*
- * Set the variables necessaries to work with this specific backend.
+ * Set the variables necessary to work with this specific backend.
*
- * @param default_port the default port of the backend
+ * @param default_port the default port of the backend
* @param brc function called to check the result.
- * @param brf function called to format the msessage to the backend
- * @param type the backend string selector.
+ * @param brf function called to format the message to the backend
*/
void backend_set_opentsdb_telnet_variables(int *default_port,
backend_response_checker_t brc,
@@ -373,12 +397,11 @@ void backend_set_opentsdb_telnet_variables(int *default_port,
/**
* Set Graphite variables
*
- * Set the variables necessaries to work with this specific backend.
+ * Set the variables necessary to work with this specific backend.
*
- * @param default_port the default port of the backend
+ * @param default_port the default port of the backend
* @param brc function called to check the result.
- * @param brf function called to format the msessage to the backend
- * @param type the backend string selector.
+ * @param brf function called to format the message to the backend
*/
void backend_set_graphite_variables(int *default_port,
backend_response_checker_t brc,
@@ -421,6 +444,9 @@ BACKEND_TYPE backend_select_type(const char *type) {
else if (!strcmp(type, "kinesis") || !strcmp(type, "kinesis:plaintext")) {
return BACKEND_TYPE_KINESIS;
}
+ else if (!strcmp(type, "mongodb") || !strcmp(type, "mongodb:plaintext")) {
+ return BACKEND_TYPE_MONGODB;
+ }
return BACKEND_TYPE_UNKNOWN;
}
@@ -450,7 +476,18 @@ void *backends_main(void *ptr) {
#if ENABLE_PROMETHEUS_REMOTE_WRITE
int do_prometheus_remote_write = 0;
- BUFFER *http_request_header = buffer_create(1);
+ BUFFER *http_request_header = NULL;
+#endif
+
+#if HAVE_MONGOC
+ int do_mongodb = 0;
+ char *mongodb_uri = NULL;
+ char *mongodb_database = NULL;
+ char *mongodb_collection = NULL;
+
+ // set the default socket timeout in ms
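+    // e.g. with update_every = 2 s this yields 2 * 1000 - 500 = 1500 ms; shorter intervals fall back to 1000 ms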
+    int32_t mongodb_default_socket_timeout = (global_backend_update_every >= 2) ? (int32_t)(global_backend_update_every * MSEC_PER_SEC - 500) : 1000;
+
#endif
#ifdef ENABLE_HTTPS
@@ -524,6 +561,7 @@ void *backends_main(void *ptr) {
#if ENABLE_PROMETHEUS_REMOTE_WRITE
do_prometheus_remote_write = 1;
+ http_request_header = buffer_create(1);
init_write_request();
#else
error("BACKEND: Prometheus remote write support isn't compiled");
@@ -547,6 +585,30 @@ void *backends_main(void *ptr) {
backend_set_kinesis_variables(&default_port,&backend_response_checker,&backend_request_formatter);
break;
}
+ case BACKEND_TYPE_MONGODB: {
+#if HAVE_MONGOC
+ if(unlikely(read_mongodb_conf(netdata_configured_user_config_dir,
+ &mongodb_uri,
+ &mongodb_database,
+ &mongodb_collection))) {
+ error("BACKEND: mongodb backend type is set but cannot read its configuration from %s/mongodb.conf",
+ netdata_configured_user_config_dir);
+ goto cleanup;
+ }
+
+ if(likely(!mongodb_init(mongodb_uri, mongodb_database, mongodb_collection, mongodb_default_socket_timeout))) {
+ backend_set_mongodb_variables(&default_port, &backend_response_checker, &backend_request_formatter);
+ do_mongodb = 1;
+ }
+ else {
+ error("BACKEND: cannot initialize MongoDB backend");
+ goto cleanup;
+ }
+#else
+ error("BACKEND: MongoDB support isn't compiled");
+#endif // HAVE_MONGOC
+ break;
+ }
case BACKEND_TYPE_GRAPHITE: {
backend_set_graphite_variables(&default_port,&backend_response_checker,&backend_request_formatter);
break;
@@ -658,7 +720,8 @@ void *backends_main(void *ptr) {
size_t count_dims_total = 0;
#if ENABLE_PROMETHEUS_REMOTE_WRITE
- clear_write_request();
+ if(do_prometheus_remote_write)
+ clear_write_request();
#endif
rrd_rdlock();
RRDHOST *host;
@@ -833,12 +896,53 @@ void *backends_main(void *ptr) {
chart_sent_metrics = chart_buffered_metrics;
buffer_flush(b);
- }
- else {
-#else
- {
+ } else
#endif /* HAVE_KINESIS */
+#if HAVE_MONGOC
+ if(do_mongodb) {
+ size_t buffer_len = buffer_strlen(b);
+ size_t sent = 0;
+
+ while(sent < buffer_len) {
+ const char *first_char = buffer_tostring(b);
+
+            debug(D_BACKEND, "BACKEND: mongodb_insert(): uri = %s, database = %s, collection = %s, "
+                  "buffer = %zu", mongodb_uri, mongodb_database, mongodb_collection, buffer_len);
+
+ if(likely(!mongodb_insert((char *)first_char, (size_t)chart_buffered_metrics))) {
+ sent += buffer_len;
+ chart_transmission_successes++;
+ chart_receptions++;
+ }
+ else {
+ // oops! we couldn't send (all or some of the) data
+                error("BACKEND: failed to write data to database backend '%s'. Willing to write %zu bytes, wrote %zu bytes.",
+                    mongodb_uri, buffer_len, sent);
+
+ chart_transmission_failures++;
+ chart_data_lost_events++;
+ chart_lost_bytes += buffer_len;
+
+ // estimate the number of lost metrics
+ chart_lost_metrics += (collected_number)chart_buffered_metrics;
+
+ break;
+ }
+
+ if(unlikely(netdata_exit)) break;
+ }
+
+ chart_sent_bytes += sent;
+ if(likely(sent == buffer_len))
+ chart_sent_metrics = chart_buffered_metrics;
+
+ buffer_flush(b);
+ } else
+#endif /* HAVE_MONGOC */
+
+ {
+
// ------------------------------------------------------------------------
// if we are connected, receive a response, without blocking
@@ -1027,14 +1131,14 @@ void *backends_main(void *ptr) {
#if ENABLE_PROMETHEUS_REMOTE_WRITE
- if(failures) {
+ if(do_prometheus_remote_write && failures) {
(void) buffer_on_failures;
failures = 0;
chart_lost_bytes = chart_buffered_bytes = get_write_request_size(); // estimated write request size
chart_data_lost_events++;
chart_lost_metrics = chart_buffered_metrics;
- }
-#else
+ } else
+#endif
if(failures > buffer_on_failures) {
// too bad! we are going to lose data
chart_lost_bytes += buffer_strlen(b);
@@ -1044,7 +1148,6 @@ void *backends_main(void *ptr) {
chart_data_lost_events++;
chart_lost_metrics = chart_buffered_metrics;
}
-#endif /* ENABLE_PROMETHEUS_REMOTE_WRITE */
if(unlikely(netdata_exit)) break;
@@ -1101,9 +1204,17 @@ cleanup:
#endif
#if ENABLE_PROMETHEUS_REMOTE_WRITE
- if(do_prometheus_remote_write) {
- buffer_free(http_request_header);
+ buffer_free(http_request_header);
+ if(do_prometheus_remote_write)
protocol_buffers_shutdown();
+#endif
+
+#if HAVE_MONGOC
+ if(do_mongodb) {
+ mongodb_cleanup();
+ freez(mongodb_uri);
+ freez(mongodb_database);
+ freez(mongodb_collection);
}
#endif
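
For readers following the new MongoDB path in `backends_main()` above, here is a simplified, self-contained sketch of its send-and-accounting loop. This is a minimal sketch, not the real implementation: `fake_mongodb_insert()` is a hypothetical stand-in for `mongodb_insert()`, and the `chart_*` counters are plain locals instead of Netdata's monitoring variables.

```c
#include <stdio.h>
#include <string.h>

/* Stand-in for mongodb_insert(): returns 0 on success, non-zero on failure.
 * The real function inserts the buffered documents into the configured
 * MongoDB collection. */
static int fake_mongodb_insert(const char *data, size_t metrics) {
    (void)metrics;
    return data[0] == '\0';            /* "fail" only on an empty buffer */
}

int main(void) {
    const char *buffer = "{\"metric\":\"net.eth0\",\"value\":42}\n";
    size_t buffer_len = strlen(buffer);
    size_t buffered_metrics = 1;

    size_t sent = 0;
    size_t transmission_successes = 0, transmission_failures = 0;
    size_t lost_bytes = 0, lost_metrics = 0;

    /* Same shape as the loop in backends_main(): keep trying until the whole
     * buffer is accounted for, and record losses on the first failure. */
    while(sent < buffer_len) {
        if(!fake_mongodb_insert(buffer, buffered_metrics)) {
            sent += buffer_len;        /* the whole buffer goes out at once */
            transmission_successes++;
        }
        else {
            transmission_failures++;
            lost_bytes += buffer_len;
            lost_metrics += buffered_metrics;
            break;                     /* the data is lost; don't spin forever */
        }
    }

    printf("sent=%zu/%zu successes=%zu failures=%zu lost_bytes=%zu lost_metrics=%zu\n",
           sent, buffer_len, transmission_successes, transmission_failures,
           lost_bytes, lost_metrics);
    return 0;
}
```

The real loop additionally flushes the buffer after the attempt and checks `netdata_exit` between iterations, so agent shutdown is never blocked on a slow backend.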
diff --git a/backends/backends.h b/backends/backends.h
index 8d0bda41..9da04cce 100644
--- a/backends/backends.h
+++ b/backends/backends.h
@@ -16,13 +16,14 @@ typedef enum backend_options {
} BACKEND_OPTIONS;
typedef enum backend_types {
- BACKEND_TYPE_UNKNOWN, //Invalid type
- BACKEND_TYPE_GRAPHITE, //Send plain text to Graphite
- BACKEND_TYPE_OPENTSDB_USING_TELNET, //Send data to OpenTSDB using telnet API
- BACKEND_TYPE_OPENTSDB_USING_HTTP, //Send data to OpenTSDB using HTTP API
- BACKEND_TYPE_JSON, //Stores the data using JSON.
- BACKEND_TYPE_PROMETEUS, //The user selected to use Prometheus backend
- BACKEND_TYPE_KINESIS //Send message to AWS Kinesis
+ BACKEND_TYPE_UNKNOWN, // Invalid type
+ BACKEND_TYPE_GRAPHITE, // Send plain text to Graphite
+ BACKEND_TYPE_OPENTSDB_USING_TELNET, // Send data to OpenTSDB using telnet API
+ BACKEND_TYPE_OPENTSDB_USING_HTTP, // Send data to OpenTSDB using HTTP API
+ BACKEND_TYPE_JSON, // Stores the data using JSON.
+ BACKEND_TYPE_PROMETEUS, // The user selected to use Prometheus backend
+ BACKEND_TYPE_KINESIS, // Send message to AWS Kinesis
+ BACKEND_TYPE_MONGODB // Send data to MongoDB collection
} BACKEND_TYPE;
@@ -56,6 +57,22 @@ extern calculated_number backend_calculate_value_from_stored_data(
extern size_t backend_name_copy(char *d, const char *s, size_t usable);
extern int discard_response(BUFFER *b, const char *backend);
+static inline char *strip_quotes(char *str) {
+ if(*str == '"' || *str == '\'') {
+ char *s;
+
+ str++;
+
+ s = str;
+ while(*s) s++;
+ if(s != str) s--;
+
+ if(*s == '"' || *s == '\'') *s = '\0';
+ }
+
+ return str;
+}
+
#endif // BACKENDS_INTERNALS
#include "backends/prometheus/backend_prometheus.h"
@@ -71,4 +88,8 @@ extern int discard_response(BUFFER *b, const char *backend);
#include "backends/prometheus/remote_write/remote_write.h"
#endif
+#if HAVE_MONGOC
+#include "backends/mongodb/mongodb.h"
+#endif
+
#endif /* NETDATA_BACKENDS_H */
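
The new `strip_quotes()` helper above, which the `aws_kinesis.c` refactor earlier in this diff now calls, strips one pair of leading/trailing single or double quotes in place. A small standalone check of its behavior; the helper is copied verbatim from the diff, while `main()` is illustrative only:

```c
#include <stdio.h>

/* Copied from backends.h above: skips a leading quote in place and
 * terminates the string at a trailing quote, if present. */
static char *strip_quotes(char *str) {
    if(*str == '"' || *str == '\'') {
        char *s;

        str++;

        s = str;
        while(*s) s++;
        if(s != str) s--;

        if(*s == '"' || *s == '\'') *s = '\0';
    }

    return str;
}

int main(void) {
    char a[] = "\"quoted value\"";
    char b[] = "'singly quoted'";
    char c[] = "unquoted";

    printf("%s\n", strip_quotes(a));   /* quoted value  */
    printf("%s\n", strip_quotes(b));   /* singly quoted */
    printf("%s\n", strip_quotes(c));   /* unquoted      */
    return 0;
}
```

Note that the helper only checks that the first and last characters are *some* quote, so a mismatched pair such as `"value'` is stripped as well.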
diff --git a/backends/graphite/Makefile.in b/backends/graphite/Makefile.in
new file mode 100644
index 00000000..e13c5ec3
--- /dev/null
+++ b/backends/graphite/Makefile.in
@@ -0,0 +1,507 @@
+# Makefile.in generated by automake 1.15.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2017 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+VPATH = @srcdir@
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = backends/graphite
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+am__DIST_COMMON = $(srcdir)/Makefile.in
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CUPSCONFIG = @CUPSCONFIG@
+CXX = @CXX@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CXX_BINARY = @CXX_BINARY@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+JSON_CFLAGS = @JSON_CFLAGS@
+JSON_LIBS = @JSON_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
+LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
+LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
+LIBCURL_LIBS = @LIBCURL_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
+LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
+LIBSSL_LIBS = @LIBSSL_LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
+OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
+OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
+OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
+OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
+OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
+OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
+OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
+OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
+OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
+OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
+OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
+OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
+PROTOBUF_LIBS = @PROTOBUF_LIBS@
+PROTOC = @PROTOC@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
+XENLIGHT_LIBS = @XENLIGHT_LIBS@
+YAJL_CFLAGS = @YAJL_CFLAGS@
+YAJL_LIBS = @YAJL_LIBS@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+all: all-am
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu backends/graphite/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu backends/graphite/Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile
+installdirs:
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags-am uninstall uninstall-am
+
+.PRECIOUS: Makefile
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/backends/json/Makefile.in b/backends/json/Makefile.in
new file mode 100644
index 00000000..2d766ddd
--- /dev/null
+++ b/backends/json/Makefile.in
@@ -0,0 +1,507 @@
+# Makefile.in generated by automake 1.15.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2017 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+VPATH = @srcdir@
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = backends/json
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+am__DIST_COMMON = $(srcdir)/Makefile.in
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CUPSCONFIG = @CUPSCONFIG@
+CXX = @CXX@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CXX_BINARY = @CXX_BINARY@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+JSON_CFLAGS = @JSON_CFLAGS@
+JSON_LIBS = @JSON_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
+LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
+LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
+LIBCURL_LIBS = @LIBCURL_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
+LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
+LIBSSL_LIBS = @LIBSSL_LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
+OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
+OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
+OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
+OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
+OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
+OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
+OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
+OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
+OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
+OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
+OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
+OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
+PROTOBUF_LIBS = @PROTOBUF_LIBS@
+PROTOC = @PROTOC@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
+XENLIGHT_LIBS = @XENLIGHT_LIBS@
+YAJL_CFLAGS = @YAJL_CFLAGS@
+YAJL_LIBS = @YAJL_LIBS@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+all: all-am
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu backends/json/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu backends/json/Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile
+installdirs:
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags-am uninstall uninstall-am
+
+.PRECIOUS: Makefile
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/backends/mongodb/Makefile.am b/backends/mongodb/Makefile.am
new file mode 100644
index 00000000..89e14a8d
--- /dev/null
+++ b/backends/mongodb/Makefile.am
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
+
+dist_libconfig_DATA = \
+ mongodb.conf \
+ $(NULL)
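
The `dist_libconfig_DATA` entry above ships a `mongodb.conf`, which `read_mongodb_conf()` (see the backends.c diff earlier) parses for a URI, a database name, and a collection name. A plausible sketch of that file follows; the exact key names are an assumption inferred from the function's output parameters, not confirmed by this diff:

```
# hypothetical mongodb.conf sketch (key names inferred, not confirmed by this diff)

# MongoDB connection URI
uri = mongodb://localhost

# the database to write metrics to
database = your_database_name

# the collection to write metrics to
collection = your_collection_name
```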
diff --git a/backends/mongodb/Makefile.in b/backends/mongodb/Makefile.in
new file mode 100644
index 00000000..aa138707
--- /dev/null
+++ b/backends/mongodb/Makefile.in
@@ -0,0 +1,571 @@
+# Makefile.in generated by automake 1.15.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2017 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+VPATH = @srcdir@
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = backends/mongodb
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(dist_libconfig_DATA) \
+ $(dist_noinst_DATA) $(am__DIST_COMMON)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
+am__vpath_adj = case $$p in \
+ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
+ *) f=$$p;; \
+ esac;
+am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
+am__install_max = 40
+am__nobase_strip_setup = \
+ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
+am__nobase_strip = \
+ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
+am__nobase_list = $(am__nobase_strip_setup); \
+ for p in $$list; do echo "$$p $$p"; done | \
+ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
+ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
+ if (++n[$$2] == $(am__install_max)) \
+ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
+ END { for (dir in files) print dir, files[dir] }'
+am__base_list = \
+ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
+ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
+am__uninstall_files_from_dir = { \
+ test -z "$$files" \
+ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \
+ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \
+ $(am__cd) "$$dir" && rm -f $$files; }; \
+ }
+am__installdirs = "$(DESTDIR)$(libconfigdir)"
+DATA = $(dist_libconfig_DATA) $(dist_noinst_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+am__DIST_COMMON = $(srcdir)/Makefile.in
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CUPSCONFIG = @CUPSCONFIG@
+CXX = @CXX@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CXX_BINARY = @CXX_BINARY@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+JSON_CFLAGS = @JSON_CFLAGS@
+JSON_LIBS = @JSON_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
+LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
+LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
+LIBCURL_LIBS = @LIBCURL_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
+LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
+LIBSSL_LIBS = @LIBSSL_LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
+OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
+OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
+OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
+OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
+OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
+OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
+OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
+OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
+OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
+OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
+OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
+OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
+PROTOBUF_LIBS = @PROTOBUF_LIBS@
+PROTOC = @PROTOC@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
+XENLIGHT_LIBS = @XENLIGHT_LIBS@
+YAJL_CFLAGS = @YAJL_CFLAGS@
+YAJL_LIBS = @YAJL_LIBS@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
+
+dist_libconfig_DATA = \
+ mongodb.conf \
+ $(NULL)
+
+all: all-am
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu backends/mongodb/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu backends/mongodb/Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+install-dist_libconfigDATA: $(dist_libconfig_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_libconfig_DATA)'; test -n "$(libconfigdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(libconfigdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(libconfigdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(libconfigdir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(libconfigdir)" || exit $$?; \
+ done
+
+uninstall-dist_libconfigDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_libconfig_DATA)'; test -n "$(libconfigdir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(libconfigdir)'; $(am__uninstall_files_from_dir)
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(DATA)
+installdirs:
+ for dir in "$(DESTDIR)$(libconfigdir)"; do \
+ test -z "$$dir" || $(MKDIR_P) "$$dir"; \
+ done
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am: install-dist_libconfigDATA
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am: uninstall-dist_libconfigDATA
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dist_libconfigDATA install-dvi \
+ install-dvi-am install-exec install-exec-am install-html \
+ install-html-am install-info install-info-am install-man \
+ install-pdf install-pdf-am install-ps install-ps-am \
+ install-strip installcheck installcheck-am installdirs \
+ maintainer-clean maintainer-clean-generic mostlyclean \
+ mostlyclean-generic pdf pdf-am ps ps-am tags-am uninstall \
+ uninstall-am uninstall-dist_libconfigDATA
+
+.PRECIOUS: Makefile
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/backends/mongodb/README.md b/backends/mongodb/README.md
new file mode 100644
index 00000000..7538fe8b
--- /dev/null
+++ b/backends/mongodb/README.md
@@ -0,0 +1,32 @@
+# MongoDB backend
+
+## Prerequisites
+
+To use MongoDB as a backend, `libmongoc` 1.7.0 or higher must first be [installed](http://mongoc.org/libmongoc/current/installing.html). Next, reinstall Netdata from source; the installer will detect that the required libraries are now available.
+
+## Configuration
+
+To enable sending data to the MongoDB backend, set the following options in `netdata.conf`:
+
+```
+[backend]
+ enabled = yes
+ type = mongodb
+```
+
+In the Netdata configuration directory, run `./edit-config mongodb.conf` and set the [MongoDB URI](https://docs.mongodb.com/manual/reference/connection-string/), database name, and collection name:
+
+```
+# URI
+uri = mongodb://<hostname>
+
+# database name
+database = your_database_name
+
+# collection name
+collection = your_collection_name
+```
+
+The default socket timeout is derived from the backend update interval: it is 500 ms shorter than the interval, but never less than 1000 ms. For example, a 10-second update interval yields a default timeout of 9500 ms. You can override this with the `sockettimeoutms` MongoDB URI option.
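+
+For example, to pin the socket timeout at 5000 ms regardless of the update interval, the option can be appended directly to the URI (the hostname and port here are illustrative):
+
+```
+uri = mongodb://localhost:27017/?sockettimeoutms=5000
+```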
+
diff --git a/backends/mongodb/mongodb.c b/backends/mongodb/mongodb.c
new file mode 100644
index 00000000..f40d8fa5
--- /dev/null
+++ b/backends/mongodb/mongodb.c
@@ -0,0 +1,189 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#define BACKENDS_INTERNALS
+#include "mongodb.h"
+#include <mongoc.h>
+
+#define CONFIG_FILE_LINE_MAX ((CONFIG_MAX_NAME + CONFIG_MAX_VALUE + 1024) * 2)
+
+mongoc_client_t *mongodb_client;
+mongoc_collection_t *mongodb_collection;
+
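+// mongodb_init(): parse the connection URI, apply the socket timeout
+// (a sockettimeoutms option already present in the URI takes precedence
+// over the default derived from the backend update interval), create the
+// client, and obtain a handle to the target collection.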
+int mongodb_init(const char *uri_string,
+ const char *database_string,
+ const char *collection_string,
+ int32_t default_socket_timeout) {
+ mongoc_uri_t *uri;
+ bson_error_t error;
+
+ mongoc_init();
+
+ uri = mongoc_uri_new_with_error(uri_string, &error);
+ if(unlikely(!uri)) {
+ error("BACKEND: failed to parse URI: %s. Error message: %s", uri_string, error.message);
+ return 1;
+ }
+
+ int32_t socket_timeout = mongoc_uri_get_option_as_int32(uri, MONGOC_URI_SOCKETTIMEOUTMS, default_socket_timeout);
+ if(!mongoc_uri_set_option_as_int32(uri, MONGOC_URI_SOCKETTIMEOUTMS, socket_timeout)) {
+ error("BACKEND: failed to set %s to the value %d", MONGOC_URI_SOCKETTIMEOUTMS, socket_timeout);
+ return 1;
+ };
+
+ mongodb_client = mongoc_client_new_from_uri(uri);
+ if(unlikely(!mongodb_client)) {
+ error("BACKEND: failed to create a new client");
+ return 1;
+ }
+
+ if(!mongoc_client_set_appname(mongodb_client, "netdata")) {
+ error("BACKEND: failed to set client appname");
+ };
+
+ mongodb_collection = mongoc_client_get_collection(mongodb_client, database_string, collection_string);
+
+ mongoc_uri_destroy(uri);
+
+ return 0;
+}
+
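+// free_bson(): destroy the first n_documents BSON documents and release
+// the pointer array itself.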
+void free_bson(bson_t **insert, size_t n_documents) {
+ size_t i;
+
+ for(i = 0; i < n_documents; i++)
+ bson_destroy(insert[i]);
+
+ free(insert);
+}
+
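+// mongodb_insert(): split the buffer of newline-delimited JSON documents,
+// convert each line to BSON, and insert them into the collection with a
+// single bulk operation. Returns 0 on success, 1 on failure.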
+int mongodb_insert(char *data, size_t n_metrics) {
+ bson_t **insert = calloc(n_metrics, sizeof(bson_t *));
+ bson_error_t error;
+ char *start = data, *end = data;
+ size_t n_documents = 0;
+
+    while(*end && n_documents < n_metrics) {
+ while(*end && *end != '\n') end++;
+
+ if(likely(*end)) {
+ *end = '\0';
+ end++;
+ }
+ else {
+ break;
+ }
+
+ insert[n_documents] = bson_new_from_json((const uint8_t *)start, -1, &error);
+
+ if(unlikely(!insert[n_documents])) {
+ error("BACKEND: %s", error.message);
+ free_bson(insert, n_documents);
+ return 1;
+ }
+
+ start = end;
+
+ n_documents++;
+ }
+
+ if(unlikely(!mongoc_collection_insert_many(mongodb_collection, (const bson_t **)insert, n_documents, NULL, NULL, &error))) {
+ error("BACKEND: %s", error.message);
+ free_bson(insert, n_documents);
+ return 1;
+ }
+
+ free_bson(insert, n_documents);
+
+ return 0;
+}
+
+void mongodb_cleanup() {
+ mongoc_collection_destroy(mongodb_collection);
+ mongoc_client_destroy(mongodb_client);
+ mongoc_cleanup();
+
+ return;
+}
+
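+// read_mongodb_conf(): parse mongodb.conf ('name = value' lines, '#' for
+// comments) and return the uri, database, and collection settings through
+// the output pointers. The collection name is mandatory.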
+int read_mongodb_conf(const char *path, char **uri_p, char **database_p, char **collection_p) {
+ char *uri = *uri_p;
+ char *database = *database_p;
+ char *collection = *collection_p;
+
+ if(unlikely(uri)) freez(uri);
+ if(unlikely(database)) freez(database);
+ if(unlikely(collection)) freez(collection);
+ uri = NULL;
+ database = NULL;
+ collection = NULL;
+
+ int line = 0;
+
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s/mongodb.conf", path);
+
+ char buffer[CONFIG_FILE_LINE_MAX + 1], *s;
+
+ debug(D_BACKEND, "BACKEND: opening config file '%s'", filename);
+
+ FILE *fp = fopen(filename, "r");
+ if(!fp) {
+ return 1;
+ }
+
+ while(fgets(buffer, CONFIG_FILE_LINE_MAX, fp) != NULL) {
+ buffer[CONFIG_FILE_LINE_MAX] = '\0';
+ line++;
+
+ s = trim(buffer);
+ if(!s || *s == '#') {
+ debug(D_BACKEND, "BACKEND: ignoring line %d of file '%s', it is empty.", line, filename);
+ continue;
+ }
+
+ char *name = s;
+ char *value = strchr(s, '=');
+ if(unlikely(!value)) {
+ error("BACKEND: ignoring line %d ('%s') of file '%s', there is no = in it.", line, s, filename);
+ continue;
+ }
+ *value = '\0';
+ value++;
+
+ name = trim(name);
+ value = trim(value);
+
+ if(unlikely(!name || *name == '#')) {
+ error("BACKEND: ignoring line %d of file '%s', name is empty.", line, filename);
+ continue;
+ }
+
+ if(!value)
+ value = "";
+ else
+ value = strip_quotes(value);
+
+ if(name[0] == 'u' && !strcmp(name, "uri")) {
+ uri = strdupz(value);
+ }
+ else if(name[0] == 'd' && !strcmp(name, "database")) {
+ database = strdupz(value);
+ }
+ else if(name[0] == 'c' && !strcmp(name, "collection")) {
+ collection = strdupz(value);
+ }
+ }
+
+ fclose(fp);
+
+ if(unlikely(!collection || !*collection)) {
+ error("BACKEND: collection name is a mandatory MongoDB parameter, but it is not configured");
+ return 1;
+ }
+
+ *uri_p = uri;
+ *database_p = database;
+ *collection_p = collection;
+
+ return 0;
+}
diff --git a/backends/mongodb/mongodb.conf b/backends/mongodb/mongodb.conf
new file mode 100644
index 00000000..11ea6efb
--- /dev/null
+++ b/backends/mongodb/mongodb.conf
@@ -0,0 +1,12 @@
+# MongoDB backend configuration
+#
+# All options in this file are mandatory
+
+# URI
+uri =
+
+# database name
+database =
+
+# collection name
+collection =
diff --git a/backends/mongodb/mongodb.h b/backends/mongodb/mongodb.h
new file mode 100644
index 00000000..ffb27690
--- /dev/null
+++ b/backends/mongodb/mongodb.h
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_BACKEND_MONGODB_H
+#define NETDATA_BACKEND_MONGODB_H
+
+#include "backends/backends.h"
+
+extern int mongodb_init(const char *uri_string, const char *database_string, const char *collection_string, const int32_t socket_timeout);
+
+extern int mongodb_insert(char *data, size_t n_metrics);
+
+extern void mongodb_cleanup();
+
+extern int read_mongodb_conf(const char *path, char **uri_p, char **database_p, char **collection_p);
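+
+// Typical call sequence (a sketch, not enforced by the API): first
+// read_mongodb_conf() to load the settings, then mongodb_init() once at
+// startup, mongodb_insert() for each buffered batch of newline-delimited
+// JSON documents, and mongodb_cleanup() at shutdown.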
+
+#endif //NETDATA_BACKEND_MONGODB_H
diff --git a/backends/opentsdb/Makefile.in b/backends/opentsdb/Makefile.in
new file mode 100644
index 00000000..89ee50ac
--- /dev/null
+++ b/backends/opentsdb/Makefile.in
@@ -0,0 +1,507 @@
+# Makefile.in generated by automake 1.15.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2017 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+VPATH = @srcdir@
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = backends/opentsdb
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+am__DIST_COMMON = $(srcdir)/Makefile.in
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CUPSCONFIG = @CUPSCONFIG@
+CXX = @CXX@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CXX_BINARY = @CXX_BINARY@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+JSON_CFLAGS = @JSON_CFLAGS@
+JSON_LIBS = @JSON_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
+LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
+LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
+LIBCURL_LIBS = @LIBCURL_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
+LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
+LIBSSL_LIBS = @LIBSSL_LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
+OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
+OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
+OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
+OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
+OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
+OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
+OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
+OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
+OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
+OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
+OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
+OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
+PROTOBUF_LIBS = @PROTOBUF_LIBS@
+PROTOC = @PROTOC@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
+XENLIGHT_LIBS = @XENLIGHT_LIBS@
+YAJL_CFLAGS = @YAJL_CFLAGS@
+YAJL_LIBS = @YAJL_LIBS@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+all: all-am
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu backends/opentsdb/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu backends/opentsdb/Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile
+installdirs:
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags-am uninstall uninstall-am
+
+.PRECIOUS: Makefile
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/backends/opentsdb/README.md b/backends/opentsdb/README.md
deleted file mode 100644
index ab1f08bd..00000000
--- a/backends/opentsdb/README.md
+++ /dev/null
@@ -1,25 +0,0 @@
-# OpenTSDB with HTTP
-
-Netdata can easily communicate with OpenTSDB using HTTP API. To enable this channel, set the following options in your `netdata.conf`:
-
-```
-[backend]
- type = opentsdb:http
- destination = localhost:4242
-```
-
-In this example, OpenTSDB is running with its default port, which is `4242`. If you run OpenTSDB on a different port, change the `destination = localhost:4242` line accordingly.
-
-## HTTPS
-
-As of [v1.16.0](https://github.com/netdata/netdata/releases/tag/v1.16.0), Netdata can send metrics to OpenTSDB using TLS/SSL. Unfortunately, OpenTSDB does not support encrypted connections, so you will have to configure a reverse proxy to enable HTTPS communication between Netdata and OpenTSDB. You can set up a reverse proxy with [Nginx](../../docs/Running-behind-nginx.md).
-
-After your proxy is configured, make the following changes to `netdata.conf`:
-
-```
-[backend]
- type = opentsdb:https
- destination = localhost:8082
-```
-
-In this example, we used the port `8082` for our reverse proxy. If your reverse proxy listens on a different port, change the `destination = localhost:8082` line accordingly.
diff --git a/backends/prometheus/Makefile.in b/backends/prometheus/Makefile.in
new file mode 100644
index 00000000..1c2a1cb1
--- /dev/null
+++ b/backends/prometheus/Makefile.in
@@ -0,0 +1,698 @@
+# Makefile.in generated by automake 1.15.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2017 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+VPATH = @srcdir@
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = backends/prometheus
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(dist_noinst_DATA) \
+ $(am__DIST_COMMON)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \
+ ctags-recursive dvi-recursive html-recursive info-recursive \
+ install-data-recursive install-dvi-recursive \
+ install-exec-recursive install-html-recursive \
+ install-info-recursive install-pdf-recursive \
+ install-ps-recursive install-recursive installcheck-recursive \
+ installdirs-recursive pdf-recursive ps-recursive \
+ tags-recursive uninstall-recursive
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_noinst_DATA)
+RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \
+ distclean-recursive maintainer-clean-recursive
+am__recursive_targets = \
+ $(RECURSIVE_TARGETS) \
+ $(RECURSIVE_CLEAN_TARGETS) \
+ $(am__extra_recursive_targets)
+AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \
+ distdir
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+# Read a list of newline-separated strings from the standard input,
+# and print each of them once, without duplicates. Input order is
+# *not* preserved.
+am__uniquify_input = $(AWK) '\
+ BEGIN { nonempty = 0; } \
+ { items[$$0] = 1; nonempty = 1; } \
+ END { if (nonempty) { for (i in items) print i; }; } \
+'
+# Make sure the list of sources is unique. This is necessary because,
+# e.g., the same source file might be shared among _SOURCES variables
+# for different programs/libraries.
+am__define_uniq_tagged_files = \
+ list='$(am__tagged_files)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | $(am__uniquify_input)`
+ETAGS = etags
+CTAGS = ctags
+DIST_SUBDIRS = $(SUBDIRS)
+am__DIST_COMMON = $(srcdir)/Makefile.in
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+am__relativize = \
+ dir0=`pwd`; \
+ sed_first='s,^\([^/]*\)/.*$$,\1,'; \
+ sed_rest='s,^[^/]*/*,,'; \
+ sed_last='s,^.*/\([^/]*\)$$,\1,'; \
+ sed_butlast='s,/*[^/]*$$,,'; \
+ while test -n "$$dir1"; do \
+ first=`echo "$$dir1" | sed -e "$$sed_first"`; \
+ if test "$$first" != "."; then \
+ if test "$$first" = ".."; then \
+ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \
+ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \
+ else \
+ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \
+ if test "$$first2" = "$$first"; then \
+ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \
+ else \
+ dir2="../$$dir2"; \
+ fi; \
+ dir0="$$dir0"/"$$first"; \
+ fi; \
+ fi; \
+ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \
+ done; \
+ reldir="$$dir2"
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CUPSCONFIG = @CUPSCONFIG@
+CXX = @CXX@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CXX_BINARY = @CXX_BINARY@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+JSON_CFLAGS = @JSON_CFLAGS@
+JSON_LIBS = @JSON_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
+LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
+LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
+LIBCURL_LIBS = @LIBCURL_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
+LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
+LIBSSL_LIBS = @LIBSSL_LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
+OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
+OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
+OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
+OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
+OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
+OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
+OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
+OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
+OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
+OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
+OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
+OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
+PROTOBUF_LIBS = @PROTOBUF_LIBS@
+PROTOC = @PROTOC@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
+XENLIGHT_LIBS = @XENLIGHT_LIBS@
+YAJL_CFLAGS = @YAJL_CFLAGS@
+YAJL_LIBS = @YAJL_LIBS@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+SUBDIRS = \
+ remote_write \
+ $(NULL)
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
+
+all: all-recursive
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu backends/prometheus/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu backends/prometheus/Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+
+# This directory's subdirectories are mostly independent; you can cd
+# into them and run 'make' without going through this Makefile.
+# To change the values of 'make' variables: instead of editing Makefiles,
+# (1) if the variable is set in 'config.status', edit 'config.status'
+# (which will cause the Makefiles to be regenerated when you run 'make');
+# (2) otherwise, pass the desired values on the 'make' command line.
+$(am__recursive_targets):
+ @fail=; \
+ if $(am__make_keepgoing); then \
+ failcom='fail=yes'; \
+ else \
+ failcom='exit 1'; \
+ fi; \
+ dot_seen=no; \
+ target=`echo $@ | sed s/-recursive//`; \
+ case "$@" in \
+ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \
+ *) list='$(SUBDIRS)' ;; \
+ esac; \
+ for subdir in $$list; do \
+ echo "Making $$target in $$subdir"; \
+ if test "$$subdir" = "."; then \
+ dot_seen=yes; \
+ local_target="$$target-am"; \
+ else \
+ local_target="$$target"; \
+ fi; \
+ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \
+ || eval $$failcom; \
+ done; \
+ if test "$$dot_seen" = "no"; then \
+ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \
+ fi; test -z "$$fail"
+
+ID: $(am__tagged_files)
+ $(am__define_uniq_tagged_files); mkid -fID $$unique
+tags: tags-recursive
+TAGS: tags
+
+tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+ set x; \
+ here=`pwd`; \
+ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \
+ include_option=--etags-include; \
+ empty_fix=.; \
+ else \
+ include_option=--include; \
+ empty_fix=; \
+ fi; \
+ list='$(SUBDIRS)'; for subdir in $$list; do \
+ if test "$$subdir" = .; then :; else \
+ test ! -f $$subdir/TAGS || \
+ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \
+ fi; \
+ done; \
+ $(am__define_uniq_tagged_files); \
+ shift; \
+ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
+ test -n "$$unique" || unique=$$empty_fix; \
+ if test $$# -gt 0; then \
+ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+ "$$@" $$unique; \
+ else \
+ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+ $$unique; \
+ fi; \
+ fi
+ctags: ctags-recursive
+
+CTAGS: ctags
+ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+ $(am__define_uniq_tagged_files); \
+ test -z "$(CTAGS_ARGS)$$unique" \
+ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
+ $$unique
+
+GTAGS:
+ here=`$(am__cd) $(top_builddir) && pwd` \
+ && $(am__cd) $(top_srcdir) \
+ && gtags -i $(GTAGS_ARGS) "$$here"
+cscopelist: cscopelist-recursive
+
+cscopelist-am: $(am__tagged_files)
+ list='$(am__tagged_files)'; \
+ case "$(srcdir)" in \
+ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \
+ *) sdir=$(subdir)/$(srcdir) ;; \
+ esac; \
+ for i in $$list; do \
+ if test -f "$$i"; then \
+ echo "$(subdir)/$$i"; \
+ else \
+ echo "$$sdir/$$i"; \
+ fi; \
+ done >> $(top_builddir)/cscope.files
+
+distclean-tags:
+ -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+ @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \
+ if test "$$subdir" = .; then :; else \
+ $(am__make_dryrun) \
+ || test -d "$(distdir)/$$subdir" \
+ || $(MKDIR_P) "$(distdir)/$$subdir" \
+ || exit 1; \
+ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \
+ $(am__relativize); \
+ new_distdir=$$reldir; \
+ dir1=$$subdir; dir2="$(top_distdir)"; \
+ $(am__relativize); \
+ new_top_distdir=$$reldir; \
+ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \
+ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \
+ ($(am__cd) $$subdir && \
+ $(MAKE) $(AM_MAKEFLAGS) \
+ top_distdir="$$new_top_distdir" \
+ distdir="$$new_distdir" \
+ am__remove_distdir=: \
+ am__skip_length_check=: \
+ am__skip_mode_fix=: \
+ distdir) \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-recursive
+all-am: Makefile $(DATA)
+installdirs: installdirs-recursive
+installdirs-am:
+install: install-recursive
+install-exec: install-exec-recursive
+install-data: install-data-recursive
+uninstall: uninstall-recursive
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-recursive
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-recursive
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-recursive
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic distclean-tags
+
+dvi: dvi-recursive
+
+dvi-am:
+
+html: html-recursive
+
+html-am:
+
+info: info-recursive
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-recursive
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-recursive
+
+install-html-am:
+
+install-info: install-info-recursive
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-recursive
+
+install-pdf-am:
+
+install-ps: install-ps-recursive
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-recursive
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-recursive
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-recursive
+
+pdf-am:
+
+ps: ps-recursive
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: $(am__recursive_targets) install-am install-strip
+
+.PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am check \
+ check-am clean clean-generic cscopelist-am ctags ctags-am \
+ distclean distclean-generic distclean-tags distdir dvi dvi-am \
+ html html-am info info-am install install-am install-data \
+ install-data-am install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs installdirs-am maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags tags-am uninstall uninstall-am
+
+.PRECIOUS: Makefile
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/backends/prometheus/README.md b/backends/prometheus/README.md
index 6b070dea..0a4be27e 100644
--- a/backends/prometheus/README.md
+++ b/backends/prometheus/README.md
@@ -1,37 +1,39 @@
-# Using netdata with Prometheus
+# Using Netdata with Prometheus
-> IMPORTANT: the format netdata sends metrics to prometheus has changed since netdata v1.7. The new prometheus backend for netdata supports a lot more features and is aligned to the development of the rest of the netdata backends.
+> IMPORTANT: the format in which Netdata sends metrics to Prometheus changed in Netdata v1.7. The new Prometheus backend for Netdata supports many more features and is aligned with the development of the rest of the Netdata backends.
-Prometheus is a distributed monitoring system which offers a very simple setup along with a robust data model. Recently netdata added support for Prometheus. I'm going to quickly show you how to install both netdata and prometheus on the same server. We can then use grafana pointed at Prometheus to obtain long term metrics netdata offers. I'm assuming we are starting at a fresh ubuntu shell (whether you'd like to follow along in a VM or a cloud instance is up to you).
+Prometheus is a distributed monitoring system which offers a very simple setup along with a robust data model. Recently Netdata added support for Prometheus. I'm going to quickly show you how to install both Netdata and Prometheus on the same server. We can then use Grafana pointed at Prometheus to obtain the long-term metrics Netdata offers. I'm assuming we are starting from a fresh Ubuntu shell (whether you'd like to follow along in a VM or a cloud instance is up to you).
+## Installing Netdata and Prometheus
-## Installing netdata and prometheus
+### Installing Netdata
-### Installing netdata
+There are a number of ways to install Netdata, as described in [Installation](../../packaging/installer/#installation).\
+The suggested way is to install the latest Netdata and keep it upgraded automatically, using the one-line installation:
-There are number of ways to install netdata according to [Installation](../../packaging/installer/#installation)
-The suggested way of installing the latest netdata and keep it upgrade automatically. Using one line installation:
-
-```
+```sh
bash <(curl -Ss https://my-netdata.io/kickstart.sh)
```
-At this point we should have netdata listening on port 19999. Attempt to take your browser here:
+At this point we should have Netdata listening on port 19999. Point your browser at:
```
http://your.netdata.ip:19999
```
-*(replace `your.netdata.ip` with the IP or hostname of the server running netdata)*
+_(replace `your.netdata.ip` with the IP or hostname of the server running Netdata)_
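+
+If you prefer the command line, a quick sanity check is to fetch the metrics endpoint described later in this guide and confirm you get output (this assumes `curl` is installed on the host):
+
+```sh
+# print the first few lines of the prometheus-format metrics endpoint
+curl -s "http://your.netdata.ip:19999/api/v1/allmetrics?format=prometheus" | head
+```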
### Installing Prometheus
-In order to install prometheus we are going to introduce our own systemd startup script along with an example of prometheus.yaml configuration. Prometheus needs to be pointed to your server at a specific target url for it to scrape netdata's api. Prometheus is always a pull model meaning netdata is the passive client within this architecture. Prometheus always initiates the connection with netdata.
+In order to install Prometheus we are going to introduce our own systemd startup script along with an example prometheus.yaml configuration. Prometheus needs to be pointed at your server, at a specific target URL, for it to scrape Netdata's API. Prometheus always uses a pull model, meaning Netdata is the passive client within this architecture: Prometheus always initiates the connection with Netdata.
#### Download Prometheus
```sh
-wget -O /tmp/prometheus-2.3.2.linux-amd64.tar.gz https://github.com/prometheus/prometheus/releases/download/v2.3.2/prometheus-2.3.2.linux-amd64.tar.gz
+cd /tmp && curl -s https://api.github.com/repos/prometheus/prometheus/releases/latest \
+| grep "browser_download_url.*linux-amd64.tar.gz" \
+| cut -d '"' -f 4 \
+| wget -qi -
```
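+
+The snippet above always fetches the latest release. If you would rather pin a specific version, the release URL pattern below should work (the version number is only an example - adjust it to your needs):
+
+```sh
+PROM_VERSION=2.3.2
+wget -O "/tmp/prometheus-${PROM_VERSION}.linux-amd64.tar.gz" \
+  "https://github.com/prometheus/prometheus/releases/download/v${PROM_VERSION}/prometheus-${PROM_VERSION}.linux-amd64.tar.gz"
+```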
#### Create prometheus system user
@@ -50,14 +52,14 @@ sudo chown prometheus:prometheus /opt/prometheus
#### Untar prometheus directory
```sh
-sudo tar -xvf /tmp/prometheus-2.3.2.linux-amd64.tar.gz -C /opt/prometheus --strip=1
+sudo tar -xvf /tmp/prometheus-*linux-amd64.tar.gz -C /opt/prometheus --strip=1
```
#### Install prometheus.yml
We will use the following `prometheus.yml` file. Save it at `/opt/prometheus/prometheus.yml`.
-Make sure to replace `your.netdata.ip` with the IP or hostname of the host running netdata.
+Make sure to replace `your.netdata.ip` with the IP or hostname of the host running Netdata.
```yaml
# my global config
@@ -101,7 +103,7 @@ scrape_configs:
#source: [as-collected]
#
# server name for this prometheus - the default is the client IP
- # for netdata to uniquely identify it
+ # for Netdata to uniquely identify it
#server: ['prometheus1']
honor_labels: true
@@ -111,8 +113,8 @@ scrape_configs:
#### Install nodes.yml
-The following is completely optional, it will enable Prometheus to generate alerts from some NetData sources. Tweak the values to your own needs. We will use the following `nodes.yml` file below. Save it at `/opt/prometheus/nodes.yml`, and add a *- "nodes.yml"* entry under the *rule_files:* section in the example prometheus.yml file above.
-```
+The following is completely optional; it will enable Prometheus to generate alerts from some Netdata sources. Tweak the values to your own needs. We will use the `nodes.yml` file below. Save it at `/opt/prometheus/nodes.yml`, and add a _- "nodes.yml"_ entry under the _rule_files:_ section in the example prometheus.yml file above.
+```yaml
groups:
- name: nodes
@@ -171,69 +173,70 @@ ExecStop=/bin/kill -SIGINT $MAINPID
[Install]
WantedBy=multi-user.target
```
+
##### Start Prometheus
-```
+```sh
sudo systemctl start prometheus
sudo systemctl enable prometheus
```
Prometheus should now start and listen on port 9090. Attempt to head there with your browser.
-If everything is working correctly when you fetch `http://your.prometheus.ip:9090` you will see a 'Status' tab. Click this and click on 'targets' We should see the netdata host as a scraped target.
+If everything is working correctly, when you fetch `http://your.prometheus.ip:9090` you will see a 'Status' tab. Click it, then click on 'Targets'. We should see the Netdata host as a scraped target.
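+
+You can run the same check from the command line against Prometheus' HTTP API; this is only a quick sketch (it assumes the default port and that `curl` is available):
+
+```sh
+# list the health of the configured scrape targets
+curl -s "http://your.prometheus.ip:9090/api/v1/targets" | grep -o '"health":"[^"]*"'
+```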
----
+- - -
## Netdata support for prometheus
-> IMPORTANT: the format netdata sends metrics to prometheus has changed since netdata v1.6. The new format allows easier queries for metrics and supports both `as collected` and normalized metrics.
+> IMPORTANT: the format in which Netdata sends metrics to Prometheus changed in Netdata v1.6. The new format allows easier queries for metrics and supports both `as collected` and normalized metrics.
-Before explaining the changes, we have to understand the key differences between netdata and prometheus.
+Before explaining the changes, we have to understand the key differences between Netdata and prometheus.
-### understanding netdata metrics
+### Understanding Netdata metrics
-##### charts
+#### charts
-Each chart in netdata has several properties (common to all its metrics):
+Each chart in Netdata has several properties (common to all its metrics):
-- `chart_id` - uniquely identifies a chart.
+- `chart_id` - uniquely identifies a chart.
-- `chart_name` - a more human friendly name for `chart_id`, also unique.
+- `chart_name` - a more human friendly name for `chart_id`, also unique.
-- `context` - this is the template of the chart. All disk I/O charts have the same context, all mysql requests charts have the same context, etc. This is used for alarm templates to match all the charts they should be attached to.
+- `context` - this is the template of the chart. All disk I/O charts have the same context, all mysql requests charts have the same context, etc. This is used for alarm templates to match all the charts they should be attached to.
-- `family` groups a set of charts together. It is used as the submenu of the dashboard.
+- `family` groups a set of charts together. It is used as the submenu of the dashboard.
-- `units` is the units for all the metrics attached to the chart.
+- `units` is the units for all the metrics attached to the chart.
-##### dimensions
+#### dimensions
-Then each netdata chart contains metrics called `dimensions`. All the dimensions of a chart have the same units of measurement, and are contextually in the same category (ie. the metrics for disk bandwidth are `read` and `write` and they are both in the same chart).
+Then each Netdata chart contains metrics called `dimensions`. All the dimensions of a chart have the same units of measurement, and are contextually in the same category (ie. the metrics for disk bandwidth are `read` and `write` and they are both in the same chart).
-### netdata data source
+### Netdata data source
Netdata can send metrics to prometheus from 3 data sources:
-- `as collected` or `raw` - this data source sends the metrics to prometheus as they are collected. No conversion is done by netdata. The latest value for each metric is just given to prometheus. This is the most preferred method by prometheus, but it is also the harder to work with. To work with this data source, you will need to understand how to get meaningful values out of them.
+- `as collected` or `raw` - this data source sends the metrics to Prometheus as they are collected. No conversion is done by Netdata. The latest value for each metric is just given to Prometheus. This is the method Prometheus prefers, but it is also the hardest to work with. To work with this data source, you will need to understand how to get meaningful values out of them (see the sketch after this list).
- The format of the metrics is: `CONTEXT{chart="CHART",family="FAMILY",dimension="DIMENSION"}`.
+ The format of the metrics is: `CONTEXT{chart="CHART",family="FAMILY",dimension="DIMENSION"}`.
- If the metric is a counter (`incremental` in netdata lingo), `_total` is appended the context.
+ If the metric is a counter (`incremental` in Netdata lingo), `_total` is appended to the context.
- Unlike prometheus, netdata allows each dimension of a chart to have a different algorithm and conversion constants (`multiplier` and `divisor`). In this case, that the dimensions of a charts are heterogeneous, netdata will use this format: `CONTEXT_DIMENSION{chart="CHART",family="FAMILY"}`
+ Unlike Prometheus, Netdata allows each dimension of a chart to have a different algorithm and conversion constants (`multiplier` and `divisor`). In this case, when the dimensions of a chart are heterogeneous, Netdata will use this format: `CONTEXT_DIMENSION{chart="CHART",family="FAMILY"}`
-- `average` - this data source uses the netdata database to send the metrics to prometheus as they are presented on the netdata dashboard. So, all the metrics are sent as gauges, at the units they are presented in the netdata dashboard charts. This is the easiest to work with.
+- `average` - this data source uses the Netdata database to send the metrics to Prometheus as they are presented on the Netdata dashboard. So, all the metrics are sent as gauges, in the units used on the Netdata dashboard charts. This is the easiest to work with.
- The format of the metrics is: `CONTEXT_UNITS_average{chart="CHART",family="FAMILY",dimension="DIMENSION"}`.
+ The format of the metrics is: `CONTEXT_UNITS_average{chart="CHART",family="FAMILY",dimension="DIMENSION"}`.
- When this source is used, netdata keeps track of the last access time for each prometheus server fetching the metrics. This last access time is used at the subsequent queries of the same prometheus server to identify the time-frame the `average` will be calculated. So, no matter how frequently prometheus scrapes netdata, it will get all the database data. To identify each prometheus server, netdata uses by default the IP of the client fetching the metrics. If there are multiple prometheus servers fetching data from the same netdata, using the same IP, each prometheus server can append `server=NAME` to the URL. Netdata will use this `NAME` to uniquely identify the prometheus server.
+ When this source is used, Netdata keeps track of the last access time for each Prometheus server fetching the metrics. This last access time is used on subsequent queries from the same Prometheus server to identify the time frame over which the `average` will be calculated. So, no matter how frequently Prometheus scrapes Netdata, it will get all the database data. To identify each Prometheus server, Netdata uses by default the IP of the client fetching the metrics. If there are multiple Prometheus servers fetching data from the same Netdata, using the same IP, each Prometheus server can append `server=NAME` to the URL. Netdata will use this `NAME` to uniquely identify the Prometheus server.
-- `sum` or `volume`, is like `average` but instead of averaging the values, it sums them.
+- `sum` or `volume`, is like `average` but instead of averaging the values, it sums them.
- The format of the metrics is: `CONTEXT_UNITS_sum{chart="CHART",family="FAMILY",dimension="DIMENSION"}`.
- All the other operations are the same with `average`.
+ The format of the metrics is: `CONTEXT_UNITS_sum{chart="CHART",family="FAMILY",dimension="DIMENSION"}`.
+ All the other operations are the same as with `average`.
-Keep in mind that early versions of netdata were sending the metrics as: `CHART_DIMENSION{}`.
+Keep in mind that early versions of Netdata were sending the metrics as: `CHART_DIMENSION{}`.
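+
+To compare the data sources for the same chart, fetch the metrics endpoint with different `source=` values. This is only a sketch for inspecting the output formats (replace `your.netdata.ip` as usual):
+
+```sh
+# as collected: CONTEXT{chart=...,family=...,dimension=...}; counters get a _total suffix
+curl -s "http://your.netdata.ip:19999/api/v1/allmetrics?format=prometheus&source=as-collected" | grep "^netdata_system_cpu"
+
+# average: normalized gauges named CONTEXT_UNITS_average{...}
+curl -s "http://your.netdata.ip:19999/api/v1/allmetrics?format=prometheus&source=average" | grep "^netdata_system_cpu"
+```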
### Querying Metrics
@@ -241,11 +244,11 @@ Fetch with your web browser this URL:
`http://your.netdata.ip:19999/api/v1/allmetrics?format=prometheus&help=yes`
-*(replace `your.netdata.ip` with the ip or hostname of your netdata server)*
+_(replace `your.netdata.ip` with the IP or hostname of your Netdata server)_
-netdata will respond with all the metrics it sends to prometheus.
+Netdata will respond with all the metrics it sends to prometheus.
-If you search that page for `"system.cpu"` you will find all the metrics netdata is exporting to prometheus for this chart. `system.cpu` is the chart name on the netdata dashboard (on the netdata dashboard all charts have a text heading such as : `Total CPU utilization (system.cpu)`. What we are interested here in the chart name: `system.cpu`).
+If you search that page for `"system.cpu"` you will find all the metrics Netdata is exporting to Prometheus for this chart. `system.cpu` is the chart name on the Netdata dashboard (on the Netdata dashboard all charts have a text heading, such as `Total CPU utilization (system.cpu)`; what we are interested in here is the chart name: `system.cpu`).
Searching for `"system.cpu"` reveals:
@@ -272,7 +275,8 @@ netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension=
# COMMENT netdata_system_cpu_percentage_average: dimension "idle", value is percentage, gauge, dt 1500066653 to 1500066662 inclusive
netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="idle"} 92.3630770 1500066662000
```
-*(netdata response for `system.cpu` with source=`average`)*
+
+_(Netdata response for `system.cpu` with source=`average`)_
In `average` or `sum` data sources, all values are normalized and are reported to prometheus as gauges. Now, use the 'expression' text form in prometheus. Begin to type the metrics we are looking for: `netdata_system_cpu`. You should see that the text form begins to auto-fill as prometheus knows about this metric.
@@ -302,15 +306,15 @@ netdata_system_cpu_total{chart="system.cpu",family="cpu",dimension="iowait"} 233
netdata_system_cpu_total{chart="system.cpu",family="cpu",dimension="idle"} 918470 1500066716438
```
-*(netdata response for `system.cpu` with source=`as-collected`)*
+_(Netdata response for `system.cpu` with source=`as-collected`)_
For more information check the Prometheus documentation.
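+
+As a concrete starting point, here is one way to run a PromQL query against the as-collected counters from the command line. The query itself is only an illustration (per-second rate of the `user` CPU counter over the last minute):
+
+```sh
+curl -sG "http://your.prometheus.ip:9090/api/v1/query" \
+  --data-urlencode 'query=rate(netdata_system_cpu_total{dimension="user"}[1m])'
+```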
### Streaming data from upstream hosts
-The `format=prometheus` parameter only exports the host's netdata metrics. If you are using the master/slave functionality of netdata this ignores any upstream hosts - so you should consider using the below in your **prometheus.yml**:
+The `format=prometheus` parameter only exports the host's Netdata metrics. If you are using the master/slave functionality of Netdata, this ignores any upstream hosts - so you should consider using the following in your **prometheus.yml**:
-```
+```yaml
metrics_path: '/api/v1/allmetrics'
params:
format: [prometheus_all_hosts]
@@ -321,13 +325,13 @@ This will report all upstream host data, and `honor_labels` will make Prometheus
### Timestamps
-To pass the metrics through prometheus pushgateway, netdata supports the option `&timestamps=no` to send the metrics without timestamps.
+To pass the metrics through the Prometheus Pushgateway, Netdata supports the option `&timestamps=no` to send the metrics without timestamps.
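+
+A minimal sketch of that flow, assuming a pushgateway listening at `your.pushgateway.ip:9091` (both the hostname and the `netdata` job name here are placeholders):
+
+```sh
+# fetch the metrics without timestamps and push them to a pushgateway
+curl -s "http://your.netdata.ip:19999/api/v1/allmetrics?format=prometheus&timestamps=no" \
+  | curl -s --data-binary @- "http://your.pushgateway.ip:9091/metrics/job/netdata"
+```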
## Netdata host variables
-netdata collects various system configuration metrics, like the max number of TCP sockets supported, the max number of files allowed system-wide, various IPC sizes, etc. These metrics are not exposed to prometheus by default.
+Netdata collects various system configuration metrics, like the max number of TCP sockets supported, the max number of files allowed system-wide, various IPC sizes, etc. These metrics are not exposed to prometheus by default.
-To expose them, append `variables=yes` to the netdata URL.
+To expose them, append `variables=yes` to the Netdata URL.
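+
+For example:
+
+```sh
+curl -s "http://your.netdata.ip:19999/api/v1/allmetrics?format=prometheus&variables=yes"
+```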
### TYPE and HELP
@@ -335,7 +339,7 @@ To save bandwidth, and because prometheus does not use them anyway, `# TYPE` and
### Names and IDs
-netdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names are human friendly labels (also unique).
+Netdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names are human friendly labels (also unique).
Most charts and metrics have the same ID and name, but in several cases they are different: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.
@@ -348,23 +352,23 @@ The default is controlled in `netdata.conf`:
You can overwrite it from prometheus, by appending to the URL:
-* `&names=no` to get IDs (the old behaviour)
-* `&names=yes` to get names
+- `&names=no` to get IDs (the old behaviour)
+- `&names=yes` to get names
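+
+For example, to fetch the metrics with IDs instead of names:
+
+```sh
+curl -s "http://your.netdata.ip:19999/api/v1/allmetrics?format=prometheus&names=no"
+```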
### Filtering metrics sent to prometheus
-netdata can filter the metrics it sends to prometheus with this setting:
+Netdata can filter the metrics it sends to prometheus with this setting:
```
[backend]
send charts matching = *
```
-This settings accepts a space separated list of patterns to match the **charts** to be sent to prometheus. Each pattern can use ` * ` as wildcard, any number of times (e.g `*a*b*c*` is valid). Patterns starting with ` ! ` give a negative match (e.g `!*.bad users.* groups.*` will send all the users and groups except `bad` user and `bad` group). The order is important: the first match (positive or negative) left to right, is used.
+This setting accepts a space-separated list of patterns to match the **charts** to be sent to Prometheus. Each pattern can use `*` as a wildcard, any number of times (e.g. `*a*b*c*` is valid). Patterns starting with `!` give a negative match (e.g. `!*.bad users.* groups.*` will send all the users and groups except the `bad` user and `bad` group). The order is important: the first match (positive or negative), left to right, is used.
-### Changing the prefix of netdata metrics
+### Changing the prefix of Netdata metrics
-netdata sends all metrics prefixed with `netdata_`. You can change this in `netdata.conf`, like this:
+Netdata sends all metrics prefixed with `netdata_`. You can change this in `netdata.conf`, like this:
```
[backend]
@@ -383,8 +387,8 @@ To get the metric names as they were before v1.12, append to the URL `&oldunits=
### Accuracy of `average` and `sum` data sources
-When the data source is set to `average` or `sum`, netdata remembers the last access of each client accessing prometheus metrics and uses this last access time to respond with the `average` or `sum` of all the entries in the database since that. This means that prometheus servers are not losing data when they access netdata with data source = `average` or `sum`.
+When the data source is set to `average` or `sum`, Netdata remembers the last access of each client accessing Prometheus metrics and uses this last access time to respond with the `average` or `sum` of all the entries in the database since then. This means that Prometheus servers do not lose data when they access Netdata with data source = `average` or `sum`.
-To uniquely identify each prometheus server, netdata uses the IP of the client accessing the metrics. If however the IP is not good enough for identifying a single prometheus server (e.g. when prometheus servers are accessing netdata through a web proxy, or when multiple prometheus servers are NATed to a single IP), each prometheus may append `&server=NAME` to the URL. This `NAME` is used by netdata to uniquely identify each prometheus server and keep track of its last access time.
+To uniquely identify each Prometheus server, Netdata uses the IP of the client accessing the metrics. If, however, the IP is not good enough for identifying a single Prometheus server (e.g. when Prometheus servers are accessing Netdata through a web proxy, or when multiple Prometheus servers are NATed to a single IP), each Prometheus server may append `&server=NAME` to the URL. This `NAME` is used by Netdata to uniquely identify each Prometheus server and keep track of its last access time.
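+
+For example, two Prometheus servers NATed to the same IP could identify themselves like this (the names are placeholders):
+
+```sh
+# each scraper appends its own identifier, so Netdata tracks their last access times separately
+curl -s "http://your.netdata.ip:19999/api/v1/allmetrics?format=prometheus&source=average&server=prometheus1"
+curl -s "http://your.netdata.ip:19999/api/v1/allmetrics?format=prometheus&source=average&server=prometheus2"
+```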
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fbackends%2Fprometheus%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fbackends%2Fprometheus%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/backends/prometheus/remote_write/Makefile.in b/backends/prometheus/remote_write/Makefile.in
new file mode 100644
index 00000000..f7bf4a6c
--- /dev/null
+++ b/backends/prometheus/remote_write/Makefile.in
@@ -0,0 +1,521 @@
+# Makefile.in generated by automake 1.15.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2017 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+VPATH = @srcdir@
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = backends/prometheus/remote_write
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(dist_noinst_DATA) \
+ $(am__DIST_COMMON)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_noinst_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+am__DIST_COMMON = $(srcdir)/Makefile.in
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CUPSCONFIG = @CUPSCONFIG@
+CXX = @CXX@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CXX_BINARY = @CXX_BINARY@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+JSON_CFLAGS = @JSON_CFLAGS@
+JSON_LIBS = @JSON_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
+LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
+LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
+LIBCURL_LIBS = @LIBCURL_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
+LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
+LIBSSL_LIBS = @LIBSSL_LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
+OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
+OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
+OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
+OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
+OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
+OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
+OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
+OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
+OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
+OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
+OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
+OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
+PROTOBUF_LIBS = @PROTOBUF_LIBS@
+PROTOC = @PROTOC@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
+XENLIGHT_LIBS = @XENLIGHT_LIBS@
+YAJL_CFLAGS = @YAJL_CFLAGS@
+YAJL_LIBS = @YAJL_LIBS@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+CLEANFILES = \
+ remote_write.pb.cc \
+ remote_write.pb.h \
+ $(NULL)
+
+dist_noinst_DATA = \
+ remote_write.proto \
+ README.md \
+ $(NULL)
+
+all: all-am
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu backends/prometheus/remote_write/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu backends/prometheus/remote_write/Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(DATA)
+installdirs:
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+ -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES)
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags-am uninstall uninstall-am
+
+.PRECIOUS: Makefile
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/backends/prometheus/remote_write/README.md b/backends/prometheus/remote_write/README.md
index 73cb1daf..8af6f4d1 100644
--- a/backends/prometheus/remote_write/README.md
+++ b/backends/prometheus/remote_write/README.md
@@ -2,7 +2,7 @@
## Prerequisites
-To use the prometheus remote write API with [storage providers](https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage) [protobuf](https://developers.google.com/protocol-buffers/) and [snappy](https://github.com/google/snappy) libraries should be installed first. Next, netdata should be re-installed from the source. The installer will detect that the required libraries and utilities are now available.
+To use the Prometheus remote write API with [storage providers](https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage), the [protobuf](https://developers.google.com/protocol-buffers/) and [snappy](https://github.com/google/snappy) libraries should be installed first. Next, Netdata should be re-installed from source. The installer will detect that the required libraries and utilities are now available.
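+
+For example, on a Debian or Ubuntu system the prerequisites can typically be installed like this (these are the usual Debian package names; they may differ on other distributions):
+
+```sh
+sudo apt-get install -y protobuf-compiler libprotobuf-dev libsnappy-dev
+```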
## Configuration
@@ -27,4 +27,4 @@ The default value is `/receive`. `remote write URL path` is used to set an endpo
The remote write backend does not support `buffer on failures`
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fbackends%2Fprometheus%2Fremote_write%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fbackends%2Fprometheus%2Fremote_write%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/build/build.sh b/build/build.sh
deleted file mode 100755
index 892a7da9..00000000
--- a/build/build.sh
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/bash
-
-if [ ! -f .gitignore ]; then
- echo "Run as ./travis/$(basename "$0") from top level directory of git repository"
- exit 1
-fi
-
-if [ "$IS_CONTAINER" != "" ]; then
- autoreconf -ivf
- ./configure --enable-maintainer-mode
- make dist
- rm -rf autom4te.cache
-else
- docker run --rm -it \
- --env IS_CONTAINER=TRUE \
- --volume "${PWD}:/project:Z" \
- --workdir "/project" \
- netdata/builder:gcc \
- ./build/build.sh
-fi
diff --git a/collectors/Makefile.in b/collectors/Makefile.in
new file mode 100644
index 00000000..cbc0a900
--- /dev/null
+++ b/collectors/Makefile.in
@@ -0,0 +1,717 @@
+# Makefile.in generated by automake 1.15.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2017 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+VPATH = @srcdir@
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = collectors
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(dist_noinst_DATA) \
+ $(am__DIST_COMMON)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \
+ ctags-recursive dvi-recursive html-recursive info-recursive \
+ install-data-recursive install-dvi-recursive \
+ install-exec-recursive install-html-recursive \
+ install-info-recursive install-pdf-recursive \
+ install-ps-recursive install-recursive installcheck-recursive \
+ installdirs-recursive pdf-recursive ps-recursive \
+ tags-recursive uninstall-recursive
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_noinst_DATA)
+RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \
+ distclean-recursive maintainer-clean-recursive
+am__recursive_targets = \
+ $(RECURSIVE_TARGETS) \
+ $(RECURSIVE_CLEAN_TARGETS) \
+ $(am__extra_recursive_targets)
+AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \
+ distdir
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+# Read a list of newline-separated strings from the standard input,
+# and print each of them once, without duplicates. Input order is
+# *not* preserved.
+am__uniquify_input = $(AWK) '\
+ BEGIN { nonempty = 0; } \
+ { items[$$0] = 1; nonempty = 1; } \
+ END { if (nonempty) { for (i in items) print i; }; } \
+'
+# Make sure the list of sources is unique. This is necessary because,
+# e.g., the same source file might be shared among _SOURCES variables
+# for different programs/libraries.
+am__define_uniq_tagged_files = \
+ list='$(am__tagged_files)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | $(am__uniquify_input)`
+ETAGS = etags
+CTAGS = ctags
+DIST_SUBDIRS = $(SUBDIRS)
+am__DIST_COMMON = $(srcdir)/Makefile.in
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+am__relativize = \
+ dir0=`pwd`; \
+ sed_first='s,^\([^/]*\)/.*$$,\1,'; \
+ sed_rest='s,^[^/]*/*,,'; \
+ sed_last='s,^.*/\([^/]*\)$$,\1,'; \
+ sed_butlast='s,/*[^/]*$$,,'; \
+ while test -n "$$dir1"; do \
+ first=`echo "$$dir1" | sed -e "$$sed_first"`; \
+ if test "$$first" != "."; then \
+ if test "$$first" = ".."; then \
+ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \
+ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \
+ else \
+ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \
+ if test "$$first2" = "$$first"; then \
+ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \
+ else \
+ dir2="../$$dir2"; \
+ fi; \
+ dir0="$$dir0"/"$$first"; \
+ fi; \
+ fi; \
+ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \
+ done; \
+ reldir="$$dir2"
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CUPSCONFIG = @CUPSCONFIG@
+CXX = @CXX@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CXX_BINARY = @CXX_BINARY@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+JSON_CFLAGS = @JSON_CFLAGS@
+JSON_LIBS = @JSON_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
+LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
+LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
+LIBCURL_LIBS = @LIBCURL_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
+LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
+LIBSSL_LIBS = @LIBSSL_LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
+OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
+OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
+OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
+OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
+OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
+OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
+OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
+OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
+OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
+OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
+OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
+OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
+PROTOBUF_LIBS = @PROTOBUF_LIBS@
+PROTOC = @PROTOC@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
+XENLIGHT_LIBS = @XENLIGHT_LIBS@
+YAJL_CFLAGS = @YAJL_CFLAGS@
+YAJL_LIBS = @YAJL_LIBS@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+SUBDIRS = \
+ plugins.d \
+ apps.plugin \
+ cgroups.plugin \
+ charts.d.plugin \
+ checks.plugin \
+ cups.plugin \
+ diskspace.plugin \
+ fping.plugin \
+ ioping.plugin \
+ freebsd.plugin \
+ freeipmi.plugin \
+ idlejitter.plugin \
+ macos.plugin \
+ nfacct.plugin \
+ xenstat.plugin \
+ perf.plugin \
+ node.d.plugin \
+ proc.plugin \
+ python.d.plugin \
+ statsd.plugin \
+ tc.plugin \
+ $(NULL)
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
+
+all: all-recursive
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu collectors/Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+
+# This directory's subdirectories are mostly independent; you can cd
+# into them and run 'make' without going through this Makefile.
+# To change the values of 'make' variables: instead of editing Makefiles,
+# (1) if the variable is set in 'config.status', edit 'config.status'
+# (which will cause the Makefiles to be regenerated when you run 'make');
+# (2) otherwise, pass the desired values on the 'make' command line.
+$(am__recursive_targets):
+ @fail=; \
+ if $(am__make_keepgoing); then \
+ failcom='fail=yes'; \
+ else \
+ failcom='exit 1'; \
+ fi; \
+ dot_seen=no; \
+ target=`echo $@ | sed s/-recursive//`; \
+ case "$@" in \
+ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \
+ *) list='$(SUBDIRS)' ;; \
+ esac; \
+ for subdir in $$list; do \
+ echo "Making $$target in $$subdir"; \
+ if test "$$subdir" = "."; then \
+ dot_seen=yes; \
+ local_target="$$target-am"; \
+ else \
+ local_target="$$target"; \
+ fi; \
+ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \
+ || eval $$failcom; \
+ done; \
+ if test "$$dot_seen" = "no"; then \
+ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \
+ fi; test -z "$$fail"
+
+ID: $(am__tagged_files)
+ $(am__define_uniq_tagged_files); mkid -fID $$unique
+tags: tags-recursive
+TAGS: tags
+
+tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+ set x; \
+ here=`pwd`; \
+ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \
+ include_option=--etags-include; \
+ empty_fix=.; \
+ else \
+ include_option=--include; \
+ empty_fix=; \
+ fi; \
+ list='$(SUBDIRS)'; for subdir in $$list; do \
+ if test "$$subdir" = .; then :; else \
+ test ! -f $$subdir/TAGS || \
+ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \
+ fi; \
+ done; \
+ $(am__define_uniq_tagged_files); \
+ shift; \
+ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
+ test -n "$$unique" || unique=$$empty_fix; \
+ if test $$# -gt 0; then \
+ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+ "$$@" $$unique; \
+ else \
+ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+ $$unique; \
+ fi; \
+ fi
+ctags: ctags-recursive
+
+CTAGS: ctags
+ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+ $(am__define_uniq_tagged_files); \
+ test -z "$(CTAGS_ARGS)$$unique" \
+ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
+ $$unique
+
+GTAGS:
+ here=`$(am__cd) $(top_builddir) && pwd` \
+ && $(am__cd) $(top_srcdir) \
+ && gtags -i $(GTAGS_ARGS) "$$here"
+cscopelist: cscopelist-recursive
+
+cscopelist-am: $(am__tagged_files)
+ list='$(am__tagged_files)'; \
+ case "$(srcdir)" in \
+ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \
+ *) sdir=$(subdir)/$(srcdir) ;; \
+ esac; \
+ for i in $$list; do \
+ if test -f "$$i"; then \
+ echo "$(subdir)/$$i"; \
+ else \
+ echo "$$sdir/$$i"; \
+ fi; \
+ done >> $(top_builddir)/cscope.files
+
+distclean-tags:
+ -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+ @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \
+ if test "$$subdir" = .; then :; else \
+ $(am__make_dryrun) \
+ || test -d "$(distdir)/$$subdir" \
+ || $(MKDIR_P) "$(distdir)/$$subdir" \
+ || exit 1; \
+ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \
+ $(am__relativize); \
+ new_distdir=$$reldir; \
+ dir1=$$subdir; dir2="$(top_distdir)"; \
+ $(am__relativize); \
+ new_top_distdir=$$reldir; \
+ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \
+ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \
+ ($(am__cd) $$subdir && \
+ $(MAKE) $(AM_MAKEFLAGS) \
+ top_distdir="$$new_top_distdir" \
+ distdir="$$new_distdir" \
+ am__remove_distdir=: \
+ am__skip_length_check=: \
+ am__skip_mode_fix=: \
+ distdir) \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-recursive
+all-am: Makefile $(DATA)
+installdirs: installdirs-recursive
+installdirs-am:
+install: install-recursive
+install-exec: install-exec-recursive
+install-data: install-data-recursive
+uninstall: uninstall-recursive
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-recursive
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-recursive
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-recursive
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic distclean-tags
+
+dvi: dvi-recursive
+
+dvi-am:
+
+html: html-recursive
+
+html-am:
+
+info: info-recursive
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-recursive
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-recursive
+
+install-html-am:
+
+install-info: install-info-recursive
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-recursive
+
+install-pdf-am:
+
+install-ps: install-ps-recursive
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-recursive
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-recursive
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-recursive
+
+pdf-am:
+
+ps: ps-recursive
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: $(am__recursive_targets) install-am install-strip
+
+.PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am check \
+ check-am clean clean-generic cscopelist-am ctags ctags-am \
+ distclean distclean-generic distclean-tags distdir dvi dvi-am \
+ html html-am info info-am install install-am install-data \
+ install-data-am install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs installdirs-am maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags tags-am uninstall uninstall-am
+
+.PRECIOUS: Makefile
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/collectors/README.md b/collectors/README.md
index 72521388..fe6939f6 100644
--- a/collectors/README.md
+++ b/collectors/README.md
@@ -1,49 +1,49 @@
# Data collection plugins
-netdata supports **internal** and **external** data collection plugins:
+Netdata supports **internal** and **external** data collection plugins:
-- **internal** plugins are written in `C` and run as threads inside the netdata daemon.
+- **internal** plugins are written in `C` and run as threads inside the `netdata` daemon.
-- **external** plugins may be written in any computer language and are spawn as independent long-running processes by the netdata daemon.
- They communicate with the netdata daemon via `pipes` (`stdout` communication).
+- **external** plugins may be written in any computer language and are spawned as independent long-running processes by the `netdata` daemon.
+ They communicate with the `netdata` daemon via `pipes` (`stdout` communication).
-To minimize the number of processes spawn for data collection, netdata also supports **plugin orchestrators**.
+To minimize the number of processes spawned for data collection, Netdata also supports **plugin orchestrators**.
-- **plugin orchestrators** are external plugins that do not collect any data by themeselves.
- Instead they support data collection **modules** written in the language of the orchestrator.
- Usually the orchestrator provides a higher level abstraction, making it ideal for writing new
- data collection modules with the minimum of code.
+- **plugin orchestrators** are external plugins that do not collect any data by themselves.
+ Instead they support data collection **modules** written in the language of the orchestrator.
+ Usually the orchestrator provides a higher level abstraction, making it ideal for writing new
+ data collection modules with the minimum of code.
- Currently netdata provides plugin orchestrators
- BASH v4+ [charts.d.plugin](charts.d.plugin/),
- node.js [node.d.plugin](node.d.plugin/) and
- python v2+ (including v3) [python.d.plugin](python.d.plugin/).
+ Currently Netdata provides plugin orchestrators
+ BASH v4+ [charts.d.plugin](charts.d.plugin/),
+ node.js [node.d.plugin](node.d.plugin/) and
+ python v2+ (including v3) [python.d.plugin](python.d.plugin/).
## Netdata Plugins
-plugin|lang|O/S|runs as|modular|description
-:---:|:---:|:---:|:---:|:---:|:---
-[apps.plugin](apps.plugin/)|`C`|linux, freebsd|external|-|monitors the whole process tree on Linux and FreeBSD and breaks down system resource usage by **process**, **user** and **user group**.
-[cgroups.plugin](cgroups.plugin/)|`C`|linux|internal|-|collects resource usage of **Containers**, libvirt **VMs** and **systemd services**, on Linux systems
-[charts.d.plugin](charts.d.plugin/)|`BASH` v4+|any|external|yes|a **plugin orchestrator** for data collection modules written in `BASH` v4+.
-[checks.plugin](checks.plugin/)|`C`|any|internal|-|a debugging plugin (by default it is disabled)
-[cups.plugin](cups.plugin/)|`C`|any|external|-|monitors **CUPS**
-[diskspace.plugin](diskspace.plugin/)|`C`|linux|internal|-|collects disk space usage metrics on Linux mount points
-[fping.plugin](fping.plugin/)|`C`|any|external|-|measures network latency, jitter and packet loss between the monitored node and any number of remote network end points.
-[ioping.plugin](ioping.plugin/)|`C`|any|external|-|measures disk read/write latency.
-[freebsd.plugin](freebsd.plugin/)|`C`|freebsd|internal|yes|collects resource usage and performance data on FreeBSD systems
-[freeipmi.plugin](freeipmi.plugin/)|`C`|linux, freebsd|external|-|collects metrics from enterprise hardware sensors, on Linux and FreeBSD servers.
-[idlejitter.plugin](idlejitter.plugin/)|`C`|any|internal|-|measures CPU latency and jitter on all operating systems
-[macos.plugin](macos.plugin/)|`C`|macos|internal|yes|collects resource usage and performance data on MacOS systems
-[nfacct.plugin](nfacct.plugin/)|`C`|linux|external|-|collects netfilter firewall, connection tracker and accounting metrics using `libmnl` and `libnetfilter_acct`
-[xenstat.plugin](xenstat.plugin/)|`C`|linux|external|-|collects XenServer and XCP-ng metrics using `libxenstat`
-[perf.plugin](perf.plugin/)|`C`|linux|external|-|collects CPU performance metrics using performance monitoring units (PMU).
-[node.d.plugin](node.d.plugin/)|`node.js`|any|external|yes|a **plugin orchestrator** for data collection modules written in `node.js`.
-[plugins.d](plugins.d/)|`C`|any|internal|-|implements the **external plugins** API and serves external plugins
-[proc.plugin](proc.plugin/)|`C`|linux|internal|yes|collects resource usage and performance data on Linux systems
-[python.d.plugin](python.d.plugin/)|`python` v2+|any|external|yes|a **plugin orchestrator** for data collection modules written in `python` v2 or v3 (both are supported).
-[statsd.plugin](statsd.plugin/)|`C`|any|internal|-|implements a high performance **statsd** server for netdata
-[tc.plugin](tc.plugin/)|`C`|linux|internal|-|collects traffic QoS metrics (`tc`) of Linux network interfaces
+|plugin|lang|O/S|runs as|modular|description|
+|:----:|:--:|:-:|:-----:|:-----:|:----------|
+|[apps.plugin](apps.plugin/)|`C`|linux, freebsd|external|-|monitors the whole process tree on Linux and FreeBSD and breaks down system resource usage by **process**, **user** and **user group**.|
+|[cgroups.plugin](cgroups.plugin/)|`C`|linux|internal|-|collects resource usage of **Containers**, libvirt **VMs** and **systemd services**, on Linux systems|
+|[charts.d.plugin](charts.d.plugin/)|`BASH` v4+|any|external|yes|a **plugin orchestrator** for data collection modules written in `BASH` v4+.|
+|[checks.plugin](checks.plugin/)|`C`|any|internal|-|a debugging plugin (by default it is disabled)|
+|[cups.plugin](cups.plugin/)|`C`|any|external|-|monitors **CUPS**|
+|[diskspace.plugin](diskspace.plugin/)|`C`|linux|internal|-|collects disk space usage metrics on Linux mount points|
+|[fping.plugin](fping.plugin/)|`C`|any|external|-|measures network latency, jitter and packet loss between the monitored node and any number of remote network end points.|
+|[ioping.plugin](ioping.plugin/)|`C`|any|external|-|measures disk read/write latency.|
+|[freebsd.plugin](freebsd.plugin/)|`C`|freebsd|internal|yes|collects resource usage and performance data on FreeBSD systems|
+|[freeipmi.plugin](freeipmi.plugin/)|`C`|linux, freebsd|external|-|collects metrics from enterprise hardware sensors, on Linux and FreeBSD servers.|
+|[idlejitter.plugin](idlejitter.plugin/)|`C`|any|internal|-|measures CPU latency and jitter on all operating systems|
+|[macos.plugin](macos.plugin/)|`C`|macos|internal|yes|collects resource usage and performance data on MacOS systems|
+|[nfacct.plugin](nfacct.plugin/)|`C`|linux|external|-|collects netfilter firewall, connection tracker and accounting metrics using `libmnl` and `libnetfilter_acct`|
+|[xenstat.plugin](xenstat.plugin/)|`C`|linux|external|-|collects XenServer and XCP-ng metrics using `libxenstat`|
+|[perf.plugin](perf.plugin/)|`C`|linux|external|-|collects CPU performance metrics using performance monitoring units (PMU).|
+|[node.d.plugin](node.d.plugin/)|`node.js`|any|external|yes|a **plugin orchestrator** for data collection modules written in `node.js`.|
+|[plugins.d](plugins.d/)|`C`|any|internal|-|implements the **external plugins** API and serves external plugins|
+|[proc.plugin](proc.plugin/)|`C`|linux|internal|yes|collects resource usage and performance data on Linux systems|
+|[python.d.plugin](python.d.plugin/)|`python` v2+|any|external|yes|a **plugin orchestrator** for data collection modules written in `python` v2 or v3 (both are supported).|
+|[statsd.plugin](statsd.plugin/)|`C`|any|internal|-|implements a high performance **statsd** server for Netdata|
+|[tc.plugin](tc.plugin/)|`C`|linux|internal|-|collects traffic QoS metrics (`tc`) of Linux network interfaces|
## Enabling and Disabling plugins
@@ -59,7 +59,7 @@ All **external plugins** are managed by [plugins.d](plugins.d/), which provides
### Internal Plugins
-Each of the internal plugins runs as a thread inside the netdata daemon.
+Each of the internal plugins runs as a thread inside the `netdata` daemon.
Once this thread has started, the plugin may spawn additional threads according to its design.
#### Internal Plugins API
@@ -72,7 +72,7 @@ collect_data() {
collected_number collected_value = collect_a_value();
- // give the metrics to netdata
+ // give the metrics to Netdata
static RRDSET *st = NULL; // the chart
static RRDDIM *rd = NULL; // a dimension attached to this chart
@@ -100,25 +100,22 @@ collect_data() {
}
else {
// this chart is already created
- // let netdata know we start a new iteration on it
+ // let Netdata know we start a new iteration on it
rrdset_next(st);
}
// give the collected value(s) to the chart
rrddim_set_by_pointer(st, rd, collected_value);
- // signal netdata we are done with this iteration
+ // signal Netdata we are done with this iteration
rrdset_done(st);
}
```
-Of course netdata has a lot of libraries to help you also in collecting the metrics.
-The best way to find your way through this, is to examine what other similar plugins do.
-
+Of course, Netdata also provides a lot of libraries to help you collect metrics. The best way to find your way through this is to examine what other similar plugins do.
### External Plugins
**External plugins** use the API and are managed by [plugins.d](plugins.d/).
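They communicate with the daemon over `stdout`, using a simple text protocol. As a rough sketch only (the exact keywords and syntax are documented in [plugins.d](plugins.d/), and the chart and dimension names below are invented for illustration), a minimal external plugin is conceptually this small:

```c
/* Sketch of a minimal external plugin, assuming the plugins.d text
 * protocol: CHART/DIMENSION declare a chart once at startup, then
 * BEGIN/SET/END send one sample per iteration. Not production code. */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void) {
    /* declare one chart with one dimension (illustrative names) */
    printf("CHART example.random '' \"A random number\" number\n");
    printf("DIMENSION random '' absolute 1 1\n");

    for(;;) {
        printf("BEGIN example.random\n");
        printf("SET random = %d\n", rand() % 100); /* the collected value */
        printf("END\n");
        fflush(stdout); /* the netdata daemon reads our stdout via a pipe */
        sleep(1);       /* data collection frequency */
    }
}
```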
-
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/all.h b/collectors/all.h
index a18c43a2..5fe5e9be 100644
--- a/collectors/all.h
+++ b/collectors/all.h
@@ -93,6 +93,10 @@
#define NETDATA_CHART_PRIO_MEM_HW 1500
#define NETDATA_CHART_PRIO_MEM_HW_ECC_CE 1550
#define NETDATA_CHART_PRIO_MEM_HW_ECC_UE 1560
+#define NETDATA_CHART_PRIO_MEM_ZRAM 1600
+#define NETDATA_CHART_PRIO_MEM_ZRAM_SAVINGS 1601
+#define NETDATA_CHART_PRIO_MEM_ZRAM_RATIO 1602
+#define NETDATA_CHART_PRIO_MEM_ZRAM_EFFICIENCY 1603
// Disks
diff --git a/collectors/apps.plugin/Makefile.in b/collectors/apps.plugin/Makefile.in
new file mode 100644
index 00000000..74e29b03
--- /dev/null
+++ b/collectors/apps.plugin/Makefile.in
@@ -0,0 +1,571 @@
+# Makefile.in generated by automake 1.15.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2017 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+VPATH = @srcdir@
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = collectors/apps.plugin
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(dist_libconfig_DATA) \
+ $(dist_noinst_DATA) $(am__DIST_COMMON)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
+am__vpath_adj = case $$p in \
+ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
+ *) f=$$p;; \
+ esac;
+am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
+am__install_max = 40
+am__nobase_strip_setup = \
+ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
+am__nobase_strip = \
+ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
+am__nobase_list = $(am__nobase_strip_setup); \
+ for p in $$list; do echo "$$p $$p"; done | \
+ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
+ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
+ if (++n[$$2] == $(am__install_max)) \
+ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
+ END { for (dir in files) print dir, files[dir] }'
+am__base_list = \
+ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
+ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
+am__uninstall_files_from_dir = { \
+ test -z "$$files" \
+ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \
+ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \
+ $(am__cd) "$$dir" && rm -f $$files; }; \
+ }
+am__installdirs = "$(DESTDIR)$(libconfigdir)"
+DATA = $(dist_libconfig_DATA) $(dist_noinst_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+am__DIST_COMMON = $(srcdir)/Makefile.in
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CUPSCONFIG = @CUPSCONFIG@
+CXX = @CXX@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CXX_BINARY = @CXX_BINARY@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+JSON_CFLAGS = @JSON_CFLAGS@
+JSON_LIBS = @JSON_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
+LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
+LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
+LIBCURL_LIBS = @LIBCURL_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
+LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
+LIBSSL_LIBS = @LIBSSL_LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
+OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
+OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
+OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
+OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
+OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
+OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
+OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
+OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
+OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
+OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
+OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
+OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
+PROTOBUF_LIBS = @PROTOBUF_LIBS@
+PROTOC = @PROTOC@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
+XENLIGHT_LIBS = @XENLIGHT_LIBS@
+YAJL_CFLAGS = @YAJL_CFLAGS@
+YAJL_LIBS = @YAJL_LIBS@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
+
+dist_libconfig_DATA = \
+ apps_groups.conf \
+ $(NULL)
+
+all: all-am
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/apps.plugin/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu collectors/apps.plugin/Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+install-dist_libconfigDATA: $(dist_libconfig_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_libconfig_DATA)'; test -n "$(libconfigdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(libconfigdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(libconfigdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(libconfigdir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(libconfigdir)" || exit $$?; \
+ done
+
+uninstall-dist_libconfigDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_libconfig_DATA)'; test -n "$(libconfigdir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(libconfigdir)'; $(am__uninstall_files_from_dir)
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(DATA)
+installdirs:
+ for dir in "$(DESTDIR)$(libconfigdir)"; do \
+ test -z "$$dir" || $(MKDIR_P) "$$dir"; \
+ done
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am: install-dist_libconfigDATA
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am: uninstall-dist_libconfigDATA
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dist_libconfigDATA install-dvi \
+ install-dvi-am install-exec install-exec-am install-html \
+ install-html-am install-info install-info-am install-man \
+ install-pdf install-pdf-am install-ps install-ps-am \
+ install-strip installcheck installcheck-am installdirs \
+ maintainer-clean maintainer-clean-generic mostlyclean \
+ mostlyclean-generic pdf pdf-am ps ps-am tags-am uninstall \
+ uninstall-am uninstall-dist_libconfigDATA
+
+.PRECIOUS: Makefile
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/collectors/apps.plugin/README.md b/collectors/apps.plugin/README.md
index ee5c6971..1b682bc6 100644
--- a/collectors/apps.plugin/README.md
+++ b/collectors/apps.plugin/README.md
@@ -5,9 +5,9 @@
To achieve this task, it iterates through the whole process tree, collecting resource usage information
for every process found running.
-Since netdata needs to present this information in charts and track them through time,
+Since Netdata needs to present this information in charts and track them through time,
instead of presenting a `top` like list, `apps.plugin` uses a pre-defined list of **process groups**
-to which it assigns all running processes. This list is [customizable](apps_groups.conf) and netdata
+to which it assigns all running processes. This list is [customizable](apps_groups.conf) and Netdata
ships with a good default for most cases (to edit it on your system run `/etc/netdata/edit-config apps_groups.conf`).
So, `apps.plugin` builds a process tree (much like `ps fax` does in Linux), and groups
@@ -15,7 +15,7 @@ processes together (evaluating both child and parent processes) so that the resu
a predefined set of members (of course, only process groups found running are reported).
> If you find that `apps.plugin` categorizes standard applications as `other`, we would be
-> glad to accept pull requests improving the [defaults](apps_groups.conf) shipped with netdata.
+> glad to accept pull requests improving the [defaults](apps_groups.conf) shipped with Netdata.
Unlike traditional process monitoring tools (like `top`), `apps.plugin` is able to account the resource
utilization of exit processes. Their utilization is accounted at their currently running parents.
@@ -26,45 +26,50 @@ that fork/spawn other short lived processes hundreds of times per second.
`apps.plugin` provides charts for 3 sections:
-1. Per application charts as **Applications** at netdata dashboards
-2. Per user charts as **Users** at netdata dashboards
-3. Per user group charts as **User Groups** at netdata dashboards
+1. Per application charts as **Applications** at Netdata dashboards
+2. Per user charts as **Users** at Netdata dashboards
+3. Per user group charts as **User Groups** at Netdata dashboards
Each of these sections provides the same number of charts:
-- CPU Utilization
- - Total CPU usage
- - User / System CPU usage
-- Disk I/O
- - Physical Reads / Writes
- - Logical Reads / Writes
- - Open Unique Files (if a file is found open multiple times, it is counted just once)
-- Memory
- - Real Memory Used (non shared)
- - Virtual Memory Allocated
- - Minor Page Faults (i.e. memory activity)
-- Processes
- - Threads Running
- - Processes Running
- - Pipes Open
-- Swap Memory
- - Swap Memory Used
- - Major Page Faults (i.e. swap activity)
-- Network
- - Sockets Open
+- CPU Utilization
+ - Total CPU usage
+ - User / System CPU usage
+- Disk I/O
+ - Physical Reads / Writes
+ - Logical Reads / Writes
+ - Open Unique Files (if a file is found open multiple times, it is counted just once)
+- Memory
+ - Real Memory Used (non shared)
+ - Virtual Memory Allocated
+ - Minor Page Faults (i.e. memory activity)
+- Processes
+ - Threads Running
+ - Processes Running
+ - Pipes Open
+ - Carried Over Uptime (since the Netdata restart)
+ - Minimum Uptime
+ - Average Uptime
+ - Maximum Uptime
+
+- Swap Memory
+ - Swap Memory Used
+ - Major Page Faults (i.e. swap activity)
+- Network
+ - Sockets Open
The above are reported:
-- For **Applications** per [target configured](apps_groups.conf).
-- For **Users** per username or UID (when the username is not available).
-- For **User Groups** per groupname or GID (when groupname is not available).
+- For **Applications** per [target configured](apps_groups.conf).
+- For **Users** per username or UID (when the username is not available).
+- For **User Groups** per groupname or GID (when groupname is not available).
## Performance
`apps.plugin` is a complex piece of software and has a lot of work to do
We are proud that `apps.plugin` is a lot faster compared to any other similar tool,
while collecting a lot more information for the processes, however the fact is that
-this plugin requires more CPU resources than the netdata daemon itself.
+this plugin requires more CPU resources than the `netdata` daemon itself.
Under Linux, for each process running, `apps.plugin` reads several `/proc` files
per process. Doing this work per-second, especially on hosts with several thousands
@@ -80,7 +85,7 @@ To do this, edit `/etc/netdata/netdata.conf` and find this section:
# command options =
```
-Uncomment the line `update every` and set it to a higher number. If you just set it to ` 2 `,
+Uncomment the line `update every` and set it to a higher number. If you just set it to `2`,
its CPU resources will be cut in half, and data collection will be once every 2 seconds.
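For example, assuming a 2 second interval is acceptable for your workload, the edited section would look like this:

```
[plugin:apps]
    update every = 2
```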
## Configuration
@@ -105,28 +110,28 @@ undesirable, the line `other: *` should be appended to the `apps_groups.conf`.
The process names are the ones returned by:
- - `ps -e` or `cat /proc/PID/stat`
- - in case of substring mode (see below): `/proc/PID/cmdline`
+- `ps -e` or `cat /proc/PID/stat`
+- in case of substring mode (see below): `/proc/PID/cmdline`
To add process names with spaces, enclose them in quotes (single or double)
-example: ` 'Plex Media Serv' ` or ` "my other process" `.
+example: `'Plex Media Serv'` or `"my other process"`.
-You can add an asterisk ` * ` at the beginning and/or the end of a process:
+You can add an asterisk `*` at the beginning and/or the end of a process:
- - `*name` *suffix* mode: will search for processes ending with `name` (at `/proc/PID/stat`)
- - `name*` *prefix* mode: will search for processes beginning with `name` (at `/proc/PID/stat`)
- - `*name*` *substring* mode: will search for `name` in the whole command line (at `/proc/PID/cmdline`)
+- `*name` _suffix_ mode: will search for processes ending with `name` (at `/proc/PID/stat`)
+- `name*` _prefix_ mode: will search for processes beginning with `name` (at `/proc/PID/stat`)
+- `*name*` _substring_ mode: will search for `name` in the whole command line (at `/proc/PID/cmdline`)
-If you enter even just one *name* (substring), `apps.plugin` will process
+If you enter even just one _name_ (substring), `apps.plugin` will process
`/proc/PID/cmdline` for all processes (of course only once per process: when they are first seen).
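Putting the three modes together, a hypothetical `apps_groups.conf` line (the group and process names are invented for illustration) could look like this:

```
# prefix, suffix and substring matching in one hypothetical group
myapp: myapp* *-worker *myapp-helper*
```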
-To add processes with single quotes, enclose them in double quotes: ` "process with this ' single quote" `
+To add processes with single quotes, enclose them in double quotes: `"process with this ' single quote"`
-To add processes with double quotes, enclose them in single quotes: ` 'process with this " double quote' `
+To add processes with double quotes, enclose them in single quotes: `'process with this " double quote'`
-If a group or process name starts with a ` - `, the dimension will be hidden from the chart (cpu chart only).
+If a group or process name starts with a `-`, the dimension will be hidden from the chart (cpu chart only).
-If a process starts with a ` + `, debugging will be enabled for it (debugging produces a lot of output - do not enable it in production systems).
+If a process starts with a `+`, debugging will be enabled for it (debugging produces a lot of output - do not enable it in production systems).
You can add any number of groups. Only the ones found running will affect the charts generated.
However, producing charts with hundreds of dimensions may slow down your web browser.
@@ -135,17 +140,24 @@ The order of the entries in this list is important: the first that matches a pro
ones at the top. Processes not matched by any row, will inherit it from their parents or children.
The order also controls the order of the dimensions on the generated charts (although applications started
-after apps.plugin is started, will be appended to the existing list of dimensions the netdata daemon maintains).
+after apps.plugin is started, will be appended to the existing list of dimensions the `netdata` daemon maintains).
+
+There are a few command-line options you can pass to `apps.plugin`. The list of available options can be acquired with the `--help` flag. The options can be set in the `netdata.conf` file. For example, to disable user and user group charts you should set:
+
+```
+[plugin:apps]
+ command options = without-users without-groups
+```
## Permissions
`apps.plugin` requires additional privileges to collect all the information it needs.
The problem is described in issue #157.
-When netdata is installed, `apps.plugin` is given the capabilities `cap_dac_read_search,cap_sys_ptrace+ep`.
+When Netdata is installed, `apps.plugin` is given the capabilities `cap_dac_read_search,cap_sys_ptrace+ep`.
If this fails (i.e. `setcap` fails), `apps.plugin` is setuid to `root`.
-#### linux capabilities in containers
+### Linux capabilities in containers
There are a few cases, like `docker` and `virtuozzo` containers, where `setcap` succeeds, but the capabilities
are silently ignored (in `lxc` containers `setcap` fails).
@@ -158,15 +170,15 @@ chown root:netdata /usr/libexec/netdata/plugins.d/apps.plugin
chmod 4750 /usr/libexec/netdata/plugins.d/apps.plugin
```
-You will have to run these, every time you update netdata.
+You will have to run these, every time you update Netdata.
## Security
`apps.plugin` performs a hard-coded function of building the process tree in memory,
-iterating forever, collecting metrics for each running process and sending them to netdata.
-This is a one-way communication, from `apps.plugin` to netdata.
+iterating forever, collecting metrics for each running process and sending them to Netdata.
+This is a one-way communication, from `apps.plugin` to Netdata.
-So, since `apps.plugin` cannot be instructed by netdata for the actions it performs,
+So, since `apps.plugin` cannot be instructed by Netdata for the actions it performs,
we think it is pretty safe to allow it have these increased privileges.
Keep in mind that `apps.plugin` will still run without escalated permissions,
@@ -189,28 +201,27 @@ Here is an example for the process group `sql` at `https://registry.my-netdata.i
Netdata is able give you a lot more badges for your app.
Examples below for process group `sql`:
-- CPU usage: ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.cpu&dimensions=sql&value_color=green=0%7Corange%3C50%7Cred)
-- Disk Physical Reads ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.preads&dimensions=sql&value_color=green%3C100%7Corange%3C1000%7Cred)
-- Disk Physical Writes ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.pwrites&dimensions=sql&value_color=green%3C100%7Corange%3C1000%7Cred)
-- Disk Logical Reads ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.lreads&dimensions=sql&value_color=green%3C100%7Corange%3C1000%7Cred)
-- Disk Logical Writes ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.lwrites&dimensions=sql&value_color=green%3C100%7Corange%3C1000%7Cred)
-- Open Files ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.files&dimensions=sql&value_color=green%3E30%7Cred)
-- Real Memory ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.mem&dimensions=sql&value_color=green%3C100%7Corange%3C200%7Cred)
-- Virtual Memory ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.vmem&dimensions=sql&value_color=green%3C100%7Corange%3C1000%7Cred)
-- Swap Memory ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.swap&dimensions=sql&value_color=green=0%7Cred)
-- Minor Page Faults ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.minor_faults&dimensions=sql&value_color=green%3C100%7Corange%3C1000%7Cred)
-- Processes ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.processes&dimensions=sql&value_color=green%3E0%7Cred)
-- Threads ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.threads&dimensions=sql&value_color=green%3E=28%7Cred)
-- Major Faults (swap activity) ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.major_faults&dimensions=sql&value_color=green=0%7Cred)
-- Open Pipes ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.pipes&dimensions=sql&value_color=green=0%7Cred)
-- Open Sockets ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.sockets&dimensions=sql&value_color=green%3E=3%7Cred)
-
+- CPU usage: ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.cpu&dimensions=sql&value_color=green=0%7Corange%3C50%7Cred)
+- Disk Physical Reads ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.preads&dimensions=sql&value_color=green%3C100%7Corange%3C1000%7Cred)
+- Disk Physical Writes ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.pwrites&dimensions=sql&value_color=green%3C100%7Corange%3C1000%7Cred)
+- Disk Logical Reads ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.lreads&dimensions=sql&value_color=green%3C100%7Corange%3C1000%7Cred)
+- Disk Logical Writes ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.lwrites&dimensions=sql&value_color=green%3C100%7Corange%3C1000%7Cred)
+- Open Files ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.files&dimensions=sql&value_color=green%3E30%7Cred)
+- Real Memory ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.mem&dimensions=sql&value_color=green%3C100%7Corange%3C200%7Cred)
+- Virtual Memory ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.vmem&dimensions=sql&value_color=green%3C100%7Corange%3C1000%7Cred)
+- Swap Memory ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.swap&dimensions=sql&value_color=green=0%7Cred)
+- Minor Page Faults ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.minor_faults&dimensions=sql&value_color=green%3C100%7Corange%3C1000%7Cred)
+- Processes ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.processes&dimensions=sql&value_color=green%3E0%7Cred)
+- Threads ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.threads&dimensions=sql&value_color=green%3E=28%7Cred)
+- Major Faults (swap activity) ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.major_faults&dimensions=sql&value_color=green=0%7Cred)
+- Open Pipes ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.pipes&dimensions=sql&value_color=green=0%7Cred)
+- Open Sockets ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.sockets&dimensions=sql&value_color=green%3E=3%7Cred)
For more information about badges check [Generating Badges](../../web/api/badges)
## Comparison with console tools
-Ssh to a server running netdata and execute this:
+SSH to a server running Netdata and execute this:
```sh
while true; do ls -l /var/run >/dev/null; done
@@ -225,7 +236,7 @@ identify the process that consumes so much CPU**.
Here is what common Linux console monitoring tools report:
-#### top
+### top
`top` reports that `bash` is using just 14%.
@@ -249,7 +260,7 @@ KiB Swap: 0 total, 0 free, 0 used. 753712 avail Mem
2 root 20 0 0 0 0 S 0.0 0.0 0:00.00 kthreadd
```
-#### htop
+### htop
Exactly like `top`, `htop` is providing an incomplete breakdown of the system CPU utilization.
@@ -267,7 +278,7 @@ Exactly like `top`, `htop` is providing an incomplete breakdown of the system CP
7019 netdata 20 0 138M 21016 2712 S 0.0 2.1 0:00.14 /usr/sbin/netdata
```
-#### atop
+### atop
`atop` also fails to break down CPU usage.
@@ -290,14 +301,13 @@ NET | eth0 ---- | pcki 16 | pcko 15 | si 1 Kbps | so 4 Kbps |
7009 0.01s 0.01s 0K 0K 0K 4K -- - S 0% netdata
```
-#### glances
+### glances
And the same is true for `glances`. The system runs at 100%, but `glances` reports only 17%
per process utilization.
Note also, that being a `python` program, `glances` uses 1.6% CPU while it runs.
-
```
localhost Uptime: 3 days, 21:42:00
@@ -318,24 +328,24 @@ FILE SYS Used Total 0.3 2.1 7009 netdata 0 S /usr/sbin/netdata
/ (vda1) 1.56G 29.5G 0.0 0.0 17 root 0 S oom_reaper
```
-#### why this happens?
+### Why does this happen?
All the console tools report usage based on the processes found running *at the moment they
examine the process tree*. So, they see just one `ls` command, which is actually very quick
with minor CPU utilization. But the shell, is spawning hundreds of them, one after another
(much like shell scripts do).
-#### what netdata reports?
+### What does Netdata report?
The total CPU utilization of the system:
![image](https://cloud.githubusercontent.com/assets/2662304/21076212/9198e5a6-bf2e-11e6-9bc0-6bdea25befb2.png)
-<br/>_**Figure 1**: The system overview section at netdata, just a few seconds after the command was run_
+<br/>***Figure 1**: The system overview section at Netdata, just a few seconds after the command was run*
And at the applications `apps.plugin` breaks down CPU usage per application:
![image](https://cloud.githubusercontent.com/assets/2662304/21076220/c9687848-bf2e-11e6-8d81-348592c5aca2.png)
-<br/>_**Figure 2**: The Applications section at netdata, just a few seconds after the command was run_
+<br/>***Figure 2**: The Applications section at Netdata, just a few seconds after the command was run*
So, the `ssh` session is using 95% CPU time.
@@ -344,7 +354,7 @@ Why `ssh`?
`apps.plugin` groups all processes based on its configuration file
[`/etc/netdata/apps_groups.conf`](apps_groups.conf)
(to edit it on your system run `/etc/netdata/edit-config apps_groups.conf`).
-The default configuration has nothing for `bash`, but it has for `sshd`, so netdata accumulates
+The default configuration has nothing for `bash`, but it has for `sshd`, so Netdata accumulates
all ssh sessions to a dimension on the charts, called `ssh`. This includes all the processes in
the process tree of `sshd`, **including the exited children**.
@@ -353,9 +363,9 @@ the process tree of `sshd`, **including the exited children**.
> `apps.plugin` does not use these mechanisms. The process grouping made by `apps.plugin` works
> on any Linux, `systemd` based or not.
-#### a more technical description of how netdata works
+#### A more technical description of how Netdata works
-netdata reads `/proc/<pid>/stat` for all processes, once per second and extracts `utime` and
+Netdata reads `/proc/<pid>/stat` for all processes once per second, and extracts `utime` and
`stime` (user and system cpu utilization), much like all the console tools do.
But it [also extracts `cutime` and `cstime`](https://github.com/netdata/netdata/blob/62596cc6b906b1564657510ca9135c08f6d4cdda/src/apps_plugin.c#L636-L642)
@@ -369,7 +379,7 @@ been reported for it prior to this iteration.
It is even trickier, because walking through the entire process tree takes some time itself. So,
if you sum the CPU utilization of all processes, you might have more CPU time than the reported
-total cpu time of the system. netdata solves this, by adapting the per process cpu utilization to
+total CPU time of the system. Netdata solves this by adapting the per-process CPU utilization to
the total of the system. [Netdata adds charts that document this normalization](https://london.my-netdata.io/default.html#menu_netdata_submenu_apps_plugin).
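The idea behind this normalization can be sketched as follows (a concept illustration only, not the actual `apps.plugin` code):

```c
#include <stddef.h>

// Concept sketch only, not the actual apps.plugin code: scale each
// process' measured CPU delta so that their sum matches the system-wide
// CPU delta measured over the same interval.
static void normalize_cpu(double *per_process, size_t n, double system_total) {
    double sum = 0.0;
    for(size_t i = 0; i < n; i++) sum += per_process[i];
    if(sum <= 0.0) return;             // nothing to normalize

    double ratio = system_total / sum; // below 1.0 when processes were over-counted
    for(size_t i = 0; i < n; i++) per_process[i] *= ratio;
}
```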
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fapps.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fapps.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/apps.plugin/apps_plugin.c b/collectors/apps.plugin/apps_plugin.c
index a757a5bd..29d38bb4 100644
--- a/collectors/apps.plugin/apps_plugin.c
+++ b/collectors/apps.plugin/apps_plugin.c
@@ -260,6 +260,12 @@ struct target {
kernel_uint_t openeventpolls;
kernel_uint_t openother;
+ kernel_uint_t starttime;
+ kernel_uint_t collected_starttime;
+ kernel_uint_t uptime_min;
+ kernel_uint_t uptime_sum;
+ kernel_uint_t uptime_max;
+
unsigned int processes; // how many processes have been merged to this
int exposed; // if set, we have sent this to netdata
int hidden; // if set, we set the hidden flag on the dimension
@@ -345,7 +351,7 @@ struct pid_stat {
// int64_t nice;
int32_t num_threads;
// int64_t itrealvalue;
- // kernel_uint_t starttime;
+ kernel_uint_t collected_starttime;
// kernel_uint_t vsize;
// kernel_uint_t rss;
// kernel_uint_t rsslim;
@@ -419,6 +425,8 @@ struct pid_stat {
usec_t io_collected_usec;
usec_t last_io_collected_usec;
+ kernel_uint_t uptime;
+
char *fds_dirname; // the full directory name in /proc/PID/fd
char *stat_filename;
@@ -433,6 +441,8 @@ struct pid_stat {
size_t pagesize;
+kernel_uint_t global_uptime;
+
// log each problem once per process
// log flood protection flags (log_thrown)
#define PID_LOG_IO 0x00000001
@@ -1421,7 +1431,8 @@ static inline int read_proc_pid_stat(struct pid_stat *p, void *ptr) {
// p->nice = str2kernel_uint_t(procfile_lineword(ff, 0, 18));
p->num_threads = (int32_t)str2uint32_t(procfile_lineword(ff, 0, 19));
// p->itrealvalue = str2kernel_uint_t(procfile_lineword(ff, 0, 20));
- // p->starttime = str2kernel_uint_t(procfile_lineword(ff, 0, 21));
+ p->collected_starttime = str2kernel_uint_t(procfile_lineword(ff, 0, 21)) / system_hz;
+ p->uptime = (global_uptime > p->collected_starttime)?(global_uptime - p->collected_starttime):0;
// p->vsize = str2kernel_uint_t(procfile_lineword(ff, 0, 22));
// p->rss = str2kernel_uint_t(procfile_lineword(ff, 0, 23));
// p->rsslim = str2kernel_uint_t(procfile_lineword(ff, 0, 24));
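
To make the two added lines concrete with hypothetical values: `starttime` (field 22 of `/proc/<pid>/stat`) is expressed in clock ticks since boot, so with `system_hz = 100` and a raw value of `360000` the process started `3600` seconds after boot; if the system has been up for `86400` seconds, the process uptime is `86400 - 3600 = 82800` seconds. The `>` guard simply prevents the subtraction from underflowing for processes started within the current second.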
@@ -1490,6 +1501,8 @@ cleanup:
return 0;
}
+// ----------------------------------------------------------------------------
+
static inline int read_proc_pid_io(struct pid_stat *p, void *ptr) {
(void)ptr;
#ifdef __FreeBSD__
@@ -2634,6 +2647,12 @@ static int collect_data_for_all_processes(void) {
collect_data_for_pid(pid, &procbase[i]);
}
#else
+ static char uptime_filename[FILENAME_MAX + 1] = "";
+ if(*uptime_filename == '\0')
+ snprintfz(uptime_filename, FILENAME_MAX, "%s/proc/uptime", netdata_configured_host_prefix);
+
+ global_uptime = (kernel_uint_t)(uptime_msec(uptime_filename) / MSEC_PER_SEC);
+
char dirname[FILENAME_MAX + 1];
snprintfz(dirname, FILENAME_MAX, "%s/proc", netdata_configured_host_prefix);
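
`uptime_msec()` is the helper used above to read the system uptime; a standalone sketch of the same parsing (the first field of `/proc/uptime` is seconds since boot, with a fractional part, e.g. `86400.52`):

```
/* Sketch: parse seconds-since-boot from /proc/uptime.
 * Returns -1 on error. */
#include <stdio.h>

long long read_uptime_seconds(const char *path) {
    FILE *f = fopen(path, "r");
    if (!f) return -1;
    double up;
    int ok = (fscanf(f, "%lf", &up) == 1);
    fclose(f);
    return ok ? (long long)up : -1;
}
```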
@@ -2879,6 +2898,11 @@ static size_t zero_all_targets(struct target *root) {
w->openother = 0;
}
+ w->collected_starttime = 0;
+ w->uptime_min = 0;
+ w->uptime_sum = 0;
+ w->uptime_max = 0;
+
if(unlikely(w->root_pid)) {
struct pid_on_target *pid_on_target_to_free, *pid_on_target = w->root_pid;
@@ -3032,6 +3056,11 @@ static inline void aggregate_pid_on_target(struct target *w, struct pid_stat *p,
w->processes++;
w->num_threads += p->num_threads;
+ if(!w->collected_starttime || p->collected_starttime < w->collected_starttime) w->collected_starttime = p->collected_starttime;
+ if(!w->uptime_min || p->uptime < w->uptime_min) w->uptime_min = p->uptime;
+ w->uptime_sum += p->uptime;
+ if(!w->uptime_max || w->uptime_max < p->uptime) w->uptime_max = p->uptime;
+
if(unlikely(debug_enabled || w->debug_enabled)) {
debug_log_int("aggregating '%s' pid %d on target '%s' utime=" KERNEL_UINT_FORMAT ", stime=" KERNEL_UINT_FORMAT ", gtime=" KERNEL_UINT_FORMAT ", cutime=" KERNEL_UINT_FORMAT ", cstime=" KERNEL_UINT_FORMAT ", cgtime=" KERNEL_UINT_FORMAT ", minflt=" KERNEL_UINT_FORMAT ", majflt=" KERNEL_UINT_FORMAT ", cminflt=" KERNEL_UINT_FORMAT ", cmajflt=" KERNEL_UINT_FORMAT "", p->comm, p->pid, w->name, p->utime, p->stime, p->gtime, p->cutime, p->cstime, p->cgtime, p->minflt, p->majflt, p->cminflt, p->cmajflt);
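
Note that the aggregation above keeps three statistics per target: the minimum, the sum (from which the average is later derived as `uptime_sum / processes` at send time), and the maximum. The `!w->uptime_min` / `!w->uptime_max` guards treat zero as "not yet set" for the first process merged into the target.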
@@ -3042,6 +3071,19 @@ static inline void aggregate_pid_on_target(struct target *w, struct pid_stat *p,
}
}
+static inline void post_aggregate_targets(struct target *root) {
+ struct target *w;
+ for (w = root; w ; w = w->next) {
+ if(w->collected_starttime) {
+ if (!w->starttime || w->collected_starttime < w->starttime) {
+ w->starttime = w->collected_starttime;
+ }
+ } else {
+ w->starttime = 0;
+ }
+ }
+}
+
static void calculate_netdata_statistics(void) {
apply_apps_groups_targets_inheritance();
@@ -3102,6 +3144,10 @@ static void calculate_netdata_statistics(void) {
aggregate_pid_fds_on_targets(p);
}
+ post_aggregate_targets(apps_groups_root_target);
+ post_aggregate_targets(users_root_target);
+ post_aggregate_targets(groups_root_target);
+
cleanup_exited_pids();
}
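
`post_aggregate_targets()` is what makes the first new chart "carried over": `w->starttime` persists across iterations and only ever moves to an earlier value (the oldest start time observed), so a group's reported uptime keeps counting from its oldest member even as individual processes come and go, and it resets only when the target has no running processes left.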
@@ -3457,6 +3503,36 @@ static void send_collected_data_to_netdata(struct target *root, const char *type
}
send_END();
+#ifndef __FreeBSD__
+ send_BEGIN(type, "uptime", dt);
+ for (w = root; w ; w = w->next) {
+ if(unlikely(w->exposed && w->processes))
+ send_SET(w->name, (global_uptime > w->starttime)?(global_uptime - w->starttime):0);
+ }
+ send_END();
+
+ send_BEGIN(type, "uptime_min", dt);
+ for (w = root; w ; w = w->next) {
+ if(unlikely(w->exposed && w->processes))
+ send_SET(w->name, w->uptime_min);
+ }
+ send_END();
+
+ send_BEGIN(type, "uptime_avg", dt);
+ for (w = root; w ; w = w->next) {
+ if(unlikely(w->exposed && w->processes))
+ send_SET(w->name, w->processes?(w->uptime_sum / w->processes):0);
+ }
+ send_END();
+
+ send_BEGIN(type, "uptime_max", dt);
+ for (w = root; w ; w = w->next) {
+ if(unlikely(w->exposed && w->processes))
+ send_SET(w->name, w->uptime_max);
+ }
+ send_END();
+#endif
+
send_BEGIN(type, "mem", dt);
for (w = root; w ; w = w->next) {
if(unlikely(w->exposed && w->processes))
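
The `send_BEGIN`/`send_SET`/`send_END` helpers emit Netdata's external-plugin text protocol on stdout. For the first new chart, the output would look along these lines (names and values are illustrative):

```
BEGIN apps.uptime 1000000
SET ssh = 82800
SET netdata = 3600
END
```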
@@ -3615,6 +3691,32 @@ static void send_charts_updates_to_netdata(struct target *root, const char *type
fprintf(stdout, "DIMENSION %s '' absolute 1 1\n", w->name);
}
+#ifndef __FreeBSD__
+ fprintf(stdout, "CHART %s.uptime '' '%s Carried Over Uptime' 'seconds' processes %s.uptime line 20008 %d\n", type, title, type, update_every);
+ for (w = root; w ; w = w->next) {
+ if(unlikely(w->exposed))
+ fprintf(stdout, "DIMENSION %s '' absolute 1 1\n", w->name);
+ }
+
+ fprintf(stdout, "CHART %s.uptime_min '' '%s Minimum Uptime' 'seconds' processes %s.uptime_min line 20009 %d\n", type, title, type, update_every);
+ for (w = root; w ; w = w->next) {
+ if(unlikely(w->exposed))
+ fprintf(stdout, "DIMENSION %s '' absolute 1 1\n", w->name);
+ }
+
+ fprintf(stdout, "CHART %s.uptime_avg '' '%s Average Uptime' 'seconds' processes %s.uptime_avg line 20010 %d\n", type, title, type, update_every);
+ for (w = root; w ; w = w->next) {
+ if(unlikely(w->exposed))
+ fprintf(stdout, "DIMENSION %s '' absolute 1 1\n", w->name);
+ }
+
+ fprintf(stdout, "CHART %s.uptime_max '' '%s Maximum Uptime' 'seconds' processes %s.uptime_max line 20011 %d\n", type, title, type, update_every);
+ for (w = root; w ; w = w->next) {
+ if(unlikely(w->exposed))
+ fprintf(stdout, "DIMENSION %s '' absolute 1 1\n", w->name);
+ }
+#endif
+
fprintf(stdout, "CHART %s.cpu_user '' '%s CPU User Time (%d%% = %d core%s)' 'percentage' cpu %s.cpu_user stacked 20020 %d\n", type, title, (processors * 100), processors, (processors>1)?"s":"", type, update_every);
for (w = root; w ; w = w->next) {
if(unlikely(w->exposed))
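
For reference, the fields of each `CHART` line printed above are, in order: `type.id`, name, title, units, family, context, chart type, priority and update frequency; each `DIMENSION` line then declares a dimension with its algorithm (`absolute` here), multiplier and divisor. A hypothetical rendering of the first new chart definition:

```
CHART apps.uptime '' 'Apps Carried Over Uptime' 'seconds' processes apps.uptime line 20008 1
DIMENSION ssh '' absolute 1 1
```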
@@ -3855,6 +3957,10 @@ static void parse_args(int argc, char **argv)
" without-files enable / disable reporting files, sockets, pipes\n"
" (default is enabled)\n"
"\n"
+ " without-users disable reporting per user charts\n"
+ "\n"
+ " without-groups disable reporting per user group charts\n"
+ "\n"
#ifndef __FreeBSD__
" fds-cache-secs N cache the files of processed for N seconds\n"
" caching is adaptive per file (when a file\n"
diff --git a/collectors/cgroups.plugin/Makefile.in b/collectors/cgroups.plugin/Makefile.in
new file mode 100644
index 00000000..611ea570
--- /dev/null
+++ b/collectors/cgroups.plugin/Makefile.in
@@ -0,0 +1,613 @@
+# Makefile.in generated by automake 1.15.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2017 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+
+VPATH = @srcdir@
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = collectors/cgroups.plugin
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(dist_plugins_SCRIPTS) \
+ $(dist_noinst_DATA) $(am__DIST_COMMON)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
+am__vpath_adj = case $$p in \
+ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
+ *) f=$$p;; \
+ esac;
+am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
+am__install_max = 40
+am__nobase_strip_setup = \
+ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
+am__nobase_strip = \
+ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
+am__nobase_list = $(am__nobase_strip_setup); \
+ for p in $$list; do echo "$$p $$p"; done | \
+ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
+ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
+ if (++n[$$2] == $(am__install_max)) \
+ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
+ END { for (dir in files) print dir, files[dir] }'
+am__base_list = \
+ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
+ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
+am__uninstall_files_from_dir = { \
+ test -z "$$files" \
+ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \
+ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \
+ $(am__cd) "$$dir" && rm -f $$files; }; \
+ }
+am__installdirs = "$(DESTDIR)$(pluginsdir)"
+SCRIPTS = $(dist_plugins_SCRIPTS)
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_noinst_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+am__DIST_COMMON = $(srcdir)/Makefile.in $(top_srcdir)/build/subst.inc
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CUPSCONFIG = @CUPSCONFIG@
+CXX = @CXX@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CXX_BINARY = @CXX_BINARY@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+JSON_CFLAGS = @JSON_CFLAGS@
+JSON_LIBS = @JSON_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
+LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
+LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
+LIBCURL_LIBS = @LIBCURL_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
+LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
+LIBSSL_LIBS = @LIBSSL_LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
+OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
+OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
+OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
+OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
+OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
+OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
+OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
+OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
+OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
+OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
+OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
+OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
+PROTOBUF_LIBS = @PROTOBUF_LIBS@
+PROTOC = @PROTOC@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
+XENLIGHT_LIBS = @XENLIGHT_LIBS@
+YAJL_CFLAGS = @YAJL_CFLAGS@
+YAJL_LIBS = @YAJL_LIBS@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+CLEANFILES = \
+ cgroup-name.sh \
+ $(NULL)
+
+SUFFIXES = .in
+dist_plugins_SCRIPTS = \
+ cgroup-name.sh \
+ cgroup-network-helper.sh \
+ $(NULL)
+
+dist_noinst_DATA = \
+ cgroup-name.sh.in \
+ README.md \
+ $(NULL)
+
+all: all-am
+
+.SUFFIXES:
+.SUFFIXES: .in
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(top_srcdir)/build/subst.inc $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/cgroups.plugin/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu collectors/cgroups.plugin/Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+$(top_srcdir)/build/subst.inc $(am__empty):
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+install-dist_pluginsSCRIPTS: $(dist_plugins_SCRIPTS)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(pluginsdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(pluginsdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \
+ done | \
+ sed -e 'p;s,.*/,,;n' \
+ -e 'h;s|.*|.|' \
+ -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \
+ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \
+ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \
+ if ($$2 == $$4) { files[d] = files[d] " " $$1; \
+ if (++n[d] == $(am__install_max)) { \
+ print "f", d, files[d]; n[d] = 0; files[d] = "" } } \
+ else { print "f", d "/" $$4, $$1 } } \
+ END { for (d in files) print "f", d, files[d] }' | \
+ while read type dir files; do \
+ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \
+ test -z "$$files" || { \
+ echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pluginsdir)$$dir'"; \
+ $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pluginsdir)$$dir" || exit $$?; \
+ } \
+ ; done
+
+uninstall-dist_pluginsSCRIPTS:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || exit 0; \
+ files=`for p in $$list; do echo "$$p"; done | \
+ sed -e 's,.*/,,;$(transform)'`; \
+ dir='$(DESTDIR)$(pluginsdir)'; $(am__uninstall_files_from_dir)
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(SCRIPTS) $(DATA)
+installdirs:
+ for dir in "$(DESTDIR)$(pluginsdir)"; do \
+ test -z "$$dir" || $(MKDIR_P) "$$dir"; \
+ done
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+ -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES)
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am: install-dist_pluginsSCRIPTS
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am: uninstall-dist_pluginsSCRIPTS
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dist_pluginsSCRIPTS install-dvi \
+ install-dvi-am install-exec install-exec-am install-html \
+ install-html-am install-info install-info-am install-man \
+ install-pdf install-pdf-am install-ps install-ps-am \
+ install-strip installcheck installcheck-am installdirs \
+ maintainer-clean maintainer-clean-generic mostlyclean \
+ mostlyclean-generic pdf pdf-am ps ps-am tags-am uninstall \
+ uninstall-am uninstall-dist_pluginsSCRIPTS
+
+.PRECIOUS: Makefile
+
+.in:
+ if sed \
+ -e 's#[@]localstatedir_POST@#$(localstatedir)#g' \
+ -e 's#[@]sbindir_POST@#$(sbindir)#g' \
+ -e 's#[@]pluginsdir_POST@#$(pluginsdir)#g' \
+ -e 's#[@]configdir_POST@#$(configdir)#g' \
+ -e 's#[@]libconfigdir_POST@#$(libconfigdir)#g' \
+ -e 's#[@]cachedir_POST@#$(cachedir)#g' \
+ -e 's#[@]registrydir_POST@#$(registrydir)#g' \
+ -e 's#[@]varlibdir_POST@#$(varlibdir)#g' \
+ $< > $@.tmp; then \
+ mv "$@.tmp" "$@"; \
+ else \
+ rm -f "$@.tmp"; \
+ false; \
+ fi
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
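
The `.in:` suffix rule above is what turns templates such as `cgroup-name.sh.in` into installable scripts, substituting each `@..._POST@` placeholder with the configured directory. A hypothetical before/after for a single template line:

```
# in cgroup-name.sh.in (template, hypothetical line)
NETDATA_STOCK_CONFIG_DIR="@libconfigdir_POST@"
# in the generated cgroup-name.sh (example path)
NETDATA_STOCK_CONFIG_DIR="/usr/lib/netdata/conf.d"
```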
diff --git a/collectors/cgroups.plugin/README.md b/collectors/cgroups.plugin/README.md
index 6ec9024d..f34055b3 100644
--- a/collectors/cgroups.plugin/README.md
+++ b/collectors/cgroups.plugin/README.md
@@ -6,11 +6,11 @@ cgroups (or control groups) are a Linux kernel feature that provides accounting
cgroups are hierarchical, meaning that cgroups can contain child cgroups, which can contain more cgroups, etc. All accounting is reported (and resource usage limits are applied) also in a hierarchical way.
-To visualize cgroup metrics netdata provides configuration for cherry picking the cgroups of interest. By default (without any configuration) netdata should pick **systemd services**, all kinds of **containers** (lxc, docker, etc) and **virtual machines** spawn by managers that register them with cgroups (qemu, libvirt, etc).
+To visualize cgroup metrics Netdata provides configuration for cherry-picking the cgroups of interest. By default (without any configuration) Netdata should pick **systemd services**, all kinds of **containers** (lxc, docker, etc) and **virtual machines** spawned by managers that register them with cgroups (qemu, libvirt, etc).
-## configuring netdata for cgroups
+## configuring Netdata for cgroups
-For each cgroup available in the system, netdata provides this configuration:
+For each cgroup available in the system, Netdata provides this configuration:
```
[plugin:cgroups]
@@ -21,9 +21,9 @@ But it also provides a few patterns to provide a sane default (`yes` or `no`).
Below we see how this works.
-### how netdata finds the available cgroups
+### how Netdata finds the available cgroups
-Linux exposes resource usage reporting and provides dynamic configuration for cgroups, using virtual files (usually) under `/sys/fs/cgroup`. netdata reads `/proc/self/mountinfo` to detect the exact mount point of cgroups. netdata also allows manual configuration of this mount point, using these settings:
+Linux exposes resource usage reporting and provides dynamic configuration for cgroups, using virtual files (usually) under `/sys/fs/cgroup`. Netdata reads `/proc/self/mountinfo` to detect the exact mount point of cgroups. Netdata also allows manual configuration of this mount point, using these settings:
```
[plugin:cgroups]
@@ -34,27 +34,27 @@ Linux exposes resource usage reporting and provides dynamic configuration for cg
path to /sys/fs/cgroup/devices = /sys/fs/cgroup/devices
```
-netdata rescans these directories for added or removed cgroups every `check for new cgroups every` seconds.
+Netdata rescans these directories for added or removed cgroups every `check for new cgroups every` seconds.
### hierarchical search for cgroups
-Since cgroups are hierarchical, for each of the directories shown above, netdata walks through the subdirectories recursively searching for cgroups (each subdirectory is another cgroup).
+Since cgroups are hierarchical, for each of the directories shown above, Netdata walks through the subdirectories recursively searching for cgroups (each subdirectory is another cgroup).
-For each of the directories found, netdata provides a configuration variable:
+For each of the directories found, Netdata provides a configuration variable:
```
[plugin:cgroups]
search for cgroups under PATH = yes | no
```
-To provide a sane default for this setting, netdata uses the following pattern list (patterns starting with `!` give a negative match and their order is important: the first matching a path will be used):
+To provide a sane default for this setting, Netdata uses the following pattern list (patterns starting with `!` give a negative match and their order is important: the first matching a path will be used):
```
[plugin:cgroups]
search for cgroups in subpaths matching = !*/init.scope !*-qemu !/init.scope !/system !/systemd !/user !/user.slice *
```
-So, we disable checking for **child cgroups** in systemd internal cgroups ([systemd services are monitored by netdata](#monitoring-systemd-services)), user cgroups (normally used for desktop and remote user sessions), qemu virtual machines (child cgroups of virtual machines) and `init.scope`. All others are enabled.
+So, we disable checking for **child cgroups** in systemd internal cgroups ([systemd services are monitored by Netdata](#monitoring-systemd-services)), user cgroups (normally used for desktop and remote user sessions), qemu virtual machines (child cgroups of virtual machines) and `init.scope`. All others are enabled.
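
As a simplified illustration of the ordering rule, with hypothetical paths:

```
/user.slice     ->  first match is !/user.slice  ->  not searched (children never visited)
/machine.slice  ->  first match is *             ->  searched for child cgroups
```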
### unified cgroups (cgroups v2) support
@@ -68,17 +68,16 @@ Basic unified cgroups metrics are supported. To use them instead of v1 cgroups a
Unified cgroups use the same name pattern matching as v1 cgroups. `cgroup_enable_systemd_services_detailed_memory` is currently unsupported when using unified cgroups.
-
### enabled cgroups
-To check if the cgroup is enabled, netdata uses this setting:
+To check if the cgroup is enabled, Netdata uses this setting:
```
[plugin:cgroups]
enable cgroup NAME = yes | no
```
-To provide a sane default, netdata uses the following pattern list (it checks the pattern against the path of the cgroup):
+To provide a sane default, Netdata uses the following pattern list (it checks the pattern against the path of the cgroup):
```
[plugin:cgroups]
@@ -87,9 +86,9 @@ To provide a sane default, netdata uses the following pattern list (it checks th
The above provides the default `yes` or `no` setting for the cgroup. However, there is an additional step. In many cases the cgroups found in the `/sys/fs/cgroup` hierarchy are just random numbers, and these numbers are often ephemeral: they change across reboots or sessions.
-So, we need to somehow map the paths of the cgroups to names, to provide consistent netdata configuration (i.e. there is no point to say `enable cgroup 1234 = yes | no`, if `1234` is a random number that changes over time - we need a name for the cgroup first, so that `enable cgroup NAME = yes | no` will be consistent).
+So, we need to somehow map the paths of the cgroups to names, to provide consistent Netdata configuration (i.e. there is no point in saying `enable cgroup 1234 = yes | no`, if `1234` is a random number that changes over time - we need a name for the cgroup first, so that `enable cgroup NAME = yes | no` will be consistent).
-For this mapping netdata provides 2 configuration options:
+For this mapping Netdata provides 2 configuration options:
```
[plugin:cgroups]
@@ -99,11 +98,11 @@ For this mapping netdata provides 2 configuration options:
The whole point of the additional pattern list is to limit the number of times the script will be called. Without this pattern list, the script might be called thousands of times, depending on the number of cgroups available in the system.
-The above pattern list is matched against the path of the cgroup. For matched cgroups, netdata calls the script [cgroup-name.sh](cgroup-name.sh.in) to get its name. This script queries `docker`, or applies heuristics to find give a name for the cgroup.
+The above pattern list is matched against the path of the cgroup. For matched cgroups, Netdata calls the script [cgroup-name.sh](cgroup-name.sh.in) to get its name. This script queries `docker`, or applies heuristics to give a name to the cgroup.
### charts with zero metrics
-By default, Netdata will enable monitoring metrics only when they are not zero. If they are constantly zero they are ignored. Metrics that will start having values, after netdata is started, will be detected and charts will be automatically added to the dashboard (a refresh of the dashboard is needed for them to appear though). Set `yes` for a chart instead of `auto` to enable it permanently. For example:
+By default, Netdata will enable monitoring metrics only when they are not zero. If they are constantly zero they are ignored. Metrics that start having values after Netdata is started will be detected and charts will be automatically added to the dashboard (a refresh of the dashboard is needed for them to appear, though). Set `yes` for a chart instead of `auto` to enable it permanently. For example:
```
[plugin:cgroups]
@@ -118,26 +117,26 @@ CPU and memory limits are watched and used to raise alarms. Memory usage for ever
## Monitoring systemd services
-netdata monitors **systemd services**. Example:
+Netdata monitors **systemd services**. Example:
![image](https://cloud.githubusercontent.com/assets/2662304/21964372/20cd7b84-db53-11e6-98a2-b9c986b082c0.png)
Support per distribution:
-system|systemd services<br/>charts shown|`tree`<br/>`/sys/fs/cgroup`|comments
-:-------:|:-------:|:-------:|:------------
-Arch Linux|YES| |
-Gentoo|NO| |can be enabled, see below
-Ubuntu 16.04 LTS|YES| |
-Ubuntu 16.10|YES|[here](http://pastebin.com/PiWbQEXy)|
-Fedora 25|YES|[here](http://pastebin.com/ax0373wF)|
-Debian 8|NO| |can be enabled, see below
-AMI|NO|[here](http://pastebin.com/FrxmptjL)|not a systemd system
-Centos 7.3.1611|NO|[here](http://pastebin.com/SpzgezAg)|can be enabled, see below
+|system|systemd services<br/>charts shown|`tree`<br/>`/sys/fs/cgroup`|comments|
+|:----:|:-------------------------------:|:-------------------------:|:-------|
+|Arch Linux|YES|||
+|Gentoo|NO||can be enabled, see below|
+|Ubuntu 16.04 LTS|YES|||
+|Ubuntu 16.10|YES|[here](http://pastebin.com/PiWbQEXy)||
+|Fedora 25|YES|[here](http://pastebin.com/ax0373wF)||
+|Debian 8|NO||can be enabled, see below|
+|AMI|NO|[here](http://pastebin.com/FrxmptjL)|not a systemd system|
+|Centos 7.3.1611|NO|[here](http://pastebin.com/SpzgezAg)|can be enabled, see below|
-#### how to enable cgroup accounting on systemd systems that is by default disabled
+### how to enable cgroup accounting on systemd systems where it is disabled by default
-You can verify there is no accounting enabled, by running `systemd-cgtop`. The program will show only resources for cgroup ` / `, but all services will show nothing.
+You can verify that no accounting is enabled by running `systemd-cgtop`. The program will show resources only for the cgroup `/`, while all services will show nothing.
To enable cgroup accounting, execute this:
@@ -175,7 +174,7 @@ sudo systemctl daemon-reexec
(`systemctl daemon-reload` does not reload the configuration of the server - so you have to execute `systemctl daemon-reexec`).
-Now, when you run `systemd-cgtop`, services will start reporting usage (if it does not, restart a service - any service - to wake it up). Refresh your netdata dashboard, and you will have the charts too.
+Now, when you run `systemd-cgtop`, services will start reporting usage (if they do not, restart a service - any service - to wake them up). Refresh your Netdata dashboard, and you will have the charts too.
In case memory accounting is missing, you will need to enable it at your kernel, by appending the following kernel boot options and rebooting:
@@ -185,39 +184,37 @@ cgroup_enable=memory swapaccount=1
You can add the above, directly at the `linux` line in your `/boot/grub/grub.cfg` or appending them to the `GRUB_CMDLINE_LINUX` in `/etc/default/grub` (in which case you will have to run `update-grub` before rebooting). On DigitalOcean debian images you may have to set it at `/etc/default/grub.d/50-cloudimg-settings.cfg`.
-Which systemd services are monitored by netdata is determined by the following pattern list:
+Which systemd services are monitored by Netdata is determined by the following pattern list:
```
[plugin:cgroups]
cgroups to match as systemd services = !/system.slice/*/*.service /system.slice/*.service
```
----
+- - -
## Monitoring ephemeral containers
-netdata monitors containers automatically when it is installed at the host, or when it is installed in a container that has access to the `/proc` and `/sys` filesystems of the host.
+Netdata monitors containers automatically when it is installed at the host, or when it is installed in a container that has access to the `/proc` and `/sys` filesystems of the host.
-netdata prior to v1.6 had 2 issues when such containers were monitored:
+Netdata prior to v1.6 had 2 issues when such containers were monitored:
-1. network interface alarms where triggering when containers were stopped
+1. network interface alarms were triggering when containers were stopped
-2. charts were never cleaned up, so after some time dozens of containers were showing up on the dashboard, and they were occupying memory.
+2. charts were never cleaned up, so after some time dozens of containers were showing up on the dashboard, and they were occupying memory.
-
-### the current netdata
+### the current Netdata
network interfaces and cgroups (containers) are now self-cleaned.
-So, when a network interface or container stops, netdata might log a few errors in error.log complaining about files it cannot find, but immediately:
-
-1. it will detect this is a removed container or network interface
-2. it will freeze/pause all alarms for them
-3. it will mark their charts as obsolete
-4. obsolete charts are not be offered on new dashboard sessions (so hit F5 and the charts are gone)
-5. existing dashboard sessions will continue to see them, but of course they will not refresh
-6. obsolete charts will be removed from memory, 1 hour after the last user viewed them (configurable with `[global].cleanup obsolete charts after seconds = 3600` (at netdata.conf).
-7. when obsolete charts are removed from memory they are also deleted from disk (configurable with `[global].delete obsolete charts files = yes`)
+So, when a network interface or container stops, Netdata might log a few errors in error.log complaining about files it cannot find, but immediately:
+1. it will detect this is a removed container or network interface
+2. it will freeze/pause all alarms for them
+3. it will mark their charts as obsolete
+4. obsolete charts are not offered on new dashboard sessions (so hit F5 and the charts are gone)
+5. existing dashboard sessions will continue to see them, but of course they will not refresh
+6. obsolete charts will be removed from memory, 1 hour after the last user viewed them (configurable with `[global].cleanup obsolete charts after seconds = 3600` in `netdata.conf`)
+7. when obsolete charts are removed from memory they are also deleted from disk (configurable with `[global].delete obsolete charts files = yes`)
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcgroups.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcgroups.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/cgroups.plugin/cgroup-name.sh b/collectors/cgroups.plugin/cgroup-name.sh
new file mode 100644
index 00000000..5c033ca3
--- /dev/null
+++ b/collectors/cgroups.plugin/cgroup-name.sh
@@ -0,0 +1,218 @@
+#!/usr/bin/env bash
+#shellcheck disable=SC2001
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2016 Costa Tsaousis <costa@tsaousis.gr>
+# SPDX-License-Identifier: GPL-3.0-or-later
+#
+# Script to find a better name for cgroups
+#
+
+export PATH="${PATH}:/sbin:/usr/sbin:/usr/local/sbin"
+export LC_ALL=C
+
+# -----------------------------------------------------------------------------
+
+PROGRAM_NAME="$(basename "${0}")"
+
+logdate() {
+ date "+%Y-%m-%d %H:%M:%S"
+}
+
+log() {
+ local status="${1}"
+ shift
+
+ echo >&2 "$(logdate): ${PROGRAM_NAME}: ${status}: ${*}"
+
+}
+
+warning() {
+ log WARNING "${@}"
+}
+
+error() {
+ log ERROR "${@}"
+}
+
+info() {
+ log INFO "${@}"
+}
+
+fatal() {
+ log FATAL "${@}"
+ exit 1
+}
+
+function docker_get_name_classic() {
+ local id="${1}"
+ info "Running command: docker ps --filter=id=\"${id}\" --format=\"{{.Names}}\""
+ NAME="$(docker ps --filter=id="${id}" --format="{{.Names}}")"
+ return 0
+}
+
+function docker_get_name_api() {
+ local path="/containers/${1}/json"
+ if [ -z "${DOCKER_HOST}" ]; then
+ warning "No DOCKER_HOST is set"
+ return 1
+ fi
+ if ! command -v jq >/dev/null 2>&1; then
+ warning "Can't find jq command line tool. jq is required for netdata to retrieve docker container name using ${DOCKER_HOST} API, falling back to docker ps"
+ return 1
+ fi
+ if [ -S "${DOCKER_HOST}" ]; then
+ info "Running API command: curl --unix-socket ${DOCKER_HOST} http://localhost${path}"
+ JSON=$(curl -sS --unix-socket "${DOCKER_HOST}" "http://localhost${path}")
+ elif [ "${DOCKER_HOST}" == "/var/run/docker.sock" ]; then
+ warning "Docker socket was not found at ${DOCKER_HOST}"
+ return 1
+ else
+ info "Running API command: curl ${DOCKER_HOST}${path}"
+ JSON=$(curl -sS "${DOCKER_HOST}${path}")
+ fi
+ NAME=$(echo "$JSON" | jq -r .Name,.Config.Hostname | grep -v null | head -n1 | sed 's|^/||')
+ return 0
+}
+
+function k8s_get_name() {
+ # Take the last part of the delimited path identifier (expecting either _ or / as a delimiter).
+ local id="${1##*_}"
+ if [ "${id}" == "${1}" ]; then
+ id="${1##*/}"
+ fi
+ KUBE_TOKEN="$(</var/run/secrets/kubernetes.io/serviceaccount/token)"
+ if command -v jq >/dev/null 2>&1; then
+ NAME="$(
+ curl -sSk -H "Authorization: Bearer $KUBE_TOKEN" "https://$KUBERNETES_SERVICE_HOST:$KUBERNETES_PORT_443_TCP_PORT/api/v1/pods" |
+ jq -r '.items[] | "k8s_\(.metadata.namespace)_\(.metadata.name)_\(.metadata.uid)_\(.status.containerStatuses[0].name) \(.status.containerStatuses[0].containerID)"' |
+ grep "$id" |
+ cut -d' ' -f1
+ )"
+ else
+ warning "jq command not available, k8s_get_name() cannot execute. Please install jq should you wish for k8s to be fully functional"
+ fi
+
+ if [ -z "${NAME}" ]; then
+ warning "cannot find the name of k8s pod with containerID '${id}'. Setting name to ${id} and disabling it"
+ NAME="${id}"
+ NAME_NOT_FOUND=3
+ else
+ info "k8s containerID '${id}' has chart name (namespace_podname_poduid_containername) '${NAME}'"
+ fi
+}
+
+function docker_get_name() {
+ local id="${1}"
+ if hash docker 2>/dev/null; then
+ docker_get_name_classic "${id}"
+ else
+ docker_get_name_api "${id}" || docker_get_name_classic "${id}"
+ fi
+ if [ -z "${NAME}" ]; then
+ warning "cannot find the name of docker container '${id}'"
+ NAME_NOT_FOUND=2
+ NAME="${id:0:12}"
+ else
+ info "docker container '${id}' is named '${NAME}'"
+ fi
+}
+
+function docker_validate_id() {
+ local id="${1}"
+ if [ -n "${id}" ] && { [ ${#id} -eq 64 ] || [ ${#id} -eq 12 ]; }; then
+ docker_get_name "${id}"
+ else
+ error "a docker id cannot be extracted from docker cgroup '${CGROUP}'."
+ fi
+}
+
+
+# -----------------------------------------------------------------------------
+
+[ -z "${NETDATA_USER_CONFIG_DIR}" ] && NETDATA_USER_CONFIG_DIR="/etc/netdata"
+[ -z "${NETDATA_STOCK_CONFIG_DIR}" ] && NETDATA_STOCK_CONFIG_DIR="/usr/lib/netdata/conf.d"
+
+DOCKER_HOST="${DOCKER_HOST:=/var/run/docker.sock}"
+CGROUP="${1}"
+NAME_NOT_FOUND=0
+NAME=
+
+# -----------------------------------------------------------------------------
+
+if [ -z "${CGROUP}" ]; then
+ fatal "called without a cgroup name. Nothing to do."
+fi
+
+for CONFIG in "${NETDATA_USER_CONFIG_DIR}/cgroups-names.conf" "${NETDATA_STOCK_CONFIG_DIR}/cgroups-names.conf"; do
+ if [ -f "${CONFIG}" ]; then
+ NAME="$(grep "^${CGROUP} " "${CONFIG}" | sed 's/[[:space:]]\+/ /g' | cut -d ' ' -f 2)"
+ if [ -z "${NAME}" ]; then
+ info "cannot find cgroup '${CGROUP}' in '${CONFIG}'."
+ else
+ break
+ fi
+ #else
+ # info "configuration file '${CONFIG}' is not available."
+ fi
+done
+
+if [ -z "${NAME}" ] && [ -n "${KUBERNETES_SERVICE_HOST}" ] && [ -n "${KUBERNETES_PORT_443_TCP_PORT}" ] && [[ ${CGROUP} =~ ^.*kubepods.* ]]; then
+ k8s_get_name "${CGROUP}"
+fi
+
+if [ -z "${NAME}" ]; then
+ if [[ ${CGROUP} =~ ^.*docker[-_/\.][a-fA-F0-9]+[-_\.]?.*$ ]]; then
+ # docker containers
+ #shellcheck disable=SC1117
+ DOCKERID="$(echo "${CGROUP}" | sed "s|^.*docker[-_/]\([a-fA-F0-9]\+\)[-_\.]\?.*$|\1|")"
+ docker_validate_id "${DOCKERID}"
+ elif [[ ${CGROUP} =~ ^.*ecs[-_/\.][a-fA-F0-9]+[-_\.]?.*$ ]]; then
+ # ECS
+ #shellcheck disable=SC1117
+ DOCKERID="$(echo "${CGROUP}" | sed "s|^.*ecs[-_/].*[-_/]\([a-fA-F0-9]\+\)[-_\.]\?.*$|\1|")"
+ docker_validate_id "${DOCKERID}"
+
+ elif [[ ${CGROUP} =~ machine.slice[_/].*\.service ]]; then
+ # systemd-nspawn
+ NAME="$(echo "${CGROUP}" | sed 's/.*machine.slice[_\/]\(.*\)\.service/\1/g')"
+
+ elif [[ ${CGROUP} =~ machine.slice_machine.*-qemu ]]; then
+ # libvirtd / qemu virtual machines
+ # NAME="$(echo ${CGROUP} | sed 's/machine.slice_machine.*-qemu//; s/\/x2d//; s/\/x2d/\-/g; s/\.scope//g')"
+ NAME="qemu_$(echo "${CGROUP}" | sed 's/machine.slice_machine.*-qemu//; s/\/x2d[[:digit:]]*//; s/\/x2d//g; s/\.scope//g')"
+
+ elif [[ ${CGROUP} =~ machine_.*\.libvirt-qemu ]]; then
+ # libvirtd / qemu virtual machines
+ NAME="qemu_$(echo "${CGROUP}" | sed 's/^machine_//; s/\.libvirt-qemu$//; s/-/_/;')"
+
+ elif [[ ${CGROUP} =~ qemu.slice_([0-9]+).scope && -d /etc/pve ]]; then
+ # Proxmox VMs
+
+ FILENAME="/etc/pve/qemu-server/${BASH_REMATCH[1]}.conf"
+ if [[ -f $FILENAME && -r $FILENAME ]]; then
+ NAME="qemu_$(grep -e '^name: ' "/etc/pve/qemu-server/${BASH_REMATCH[1]}.conf" | head -1 | sed -rn 's|\s*name\s*:\s*(.*)?$|\1|p')"
+ else
+ error "proxmox config file missing ${FILENAME} or netdata does not have read access. Please ensure netdata is a member of www-data group."
+ fi
+ elif [[ ${CGROUP} =~ lxc_([0-9]+) && -d /etc/pve ]]; then
+ # Proxmox Containers (LXC)
+
+ FILENAME="/etc/pve/lxc/${BASH_REMATCH[1]}.conf"
+ if [[ -f ${FILENAME} && -r ${FILENAME} ]]; then
+ NAME=$(grep -e '^hostname: ' "/etc/pve/lxc/${BASH_REMATCH[1]}.conf" | head -1 | sed -rn 's|\s*hostname\s*:\s*(.*)?$|\1|p')
+ else
+ error "proxmox config file missing ${FILENAME} or netdata does not have read access. Please ensure netdata is a member of www-data group."
+ fi
+ fi
+
+ [ -z "${NAME}" ] && NAME="${CGROUP}"
+ [ ${#NAME} -gt 100 ] && NAME="${NAME:0:100}"
+fi
+
+info "cgroup '${CGROUP}' is called '${NAME}'"
+echo "${NAME}"
+
+exit ${NAME_NOT_FOUND}
+
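
A hypothetical invocation with a Docker-style cgroup name (the printed name depends on what `docker ps` or the API returns for that id):

```
$ ./cgroup-name.sh "docker_0f5b2b291a09cd275fed6e8e2ccab0bf3bb653be3a68f2cbf73c997d6b9dfc24"
my-nginx
```

The script exits with `NAME_NOT_FOUND` (`2` for docker, `3` for k8s) when it had to fall back to the raw id.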
diff --git a/collectors/charts.d.plugin/Makefile.in b/collectors/charts.d.plugin/Makefile.in
new file mode 100644
index 00000000..0ef00d93
--- /dev/null
+++ b/collectors/charts.d.plugin/Makefile.in
@@ -0,0 +1,1003 @@
+# Makefile.in generated by automake 1.15.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2017 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+
+VPATH = @srcdir@
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = collectors/charts.d.plugin
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(dist_charts_SCRIPTS) \
+ $(dist_plugins_SCRIPTS) $(dist_charts_DATA) \
+ $(dist_chartsconfig_DATA) $(dist_libconfig_DATA) \
+ $(dist_noinst_DATA) $(dist_userchartsconfig_DATA) \
+ $(am__DIST_COMMON)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
+am__vpath_adj = case $$p in \
+ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
+ *) f=$$p;; \
+ esac;
+am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
+am__install_max = 40
+am__nobase_strip_setup = \
+ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
+am__nobase_strip = \
+ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
+am__nobase_list = $(am__nobase_strip_setup); \
+ for p in $$list; do echo "$$p $$p"; done | \
+ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
+ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
+ if (++n[$$2] == $(am__install_max)) \
+ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
+ END { for (dir in files) print dir, files[dir] }'
+am__base_list = \
+ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
+ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
+am__uninstall_files_from_dir = { \
+ test -z "$$files" \
+ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \
+ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \
+ $(am__cd) "$$dir" && rm -f $$files; }; \
+ }
+am__installdirs = "$(DESTDIR)$(chartsdir)" "$(DESTDIR)$(pluginsdir)" \
+ "$(DESTDIR)$(chartsdir)" "$(DESTDIR)$(chartsconfigdir)" \
+ "$(DESTDIR)$(libconfigdir)" "$(DESTDIR)$(userchartsconfigdir)"
+SCRIPTS = $(dist_charts_SCRIPTS) $(dist_plugins_SCRIPTS)
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_charts_DATA) $(dist_chartsconfig_DATA) \
+ $(dist_libconfig_DATA) $(dist_noinst_DATA) \
+ $(dist_userchartsconfig_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+am__DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/ap/Makefile.inc \
+ $(srcdir)/apache/Makefile.inc $(srcdir)/apcupsd/Makefile.inc \
+ $(srcdir)/cpu_apps/Makefile.inc $(srcdir)/cpufreq/Makefile.inc \
+ $(srcdir)/example/Makefile.inc $(srcdir)/exim/Makefile.inc \
+ $(srcdir)/hddtemp/Makefile.inc \
+ $(srcdir)/libreswan/Makefile.inc \
+ $(srcdir)/load_average/Makefile.inc \
+ $(srcdir)/mem_apps/Makefile.inc $(srcdir)/mysql/Makefile.inc \
+ $(srcdir)/nginx/Makefile.inc $(srcdir)/nut/Makefile.inc \
+ $(srcdir)/opensips/Makefile.inc $(srcdir)/phpfpm/Makefile.inc \
+ $(srcdir)/postfix/Makefile.inc $(srcdir)/sensors/Makefile.inc \
+ $(srcdir)/squid/Makefile.inc $(srcdir)/tomcat/Makefile.inc \
+ $(top_srcdir)/build/subst.inc
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CUPSCONFIG = @CUPSCONFIG@
+CXX = @CXX@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CXX_BINARY = @CXX_BINARY@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+JSON_CFLAGS = @JSON_CFLAGS@
+JSON_LIBS = @JSON_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
+LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
+LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
+LIBCURL_LIBS = @LIBCURL_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
+LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
+LIBSSL_LIBS = @LIBSSL_LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
+OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
+OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
+OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
+OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
+OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
+OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
+OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
+OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
+OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
+OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
+OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
+OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
+PROTOBUF_LIBS = @PROTOBUF_LIBS@
+PROTOC = @PROTOC@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
+XENLIGHT_LIBS = @XENLIGHT_LIBS@
+YAJL_CFLAGS = @YAJL_CFLAGS@
+YAJL_LIBS = @YAJL_LIBS@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+CLEANFILES = \
+ charts.d.plugin \
+ $(NULL)
+
+SUFFIXES = .in
+dist_libconfig_DATA = \
+ charts.d.conf \
+ $(NULL)
+
+dist_plugins_SCRIPTS = \
+ charts.d.dryrun-helper.sh \
+ charts.d.plugin \
+ loopsleepms.sh.inc \
+ $(NULL)
+
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA = charts.d.plugin.in README.md $(NULL) ap/README.md \
+ ap/Makefile.inc apache/README.md apache/Makefile.inc \
+ apcupsd/README.md apcupsd/Makefile.inc cpu_apps/README.md \
+ cpu_apps/Makefile.inc cpufreq/README.md cpufreq/Makefile.inc \
+ example/README.md example/Makefile.inc exim/README.md \
+ exim/Makefile.inc hddtemp/README.md hddtemp/Makefile.inc \
+ libreswan/README.md libreswan/Makefile.inc \
+ load_average/README.md load_average/Makefile.inc \
+ mem_apps/README.md mem_apps/Makefile.inc mysql/README.md \
+ mysql/Makefile.inc nginx/README.md nginx/Makefile.inc \
+ nut/README.md nut/Makefile.inc opensips/README.md \
+ opensips/Makefile.inc phpfpm/README.md phpfpm/Makefile.inc \
+ postfix/README.md postfix/Makefile.inc sensors/README.md \
+ sensors/Makefile.inc squid/README.md squid/Makefile.inc \
+ tomcat/README.md tomcat/Makefile.inc
+dist_charts_SCRIPTS = \
+ $(NULL)
+
+
+# install these files
+dist_charts_DATA = $(NULL) ap/ap.chart.sh apache/apache.chart.sh \
+ apcupsd/apcupsd.chart.sh cpu_apps/cpu_apps.chart.sh \
+ cpufreq/cpufreq.chart.sh example/example.chart.sh \
+ exim/exim.chart.sh hddtemp/hddtemp.chart.sh \
+ libreswan/libreswan.chart.sh \
+ load_average/load_average.chart.sh mem_apps/mem_apps.chart.sh \
+ mysql/mysql.chart.sh nginx/nginx.chart.sh nut/nut.chart.sh \
+ opensips/opensips.chart.sh phpfpm/phpfpm.chart.sh \
+ postfix/postfix.chart.sh sensors/sensors.chart.sh \
+ squid/squid.chart.sh tomcat/tomcat.chart.sh
+userchartsconfigdir = $(configdir)/charts.d
+dist_userchartsconfig_DATA = \
+ .keep \
+ $(NULL)
+
+chartsconfigdir = $(libconfigdir)/charts.d
+dist_chartsconfig_DATA = $(NULL) ap/ap.conf apache/apache.conf \
+ apcupsd/apcupsd.conf cpu_apps/cpu_apps.conf \
+ cpufreq/cpufreq.conf example/example.conf exim/exim.conf \
+ hddtemp/hddtemp.conf libreswan/libreswan.conf \
+ load_average/load_average.conf mem_apps/mem_apps.conf \
+ mysql/mysql.conf nginx/nginx.conf nut/nut.conf \
+ opensips/opensips.conf phpfpm/phpfpm.conf postfix/postfix.conf \
+ sensors/sensors.conf squid/squid.conf tomcat/tomcat.conf
+all: all-am
+
+.SUFFIXES:
+.SUFFIXES: .in
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(top_srcdir)/build/subst.inc $(srcdir)/ap/Makefile.inc $(srcdir)/apache/Makefile.inc $(srcdir)/apcupsd/Makefile.inc $(srcdir)/cpu_apps/Makefile.inc $(srcdir)/cpufreq/Makefile.inc $(srcdir)/example/Makefile.inc $(srcdir)/exim/Makefile.inc $(srcdir)/hddtemp/Makefile.inc $(srcdir)/libreswan/Makefile.inc $(srcdir)/load_average/Makefile.inc $(srcdir)/mem_apps/Makefile.inc $(srcdir)/mysql/Makefile.inc $(srcdir)/nginx/Makefile.inc $(srcdir)/nut/Makefile.inc $(srcdir)/opensips/Makefile.inc $(srcdir)/phpfpm/Makefile.inc $(srcdir)/postfix/Makefile.inc $(srcdir)/sensors/Makefile.inc $(srcdir)/squid/Makefile.inc $(srcdir)/tomcat/Makefile.inc $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/charts.d.plugin/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu collectors/charts.d.plugin/Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+$(top_srcdir)/build/subst.inc $(srcdir)/ap/Makefile.inc $(srcdir)/apache/Makefile.inc $(srcdir)/apcupsd/Makefile.inc $(srcdir)/cpu_apps/Makefile.inc $(srcdir)/cpufreq/Makefile.inc $(srcdir)/example/Makefile.inc $(srcdir)/exim/Makefile.inc $(srcdir)/hddtemp/Makefile.inc $(srcdir)/libreswan/Makefile.inc $(srcdir)/load_average/Makefile.inc $(srcdir)/mem_apps/Makefile.inc $(srcdir)/mysql/Makefile.inc $(srcdir)/nginx/Makefile.inc $(srcdir)/nut/Makefile.inc $(srcdir)/opensips/Makefile.inc $(srcdir)/phpfpm/Makefile.inc $(srcdir)/postfix/Makefile.inc $(srcdir)/sensors/Makefile.inc $(srcdir)/squid/Makefile.inc $(srcdir)/tomcat/Makefile.inc $(am__empty):
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+install-dist_chartsSCRIPTS: $(dist_charts_SCRIPTS)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_charts_SCRIPTS)'; test -n "$(chartsdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(chartsdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(chartsdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \
+ done | \
+ sed -e 'p;s,.*/,,;n' \
+ -e 'h;s|.*|.|' \
+ -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \
+ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \
+ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \
+ if ($$2 == $$4) { files[d] = files[d] " " $$1; \
+ if (++n[d] == $(am__install_max)) { \
+ print "f", d, files[d]; n[d] = 0; files[d] = "" } } \
+ else { print "f", d "/" $$4, $$1 } } \
+ END { for (d in files) print "f", d, files[d] }' | \
+ while read type dir files; do \
+ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \
+ test -z "$$files" || { \
+ echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(chartsdir)$$dir'"; \
+ $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(chartsdir)$$dir" || exit $$?; \
+ } \
+ ; done
+
+uninstall-dist_chartsSCRIPTS:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_charts_SCRIPTS)'; test -n "$(chartsdir)" || exit 0; \
+ files=`for p in $$list; do echo "$$p"; done | \
+ sed -e 's,.*/,,;$(transform)'`; \
+ dir='$(DESTDIR)$(chartsdir)'; $(am__uninstall_files_from_dir)
+install-dist_pluginsSCRIPTS: $(dist_plugins_SCRIPTS)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(pluginsdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(pluginsdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \
+ done | \
+ sed -e 'p;s,.*/,,;n' \
+ -e 'h;s|.*|.|' \
+ -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \
+ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \
+ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \
+ if ($$2 == $$4) { files[d] = files[d] " " $$1; \
+ if (++n[d] == $(am__install_max)) { \
+ print "f", d, files[d]; n[d] = 0; files[d] = "" } } \
+ else { print "f", d "/" $$4, $$1 } } \
+ END { for (d in files) print "f", d, files[d] }' | \
+ while read type dir files; do \
+ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \
+ test -z "$$files" || { \
+ echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pluginsdir)$$dir'"; \
+ $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pluginsdir)$$dir" || exit $$?; \
+ } \
+ ; done
+
+uninstall-dist_pluginsSCRIPTS:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || exit 0; \
+ files=`for p in $$list; do echo "$$p"; done | \
+ sed -e 's,.*/,,;$(transform)'`; \
+ dir='$(DESTDIR)$(pluginsdir)'; $(am__uninstall_files_from_dir)
+install-dist_chartsDATA: $(dist_charts_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_charts_DATA)'; test -n "$(chartsdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(chartsdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(chartsdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(chartsdir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(chartsdir)" || exit $$?; \
+ done
+
+uninstall-dist_chartsDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_charts_DATA)'; test -n "$(chartsdir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(chartsdir)'; $(am__uninstall_files_from_dir)
+install-dist_chartsconfigDATA: $(dist_chartsconfig_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_chartsconfig_DATA)'; test -n "$(chartsconfigdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(chartsconfigdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(chartsconfigdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(chartsconfigdir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(chartsconfigdir)" || exit $$?; \
+ done
+
+uninstall-dist_chartsconfigDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_chartsconfig_DATA)'; test -n "$(chartsconfigdir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(chartsconfigdir)'; $(am__uninstall_files_from_dir)
+install-dist_libconfigDATA: $(dist_libconfig_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_libconfig_DATA)'; test -n "$(libconfigdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(libconfigdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(libconfigdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(libconfigdir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(libconfigdir)" || exit $$?; \
+ done
+
+uninstall-dist_libconfigDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_libconfig_DATA)'; test -n "$(libconfigdir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(libconfigdir)'; $(am__uninstall_files_from_dir)
+install-dist_userchartsconfigDATA: $(dist_userchartsconfig_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_userchartsconfig_DATA)'; test -n "$(userchartsconfigdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(userchartsconfigdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(userchartsconfigdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(userchartsconfigdir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(userchartsconfigdir)" || exit $$?; \
+ done
+
+uninstall-dist_userchartsconfigDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_userchartsconfig_DATA)'; test -n "$(userchartsconfigdir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(userchartsconfigdir)'; $(am__uninstall_files_from_dir)
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(SCRIPTS) $(DATA)
+installdirs:
+ for dir in "$(DESTDIR)$(chartsdir)" "$(DESTDIR)$(pluginsdir)" "$(DESTDIR)$(chartsdir)" "$(DESTDIR)$(chartsconfigdir)" "$(DESTDIR)$(libconfigdir)" "$(DESTDIR)$(userchartsconfigdir)"; do \
+ test -z "$$dir" || $(MKDIR_P) "$$dir"; \
+ done
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+ -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES)
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am: install-dist_chartsDATA install-dist_chartsSCRIPTS \
+ install-dist_chartsconfigDATA install-dist_libconfigDATA \
+ install-dist_pluginsSCRIPTS install-dist_userchartsconfigDATA
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am: uninstall-dist_chartsDATA uninstall-dist_chartsSCRIPTS \
+ uninstall-dist_chartsconfigDATA uninstall-dist_libconfigDATA \
+ uninstall-dist_pluginsSCRIPTS \
+ uninstall-dist_userchartsconfigDATA
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dist_chartsDATA \
+ install-dist_chartsSCRIPTS install-dist_chartsconfigDATA \
+ install-dist_libconfigDATA install-dist_pluginsSCRIPTS \
+ install-dist_userchartsconfigDATA install-dvi install-dvi-am \
+ install-exec install-exec-am install-html install-html-am \
+ install-info install-info-am install-man install-pdf \
+ install-pdf-am install-ps install-ps-am install-strip \
+ installcheck installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags-am uninstall uninstall-am \
+ uninstall-dist_chartsDATA uninstall-dist_chartsSCRIPTS \
+ uninstall-dist_chartsconfigDATA uninstall-dist_libconfigDATA \
+ uninstall-dist_pluginsSCRIPTS \
+ uninstall-dist_userchartsconfigDATA
+
+.PRECIOUS: Makefile
+
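+# build a file from its .in template by substituting the installation
+# paths for the @*_POST@ placeholders (e.g. charts.d.plugin from charts.d.plugin.in)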
+.in:
+ if sed \
+ -e 's#[@]localstatedir_POST@#$(localstatedir)#g' \
+ -e 's#[@]sbindir_POST@#$(sbindir)#g' \
+ -e 's#[@]pluginsdir_POST@#$(pluginsdir)#g' \
+ -e 's#[@]configdir_POST@#$(configdir)#g' \
+ -e 's#[@]libconfigdir_POST@#$(libconfigdir)#g' \
+ -e 's#[@]cachedir_POST@#$(cachedir)#g' \
+ -e 's#[@]registrydir_POST@#$(registrydir)#g' \
+ -e 's#[@]varlibdir_POST@#$(varlibdir)#g' \
+ $< > $@.tmp; then \
+ mv "$@.tmp" "$@"; \
+ else \
+ rm -f "$@.tmp"; \
+ false; \
+ fi
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/collectors/charts.d.plugin/README.md b/collectors/charts.d.plugin/README.md
index 3d318f26..6bd115f3 100644
--- a/collectors/charts.d.plugin/README.md
+++ b/collectors/charts.d.plugin/README.md
@@ -1,11 +1,11 @@
# charts.d.plugin
-`charts.d.plugin` is a netdata external plugin. It is an **orchestrator** for data collection modules written in `BASH` v4+.
+`charts.d.plugin` is a Netdata external plugin. It is an **orchestrator** for data collection modules written in `BASH` v4+.
-1. It runs as an independent process `ps fax` shows it
-2. It is started and stopped automatically by netdata
-3. It communicates with netdata via a unidirectional pipe (sending data to the netdata daemon)
-4. Supports any number of data collection **modules**
+1.  It runs as an independent process (`ps fax` shows it)
+2. It is started and stopped automatically by Netdata
+3. It communicates with Netdata via a unidirectional pipe (sending data to the `netdata` daemon)
+4. Supports any number of data collection **modules**
`charts.d.plugin` has been designed so that the actual script that will do data collection will be permanently in
memory, collecting data with as little overhead as possible.
@@ -39,35 +39,35 @@ A `charts.d.plugin` module is a BASH script defining a few functions.
For a module called `X`, the following criteria must be met:
-1. The module script must be called `X.chart.sh` and placed in `/usr/libexec/netdata/charts.d`.
+1. The module script must be called `X.chart.sh` and placed in `/usr/libexec/netdata/charts.d`.
-2. If the module needs a configuration, it should be called `X.conf` and placed in `/etc/netdata/charts.d`.
- The configuration file `X.conf` is also a BASH script itself.
- To edit the default files supplied by netdata run `/etc/netdata/edit-config charts.d/X.conf`,
- where `X` is the name of the module.
+2. If the module needs a configuration, it should be called `X.conf` and placed in `/etc/netdata/charts.d`.
+ The configuration file `X.conf` is also a BASH script itself.
+ To edit the default files supplied by Netdata, run `/etc/netdata/edit-config charts.d/X.conf`,
+ where `X` is the name of the module.
-3. All functions and global variables defined in the script and its configuration, must begin with `X_`.
+3.  All functions and global variables defined in the script and its configuration must begin with `X_`.
-4. The following functions must be defined:
+4. The following functions must be defined:
- - `X_check()` - returns 0 or 1 depending on whether the module is able to run or not
- (following the standard Linux command line return codes: 0 = OK, the collector can operate and 1 = FAILED,
- the collector cannot be used).
+ - `X_check()` - returns 0 or 1 depending on whether the module is able to run or not
+ (following the standard Linux command line return codes: 0 = OK, the collector can operate and 1 = FAILED,
+ the collector cannot be used).
- - `X_create()` - creates the netdata charts, following the standard netdata plugin guides as described in
- **[External Plugins](../plugins.d/)** (commands `CHART` and `DIMENSION`).
- The return value does matter: 0 = OK, 1 = FAILED.
+ - `X_create()` - creates the Netdata charts, following the standard Netdata plugin guides as described in
+ **[External Plugins](../plugins.d/)** (commands `CHART` and `DIMENSION`).
+ The return value does matter: 0 = OK, 1 = FAILED.
- - `X_update()` - collects the values for the defined charts, following the standard netdata plugin guides
- as described in **[External Plugins](../plugins.d/)** (commands `BEGIN`, `SET`, `END`).
- The return value also matters: 0 = OK, 1 = FAILED.
+ - `X_update()` - collects the values for the defined charts, following the standard Netdata plugin guides
+ as described in **[External Plugins](../plugins.d/)** (commands `BEGIN`, `SET`, `END`).
+ The return value also matters: 0 = OK, 1 = FAILED.
-5. The following global variables are available to be set:
- - `X_update_every` - is the data collection frequency for the module script, in seconds.
+5. The following global variables are available to be set:
+ - `X_update_every` - is the data collection frequency for the module script, in seconds.
The module script may use more functions or variables. But all of them must begin with `X_`.
-The standard netdata plugin variables are also available (check **[External Plugins](../plugins.d/)**).
+The standard Netdata plugin variables are also available (check **[External Plugins](../plugins.d/)**).
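+
+As a sketch that ties these criteria together, a hypothetical module named
+`example2` (loosely modeled on the bundled `example` module) could look like
+this:
+
+```sh
+# example2.chart.sh - illustrative sketch only
+
+# global variables must begin with the module prefix
+example2_update_every=
+
+example2_check() {
+	# 0 = the collector can operate, 1 = disable it
+	return 0
+}
+
+example2_create() {
+	# CHART type.id name title units family context charttype priority update_every
+	echo "CHART example2.random '' 'A random number' 'value' random random line 90000 $example2_update_every"
+	echo "DIMENSION random '' absolute 1 1"
+	return 0
+}
+
+example2_update() {
+	# $1 = microseconds since the last run; append it to every BEGIN
+	echo "BEGIN example2.random $1"
+	echo "SET random = $RANDOM"
+	echo "END"
+	return 0
+}
+```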
### X_check()
@@ -80,7 +80,7 @@ connect to a local mysql database to find out if it can read the values it needs
### X_create()
-The purpose of the BASH function `X_create()` is to create the charts and dimensions using the standard netdata
+The purpose of the BASH function `X_create()` is to create the charts and dimensions using the standard Netdata
plugin guides (**[External Plugins](../plugins.d/)**).
`X_create()` will be called just once and only after `X_check()` was successful.
@@ -90,8 +90,8 @@ A non-zero return value will disable the collector.
### X_update()
-`X_update()` will be called repeatedly every `X_update_every` seconds, to collect new values and send them to netdata,
-following the netdata plugin guides (**[External Plugins](../plugins.d/)**).
+`X_update()` will be called repeatedly every `X_update_every` seconds, to collect new values and send them to Netdata,
+following the Netdata plugin guides (**[External Plugins](../plugins.d/)**).
The function will be called with one parameter: microseconds since the last time it was run. This value should be
appended to the `BEGIN` statement of every chart updated by the collector script.
@@ -167,8 +167,7 @@ Keep in mind that if your configs are not in `/etc/netdata`, you should do the f
export NETDATA_USER_CONFIG_DIR="/path/to/etc/netdata"
```
-Also, remember that netdata runs `chart.d.plugin` as user `netdata` (or any other user netdata is configured to run as).
-
+Also, remember that Netdata runs `charts.d.plugin` as user `netdata` (or any other user the `netdata` process is configured to run as).
## Running multiple instances of charts.d.plugin
@@ -179,17 +178,16 @@ You can have multiple `charts.d.plugin` running to overcome this problem.
This is what you need to do:
-1. Decide a new name for the new charts.d instance: example `charts2.d`.
+1.  Decide a new name for the new charts.d instance, for example `charts2.d`.
-2. Create/edit the files `/etc/netdata/charts.d.conf` and `/etc/netdata/charts2.d.conf` and enable / disable the
- module you want each to run. Remember to set `enable_all_charts="no"` to both of them, and enable the individual
- modules for each.
-
-3. link `/usr/libexec/netdata/plugins.d/charts.d.plugin` to `/usr/libexec/netdata/plugins.d/charts2.d.plugin`.
- Netdata will spawn a new charts.d process.
+2.  Create/edit the files `/etc/netdata/charts.d.conf` and `/etc/netdata/charts2.d.conf` and enable/disable the
+    modules you want each instance to run. Remember to set `enable_all_charts="no"` in both of them, and enable the individual
+    modules for each.
-Execute the above in this order, since netdata will (by default) attempt to start new plugins soon after they are
-created in `/usr/libexec/netdata/plugins.d/`.
+3.  Link `/usr/libexec/netdata/plugins.d/charts.d.plugin` to `/usr/libexec/netdata/plugins.d/charts2.d.plugin`.
+ Netdata will spawn a new charts.d process.
+Execute the above in this order, since Netdata will (by default) attempt to start new plugins soon after they are
+created in `/usr/libexec/netdata/plugins.d/`.
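+
+As a sketch of steps 2 and 3 (using the example name `charts2.d` from step 1
+and the default installation paths; adjust both to your setup):
+
+```sh
+# step 2: enable/disable individual modules in each file,
+#         and set enable_all_charts="no" in both
+${EDITOR:-vi} /etc/netdata/charts.d.conf
+${EDITOR:-vi} /etc/netdata/charts2.d.conf
+
+# step 3: only after the configs exist, create the second instance;
+#         Netdata spawns a charts2.d process automatically
+ln -s /usr/libexec/netdata/plugins.d/charts.d.plugin \
+      /usr/libexec/netdata/plugins.d/charts2.d.plugin
+```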
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/charts.d.plugin/ap/README.md b/collectors/charts.d.plugin/ap/README.md
index 962a8565..befe21ee 100644
--- a/collectors/charts.d.plugin/ap/README.md
+++ b/collectors/charts.d.plugin/ap/README.md
@@ -2,7 +2,7 @@
The `ap` collector visualizes data related to access points.
-## Example netdata charts
+## Example Netdata charts
![image](https://cloud.githubusercontent.com/assets/2662304/12377654/9f566e88-bd2d-11e5-855a-e0ba96b8fd98.png)
@@ -10,11 +10,12 @@ The `ap` collector visualizes data related to access points.
It does the following:
-1. Runs `iw dev` searching for interfaces that have `type AP`.
+1. Runs `iw dev` searching for interfaces that have `type AP`.
- From the same output it collects the SSIDs each AP supports by looking for lines `ssid NAME`.
+ From the same output it collects the SSIDs each AP supports by looking for lines `ssid NAME`.
+
+ Example:
- Example:
```sh
# iw dev
phy#0
@@ -27,20 +28,19 @@ phy#0
channel 7 (2442 MHz), width: 20 MHz, center1: 2442 MHz
```
+2. For each interface found, it runs `iw INTERFACE station dump`.
-2. For each interface found, it runs `iw INTERFACE station dump`.
-
- From the output is collects:
+   From the output it collects:
- - rx/tx bytes
- - rx/tx packets
- - tx retries
- - tx failed
- - signal strength
- - rx/tx bitrate
- - expected throughput
+ - rx/tx bytes
+ - rx/tx packets
+ - tx retries
+ - tx failed
+ - signal strength
+ - rx/tx bitrate
+ - expected throughput
- Example:
+ Example:
```sh
# iw wlan0 station dump
@@ -65,14 +65,14 @@ Station 40:b8:37:5a:ed:5e (on wlan0)
TDLS peer: no
```
-3. For each interface found, it creates 6 charts:
+3. For each interface found, it creates 6 charts:
- - Number of Connected clients
- - Bandwidth for all clients
- - Packets for all clients
- - Transmit Issues for all clients
- - Average Signal among all clients
- - Average Bitrate (including average expected throughput) among all clients
+ - Number of Connected clients
+ - Bandwidth for all clients
+ - Packets for all clients
+ - Transmit Issues for all clients
+ - Average Signal among all clients
+ - Average Bitrate (including average expected throughput) among all clients
## Configuration
@@ -83,4 +83,4 @@ To edit this file on your system run `/etc/netdata/edit-config charts.d/ap.conf`
The plugin is able to auto-detect if you are running access points on your linux box.
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fap%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fap%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/charts.d.plugin/apache/README.md b/collectors/charts.d.plugin/apache/README.md
index 27397910..d5c7a80a 100644
--- a/collectors/charts.d.plugin/apache/README.md
+++ b/collectors/charts.d.plugin/apache/README.md
@@ -7,7 +7,7 @@
The `apache` collector visualizes key performance data for an apache web server.
-## Example netdata charts
+## Example Netdata charts
For apache 2.2:
@@ -78,14 +78,14 @@ Scoreboard: ____________________________________________________________________
From the apache status output it collects:
- - total accesses (incremental value, rendered as requests/s)
- - total bandwidth (incremental value, rendered as bandwidth/s)
- - requests per second (this appears to be calculated by apache as an average for its lifetime, while the one calculated by netdata using the total accesses counter is real-time)
- - bytes per second (average for the lifetime of the apache server)
- - bytes per request (average for the lifetime of the apache server)
- - workers by status (`busy` and `idle`)
- - total connections (currently active connections - offered by apache 2.4+)
- - async connections per status (`keepalive`, `writing`, `closing` - offered by apache 2.4+)
+- total accesses (incremental value, rendered as requests/s)
+- total bandwidth (incremental value, rendered as bandwidth/s)
+- requests per second (this appears to be calculated by apache as an average for its lifetime, while the one calculated by Netdata using the total accesses counter is real-time)
+- bytes per second (average for the lifetime of the apache server)
+- bytes per request (average for the lifetime of the apache server)
+- workers by status (`busy` and `idle`)
+- total connections (currently active connections - offered by apache 2.4+)
+- async connections per status (`keepalive`, `writing`, `closing` - offered by apache 2.4+)
## Configuration
@@ -106,7 +106,7 @@ apache_curl_opts=
apache_update_every=
```
-The default `apache_update_every` is configured in netdata.
+The default `apache_update_every` is configured in Netdata.
## Auto-detection
If you are able to run this command successfully by hand:
curl "http://127.0.0.1:80/server-status?auto"
```
-netdata will be able to do it too.
+Netdata will be able to do it too.
-Notice: You may need to have the default `000-default.conf ` website enabled in order for the status mod to work.
+Notice: You may need to have the default `000-default.conf` website enabled in order for the status mod to work.
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fapache%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fapache%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/charts.d.plugin/apcupsd/README.md b/collectors/charts.d.plugin/apcupsd/README.md
index 59739efc..51bb6ecc 100644
--- a/collectors/charts.d.plugin/apcupsd/README.md
+++ b/collectors/charts.d.plugin/apcupsd/README.md
@@ -1,7 +1,7 @@
# apcupsd
-*Under construction*
+_Under construction_
Collects UPS metrics
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fapcupsd%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fapcupsd%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/charts.d.plugin/charts.d.plugin b/collectors/charts.d.plugin/charts.d.plugin
new file mode 100644
index 00000000..40dc48c1
--- /dev/null
+++ b/collectors/charts.d.plugin/charts.d.plugin
@@ -0,0 +1,698 @@
+#!/usr/bin/env bash
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2017 Costa Tsaousis <costa@tsaousis.gr>
+# GPL v3+
+#
+# charts.d.plugin allows easy development of BASH plugins
+#
+# if you need to run parallel charts.d processes, link this file to a different name
+# in the same directory, with a .plugin suffix and netdata will start both of them,
+# each will have a different config file and modules configuration directory.
+#
+
+export PATH="${PATH}:/sbin:/usr/sbin:/usr/local/bin:/usr/local/sbin"
+
+PROGRAM_FILE="$0"
+PROGRAM_NAME="$(basename $0)"
+PROGRAM_NAME="${PROGRAM_NAME/.plugin/}"
+MODULE_NAME="main"
+
+# -----------------------------------------------------------------------------
+# create temp dir
+
+debug=0
+TMP_DIR=
+chartsd_cleanup() {
+ trap '' EXIT QUIT HUP INT TERM
+
+ if [ ! -z "$TMP_DIR" -a -d "$TMP_DIR" ]; then
+ [ $debug -eq 1 ] && echo >&2 "$PROGRAM_NAME: cleaning up temporary directory $TMP_DIR ..."
+ rm -rf "$TMP_DIR"
+ fi
+ exit 0
+}
+trap chartsd_cleanup EXIT QUIT HUP INT TERM
+
+if [ $UID = "0" ]; then
+ TMP_DIR="$(mktemp -d /var/run/netdata-${PROGRAM_NAME}-XXXXXXXXXX)"
+else
+ TMP_DIR="$(mktemp -d /tmp/.netdata-${PROGRAM_NAME}-XXXXXXXXXX)"
+fi
+
+logdate() {
+ date "+%Y-%m-%d %H:%M:%S"
+}
+
+log() {
+ local status="${1}"
+ shift
+
+ echo >&2 "$(logdate): ${PROGRAM_NAME}: ${status}: ${MODULE_NAME}: ${*}"
+
+}
+
+warning() {
+ log WARNING "${@}"
+}
+
+error() {
+ log ERROR "${@}"
+}
+
+info() {
+ log INFO "${@}"
+}
+
+fatal() {
+ log FATAL "${@}"
+ echo "DISABLE"
+ exit 1
+}
+
+debug() {
+ [ $debug -eq 1 ] && log DEBUG "${@}"
+}
+
+# -----------------------------------------------------------------------------
+# check a few commands
+
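+# require_cmd resolves a command to its full path and exports it in an
+# uppercased variable (e.g. "require_cmd curl" sets CURL_CMD); it returns
+# non-zero, with the variable left empty, when the command is missing.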
+require_cmd() {
+ local x=$(which "${1}" 2>/dev/null || command -v "${1}" 2>/dev/null)
+ if [ -z "${x}" -o ! -x "${x}" ]; then
+ warning "command '${1}' is not found in ${PATH}."
+ eval "${1^^}_CMD=\"\""
+ return 1
+ fi
+
+ eval "${1^^}_CMD=\"${x}\""
+ return 0
+}
+
+require_cmd date || exit 1
+require_cmd sed || exit 1
+require_cmd basename || exit 1
+require_cmd dirname || exit 1
+require_cmd cat || exit 1
+require_cmd grep || exit 1
+require_cmd egrep || exit 1
+require_cmd mktemp || exit 1
+require_cmd awk || exit 1
+require_cmd timeout || exit 1
+require_cmd curl || exit 1
+
+# -----------------------------------------------------------------------------
+
+[ $((BASH_VERSINFO[0])) -lt 4 ] && fatal "BASH version 4 or later is required, but found version: ${BASH_VERSION}. Please upgrade."
+
+info "started from '$PROGRAM_FILE' with options: $*"
+
+# -----------------------------------------------------------------------------
+# internal defaults
+# netdata exposes a few environment variables for us
+
+[ -z "${NETDATA_PLUGINS_DIR}" ] && NETDATA_PLUGINS_DIR="$(dirname "${0}")"
+[ -z "${NETDATA_USER_CONFIG_DIR}" ] && NETDATA_USER_CONFIG_DIR="/etc/netdata"
+[ -z "${NETDATA_STOCK_CONFIG_DIR}" ] && NETDATA_STOCK_CONFIG_DIR="/usr/lib/netdata/conf.d"
+
+pluginsd="${NETDATA_PLUGINS_DIR}"
+stockconfd="${NETDATA_STOCK_CONFIG_DIR}/${PROGRAM_NAME}"
+userconfd="${NETDATA_USER_CONFIG_DIR}/${PROGRAM_NAME}"
+olduserconfd="${NETDATA_USER_CONFIG_DIR}"
+chartsd="$pluginsd/../charts.d"
+
+minimum_update_frequency="${NETDATA_UPDATE_EVERY-1}"
+update_every=${minimum_update_frequency} # this will be overwritten by the command line
+
+# workaround for non-BASH shells
+charts_create="_create"
+charts_update="_update"
+charts_check="_check"
+charts_undescore="_"
+
+# when making iterations, charts.d can loop more frequently
+# to prevent plugins missing iterations.
+# this is a percentage relative to update_every to align its
+# iterations.
+# The minimum is 10%, the maximum 100%.
+# So, if update_every is 1 second and time_divisor is 50,
+# charts.d will iterate every 500ms.
+# Charts will be called to collect data only if the time
+# passed since the last time they collected data is equal
+# to or above their update_every.
+time_divisor=50
+
+# number of seconds to run without restart
+# after this time, charts.d.plugin will exit
+# netdata will restart it
+restart_timeout=$((3600 * 4))
+
+# check if the charts.d plugins are using global variables
+# they should not.
+# It does not currently support BASH v4 arrays, so it is
+# disabled
+dryrunner=0
+
+# check for timeout command
+check_for_timeout=1
+
+# the default enable/disable value for all charts
+enable_all_charts="yes"
+
+# -----------------------------------------------------------------------------
+# parse parameters
+
+check=0
+chart_only=
+while [ ! -z "$1" ]; do
+ if [ "$1" = "check" ]; then
+ check=1
+ shift
+ continue
+ fi
+
+ if [ "$1" = "debug" -o "$1" = "all" ]; then
+ debug=1
+ shift
+ continue
+ fi
+
+ if [ -f "$chartsd/$1.chart.sh" ]; then
+ debug=1
+ chart_only="$(echo $1.chart.sh | sed "s/\.chart\.sh$//g")"
+ shift
+ continue
+ fi
+
+ if [ -f "$chartsd/$1" ]; then
+ debug=1
+ chart_only="$(echo $1 | sed "s/\.chart\.sh$//g")"
+ shift
+ continue
+ fi
+
+ # number check
+ n="$1"
+ x=$((n))
+ if [ "$x" = "$n" ]; then
+ shift
+ update_every=$x
+ [ $update_every -lt $minimum_update_frequency ] && update_every=$minimum_update_frequency
+ continue
+ fi
+
+ fatal "Cannot understand parameter $1. Aborting."
+done
+
+# -----------------------------------------------------------------------------
+# loop control
+
+# default sleep function
+LOOPSLEEPMS_HIGHRES=0
+now_ms=
+current_time_ms_default() {
+ now_ms="$(date +'%s')000"
+}
+current_time_ms="current_time_ms_default"
+current_time_ms_accuracy=1
+mysleep="sleep"
+
+# if found and included, this file overwrites loopsleepms()
+# and current_time_ms() with a high resolution timer function
+# for precise looping.
+source "$pluginsd/loopsleepms.sh.inc"
+[ $? -ne 0 ] && error "Failed to load '$pluginsd/loopsleepms.sh.inc'."
+
+# -----------------------------------------------------------------------------
+# load my configuration
+
+for myconfig in "${NETDATA_STOCK_CONFIG_DIR}/${PROGRAM_NAME}.conf" "${NETDATA_USER_CONFIG_DIR}/${PROGRAM_NAME}.conf"; do
+ if [ -f "$myconfig" ]; then
+ source "$myconfig"
+ if [ $? -ne 0 ]; then
+ error "Config file '$myconfig' loaded with errors."
+ else
+ info "Configuration file '$myconfig' loaded."
+ fi
+ else
+ warning "Configuration file '$myconfig' not found."
+ fi
+done
+
+# make sure time_divisor is right
+time_divisor=$((time_divisor))
+[ $time_divisor -lt 10 ] && time_divisor=10
+[ $time_divisor -gt 100 ] && time_divisor=100
+
+# we check for the timeout command, after we load our
+# configuration, so that the user may overwrite the
+# timeout command we use, providing a function that
+# can emulate the timeout command we need:
+# > timeout SECONDS command ...
+if [ $check_for_timeout -eq 1 ]; then
+ require_cmd timeout || exit 1
+fi
+
+# -----------------------------------------------------------------------------
+# internal checks
+
+# netdata passes the requested update frequency as the first argument
+update_every=$((update_every + 1 - 1)) # makes sure it is a number
+test $update_every -eq 0 && update_every=1 # if it is zero, make it 1
+
+# check the charts.d directory
+[ ! -d "$chartsd" ] && fatal "cannot find charts directory '$chartsd'"
+
+# -----------------------------------------------------------------------------
+# library functions
+
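+# fixid sanitizes an arbitrary string into a safe identifier: runs of
+# non-alphanumeric characters become single underscores, leading and
+# trailing underscores are stripped, and the result is lowercased
+# (e.g. "My Chart #1" -> "my_chart_1")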
+fixid() {
+ echo "$*" |
+ tr -c "[A-Z][a-z][0-9]" "_" |
+ sed -e "s|^_\+||g" -e "s|_\+$||g" -e "s|_\+|_|g" |
+ tr "[A-Z]" "[a-z]"
+}
+
+run() {
+ local ret pid="${BASHPID}" t
+
+ if [ "z${1}" = "z-t" -a "${2}" != "0" ]; then
+ t="${2}"
+ shift 2
+ case "${NETDATA_SYSTEM_OS_ID}" in
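+			# BusyBox timeout (used on Alpine) expects the duration via -t,
+			# while other implementations take it as the first argument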
+ "alpine")
+ timeout -t ${t} "${@}" 2>"${TMP_DIR}/run.${pid}"
+ ;;
+ *)
+ timeout ${t} "${@}" 2>"${TMP_DIR}/run.${pid}"
+ ;;
+ esac
+ ret=$?
+ else
+ "${@}" 2>"${TMP_DIR}/run.${pid}"
+ ret=$?
+ fi
+
+ if [ ${ret} -ne 0 ]; then
+ {
+			printf "$(logdate): ${PROGRAM_NAME}: ERROR: ${MODULE_NAME}: command '"
+ printf "%q " "${@}"
+ printf "' failed with code ${ret}:\n --- BEGIN TRACE ---\n"
+ cat "${TMP_DIR}/run.${pid}"
+ printf " --- END TRACE ---\n"
+ } >&2
+ fi
+ rm -f "${TMP_DIR}/run.${pid}"
+
+ return ${ret}
+}
+
+# convert any floating point number
+# to integer, give a multiplier
+# the result is stored in ${FLOAT2INT_RESULT}
+# so that no fork is necessary
+# the multiplier must be a power of 10
+float2int() {
+ local f m="$2" a b l v=($1)
+ f=${v[0]}
+
+ # the length of the multiplier - 1
+ l=$((${#m} - 1))
+
+ # check if the number is in scientific notation
+ if [[ ${f} =~ ^[[:space:]]*(-)?[0-9.]+(e|E)(\+|-)[0-9]+ ]]; then
+ # convert it to decimal
+ # unfortunately, this fork cannot be avoided
+ # if you know of a way to avoid it, please let me know
+ f=$(printf "%0.${l}f" ${f})
+ fi
+
+ # split the floating point number
+ # in integer (a) and decimal (b)
+ a=${f/.*/}
+ b=${f/*./}
+
+ # if the integer part is missing
+ # set it to zero
+ [ -z "${a}" ] && a="0"
+
+ # strip leading zeros from the integer part
+	# base-10 conversion
+ a=$((10#$a))
+
+ # check the length of the decimal part
+ # against the length of the multiplier
+ if [ ${#b} -gt ${l} ]; then
+ # too many digits - take the most significant
+ b=${b:0:l}
+
+ elif [ ${#b} -lt ${l} ]; then
+ # too few digits - pad with zero on the right
+ local z="00000000000000000000000" r=$((l - ${#b}))
+ b="${b}${z:0:r}"
+ fi
+
+ # strip leading zeros from the decimal part
+	# base-10 conversion
+ b=$((10#$b))
+
+ # store the result
+ FLOAT2INT_RESULT=$(((a * m) + b))
+}
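+# e.g. float2int "1.5" 1000 sets FLOAT2INT_RESULT=1500
+# (the multiplier also determines how many decimal digits are kept)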
+
+# -----------------------------------------------------------------------------
+# charts check functions
+
+all_charts() {
+ cd "$chartsd"
+ [ $? -ne 0 ] && error "cannot cd to $chartsd" && return 1
+
+ ls *.chart.sh | sed "s/\.chart\.sh$//g"
+}
+
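+# modules listed here are enabled only if the user explicitly sets
+# <module>="force" in the configuration (see all_enabled_charts below)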
+declare -A charts_enable_keyword=(
+ ['apache']="force"
+ ['cpu_apps']="force"
+ ['cpufreq']="force"
+ ['example']="force"
+ ['exim']="force"
+ ['hddtemp']="force"
+ ['load_average']="force"
+ ['mem_apps']="force"
+ ['mysql']="force"
+ ['nginx']="force"
+ ['phpfpm']="force"
+ ['postfix']="force"
+ ['sensors']="force"
+ ['squid']="force"
+ ['tomcat']="force"
+)
+
+all_enabled_charts() {
+ local charts= enabled= required=
+
+ # find all enabled charts
+
+ for chart in $(all_charts); do
+ MODULE_NAME="${chart}"
+
+ eval "enabled=\$$chart"
+ if [ -z "${enabled}" ]; then
+ enabled="${enable_all_charts}"
+ fi
+
+ required="${charts_enable_keyword[${chart}]}"
+ [ -z "${required}" ] && required="yes"
+
+ if [ ! "${enabled}" = "${required}" ]; then
+ info "is disabled. Add a line with $chart=$required in '${NETDATA_USER_CONFIG_DIR}/${PROGRAM_NAME}.conf' to enable it (or remove the line that disables it)."
+ else
+ debug "is enabled for auto-detection."
+ local charts="$charts $chart"
+ fi
+ done
+ MODULE_NAME="main"
+
+ local charts2=
+ for chart in $charts; do
+ MODULE_NAME="${chart}"
+
+ # check the enabled charts
+ local check="$(cat "$chartsd/$chart.chart.sh" | sed "s/^ \+//g" | grep "^$chart$charts_check()")"
+ if [ -z "$check" ]; then
+ error "module '$chart' does not seem to have a $chart$charts_check() function. Disabling it."
+ continue
+ fi
+
+ local create="$(cat "$chartsd/$chart.chart.sh" | sed "s/^ \+//g" | grep "^$chart$charts_create()")"
+ if [ -z "$create" ]; then
+ error "module '$chart' does not seem to have a $chart$charts_create() function. Disabling it."
+ continue
+ fi
+
+ local update="$(cat "$chartsd/$chart.chart.sh" | sed "s/^ \+//g" | grep "^$chart$charts_update()")"
+ if [ -z "$update" ]; then
+ error "module '$chart' does not seem to have a $chart$charts_update() function. Disabling it."
+ continue
+ fi
+
+ # check its config
+ #if [ -f "$userconfd/$chart.conf" ]
+ #then
+ # if [ ! -z "$( cat "$userconfd/$chart.conf" | sed "s/^ \+//g" | grep -v "^$" | grep -v "^#" | grep -v "^$chart$charts_undescore" )" ]
+ # then
+ # error "module's $chart config $userconfd/$chart.conf should only have lines starting with $chart$charts_undescore . Disabling it."
+ # continue
+ # fi
+ #fi
+
+ #if [ $dryrunner -eq 1 ]
+ # then
+ # "$pluginsd/charts.d.dryrun-helper.sh" "$chart" "$chartsd/$chart.chart.sh" "$userconfd/$chart.conf" >/dev/null
+ # if [ $? -ne 0 ]
+ # then
+ # error "module's $chart did not pass the dry run check. This means it uses global variables not starting with $chart. Disabling it."
+ # continue
+ # fi
+ #fi
+
+ local charts2="$charts2 $chart"
+ done
+ MODULE_NAME="main"
+
+ echo $charts2
+ debug "enabled charts: $charts2"
+}
+
+# -----------------------------------------------------------------------------
+# load the charts
+
+suffix_retries="_retries"
+suffix_update_every="_update_every"
+active_charts=
+for chart in $(all_enabled_charts); do
+ MODULE_NAME="${chart}"
+
+ debug "loading module: '$chartsd/$chart.chart.sh'"
+
+ source "$chartsd/$chart.chart.sh"
+ [ $? -ne 0 ] && warning "Module '$chartsd/$chart.chart.sh' loaded with errors."
+
+ # first load the stock config
+ if [ -f "$stockconfd/$chart.conf" ]; then
+ debug "loading module configuration: '$stockconfd/$chart.conf'"
+ source "$stockconfd/$chart.conf"
+ [ $? -ne 0 ] && warning "Config file '$stockconfd/$chart.conf' loaded with errors."
+ else
+ debug "not found module configuration: '$stockconfd/$chart.conf'"
+ fi
+
+ # then load the user config (it overwrites the stock)
+ if [ -f "$userconfd/$chart.conf" ]; then
+ debug "loading module configuration: '$userconfd/$chart.conf'"
+ source "$userconfd/$chart.conf"
+ [ $? -ne 0 ] && warning "Config file '$userconfd/$chart.conf' loaded with errors."
+ else
+ debug "not found module configuration: '$userconfd/$chart.conf'"
+
+ if [ -f "$olduserconfd/$chart.conf" ]; then
+ # support for very old netdata that had the charts.d module configs in /etc/netdata
+ info "loading module configuration from obsolete location: '$olduserconfd/$chart.conf'"
+ source "$olduserconfd/$chart.conf"
+ [ $? -ne 0 ] && warning "Config file '$olduserconfd/$chart.conf' loaded with errors."
+ fi
+ fi
+
+ eval "dt=\$$chart$suffix_update_every"
+ dt=$((dt + 1 - 1)) # make sure it is a number
+ if [ $dt -lt $update_every ]; then
+ eval "$chart$suffix_update_every=$update_every"
+ fi
+
+ $chart$charts_check
+ if [ $? -eq 0 ]; then
+ debug "module '$chart' activated"
+ active_charts="$active_charts $chart"
+ else
+ error "module's '$chart' check() function reports failure."
+ fi
+done
+MODULE_NAME="main"
+debug "activated modules: $active_charts"
+
+# -----------------------------------------------------------------------------
+# check overwrites
+
+# enable work time reporting
+debug_time=
+test $debug -eq 1 && debug_time=tellwork
+
+# if we only need a specific chart, remove all the others
+if [ ! -z "${chart_only}" ]; then
+ debug "requested to run only for: '${chart_only}'"
+ check_charts=
+ for chart in $active_charts; do
+ if [ "$chart" = "$chart_only" ]; then
+ check_charts="$chart"
+ break
+ fi
+ done
+ active_charts="$check_charts"
+fi
+debug "activated charts: $active_charts"
+
+# stop if we just need a pre-check
+if [ $check -eq 1 ]; then
+ info "CHECK RESULT"
+ info "Will run the charts: $active_charts"
+ exit 0
+fi
+
+# -----------------------------------------------------------------------------
+
+cd "${TMP_DIR}" || exit 1
+
+# -----------------------------------------------------------------------------
+# create charts
+
+run_charts=
+for chart in $active_charts; do
+ MODULE_NAME="${chart}"
+
+ debug "calling '$chart$charts_create()'..."
+ $chart$charts_create
+ if [ $? -eq 0 ]; then
+ run_charts="$run_charts $chart"
+ debug "'$chart' initialized."
+ else
+ error "module's '$chart' function '$chart$charts_create()' reports failure."
+ fi
+done
+MODULE_NAME="main"
+debug "run_charts='$run_charts'"
+
+# -----------------------------------------------------------------------------
+# update dimensions
+
+[ -z "$run_charts" ] && fatal "No charts to collect data from."
+
+declare -A charts_last_update=() charts_update_every=() charts_retries=() charts_next_update=() charts_run_counter=() charts_serial_failures=()
+global_update() {
+ local exit_at \
+ c=0 dt ret last_ms exec_start_ms exec_end_ms \
+ chart now_charts=() next_charts=($run_charts) \
+ next_ms x seconds millis
+
+ # return the current time in ms in $now_ms
+ ${current_time_ms}
+
+ exit_at=$((now_ms + (restart_timeout * 1000)))
+
+ for chart in $run_charts; do
+ eval "charts_update_every[$chart]=\$$chart$suffix_update_every"
+ test -z "${charts_update_every[$chart]}" && charts_update_every[$chart]=$update_every
+
+ eval "charts_retries[$chart]=\$$chart$suffix_retries"
+ test -z "${charts_retries[$chart]}" && charts_retries[$chart]=10
+
+ charts_last_update[$chart]=$((now_ms - (now_ms % (charts_update_every[$chart] * 1000))))
+ charts_next_update[$chart]=$((charts_last_update[$chart] + (charts_update_every[$chart] * 1000)))
+ charts_run_counter[$chart]=0
+ charts_serial_failures[$chart]=0
+
+ echo "CHART netdata.plugin_chartsd_$chart '' 'Execution time for $chart plugin' 'milliseconds / run' charts.d netdata.plugin_charts area 145000 ${charts_update_every[$chart]}"
+ echo "DIMENSION run_time 'run time' absolute 1 1"
+ done
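+
+ # Worked example of the alignment above (hypothetical numbers): with
+ # now_ms=1567500000300 and charts_update_every=2 (2000 ms), charts_last_update
+ # becomes 1567500000300 - (1567500000300 % 2000) = 1567500000000 and
+ # charts_next_update becomes 1567500002000, i.e. every module is scheduled on
+ # wall-clock multiples of its own update_every.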
+
+ # the main loop
+ while [ "${#next_charts[@]}" -gt 0 ]; do
+ c=$((c + 1))
+ now_charts=("${next_charts[@]}")
+ next_charts=()
+
+ # return the current time in ms in $now_ms
+ ${current_time_ms}
+
+ for chart in "${now_charts[@]}"; do
+ MODULE_NAME="${chart}"
+
+ if [ ${now_ms} -ge ${charts_next_update[$chart]} ]; then
+ last_ms=${charts_last_update[$chart]}
+ dt=$((now_ms - last_ms))
+
+ charts_last_update[$chart]=${now_ms}
+
+ while [ ${charts_next_update[$chart]} -lt ${now_ms} ]; do
+ charts_next_update[$chart]=$((charts_next_update[$chart] + (charts_update_every[$chart] * 1000)))
+ done
+
+ # the first call should not give a duration
+ # so that netdata calibrates to current time
+ dt=$((dt * 1000))
+ charts_run_counter[$chart]=$((charts_run_counter[$chart] + 1))
+ if [ ${charts_run_counter[$chart]} -eq 1 ]; then
+ dt=
+ fi
+
+ exec_start_ms=$now_ms
+ $chart$charts_update $dt
+ ret=$?
+
+ # return the current time in ms in $now_ms
+ ${current_time_ms}
+ exec_end_ms=$now_ms
+
+ echo "BEGIN netdata.plugin_chartsd_$chart $dt"
+ echo "SET run_time = $((exec_end_ms - exec_start_ms))"
+ echo "END"
+
+ if [ $ret -eq 0 ]; then
+ charts_serial_failures[$chart]=0
+ next_charts+=($chart)
+ else
+ charts_serial_failures[$chart]=$((charts_serial_failures[$chart] + 1))
+
+ if [ ${charts_serial_failures[$chart]} -gt ${charts_retries[$chart]} ]; then
+ error "module's '$chart' update() function reported failure ${charts_serial_failures[$chart]} times. Disabling it."
+ else
+ error "module's '$chart' update() function reports failure. Will keep trying for a while."
+ next_charts+=($chart)
+ fi
+ fi
+ else
+ next_charts+=($chart)
+ fi
+ done
+ MODULE_NAME="${chart}"
+
+ # sleep until the earliest next collection is due
+ next_ms=$((now_ms + (update_every * 1000 * 100)))
+ for x in "${charts_next_update[@]}"; do [ ${x} -lt ${next_ms} ] && next_ms=${x}; done
+ next_ms=$((next_ms - now_ms))
+
+ if [ ${LOOPSLEEPMS_HIGHRES} -eq 1 -a ${next_ms} -gt 0 ]; then
+ next_ms=$((next_ms + current_time_ms_accuracy))
+ seconds=$((next_ms / 1000))
+ millis=$((next_ms % 1000))
+ if [ ${millis} -lt 10 ]; then
+ millis="00${millis}"
+ elif [ ${millis} -lt 100 ]; then
+ millis="0${millis}"
+ fi
+
+ debug "sleeping for ${seconds}.${millis} seconds."
+ ${mysleep} ${seconds}.${millis}
+ else
+ debug "sleeping for ${update_every} seconds."
+ ${mysleep} $update_every
+ fi
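+
+ # Worked example (hypothetical numbers): if the earliest charts_next_update
+ # is 1503 ms away, seconds=1 and millis=503, so the plugin sleeps "1.503";
+ # the zero-padding above keeps the fraction three digits wide (7 -> "007"),
+ # so ${mysleep} always receives a well-formed seconds.millis value.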
+
+ test ${now_ms} -ge ${exit_at} && exit 0
+ done
+
+ fatal "nothing left to do, exiting..."
+}
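+
+# Note (values hypothetical): the duration sent with BEGIN and passed to each
+# module's update() above is in microseconds (dt in ms multiplied by 1000),
+# matching the units netdata's external plugin protocol expects. One run of a
+# module "example" on a 2-second schedule would therefore emit roughly:
+#   BEGIN netdata.plugin_chartsd_example 2000000
+#   SET run_time = 3
+#   END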
+
+global_update
diff --git a/collectors/charts.d.plugin/cpu_apps/README.md b/collectors/charts.d.plugin/cpu_apps/README.md
index a32a6330..c8230aa4 100644
--- a/collectors/charts.d.plugin/cpu_apps/README.md
+++ b/collectors/charts.d.plugin/cpu_apps/README.md
@@ -3,4 +3,4 @@
> THIS MODULE IS OBSOLETE.
> USE [APPS.PLUGIN](../../apps.plugin).
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fcpu_apps%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fcpu_apps%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/charts.d.plugin/cpufreq/README.md b/collectors/charts.d.plugin/cpufreq/README.md
index 84883f58..fc2bfca1 100644
--- a/collectors/charts.d.plugin/cpufreq/README.md
+++ b/collectors/charts.d.plugin/cpufreq/README.md
@@ -3,4 +3,4 @@
> THIS MODULE IS OBSOLETE.
> USE THE [PROC PLUGIN](../../proc.plugin) - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fcpufreq%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fcpufreq%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/charts.d.plugin/example/README.md b/collectors/charts.d.plugin/example/README.md
index e62f7677..98562d62 100644
--- a/collectors/charts.d.plugin/example/README.md
+++ b/collectors/charts.d.plugin/example/README.md
@@ -2,5 +2,4 @@
This is just an example charts.d data collector.
-
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fexample%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fexample%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/charts.d.plugin/exim/README.md b/collectors/charts.d.plugin/exim/README.md
index b4c85389..5c73c002 100644
--- a/collectors/charts.d.plugin/exim/README.md
+++ b/collectors/charts.d.plugin/exim/README.md
@@ -3,4 +3,4 @@
> THIS MODULE IS OBSOLETE.
> USE [THE PYTHON ONE](../../python.d.plugin/exim) - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fexim%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fexim%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/charts.d.plugin/hddtemp/README.md b/collectors/charts.d.plugin/hddtemp/README.md
index 86a2e19e..77f48956 100644
--- a/collectors/charts.d.plugin/hddtemp/README.md
+++ b/collectors/charts.d.plugin/hddtemp/README.md
@@ -7,9 +7,9 @@ The plugin will collect temperatures from disks
It will create one chart with all active disks
-1. **temperature in Celsius**
+1. **temperature in Celsius**
-### configuration
+## configuration
hddtemp needs to be running in daemonized mode
@@ -27,4 +27,4 @@ hddtemp_disks=()
---
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fhddtemp%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fhddtemp%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/charts.d.plugin/libreswan/README.md b/collectors/charts.d.plugin/libreswan/README.md
index 18c64507..d75c1ea9 100644
--- a/collectors/charts.d.plugin/libreswan/README.md
+++ b/collectors/charts.d.plugin/libreswan/README.md
@@ -4,16 +4,16 @@ The plugin will collect bytes-in, bytes-out and uptime for all established libr
The following charts are created, **per tunnel**:
-1. **Uptime**
+1. **Uptime**
- * the uptime of the tunnel
+- the uptime of the tunnel
-2. **Traffic**
+2. **Traffic**
- * bytes in
- * bytes out
+- bytes in
+- bytes out
-### configuration
+## configuration
Its config file is `/etc/netdata/charts.d/libreswan.conf`.
@@ -41,4 +41,4 @@ Make sure the path `/sbin/ipsec` matches your setup (execute `which ipsec` to fi
---
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Flibreswan%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Flibreswan%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/charts.d.plugin/load_average/README.md b/collectors/charts.d.plugin/load_average/README.md
index ef84b5bd..40b860cc 100644
--- a/collectors/charts.d.plugin/load_average/README.md
+++ b/collectors/charts.d.plugin/load_average/README.md
@@ -3,4 +3,4 @@
> THIS MODULE IS OBSOLETE.
> THE NETDATA DAEMON COLLECTS LOAD AVERAGE BY ITSELF
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fload_average%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fload_average%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/charts.d.plugin/mem_apps/README.md b/collectors/charts.d.plugin/mem_apps/README.md
index a9513e9f..93d33832 100644
--- a/collectors/charts.d.plugin/mem_apps/README.md
+++ b/collectors/charts.d.plugin/mem_apps/README.md
@@ -3,4 +3,4 @@
> THIS MODULE IS OBSOLETE.
> USE [APPS.PLUGIN](../../apps.plugin).
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fmem_apps%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fmem_apps%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/charts.d.plugin/mysql/README.md b/collectors/charts.d.plugin/mysql/README.md
index e52449a4..2e8d72a4 100644
--- a/collectors/charts.d.plugin/mysql/README.md
+++ b/collectors/charts.d.plugin/mysql/README.md
@@ -7,57 +7,63 @@ The plugin will monitor one or more mysql servers
It will produce the following charts:
-1. **Bandwidth** in kbps
- * in
- * out
-
-2. **Queries** in queries/sec
- * queries
- * questions
- * slow queries
-
-3. **Operations** in operations/sec
- * opened tables
- * flush
- * commit
- * delete
- * prepare
- * read first
- * read key
- * read next
- * read prev
- * read random
- * read random next
- * rollback
- * save point
- * update
- * write
-
-4. **Table Locks** in locks/sec
- * immediate
- * waited
-
-5. **Select Issues** in issues/sec
- * full join
- * full range join
- * range
- * range check
- * scan
-
-6. **Sort Issues** in issues/sec
- * merge passes
- * range
- * scan
-
-### configuration
+1. **Bandwidth** in kbps
+
+- in
+- out
+
+2. **Queries** in queries/sec
+
+- queries
+- questions
+- slow queries
+
+3. **Operations** in operations/sec
+
+- opened tables
+- flush
+- commit
+- delete
+- prepare
+- read first
+- read key
+- read next
+- read prev
+- read random
+- read random next
+- rollback
+- save point
+- update
+- write
+
+4. **Table Locks** in locks/sec
+
+- immediate
+- waited
+
+5. **Select Issues** in issues/sec
+
+- full join
+- full range join
+- range
+- range check
+- scan
+
+6. **Sort Issues** in issues/sec
+
+- merge passes
+- range
+- scan
+
+## configuration
You can configure many database servers, like this:
You can provide, per server, the following:
-1. a name, anything you like, but keep it short
-2. the mysql command to connect to the server
-3. the mysql command line options to be used for connecting to the server
+1. a name, anything you like, but keep it short
+2. the mysql command to connect to the server
+3. the mysql command line options to be used for connecting to the server
Here is an example for 2 servers:
@@ -77,7 +83,6 @@ The above sets the mysql command only for server2. server1 will use the system d
If no configuration is given, the plugin will attempt to connect to mysql server at localhost.
-
---
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fmysql%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fmysql%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/charts.d.plugin/nginx/README.md b/collectors/charts.d.plugin/nginx/README.md
index 42a4f812..57b4a4b1 100644
--- a/collectors/charts.d.plugin/nginx/README.md
+++ b/collectors/charts.d.plugin/nginx/README.md
@@ -3,4 +3,4 @@
> THIS MODULE IS OBSOLETE.
> USE [THE PYTHON ONE](../../python.d.plugin/nginx) - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fnginx%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fnginx%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/charts.d.plugin/nut/README.md b/collectors/charts.d.plugin/nut/README.md
index 3e169936..ea93318f 100644
--- a/collectors/charts.d.plugin/nut/README.md
+++ b/collectors/charts.d.plugin/nut/README.md
@@ -4,46 +4,45 @@ The plugin will collect UPS data for all UPSes configured in the system.
The following charts will be created:
-1. **UPS Charge**
+1. **UPS Charge**
- * percentage changed
+- percentage charged
-2. **UPS Battery Voltage**
+2. **UPS Battery Voltage**
- * current voltage
- * high voltage
- * low voltage
- * nominal voltage
+- current voltage
+- high voltage
+- low voltage
+- nominal voltage
-3. **UPS Input Voltage**
+3. **UPS Input Voltage**
- * current voltage
- * fault voltage
- * nominal voltage
+- current voltage
+- fault voltage
+- nominal voltage
-4. **UPS Input Current**
+4. **UPS Input Current**
- * nominal current
+- nominal current
-5. **UPS Input Frequency**
+5. **UPS Input Frequency**
- * current frequency
- * nominal frequency
+- current frequency
+- nominal frequency
-6. **UPS Output Voltage**
+6. **UPS Output Voltage**
- * current voltage
+- current voltage
-7. **UPS Load**
+7. **UPS Load**
- * current load
+- current load
-8. **UPS Temperature**
+8. **UPS Temperature**
- * current temperature
+- current temperature
-
-### configuration
+## configuration
This is the internal default for `/etc/netdata/nut.conf`
@@ -58,4 +57,4 @@ nut_update_every=2
---
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fnut%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fnut%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/charts.d.plugin/opensips/README.md b/collectors/charts.d.plugin/opensips/README.md
index cb056da8..d41b4112 100644
--- a/collectors/charts.d.plugin/opensips/README.md
+++ b/collectors/charts.d.plugin/opensips/README.md
@@ -1,7 +1,7 @@
# OpenSIPS
-*Under construction*
+_Under construction_
Collects OpenSIPS metrics
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fopensips%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fopensips%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/charts.d.plugin/phpfpm/README.md b/collectors/charts.d.plugin/phpfpm/README.md
index 36462ba9..f8976301 100644
--- a/collectors/charts.d.plugin/phpfpm/README.md
+++ b/collectors/charts.d.plugin/phpfpm/README.md
@@ -3,4 +3,4 @@
> THIS MODULE IS OBSOLETE.
> USE [THE PYTHON ONE](../../python.d.plugin/phpfpm) - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fphpfpm%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fphpfpm%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/charts.d.plugin/postfix/README.md b/collectors/charts.d.plugin/postfix/README.md
index e0dc6330..d9bf77f2 100644
--- a/collectors/charts.d.plugin/postfix/README.md
+++ b/collectors/charts.d.plugin/postfix/README.md
@@ -7,10 +7,10 @@ The plugin will collect the postfix queue size.
It will create two charts:
-1. **queue size in emails**
-2. **queue size in KB**
+1. **queue size in emails**
+2. **queue size in KB**
-### configuration
+## configuration
This is the internal default for `/etc/netdata/postfix.conf`
@@ -25,4 +25,4 @@ postfix_update_every=15
---
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fpostfix%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fpostfix%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/charts.d.plugin/sensors/README.md b/collectors/charts.d.plugin/sensors/README.md
index 4f3e46d6..a3fa9d20 100644
--- a/collectors/charts.d.plugin/sensors/README.md
+++ b/collectors/charts.d.plugin/sensors/README.md
@@ -2,10 +2,9 @@
> THIS MODULE IS OBSOLETE.
> USE [THE PYTHON ONE](../../python.d.plugin/sensors) - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT
-
+>
> Unlike the python one, this module can collect temperature on RPi.
-
The plugin will provide charts for all configured system sensors
> This plugin is reading sensors directly from the kernel.
@@ -13,19 +12,19 @@ The plugin will provide charts for all configured system sensors
> kernel provided values, this plugin will not perform.
> So, the values graphed, are the raw hardware values of the sensors.
-The plugin will create netdata charts for:
+The plugin will create Netdata charts for:
-1. **Temperature**
-2. **Voltage**
-3. **Current**
-4. **Power**
-5. **Fans Speed**
-6. **Energy**
-7. **Humidity**
+1. **Temperature**
+2. **Voltage**
+3. **Current**
+4. **Power**
+5. **Fans Speed**
+6. **Energy**
+7. **Humidity**
One chart for every sensor chip found and each of the above will be created.
-### configuration
+## configuration
This is the internal default for `/etc/netdata/sensors.conf`
@@ -52,4 +51,4 @@ sensors_excluded=()
---
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fsensors%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fsensors%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/charts.d.plugin/squid/README.md b/collectors/charts.d.plugin/squid/README.md
index cfb61790..831a04f7 100644
--- a/collectors/charts.d.plugin/squid/README.md
+++ b/collectors/charts.d.plugin/squid/README.md
@@ -7,50 +7,50 @@ The plugin will monitor a squid server.
It will produce 4 charts:
-1. **Squid Client Bandwidth** in kbps
+1. **Squid Client Bandwidth** in kbps
- * in
- * out
- * hits
+- in
+- out
+- hits
-2. **Squid Client Requests** in requests/sec
+2. **Squid Client Requests** in requests/sec
- * requests
- * hits
- * errors
+- requests
+- hits
+- errors
-3. **Squid Server Bandwidth** in kbps
+3. **Squid Server Bandwidth** in kbps
- * in
- * out
+- in
+- out
-4. **Squid Server Requests** in requests/sec
+4. **Squid Server Requests** in requests/sec
- * requests
- * errors
+- requests
+- errors
-### autoconfig
+## autoconfig
The plugin will by itself detect squid servers running on
localhost, on ports 3128 or 8080.
It will attempt to download URLs in the form:
-- `cache_object://HOST:PORT/counters`
-- `/squid-internal-mgr/counters`
+- `cache_object://HOST:PORT/counters`
+- `/squid-internal-mgr/counters`
If any succeeds, it will use this.
-### configuration
+## configuration
If you need to configure it by hand, create the file
`/etc/netdata/squid.conf` with the following variables:
-- `squid_host=IP` the IP of the squid host
-- `squid_port=PORT` the port the squid is listening
-- `squid_url="URL"` the URL with the statistics to be fetched from squid
-- `squid_timeout=SECONDS` how much time we should wait for squid to respond
-- `squid_update_every=SECONDS` the frequency of the data collection
+- `squid_host=IP` the IP of the squid host
+- `squid_port=PORT` the port squid is listening on
+- `squid_url="URL"` the URL with the statistics to be fetched from squid
+- `squid_timeout=SECONDS` how much time we should wait for squid to respond
+- `squid_update_every=SECONDS` the frequency of the data collection
Example `/etc/netdata/squid.conf`:
@@ -64,4 +64,4 @@ squid_update_every=5
---
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fsquid%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fsquid%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/charts.d.plugin/tomcat/README.md b/collectors/charts.d.plugin/tomcat/README.md
index 84337860..752332cf 100644
--- a/collectors/charts.d.plugin/tomcat/README.md
+++ b/collectors/charts.d.plugin/tomcat/README.md
@@ -3,4 +3,4 @@
> THIS MODULE IS OBSOLETE.
> USE [THE PYTHON ONE](../../python.d.plugin/tomcat) - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Ftomcat%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Ftomcat%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/checks.plugin/Makefile.in b/collectors/checks.plugin/Makefile.in
new file mode 100644
index 00000000..ab2ad416
--- /dev/null
+++ b/collectors/checks.plugin/Makefile.in
@@ -0,0 +1,514 @@
+# Makefile.in generated by automake 1.15.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2017 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+VPATH = @srcdir@
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = collectors/checks.plugin
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(dist_noinst_DATA) \
+ $(am__DIST_COMMON)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_noinst_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+am__DIST_COMMON = $(srcdir)/Makefile.in
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CUPSCONFIG = @CUPSCONFIG@
+CXX = @CXX@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CXX_BINARY = @CXX_BINARY@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+JSON_CFLAGS = @JSON_CFLAGS@
+JSON_LIBS = @JSON_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
+LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
+LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
+LIBCURL_LIBS = @LIBCURL_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
+LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
+LIBSSL_LIBS = @LIBSSL_LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
+OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
+OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
+OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
+OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
+OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
+OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
+OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
+OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
+OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
+OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
+OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
+OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
+PROTOBUF_LIBS = @PROTOBUF_LIBS@
+PROTOC = @PROTOC@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
+XENLIGHT_LIBS = @XENLIGHT_LIBS@
+YAJL_CFLAGS = @YAJL_CFLAGS@
+YAJL_LIBS = @YAJL_LIBS@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
+
+all: all-am
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/checks.plugin/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu collectors/checks.plugin/Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(DATA)
+installdirs:
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags-am uninstall uninstall-am
+
+.PRECIOUS: Makefile
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/collectors/checks.plugin/README.md b/collectors/checks.plugin/README.md
index 461e3ba8..4510795d 100644
--- a/collectors/checks.plugin/README.md
+++ b/collectors/checks.plugin/README.md
@@ -2,4 +2,4 @@
A debugging plugin (by default it is disabled)
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fchecks.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fchecks.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/cups.plugin/Makefile.in b/collectors/cups.plugin/Makefile.in
new file mode 100644
index 00000000..0db0de41
--- /dev/null
+++ b/collectors/cups.plugin/Makefile.in
@@ -0,0 +1,514 @@
+# Makefile.in generated by automake 1.15.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2017 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+VPATH = @srcdir@
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = collectors/cups.plugin
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(dist_noinst_DATA) \
+ $(am__DIST_COMMON)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_noinst_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+am__DIST_COMMON = $(srcdir)/Makefile.in
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CUPSCONFIG = @CUPSCONFIG@
+CXX = @CXX@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CXX_BINARY = @CXX_BINARY@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+JSON_CFLAGS = @JSON_CFLAGS@
+JSON_LIBS = @JSON_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
+LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
+LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
+LIBCURL_LIBS = @LIBCURL_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
+LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
+LIBSSL_LIBS = @LIBSSL_LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
+OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
+OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
+OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
+OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
+OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
+OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
+OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
+OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
+OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
+OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
+OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
+OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
+PROTOBUF_LIBS = @PROTOBUF_LIBS@
+PROTOC = @PROTOC@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
+XENLIGHT_LIBS = @XENLIGHT_LIBS@
+YAJL_CFLAGS = @YAJL_CFLAGS@
+YAJL_LIBS = @YAJL_LIBS@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
+
+all: all-am
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/cups.plugin/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu collectors/cups.plugin/Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(DATA)
+installdirs:
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags-am uninstall uninstall-am
+
+.PRECIOUS: Makefile
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/collectors/cups.plugin/README.md b/collectors/cups.plugin/README.md
index f04aa567..b8d56387 100644
--- a/collectors/cups.plugin/README.md
+++ b/collectors/cups.plugin/README.md
@@ -10,40 +10,46 @@ This plugin needs a running local CUPS daemon (`cupsd`). This plugin does not ne
`cups.plugin` provides one common section `destinations` and one section per destination.
-> Destinations in CUPS represent individual printers or classes (collections or pools) of printers (https://www.cups.org/doc/cupspm.html#working-with-destinations)
+> Destinations in CUPS represent individual printers or classes (collections or pools) of printers (<https://www.cups.org/doc/cupspm.html#working-with-destinations>)
The section `server` provides these charts:
-1. **destinations by state**
- * idle
- * printing
- * stopped
+1. **destinations by state**
-2. **destinations by options**
- * total
- * accepting jobs
- * shared
+ - idle
+ - printing
+ - stopped
-3. **total job number by status**
- * pending
- * processing
- * held
+2. **destinations by options**
-4. **total job size by status**
- * pending
- * processing
- * held
+ - total
+ - accepting jobs
+ - shared
+
+3. **total job number by status**
+
+ - pending
+ - processing
+ - held
+
+4. **total job size by status**
+
+ - pending
+ - processing
+ - held
For each destination, the plugin provides these charts:
-1. **job number by status**
- * pending
- * held
- * processing
+1. **job number by status**
+
+ - pending
+ - held
+ - processing
+
+2. **job size by status**
+
+ - pending
+ - held
+ - processing
-3. **job size by status**
- * pending
- * held
- * processing
-
At the moment, only the job statuses pending, processing, and held are reported, because we do not have a scalable method to collect stopped, canceled, aborted, and completed jobs.
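
Not part of the upstream README, but as a quick, illustrative way to verify the prerequisite above (a running local `cupsd`) before expecting these charts, the stock CUPS client tools can be used:

```sh
# "scheduler is running" confirms cupsd is up locally
lpstat -r
# lists printers and the default destination the plugin will chart
lpstat -p -d
```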
diff --git a/collectors/diskspace.plugin/Makefile.in b/collectors/diskspace.plugin/Makefile.in
new file mode 100644
index 00000000..01c63c12
--- /dev/null
+++ b/collectors/diskspace.plugin/Makefile.in
@@ -0,0 +1,514 @@
+# Makefile.in generated by automake 1.15.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2017 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+VPATH = @srcdir@
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = collectors/diskspace.plugin
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(dist_noinst_DATA) \
+ $(am__DIST_COMMON)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_noinst_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+am__DIST_COMMON = $(srcdir)/Makefile.in
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CUPSCONFIG = @CUPSCONFIG@
+CXX = @CXX@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CXX_BINARY = @CXX_BINARY@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+JSON_CFLAGS = @JSON_CFLAGS@
+JSON_LIBS = @JSON_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
+LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
+LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
+LIBCURL_LIBS = @LIBCURL_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
+LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
+LIBSSL_LIBS = @LIBSSL_LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
+OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
+OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
+OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
+OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
+OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
+OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
+OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
+OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
+OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
+OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
+OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
+OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
+PROTOBUF_LIBS = @PROTOBUF_LIBS@
+PROTOC = @PROTOC@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
+XENLIGHT_LIBS = @XENLIGHT_LIBS@
+YAJL_CFLAGS = @YAJL_CFLAGS@
+YAJL_LIBS = @YAJL_LIBS@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
+
+all: all-am
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/diskspace.plugin/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu collectors/diskspace.plugin/Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(DATA)
+installdirs:
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags-am uninstall uninstall-am
+
+.PRECIOUS: Makefile
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/collectors/diskspace.plugin/README.md b/collectors/diskspace.plugin/README.md
index ff98a874..080140e4 100644
--- a/collectors/diskspace.plugin/README.md
+++ b/collectors/diskspace.plugin/README.md
@@ -3,15 +3,15 @@
This plugin monitors the disk space usage of mounted disks under Linux. The plugin requires Netdata to have execute/search permissions on the mount point itself, as well as on each component of the absolute path to the mount point.
Two charts are available for every mount:
- - Disk Space Usage
- - Disk Files (inodes) Usage
+
+- Disk Space Usage
+- Disk Files (inodes) Usage
## configuration
Simple patterns can be used to exclude mounts from the shown statistics, based on path or filesystem. By default, read-only mounts are not displayed. To display them, set `yes` for a chart instead of `auto`.
-By default, Netdata will enable monitoring metrics only when they are not zero. If they are constantly zero they are ignored. Metrics that will start having values, after netdata is started, will be detected and charts will be automatically added to the dashboard (a refresh of the dashboard is needed for them to appear though). Set `yes` for a chart instead of `auto` to enable it permanently. You can also set the `enable zero metrics` option to `yes` in the `[global]` section which enables charts with zero metrics for all internal Netdata plugins.
-
+By default, Netdata will enable monitoring metrics only when they are not zero. If they are constantly zero, they are ignored. Metrics that start having values after Netdata is started will be detected, and charts will automatically be added to the dashboard (a dashboard refresh is needed for them to appear, though). Set `yes` for a chart instead of `auto` to enable it permanently. You can also set the `enable zero metrics` option to `yes` in the `[global]` section, which enables charts with zero metrics for all internal Netdata plugins.
```
[plugin:proc:diskspace]
@@ -34,5 +34,4 @@ Charts can be enabled/disabled for every mount separately:
> For disk performance monitoring, see the `proc` plugin, [here](../proc.plugin/#monitoring-disks).
-
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fdiskspace.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fdiskspace.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
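
To make the per-mount enable/disable note above concrete, here is a minimal sketch of such a `netdata.conf` fragment. It assumes the per-mount section accepts the same `space usage` / `inodes usage` keys as the plugin's global section; the `/home` mount point is illustrative:

```
[plugin:proc:diskspace:/home]
    space usage = auto
    inodes usage = auto
```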
diff --git a/collectors/fping.plugin/Makefile.in b/collectors/fping.plugin/Makefile.in
new file mode 100644
index 00000000..8f2d4c01
--- /dev/null
+++ b/collectors/fping.plugin/Makefile.in
@@ -0,0 +1,641 @@
+# Makefile.in generated by automake 1.15.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2017 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+
+VPATH = @srcdir@
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = collectors/fping.plugin
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(dist_plugins_SCRIPTS) \
+ $(dist_libconfig_DATA) $(dist_noinst_DATA) $(am__DIST_COMMON)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
+am__vpath_adj = case $$p in \
+ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
+ *) f=$$p;; \
+ esac;
+am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
+am__install_max = 40
+am__nobase_strip_setup = \
+ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
+am__nobase_strip = \
+ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
+am__nobase_list = $(am__nobase_strip_setup); \
+ for p in $$list; do echo "$$p $$p"; done | \
+ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
+ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
+ if (++n[$$2] == $(am__install_max)) \
+ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
+ END { for (dir in files) print dir, files[dir] }'
+am__base_list = \
+ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
+ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
+am__uninstall_files_from_dir = { \
+ test -z "$$files" \
+ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \
+ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \
+ $(am__cd) "$$dir" && rm -f $$files; }; \
+ }
+am__installdirs = "$(DESTDIR)$(pluginsdir)" \
+ "$(DESTDIR)$(libconfigdir)"
+SCRIPTS = $(dist_plugins_SCRIPTS)
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_libconfig_DATA) $(dist_noinst_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+am__DIST_COMMON = $(srcdir)/Makefile.in $(top_srcdir)/build/subst.inc
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CUPSCONFIG = @CUPSCONFIG@
+CXX = @CXX@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CXX_BINARY = @CXX_BINARY@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+JSON_CFLAGS = @JSON_CFLAGS@
+JSON_LIBS = @JSON_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
+LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
+LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
+LIBCURL_LIBS = @LIBCURL_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
+LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
+LIBSSL_LIBS = @LIBSSL_LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
+OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
+OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
+OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
+OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
+OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
+OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
+OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
+OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
+OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
+OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
+OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
+OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
+PROTOBUF_LIBS = @PROTOBUF_LIBS@
+PROTOC = @PROTOC@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
+XENLIGHT_LIBS = @XENLIGHT_LIBS@
+YAJL_CFLAGS = @YAJL_CFLAGS@
+YAJL_LIBS = @YAJL_LIBS@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+CLEANFILES = \
+ fping.plugin \
+ $(NULL)
+
+SUFFIXES = .in
+dist_plugins_SCRIPTS = \
+ fping.plugin \
+ $(NULL)
+
+dist_noinst_DATA = \
+ fping.plugin.in \
+ README.md \
+ $(NULL)
+
+dist_libconfig_DATA = \
+ fping.conf \
+ $(NULL)
+
+all: all-am
+
+.SUFFIXES:
+.SUFFIXES: .in
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(top_srcdir)/build/subst.inc $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/fping.plugin/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu collectors/fping.plugin/Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+$(top_srcdir)/build/subst.inc $(am__empty):
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+install-dist_pluginsSCRIPTS: $(dist_plugins_SCRIPTS)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(pluginsdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(pluginsdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \
+ done | \
+ sed -e 'p;s,.*/,,;n' \
+ -e 'h;s|.*|.|' \
+ -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \
+ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \
+ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \
+ if ($$2 == $$4) { files[d] = files[d] " " $$1; \
+ if (++n[d] == $(am__install_max)) { \
+ print "f", d, files[d]; n[d] = 0; files[d] = "" } } \
+ else { print "f", d "/" $$4, $$1 } } \
+ END { for (d in files) print "f", d, files[d] }' | \
+ while read type dir files; do \
+ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \
+ test -z "$$files" || { \
+ echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pluginsdir)$$dir'"; \
+ $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pluginsdir)$$dir" || exit $$?; \
+ } \
+ ; done
+
+uninstall-dist_pluginsSCRIPTS:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || exit 0; \
+ files=`for p in $$list; do echo "$$p"; done | \
+ sed -e 's,.*/,,;$(transform)'`; \
+ dir='$(DESTDIR)$(pluginsdir)'; $(am__uninstall_files_from_dir)
+install-dist_libconfigDATA: $(dist_libconfig_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_libconfig_DATA)'; test -n "$(libconfigdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(libconfigdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(libconfigdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(libconfigdir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(libconfigdir)" || exit $$?; \
+ done
+
+uninstall-dist_libconfigDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_libconfig_DATA)'; test -n "$(libconfigdir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(libconfigdir)'; $(am__uninstall_files_from_dir)
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(SCRIPTS) $(DATA)
+installdirs:
+ for dir in "$(DESTDIR)$(pluginsdir)" "$(DESTDIR)$(libconfigdir)"; do \
+ test -z "$$dir" || $(MKDIR_P) "$$dir"; \
+ done
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+ -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES)
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am: install-dist_libconfigDATA \
+ install-dist_pluginsSCRIPTS
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am: uninstall-dist_libconfigDATA \
+ uninstall-dist_pluginsSCRIPTS
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dist_libconfigDATA \
+ install-dist_pluginsSCRIPTS install-dvi install-dvi-am \
+ install-exec install-exec-am install-html install-html-am \
+ install-info install-info-am install-man install-pdf \
+ install-pdf-am install-ps install-ps-am install-strip \
+ installcheck installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags-am uninstall uninstall-am \
+ uninstall-dist_libconfigDATA uninstall-dist_pluginsSCRIPTS
+
+.PRECIOUS: Makefile
+
+.in:
+ if sed \
+ -e 's#[@]localstatedir_POST@#$(localstatedir)#g' \
+ -e 's#[@]sbindir_POST@#$(sbindir)#g' \
+ -e 's#[@]pluginsdir_POST@#$(pluginsdir)#g' \
+ -e 's#[@]configdir_POST@#$(configdir)#g' \
+ -e 's#[@]libconfigdir_POST@#$(libconfigdir)#g' \
+ -e 's#[@]cachedir_POST@#$(cachedir)#g' \
+ -e 's#[@]registrydir_POST@#$(registrydir)#g' \
+ -e 's#[@]varlibdir_POST@#$(varlibdir)#g' \
+ $< > $@.tmp; then \
+ mv "$@.tmp" "$@"; \
+ else \
+ rm -f "$@.tmp"; \
+ false; \
+ fi
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
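
The `.in:` suffix rule above is what turns `fping.plugin.in` into the installable `fping.plugin`; it is roughly equivalent to the following `sed` sketch (the substituted directory values are illustrative examples of one configured build, and only two of the eight substitutions are shown):

```sh
sed -e 's#[@]pluginsdir_POST@#/usr/libexec/netdata/plugins.d#g' \
    -e 's#[@]configdir_POST@#/etc/netdata#g' \
    fping.plugin.in > fping.plugin.tmp && mv fping.plugin.tmp fping.plugin
```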
diff --git a/collectors/fping.plugin/README.md b/collectors/fping.plugin/README.md
index d5f83fdf..ad79f06a 100644
--- a/collectors/fping.plugin/README.md
+++ b/collectors/fping.plugin/README.md
@@ -3,7 +3,7 @@
The fping plugin supports monitoring latency, packet loss, and uptime of any number of network endpoints
by pinging them with `fping`.
-A recent version of `fping` is required (one that supports option ` -N `).
+A recent version of `fping` is required (one that supports option `-N`).
The supplied plugin can install it by running:
```sh
@@ -23,7 +23,7 @@ fping="/usr/local/bin/fping"
# I suggest using hostnames and putting their IPs in /etc/hosts
hosts="host1 host2 host3"
-# override the chart update frequency - the default is inherited from netdata
+# override the chart update frequency - the default is inherited from Netdata
update_every=1
# time in milliseconds (1 sec = 1000 ms) to ping the hosts
@@ -36,10 +36,10 @@ fping_opts="-R -b 56 -i 1 -r 0 -t 5000"
## alarms
-netdata will automatically attach a few alarms for each host.
+Netdata will automatically attach a few alarms for each host.
Check the [latest versions of the fping alarms](../../health/health.d/fping.conf).
-## Additional Tips
+## Additional Tips
### Customizing Amount of Pings Per Second
@@ -60,13 +60,12 @@ ping_every=5000
You may need to run multiple fping plugins with different settings for different endpoints.
For example, you may need to ping a few hosts 10 times per second, and others once per second.
-netdata allows you to add as many `fping` plugins as you like.
+Netdata allows you to add as many `fping` plugins as you like.
Follow this procedure:
**1. Create New fping Configuration File**
-
```sh
# Step Into Configuration Directory
cd /etc/netdata
@@ -90,9 +89,9 @@ cd /usr/libexec/netdata/plugins.d
ln -s fping.plugin fping2.plugin
```
-That's it. netdata will detect the new plugin and start it.
+That's it. Netdata will detect the new plugin and start it.
You can give the new plugin any name you like.
Just make sure the plugin and the configuration file have the same name.
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Ffping.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Ffping.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
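
As a concrete sketch of the multi-plugin procedure above, the paired configuration for the `fping2.plugin` symlink could look like this (host names and rates are placeholders):

```sh
# /etc/netdata/fping2.conf -- read by fping2.plugin because the names match
fping="/usr/local/bin/fping"
hosts="fast-host1 fast-host2"
update_every=1
ping_every=100   # milliseconds between pings: 10 pings per host per second
```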
diff --git a/collectors/fping.plugin/fping.plugin b/collectors/fping.plugin/fping.plugin
new file mode 100644
index 00000000..13145d7a
--- /dev/null
+++ b/collectors/fping.plugin/fping.plugin
@@ -0,0 +1,200 @@
+#!/usr/bin/env bash
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2017 Costa Tsaousis <costa@tsaousis.gr>
+# GPL v3+
+#
+# This plugin requires a recent version of fping.
+# You can compile it from source by running me with the option: install
+
+export PATH="${PATH}:/sbin:/usr/sbin:/usr/local/sbin"
+export LC_ALL=C
+
+if [ "${1}" = "install" ]
+ then
+ [ "${UID}" != 0 ] && echo >&2 "Please run me as root. This will install a single binary file: /usr/local/bin/fping." && exit 1
+
+ run() {
+ printf >&2 " > "
+ printf >&2 "%q " "${@}"
+ printf >&2 "\n"
+ "${@}" || exit 1
+ }
+
+ download() {
+ local curl="$(which curl 2>/dev/null || command -v curl 2>/dev/null)"
+ [ ! -z "${curl}" ] && run curl -s -L "${1}" && return 0
+
+ local wget="$(which wget 2>/dev/null || command -v wget 2>/dev/null)"
+ [ ! -z "${wget}" ] && run wget -q -O - "${1}" && return 0
+
+ echo >&2 "Cannot find 'curl' or 'wget' in this system." && exit 1
+ }
+
+ [ ! -d /usr/src ] && run mkdir -p /usr/src
+ [ ! -d /usr/local/bin ] && run mkdir -p /usr/local/bin
+
+ run cd /usr/src
+
+ if [ -d fping-4.2 ]
+ then
+ run rm -rf fping-4.2 || exit 1
+ fi
+
+ download 'https://github.com/schweikert/fping/releases/download/v4.2/fping-4.2.tar.gz' | run tar -zxvpf -
+ [ $? -ne 0 ] && exit 1
+ run cd fping-4.2 || exit 1
+
+ run ./configure --prefix=/usr/local
+ run make clean
+ run make
+ if [ -f /usr/local/bin/fping ]
+ then
+ run mv -f /usr/local/bin/fping /usr/local/bin/fping.old
+ fi
+ run mv src/fping /usr/local/bin/fping
+ run chown root:root /usr/local/bin/fping
+ run chmod 4755 /usr/local/bin/fping
+ echo >&2
+ echo >&2 "All done, you have a compatible fping now at /usr/local/bin/fping."
+ echo >&2
+
+ fping="$(which fping 2>/dev/null || command -v fping 2>/dev/null)"
+ if [ "${fping}" != "/usr/local/bin/fping" ]
+ then
+ echo >&2 "You have another fping installed at: ${fping}."
+ echo >&2 "Please set:"
+ echo >&2
+ echo >&2 " fping=\"/usr/local/bin/fping\""
+ echo >&2
+ echo >&2 "at /etc/netdata/fping.conf"
+ echo >&2
+ fi
+ exit 0
+fi
+
+# -----------------------------------------------------------------------------
+
+PROGRAM_NAME="$(basename "${0}")"
+
+logdate() {
+ date "+%Y-%m-%d %H:%M:%S"
+}
+
+log() {
+ local status="${1}"
+ shift
+
+ echo >&2 "$(logdate): ${PROGRAM_NAME}: ${status}: ${*}"
+
+}
+
+warning() {
+ log WARNING "${@}"
+}
+
+error() {
+ log ERROR "${@}"
+}
+
+info() {
+ log INFO "${@}"
+}
+
+fatal() {
+ log FATAL "${@}"
+ echo "DISABLE"
+ exit 1
+}
+
+debug=0
+debug() {
+ [ $debug -eq 1 ] && log DEBUG "${@}"
+}
+
+# -----------------------------------------------------------------------------
+
+# store in ${plugin} the name we run under
+# this allows us to copy/link fping.plugin under a different name
+# to have multiple fping plugins running with different settings
+plugin="${PROGRAM_NAME/.plugin/}"
+
+
+# -----------------------------------------------------------------------------
+
+# the frequency to send info to netdata
+# passed by netdata as the first parameter
+update_every="${1-1}"
+
+# the netdata configuration directory
+# passed by netdata as an environment variable
+[ -z "${NETDATA_USER_CONFIG_DIR}" ] && NETDATA_USER_CONFIG_DIR="/etc/netdata"
+[ -z "${NETDATA_STOCK_CONFIG_DIR}" ] && NETDATA_STOCK_CONFIG_DIR="/usr/lib/netdata/conf.d"
+
+# -----------------------------------------------------------------------------
+# configuration options
+# can be overwritten at /etc/netdata/fping.conf
+
+# the fping binary to use
+# we need one that can output netdata-friendly info (supporting: -N)
+# if you have multiple versions, put here the full filename of the right one
+fping="$( which fping 2>/dev/null || command -v fping 2>/dev/null )"
+
+# a space separated list of hosts to fping
+# we suggest putting names here and the IPs of these names in /etc/hosts
+hosts=""
+
+# the time in milliseconds (1 sec = 1000 ms)
+# to ping the hosts - by default 5 pings per host per iteration
+ping_every="$((update_every * 1000 / 5))"
+
+# fping options
+fping_opts="-R -b 56 -i 1 -r 0 -t 5000"
+
+# -----------------------------------------------------------------------------
+# load the configuration files
+
+for CONFIG in "${NETDATA_STOCK_CONFIG_DIR}/${plugin}.conf" "${NETDATA_USER_CONFIG_DIR}/${plugin}.conf"
+do
+ if [ -f "${CONFIG}" ]
+ then
+ info "Loading config file '${CONFIG}'..."
+ source "${CONFIG}"
+ [ $? -ne 0 ] && error "Failed to load config file '${CONFIG}'."
+ else
+ warning "Cannot find file '${CONFIG}'."
+ fi
+done
+
+if [ -z "${hosts}" ]
+then
+ fatal "no hosts configured - nothing to do."
+fi
+
+if [ -z "${fping}" ]
+then
+ fatal "fping command is not found. Please set its full path in '${NETDATA_USER_CONFIG_DIR}/${plugin}.conf'"
+fi
+
+if [ ! -x "${fping}" ]
+then
+ fatal "fping command '${fping}' is not executable - cannot proceed."
+fi
+
+if [ ${ping_every} -lt 20 ]
+ then
+ warning "ping every was set to ${ping_every} but 20 is the minimum for non-root users. Setting it to 20 ms."
+ ping_every=20
+fi
+
+# the fping options we will use
+options=( -N -l -Q ${update_every} -p ${ping_every} ${fping_opts} ${hosts} )
+
+# execute fping
+info "starting fping: ${fping} ${options[*]}"
+exec "${fping}" "${options[@]}"
+
+# if we cannot execute fping, stop
+fatal "command '${fping} ${options[*]}' failed to be executed (returned code $?)."
diff --git a/collectors/freebsd.plugin/Makefile.in b/collectors/freebsd.plugin/Makefile.in
new file mode 100644
index 00000000..925701e7
--- /dev/null
+++ b/collectors/freebsd.plugin/Makefile.in
@@ -0,0 +1,514 @@
+# Makefile.in generated by automake 1.15.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2017 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+VPATH = @srcdir@
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = collectors/freebsd.plugin
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(dist_noinst_DATA) \
+ $(am__DIST_COMMON)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_noinst_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+am__DIST_COMMON = $(srcdir)/Makefile.in
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CUPSCONFIG = @CUPSCONFIG@
+CXX = @CXX@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CXX_BINARY = @CXX_BINARY@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+JSON_CFLAGS = @JSON_CFLAGS@
+JSON_LIBS = @JSON_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
+LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
+LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
+LIBCURL_LIBS = @LIBCURL_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
+LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
+LIBSSL_LIBS = @LIBSSL_LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
+OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
+OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
+OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
+OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
+OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
+OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
+OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
+OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
+OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
+OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
+OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
+OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
+PROTOBUF_LIBS = @PROTOBUF_LIBS@
+PROTOC = @PROTOC@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
+XENLIGHT_LIBS = @XENLIGHT_LIBS@
+YAJL_CFLAGS = @YAJL_CFLAGS@
+YAJL_LIBS = @YAJL_LIBS@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
+
+all: all-am
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/freebsd.plugin/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu collectors/freebsd.plugin/Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(DATA)
+installdirs:
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags-am uninstall uninstall-am
+
+.PRECIOUS: Makefile
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/collectors/freebsd.plugin/README.md b/collectors/freebsd.plugin/README.md
index 618e053d..07da72e1 100644
--- a/collectors/freebsd.plugin/README.md
+++ b/collectors/freebsd.plugin/README.md
@@ -2,6 +2,6 @@
Collects resource usage and performance data on FreeBSD systems
-By default, Netdata will enable monitoring metrics for disks, memory, and network only when they are not zero. If they are constantly zero they are ignored. Metrics that will start having values, after netdata is started, will be detected and charts will be automatically added to the dashboard (a refresh of the dashboard is needed for them to appear though). Use `yes` instead of `auto` in plugin configuration sections to enable these charts permanently. You can also set the `enable zero metrics` option to `yes` in the `[global]` section which enables charts with zero metrics for all internal Netdata plugins.
+By default, Netdata will enable monitoring metrics for disks, memory, and network only when they are not zero. If they are constantly zero they are ignored. Metrics that will start having values, after Netdata is started, will be detected and charts will be automatically added to the dashboard (a refresh of the dashboard is needed for them to appear though). Use `yes` instead of `auto` in plugin configuration sections to enable these charts permanently. You can also set the `enable zero metrics` option to `yes` in the `[global]` section which enables charts with zero metrics for all internal Netdata plugins.
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Ffreebsd.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Ffreebsd.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/freeipmi.plugin/Makefile.in b/collectors/freeipmi.plugin/Makefile.in
new file mode 100644
index 00000000..d5efb6bc
--- /dev/null
+++ b/collectors/freeipmi.plugin/Makefile.in
@@ -0,0 +1,514 @@
+# Makefile.in generated by automake 1.15.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2017 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+VPATH = @srcdir@
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = collectors/freeipmi.plugin
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(dist_noinst_DATA) \
+ $(am__DIST_COMMON)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_noinst_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+am__DIST_COMMON = $(srcdir)/Makefile.in
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CUPSCONFIG = @CUPSCONFIG@
+CXX = @CXX@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CXX_BINARY = @CXX_BINARY@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+JSON_CFLAGS = @JSON_CFLAGS@
+JSON_LIBS = @JSON_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
+LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
+LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
+LIBCURL_LIBS = @LIBCURL_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
+LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
+LIBSSL_LIBS = @LIBSSL_LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
+OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
+OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
+OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
+OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
+OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
+OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
+OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
+OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
+OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
+OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
+OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
+OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
+PROTOBUF_LIBS = @PROTOBUF_LIBS@
+PROTOC = @PROTOC@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
+XENLIGHT_LIBS = @XENLIGHT_LIBS@
+YAJL_CFLAGS = @YAJL_CFLAGS@
+YAJL_LIBS = @YAJL_LIBS@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
+
+all: all-am
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/freeipmi.plugin/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu collectors/freeipmi.plugin/Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(DATA)
+installdirs:
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags-am uninstall uninstall-am
+
+.PRECIOUS: Makefile
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/collectors/freeipmi.plugin/README.md b/collectors/freeipmi.plugin/README.md
index 246955f9..058c2edb 100644
--- a/collectors/freeipmi.plugin/README.md
+++ b/collectors/freeipmi.plugin/README.md
@@ -6,32 +6,31 @@ Netdata has a [freeipmi](https://www.gnu.org/software/freeipmi/) plugin.
## Compile `freeipmi.plugin`
-1. install `libipmimonitoring-dev` or `libipmimonitoring-devel` (`freeipmi-devel` on RHEL based OS) using the package manager of your system.
+1. install `libipmimonitoring-dev` or `libipmimonitoring-devel` (`freeipmi-devel` on RHEL based OS) using the package manager of your system.
-2. re-install netdata from source. The installer will detect that the required libraries are now available and will also build `freeipmi.plugin`.
+2. re-install Netdata from source. The installer will detect that the required libraries are now available and will also build `freeipmi.plugin`.
Keep in mind IPMI requires root access, so the plugin is setuid to root.
-If you just installed the required IPMI tools, please run at least once the command `ipmimonitoring` and verify it returns sensors information. This command initialises IPMI configuration, so that the netdata plugin will be able to work.
+If you have just installed the required IPMI tools, please run the `ipmimonitoring` command at least once and verify it returns sensor information. This command initialises the IPMI configuration, so that the Netdata plugin will be able to work.
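+
+For example, as root:
+
+```
+# initialise the IPMI configuration and list the detected sensors
+ipmimonitoring
+```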
## Netdata use
The plugin creates (up to) 8 charts, based on the information collected from IPMI:
-1. number of sensors by state
-2. number of events in SEL
-3. Temperatures CELCIUS
-4. Temperatures FAHRENHEIT
-5. Voltages
-6. Currents
-7. Power
-8. Fans
-
+1. number of sensors by state
+2. number of events in SEL
+3. Temperatures CELSIUS
+4. Temperatures FAHRENHEIT
+5. Voltages
+6. Currents
+7. Power
+8. Fans
It also adds 2 alarms:
-1. Sensors in non-nominal state (i.e. warning and critical)
-2. SEL is non empty
+1. Sensors in non-nominal state (i.e. warning and critical)
+2. SEL is non-empty
![image](https://cloud.githubusercontent.com/assets/2662304/23674138/88926a20-037d-11e7-89c0-20e74ee10cd1.png)
@@ -96,7 +95,6 @@ The plugin supports a few options. To see them, run:
For more information:
https://github.com/netdata/netdata/tree/master/collectors/freeipmi.plugin
-
```
You can set these options in `/etc/netdata/netdata.conf` at this section:
@@ -107,11 +105,11 @@ You can set these options in `/etc/netdata/netdata.conf` at this section:
command options =
```
-Append to `command options = ` the settings you need. The minimum `update every` is 5 (enforced internally by the plugin). IPMI is slow and CPU hungry. So, once every 5 seconds is pretty acceptable.
+Append the settings you need to `command options =`. The minimum `update every` is 5 (enforced internally by the plugin); IPMI is slow and CPU-hungry, so collecting once every 5 seconds is quite acceptable.
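+
+For example, a sketch of such a configuration, collecting every 10 seconds and skipping two hypothetical sensor IDs via the `ignore` option described below:
+
+```
+[plugin:freeipmi]
+	update every = 10
+	command options = ignore 12,49
+```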
## Ignoring specific sensors
-Specific sensor IDs can be excluded from freeipmi tools by editing `/etc/freeipmi/freeipmi.conf` and setting the IDs to be ignored at `ipmi-sensors-exclude-record-ids`. **However this file is not used by `libipmimonitoring`** (the library used by netdata's `freeipmi.plugin`).
+Specific sensor IDs can be excluded from freeipmi tools by editing `/etc/freeipmi/freeipmi.conf` and setting the IDs to be ignored at `ipmi-sensors-exclude-record-ids`. **However this file is not used by `libipmimonitoring`** (the library used by Netdata's `freeipmi.plugin`).
So, `freeipmi.plugin` supports the option `ignore` that accepts a comma separated list of sensor IDs to ignore. To configure it, edit `/etc/netdata/netdata.conf` and set:
@@ -142,7 +140,6 @@ ID | Name | Type | State | Reading | Unit
...
```
-
## Debugging
You can run the plugin by hand:
@@ -180,11 +177,11 @@ options ipmi_si kipmid_max_busy_us=10
This instructs the kernel IPMI module to pause for a tick between checking IPMI. Querying IPMI will be a lot slower now (e.g. several seconds for IPMI to respond), but `kipmi` will not use any noticeable CPU. You can also use a higher number (this is the number of microseconds to poll IPMI for a response, before waiting for a tick).
-If you need to disable IPMI for netdata, edit `/etc/netdata/netdata.conf` and set:
+If you need to disable IPMI for Netdata, edit `/etc/netdata/netdata.conf` and set:
```
[plugins]
freeipmi = no
```
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Ffreeipmi.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Ffreeipmi.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/idlejitter.plugin/Makefile.in b/collectors/idlejitter.plugin/Makefile.in
new file mode 100644
index 00000000..caa83146
--- /dev/null
+++ b/collectors/idlejitter.plugin/Makefile.in
@@ -0,0 +1,514 @@
+# Makefile.in generated by automake 1.15.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2017 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+VPATH = @srcdir@
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = collectors/idlejitter.plugin
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(dist_noinst_DATA) \
+ $(am__DIST_COMMON)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_noinst_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+am__DIST_COMMON = $(srcdir)/Makefile.in
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CUPSCONFIG = @CUPSCONFIG@
+CXX = @CXX@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CXX_BINARY = @CXX_BINARY@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+JSON_CFLAGS = @JSON_CFLAGS@
+JSON_LIBS = @JSON_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
+LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
+LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
+LIBCURL_LIBS = @LIBCURL_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
+LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
+LIBSSL_LIBS = @LIBSSL_LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
+OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
+OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
+OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
+OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
+OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
+OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
+OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
+OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
+OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
+OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
+OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
+OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
+PROTOBUF_LIBS = @PROTOBUF_LIBS@
+PROTOC = @PROTOC@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
+XENLIGHT_LIBS = @XENLIGHT_LIBS@
+YAJL_CFLAGS = @YAJL_CFLAGS@
+YAJL_LIBS = @YAJL_LIBS@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
+
+all: all-am
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/idlejitter.plugin/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu collectors/idlejitter.plugin/Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(DATA)
+installdirs:
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags-am uninstall uninstall-am
+
+.PRECIOUS: Makefile
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/collectors/idlejitter.plugin/README.md b/collectors/idlejitter.plugin/README.md
index e8e78085..d1c2998b 100644
--- a/collectors/idlejitter.plugin/README.md
+++ b/collectors/idlejitter.plugin/README.md
@@ -8,8 +8,8 @@ The difference between the requested and the actual duration of the sleep, is th
This is done at most 50 times per second, to ensure we have a good average.
This number is useful:
-
- 1. in real-time environments, when the CPU jitter can affect the quality of the service (like VoIP media gateways).
- 2. in cloud infrastructure, at can pause the VM or container for a small duration to perform operations at the host.
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fidlejitter.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+1. in real-time environments, when the CPU jitter can affect the quality of the service (like VoIP media gateways).
+2. in cloud infrastructure, which can pause the VM or container for a small duration to perform operations at the host.
+
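+As an illustration, here is a minimal, self-contained sketch of this measurement in C. It is a hypothetical example, not the plugin's actual code; it assumes a 20ms sleep target (i.e. at most 50 samples per second) and microsecond timing:
+
+```
+#include <stdio.h>
+#include <time.h>
+#include <unistd.h>
+
+/* current monotonic time in microseconds */
+static long long now_usec(void) {
+    struct timespec ts;
+    clock_gettime(CLOCK_MONOTONIC, &ts);
+    return (long long)ts.tv_sec * 1000000LL + ts.tv_nsec / 1000LL;
+}
+
+int main(void) {
+    const long long requested = 20000; /* request a 20ms sleep */
+    for (int i = 0; i < 10; i++) {
+        long long start = now_usec();
+        usleep((useconds_t)requested);
+        long long actual = now_usec() - start;
+        /* the overshoot beyond the requested sleep is the idle jitter */
+        printf("idle jitter: %lld usec\n", actual - requested);
+    }
+    return 0;
+}
+```
+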
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fidlejitter.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/ioping.plugin/Makefile.in b/collectors/ioping.plugin/Makefile.in
new file mode 100644
index 00000000..63953098
--- /dev/null
+++ b/collectors/ioping.plugin/Makefile.in
@@ -0,0 +1,641 @@
+# Makefile.in generated by automake 1.15.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2017 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+
+VPATH = @srcdir@
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = collectors/ioping.plugin
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(dist_plugins_SCRIPTS) \
+ $(dist_libconfig_DATA) $(dist_noinst_DATA) $(am__DIST_COMMON)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
+am__vpath_adj = case $$p in \
+ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
+ *) f=$$p;; \
+ esac;
+am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
+am__install_max = 40
+am__nobase_strip_setup = \
+ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
+am__nobase_strip = \
+ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
+am__nobase_list = $(am__nobase_strip_setup); \
+ for p in $$list; do echo "$$p $$p"; done | \
+ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
+ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
+ if (++n[$$2] == $(am__install_max)) \
+ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
+ END { for (dir in files) print dir, files[dir] }'
+am__base_list = \
+ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
+ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
+am__uninstall_files_from_dir = { \
+ test -z "$$files" \
+ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \
+ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \
+ $(am__cd) "$$dir" && rm -f $$files; }; \
+ }
+am__installdirs = "$(DESTDIR)$(pluginsdir)" \
+ "$(DESTDIR)$(libconfigdir)"
+SCRIPTS = $(dist_plugins_SCRIPTS)
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_libconfig_DATA) $(dist_noinst_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+am__DIST_COMMON = $(srcdir)/Makefile.in $(top_srcdir)/build/subst.inc
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CUPSCONFIG = @CUPSCONFIG@
+CXX = @CXX@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CXX_BINARY = @CXX_BINARY@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+JSON_CFLAGS = @JSON_CFLAGS@
+JSON_LIBS = @JSON_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
+LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
+LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
+LIBCURL_LIBS = @LIBCURL_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
+LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
+LIBSSL_LIBS = @LIBSSL_LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
+OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
+OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
+OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
+OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
+OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
+OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
+OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
+OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
+OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
+OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
+OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
+OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
+PROTOBUF_LIBS = @PROTOBUF_LIBS@
+PROTOC = @PROTOC@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
+XENLIGHT_LIBS = @XENLIGHT_LIBS@
+YAJL_CFLAGS = @YAJL_CFLAGS@
+YAJL_LIBS = @YAJL_LIBS@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+CLEANFILES = \
+ ioping.plugin \
+ $(NULL)
+
+SUFFIXES = .in
+dist_plugins_SCRIPTS = \
+ ioping.plugin \
+ $(NULL)
+
+dist_noinst_DATA = \
+ ioping.plugin.in \
+ README.md \
+ $(NULL)
+
+dist_libconfig_DATA = \
+ ioping.conf \
+ $(NULL)
+
+all: all-am
+
+.SUFFIXES:
+.SUFFIXES: .in
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(top_srcdir)/build/subst.inc $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/ioping.plugin/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu collectors/ioping.plugin/Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+$(top_srcdir)/build/subst.inc $(am__empty):
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+install-dist_pluginsSCRIPTS: $(dist_plugins_SCRIPTS)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(pluginsdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(pluginsdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \
+ done | \
+ sed -e 'p;s,.*/,,;n' \
+ -e 'h;s|.*|.|' \
+ -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \
+ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \
+ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \
+ if ($$2 == $$4) { files[d] = files[d] " " $$1; \
+ if (++n[d] == $(am__install_max)) { \
+ print "f", d, files[d]; n[d] = 0; files[d] = "" } } \
+ else { print "f", d "/" $$4, $$1 } } \
+ END { for (d in files) print "f", d, files[d] }' | \
+ while read type dir files; do \
+ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \
+ test -z "$$files" || { \
+ echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pluginsdir)$$dir'"; \
+ $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pluginsdir)$$dir" || exit $$?; \
+ } \
+ ; done
+
+uninstall-dist_pluginsSCRIPTS:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || exit 0; \
+ files=`for p in $$list; do echo "$$p"; done | \
+ sed -e 's,.*/,,;$(transform)'`; \
+ dir='$(DESTDIR)$(pluginsdir)'; $(am__uninstall_files_from_dir)
+install-dist_libconfigDATA: $(dist_libconfig_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_libconfig_DATA)'; test -n "$(libconfigdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(libconfigdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(libconfigdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(libconfigdir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(libconfigdir)" || exit $$?; \
+ done
+
+uninstall-dist_libconfigDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_libconfig_DATA)'; test -n "$(libconfigdir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(libconfigdir)'; $(am__uninstall_files_from_dir)
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(SCRIPTS) $(DATA)
+installdirs:
+ for dir in "$(DESTDIR)$(pluginsdir)" "$(DESTDIR)$(libconfigdir)"; do \
+ test -z "$$dir" || $(MKDIR_P) "$$dir"; \
+ done
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+ -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES)
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am: install-dist_libconfigDATA \
+ install-dist_pluginsSCRIPTS
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am: uninstall-dist_libconfigDATA \
+ uninstall-dist_pluginsSCRIPTS
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dist_libconfigDATA \
+ install-dist_pluginsSCRIPTS install-dvi install-dvi-am \
+ install-exec install-exec-am install-html install-html-am \
+ install-info install-info-am install-man install-pdf \
+ install-pdf-am install-ps install-ps-am install-strip \
+ installcheck installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags-am uninstall uninstall-am \
+ uninstall-dist_libconfigDATA uninstall-dist_pluginsSCRIPTS
+
+.PRECIOUS: Makefile
+
+.in:
+ if sed \
+ -e 's#[@]localstatedir_POST@#$(localstatedir)#g' \
+ -e 's#[@]sbindir_POST@#$(sbindir)#g' \
+ -e 's#[@]pluginsdir_POST@#$(pluginsdir)#g' \
+ -e 's#[@]configdir_POST@#$(configdir)#g' \
+ -e 's#[@]libconfigdir_POST@#$(libconfigdir)#g' \
+ -e 's#[@]cachedir_POST@#$(cachedir)#g' \
+ -e 's#[@]registrydir_POST@#$(registrydir)#g' \
+ -e 's#[@]varlibdir_POST@#$(varlibdir)#g' \
+ $< > $@.tmp; then \
+ mv "$@.tmp" "$@"; \
+ else \
+ rm -f "$@.tmp"; \
+ false; \
+ fi
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
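The `.in:` suffix rule near the end of the Makefile above is what turns `ioping.plugin.in` into the installable `ioping.plugin`: every `@..._POST@` placeholder is rewritten to the directory chosen at configure time, and the result is only moved into place if all substitutions succeed. A minimal sketch of the same mechanism in plain bash, with illustrative paths standing in for the configured `$(pluginsdir)` and `$(configdir)` values:

```sh
#!/usr/bin/env bash
# Sketch of the .in suffix rule; the directory values here are assumptions,
# the real rule takes them from configure ($(pluginsdir), $(configdir), ...).
pluginsdir="/usr/libexec/netdata/plugins.d"
configdir="/etc/netdata"

if sed \
    -e "s#[@]pluginsdir_POST@#${pluginsdir}#g" \
    -e "s#[@]configdir_POST@#${configdir}#g" \
    ioping.plugin.in > ioping.plugin.tmp; then
    mv ioping.plugin.tmp ioping.plugin   # replace the target only on success
else
    rm -f ioping.plugin.tmp              # never leave a half-substituted plugin behind
    exit 1
fi
```

The `[@]` in each pattern is a character class matching a literal `@`; it keeps the rule's own text from being treated as a substitutable placeholder when the Makefile itself is generated.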
diff --git a/collectors/ioping.plugin/README.md b/collectors/ioping.plugin/README.md
index 15fdf6ff..4b18ce49 100644
--- a/collectors/ioping.plugin/README.md
+++ b/collectors/ioping.plugin/README.md
@@ -3,14 +3,14 @@
The ioping plugin supports monitoring latency for any number of directories/files/devices,
by pinging them with `ioping`.
-A recent version of `ioping` is required (one that supports option ` -N `).
+A recent version of `ioping` is required (one that supports option `-N`).
The supplied plugin can install it, by running:
```sh
/usr/libexec/netdata/plugins.d/ioping.plugin install
```
-The `-e` option can be supplied to indicate where the netdata environment file is installed. The default path is `/etc/netdata/.environment`.
+The `-e` option can be supplied to indicate where the Netdata environment file is installed. The default path is `/etc/netdata/.environment`.
The above will download, build and install the right version as `/usr/libexec/netdata/plugins.d/ioping`.
@@ -24,7 +24,7 @@ ioping="/usr/libexec/netdata/plugins.d/ioping"
# set here the directory/file/device, you need to ping
destination="destination"
-# override the chart update frequency - the default is inherited from netdata
+# override the chart update frequency - the default is inherited from Netdata
update_every="1s"
# the request size in bytes to ping the destination
@@ -36,7 +36,7 @@ ioping_opts="-T 1000000 -R"
## alarms
-netdata will automatically attach a few alarms for each host.
+Netdata will automatically attach a few alarms for each host.
Check the [latest versions of the ioping alarms](../../health/health.d/ioping.conf)
## Multiple ioping Plugins With Different Settings
@@ -44,13 +44,12 @@ Check the [latest versions of the ioping alarms](../../health/health.d/ioping.co
You may need to run multiple ioping plugins with different settings or different end points.
For example, you may need to ping one destination once per 10 seconds, and another once per second.
-netdata allows you to add as many `ioping` plugins as you like.
+Netdata allows you to add as many `ioping` plugins as you like.
Follow this procedure:
**1. Create New ioping Configuration File**
-
```sh
# Step Into Configuration Directory
cd /etc/netdata
@@ -74,9 +73,9 @@ cd /usr/libexec/netdata/plugins.d
ln -s ioping.plugin ioping2.plugin
```
-That's it. netdata will detect the new plugin and start it.
+That's it. Netdata will detect the new plugin and start it.
You can name the new plugin any name you like.
Just make sure the plugin and the configuration file have the same name.
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fioping.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fioping.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
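Condensed, the multiple-plugin procedure from the README above comes down to a config copy and a symlink; a sketch with example names (the `ioping2` name and the destination path are placeholders):

```sh
# Sketch of the README procedure; "ioping2" and the destination are examples.
cd /etc/netdata
sudo cp ioping.conf ioping2.conf          # config must share the plugin's base name
sudo sh -c 'echo destination="/mnt/other" >> ioping2.conf'

cd /usr/libexec/netdata/plugins.d
sudo ln -s ioping.plugin ioping2.plugin   # Netdata detects and starts it by itself
```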
diff --git a/collectors/ioping.plugin/ioping.plugin b/collectors/ioping.plugin/ioping.plugin
new file mode 100644
index 00000000..648c2e37
--- /dev/null
+++ b/collectors/ioping.plugin/ioping.plugin
@@ -0,0 +1,212 @@
+#!/usr/bin/env bash
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2017 Costa Tsaousis <costa@tsaousis.gr>
+# GPL v3+
+#
+# This plugin requires a recent version of ioping.
+# You can compile it from source by running me with the option: install
+
+export PATH="${PATH}:/sbin:/usr/sbin:/usr/local/sbin"
+export LC_ALL=C
+
+usage="$(basename "$0") [install] [-h] [-e]
+
+where:
+ install install ioping binary
+ -e, --env path to environment file (defaults to '/etc/netdata/.environment')
+ -h show this help text"
+
+INSTALL=0
+ENVIRONMENT_FILE="/etc/netdata/.environment"
+
+while :; do
+ case "$1" in
+ -h | --help)
+ echo "$usage" >&2
+ exit 1
+ ;;
+ install)
+ INSTALL=1
+ shift
+ ;;
+ -e | --env)
+ ENVIRONMENT_FILE="$2"
+ shift 2
+ ;;
+ -*)
+ echo "$usage" >&2
+ exit 1
+ ;;
+ *) break ;;
+ esac
+done
+
+if [ "$INSTALL" == "1" ]
+ then
+ [ "${UID}" != 0 ] && echo >&2 "Please run me as root. This will install a single binary file: /usr/libexec/netdata/plugins.d/ioping." && exit 1
+
+ source "${ENVIRONMENT_FILE}" || exit 1
+
+ run() {
+ printf >&2 " > "
+ printf >&2 "%q " "${@}"
+ printf >&2 "\n"
+ "${@}" || exit 1
+ }
+
+ download() {
+ local git="$(which git 2>/dev/null || command -v git 2>/dev/null)"
+ [ ! -z "${git}" ] && run git clone "${1}" "${2}" && return 0
+
+ echo >&2 "Cannot find 'git' in this system." && exit 1
+ }
+
+ tmp=$(mktemp -d /tmp/netdata-ioping-XXXXXX)
+ [ ! -d "${NETDATA_PREFIX}/usr/libexec/netdata" ] && run mkdir -p "${NETDATA_PREFIX}/usr/libexec/netdata"
+
+ run cd "${tmp}"
+
+ if [ -d ioping-netdata ]
+ then
+ run rm -rf ioping-netdata || exit 1
+ fi
+
+ download 'https://github.com/netdata/ioping.git' 'ioping-netdata'
+ [ $? -ne 0 ] && exit 1
+ run cd ioping-netdata || exit 1
+
+ INSTALL_PATH="${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/ioping"
+
+ run make clean
+ run make
+ run mv ioping "${INSTALL_PATH}"
+ run chown root:"${NETDATA_GROUP}" "${INSTALL_PATH}"
+ run chmod 4750 "${INSTALL_PATH}"
+ echo >&2
+ echo >&2 "All done, you have a compatible ioping now at ${INSTALL_PATH}."
+ echo >&2
+
+ exit 0
+fi
+
+# -----------------------------------------------------------------------------
+
+PROGRAM_NAME="$(basename "${0}")"
+
+logdate() {
+ date "+%Y-%m-%d %H:%M:%S"
+}
+
+log() {
+ local status="${1}"
+ shift
+
+ echo >&2 "$(logdate): ${PROGRAM_NAME}: ${status}: ${*}"
+
+}
+
+warning() {
+ log WARNING "${@}"
+}
+
+error() {
+ log ERROR "${@}"
+}
+
+info() {
+ log INFO "${@}"
+}
+
+fatal() {
+ log FATAL "${@}"
+ echo "DISABLE"
+ exit 1
+}
+
+debug=0
+debug() {
+ [ $debug -eq 1 ] && log DEBUG "${@}"
+}
+
+# -----------------------------------------------------------------------------
+
+# store in ${plugin} the name we run under
+# this allows us to copy/link ioping.plugin under a different name
+# to have multiple ioping plugins running with different settings
+plugin="${PROGRAM_NAME/.plugin/}"
+
+
+# -----------------------------------------------------------------------------
+
+# the frequency to send info to netdata
+# passed by netdata as the first parameter
+update_every="${1-1}"
+
+# the netdata configuration directory
+# passed by netdata as an environment variable
+[ -z "${NETDATA_USER_CONFIG_DIR}" ] && NETDATA_USER_CONFIG_DIR="/etc/netdata"
+[ -z "${NETDATA_STOCK_CONFIG_DIR}" ] && NETDATA_STOCK_CONFIG_DIR="/usr/lib/netdata/conf.d"
+
+# the netdata directory for internal binaries
+[ -z "${NETDATA_PLUGINS_DIR}" ] && NETDATA_PLUGINS_DIR="/usr/libexec/netdata/plugins.d"
+
+# -----------------------------------------------------------------------------
+# configuration options
+# can be overridden in /etc/netdata/ioping.conf
+
+# the ioping binary to use
+# we need one that can output netdata-friendly info (supporting: -N)
+# if you have multiple versions, put the full path of the right one here
+ioping="${NETDATA_PLUGINS_DIR}/ioping"
+
+# the destination to ioping
+destination=""
+
+# the request size in bytes to ping the disk
+request_size="4k"
+
+# ioping options
+ioping_opts="-T 1000000"
+
+# -----------------------------------------------------------------------------
+# load the configuration files
+
+for CONFIG in "${NETDATA_STOCK_CONFIG_DIR}/${plugin}.conf" "${NETDATA_USER_CONFIG_DIR}/${plugin}.conf"
+do
+ if [ -f "${CONFIG}" ]
+ then
+ info "Loading config file '${CONFIG}'..."
+ source "${CONFIG}"
+ [ $? -ne 0 ] && error "Failed to load config file '${CONFIG}'."
+ else
+ warning "Cannot find file '${CONFIG}'."
+ fi
+done
+
+if [ -z "${destination}" ]
+then
+ fatal "destination is not configured - nothing to do."
+fi
+
+if [ ! -f "${ioping}" ]
+then
+ fatal "ioping command is not found. Please set its full path in '${NETDATA_USER_CONFIG_DIR}/${plugin}.conf'"
+fi
+
+if [ ! -x "${ioping}" ]
+then
+ fatal "ioping command '${ioping}' is not executable - cannot proceed."
+fi
+
+# the ioping options we will use
+options=( -N -i ${update_every} -s ${request_size} ${ioping_opts} ${destination} )
+
+# execute ioping
+info "starting ioping: ${ioping} ${options[*]}"
+exec "${ioping}" "${options[@]}"
+
+# if we cannot execute ioping, stop
+fatal "command '${ioping} ${options[*]}' failed to be executed (returned code $?)."
diff --git a/collectors/macos.plugin/Makefile.in b/collectors/macos.plugin/Makefile.in
new file mode 100644
index 00000000..c8f62c66
--- /dev/null
+++ b/collectors/macos.plugin/Makefile.in
@@ -0,0 +1,514 @@
+# Makefile.in generated by automake 1.15.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2017 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+VPATH = @srcdir@
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = collectors/macos.plugin
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(dist_noinst_DATA) \
+ $(am__DIST_COMMON)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_noinst_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+am__DIST_COMMON = $(srcdir)/Makefile.in
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CUPSCONFIG = @CUPSCONFIG@
+CXX = @CXX@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CXX_BINARY = @CXX_BINARY@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+JSON_CFLAGS = @JSON_CFLAGS@
+JSON_LIBS = @JSON_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
+LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
+LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
+LIBCURL_LIBS = @LIBCURL_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
+LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
+LIBSSL_LIBS = @LIBSSL_LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
+OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
+OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
+OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
+OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
+OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
+OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
+OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
+OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
+OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
+OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
+OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
+OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
+PROTOBUF_LIBS = @PROTOBUF_LIBS@
+PROTOC = @PROTOC@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
+XENLIGHT_LIBS = @XENLIGHT_LIBS@
+YAJL_CFLAGS = @YAJL_CFLAGS@
+YAJL_LIBS = @YAJL_LIBS@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
+
+all: all-am
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/macos.plugin/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu collectors/macos.plugin/Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(DATA)
+installdirs:
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags-am uninstall uninstall-am
+
+.PRECIOUS: Makefile
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/collectors/macos.plugin/README.md b/collectors/macos.plugin/README.md
index fcb4670e..d3fa9369 100644
--- a/collectors/macos.plugin/README.md
+++ b/collectors/macos.plugin/README.md
@@ -2,6 +2,6 @@
Collects resource usage and performance data on MacOS systems
-By default, Netdata will enable monitoring metrics for disks, memory, and network only when they are not zero. If they are constantly zero they are ignored. Metrics that will start having values, after netdata is started, will be detected and charts will be automatically added to the dashboard (a refresh of the dashboard is needed for them to appear though). Use `yes` instead of `auto` in plugin configuration sections to enable these charts permanently. You can also set the `enable zero metrics` option to `yes` in the `[global]` section which enables charts with zero metrics for all internal Netdata plugins.
+By default, Netdata will enable monitoring metrics for disks, memory, and network only when they are not zero. If they are constantly zero they are ignored. Metrics that will start having values, after Netdata is started, will be detected and charts will be automatically added to the dashboard (a refresh of the dashboard is needed for them to appear though). Use `yes` instead of `auto` in plugin configuration sections to enable these charts permanently. You can also set the `enable zero metrics` option to `yes` in the `[global]` section which enables charts with zero metrics for all internal Netdata plugins.
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fmacos.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fmacos.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
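The `auto`-versus-`yes` behaviour described in the macos.plugin README maps to plain `netdata.conf` edits; a hedged sketch of the global switch mentioned at the end of the paragraph (the append-via-heredoc approach is illustrative, editing the file directly works just as well):

```sh
# Sketch: turn on zero-valued charts for all internal plugins, per the README.
sudo tee -a /etc/netdata/netdata.conf >/dev/null <<'EOF'
[global]
    enable zero metrics = yes
EOF
```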
diff --git a/collectors/nfacct.plugin/Makefile.in b/collectors/nfacct.plugin/Makefile.in
new file mode 100644
index 00000000..a56f0605
--- /dev/null
+++ b/collectors/nfacct.plugin/Makefile.in
@@ -0,0 +1,514 @@
+# Makefile.in generated by automake 1.15.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2017 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+VPATH = @srcdir@
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = collectors/nfacct.plugin
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(dist_noinst_DATA) \
+ $(am__DIST_COMMON)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_noinst_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+am__DIST_COMMON = $(srcdir)/Makefile.in
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CUPSCONFIG = @CUPSCONFIG@
+CXX = @CXX@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CXX_BINARY = @CXX_BINARY@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+JSON_CFLAGS = @JSON_CFLAGS@
+JSON_LIBS = @JSON_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
+LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
+LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
+LIBCURL_LIBS = @LIBCURL_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
+LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
+LIBSSL_LIBS = @LIBSSL_LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
+OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
+OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
+OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
+OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
+OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
+OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
+OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
+OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
+OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
+OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
+OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
+OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
+PROTOBUF_LIBS = @PROTOBUF_LIBS@
+PROTOC = @PROTOC@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
+XENLIGHT_LIBS = @XENLIGHT_LIBS@
+YAJL_CFLAGS = @YAJL_CFLAGS@
+YAJL_LIBS = @YAJL_LIBS@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
+
+all: all-am
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/nfacct.plugin/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu collectors/nfacct.plugin/Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(DATA)
+installdirs:
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags-am uninstall uninstall-am
+
+.PRECIOUS: Makefile
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/collectors/nfacct.plugin/README.md b/collectors/nfacct.plugin/README.md
index b60fc6a4..64bb1f71 100644
--- a/collectors/nfacct.plugin/README.md
+++ b/collectors/nfacct.plugin/README.md
@@ -4,9 +4,9 @@
## Prerequisites
-1. install `libmnl-dev` and `libnetfilter_acct-dev` using the package manager of your system.
+1. Install `libmnl-dev` and `libnetfilter_acct-dev` using the package manager of your system.
-2. re-install netdata from source. The installer will detect that the required libraries are now available and will also build netdata.plugin.
+2. Re-install Netdata from source. The installer will detect that the required libraries are now available and will also build `nfacct.plugin`.
Keep in mind that NFACCT requires root access, so the plugin is setuid to root.
@@ -15,19 +15,21 @@ Keep in mind that NFACCT requires root access, so the plugin is setuid to root.
The plugin provides Netfilter connection tracker statistics and nfacct packet and bandwidth accounting:
Connection tracker:
-1. Connections.
-2. Changes.
-3. Expectations.
-4. Errors.
-5. Searches.
+
+1. Connections.
+2. Changes.
+3. Expectations.
+4. Errors.
+5. Searches.
Netfilter accounting:
-1. Packets.
-2. Bandwidth.
+
+1. Packets.
+2. Bandwidth.
## Configuration
-If you need to disable NFACCT for netdata, edit /etc/netdata/netdata.conf and set:
+If you need to disable NFACCT for Netdata, edit `/etc/netdata/netdata.conf` and set:
```
[plugins]
@@ -44,4 +46,4 @@ sudo /usr/libexec/netdata/plugins.d/nfacct.plugin 1 debug
You will get verbose output on what the plugin does.
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fnfacct.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fnfacct.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/nfacct.plugin/plugin_nfacct.c b/collectors/nfacct.plugin/plugin_nfacct.c
index feec34b8..21c2e4ae 100644
--- a/collectors/nfacct.plugin/plugin_nfacct.c
+++ b/collectors/nfacct.plugin/plugin_nfacct.c
@@ -715,15 +715,18 @@ static void nfacct_send_metrics() {
printf("CHART netfilter.nfacct_packets '' 'Netfilter Accounting Packets' 'packets/s'\n");
printf("DIMENSION %s '' incremental 1 %d\n", d->name, nfacct_root.update_every);
}
- printf(
- "BEGIN netfilter.nfacct_packets\n"
- "SET %s = %lld\n"
- "END\n"
+ }
+ }
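+    // emit one BEGIN/END frame for the packets chart, with one SET line per updated dimension inside it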
+ printf("BEGIN netfilter.nfacct_packets\n");
+ for(d = nfacct_root.nfacct_metrics; d ; d = d->next) {
+ if(likely(d->updated)) {
+ printf("SET %s = %lld\n"
, d->name
, (collected_number)d->pkts
);
}
}
+ printf("END\n");
// ----------------------------------------------------------------
@@ -743,15 +746,18 @@ static void nfacct_send_metrics() {
printf("CHART netfilter.nfacct_bytes '' 'Netfilter Accounting Bandwidth' 'kilobytes/s'\n");
printf("DIMENSION %s '' incremental 1 %d\n", d->name, 1000 * nfacct_root.update_every);
}
- printf(
- "BEGIN netfilter.nfacct_bytes\n"
- "SET %s = %lld\n"
- "END\n"
+ }
+ }
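+    // same framing for the bytes chart: a single BEGIN/END pair wrapping one SET per updated dimension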
+ printf("BEGIN netfilter.nfacct_bytes\n");
+ for(d = nfacct_root.nfacct_metrics; d ; d = d->next) {
+ if(likely(d->updated)) {
+ printf("SET %s = %lld\n"
, d->name
, (collected_number)d->bytes
);
}
}
+ printf("END\n");
}
#endif // HAVE_LIBNETFILTER_ACCT
diff --git a/collectors/node.d.plugin/Makefile.in b/collectors/node.d.plugin/Makefile.in
new file mode 100644
index 00000000..89067e63
--- /dev/null
+++ b/collectors/node.d.plugin/Makefile.in
@@ -0,0 +1,855 @@
+# Makefile.in generated by automake 1.15.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2017 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+
+VPATH = @srcdir@
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = collectors/node.d.plugin
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(dist_plugins_SCRIPTS) \
+ $(dist_libconfig_DATA) $(dist_node_DATA) \
+ $(dist_nodeconfig_DATA) $(dist_nodemodules_DATA) \
+ $(dist_nodemoduleslibber_DATA) $(dist_noinst_DATA) \
+ $(dist_usernodeconfig_DATA) $(am__DIST_COMMON)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
+am__vpath_adj = case $$p in \
+ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
+ *) f=$$p;; \
+ esac;
+am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
+am__install_max = 40
+am__nobase_strip_setup = \
+ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
+am__nobase_strip = \
+ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
+am__nobase_list = $(am__nobase_strip_setup); \
+ for p in $$list; do echo "$$p $$p"; done | \
+ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
+ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
+ if (++n[$$2] == $(am__install_max)) \
+ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
+ END { for (dir in files) print dir, files[dir] }'
+am__base_list = \
+ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
+ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
+am__uninstall_files_from_dir = { \
+ test -z "$$files" \
+ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \
+ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \
+ $(am__cd) "$$dir" && rm -f $$files; }; \
+ }
+am__installdirs = "$(DESTDIR)$(pluginsdir)" \
+ "$(DESTDIR)$(libconfigdir)" "$(DESTDIR)$(nodedir)" \
+ "$(DESTDIR)$(nodeconfigdir)" "$(DESTDIR)$(nodemodulesdir)" \
+ "$(DESTDIR)$(nodemoduleslibberdir)" \
+ "$(DESTDIR)$(usernodeconfigdir)"
+SCRIPTS = $(dist_plugins_SCRIPTS)
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_libconfig_DATA) $(dist_node_DATA) \
+ $(dist_nodeconfig_DATA) $(dist_nodemodules_DATA) \
+ $(dist_nodemoduleslibber_DATA) $(dist_noinst_DATA) \
+ $(dist_usernodeconfig_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+am__DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/fronius/Makefile.inc \
+ $(srcdir)/named/Makefile.inc $(srcdir)/sma_webbox/Makefile.inc \
+ $(srcdir)/snmp/Makefile.inc \
+ $(srcdir)/stiebeleltron/Makefile.inc \
+ $(top_srcdir)/build/subst.inc
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CUPSCONFIG = @CUPSCONFIG@
+CXX = @CXX@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CXX_BINARY = @CXX_BINARY@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+JSON_CFLAGS = @JSON_CFLAGS@
+JSON_LIBS = @JSON_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
+LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
+LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
+LIBCURL_LIBS = @LIBCURL_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
+LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
+LIBSSL_LIBS = @LIBSSL_LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
+OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
+OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
+OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
+OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
+OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
+OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
+OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
+OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
+OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
+OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
+OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
+OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
+PROTOBUF_LIBS = @PROTOBUF_LIBS@
+PROTOC = @PROTOC@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
+XENLIGHT_LIBS = @XENLIGHT_LIBS@
+YAJL_CFLAGS = @YAJL_CFLAGS@
+YAJL_LIBS = @YAJL_LIBS@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+CLEANFILES = \
+ node.d.plugin \
+ $(NULL)
+
+SUFFIXES = .in
+dist_libconfig_DATA = \
+ node.d.conf \
+ $(NULL)
+
+dist_plugins_SCRIPTS = \
+ node.d.plugin \
+ $(NULL)
+
+# dist_nodeconfig_DATA += fronius/fronius.conf
+
+# do not install these files, but include them in the distribution
+# dist_nodeconfig_DATA += named/named.conf
+
+# do not install these files, but include them in the distribution
+# dist_nodeconfig_DATA += sma_webbox/sma_webbox.conf
+
+# do not install these files, but include them in the distribution
+# dist_nodeconfig_DATA += snmp/snmp.conf
+
+# do not install these files, but include them in the distribution
+# dist_nodeconfig_DATA += stiebeleltron/stiebeleltron.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA = node.d.plugin.in README.md $(NULL) \
+ fronius/README.md fronius/Makefile.inc named/README.md \
+ named/Makefile.inc sma_webbox/README.md \
+ sma_webbox/Makefile.inc snmp/README.md snmp/Makefile.inc \
+ stiebeleltron/README.md stiebeleltron/Makefile.inc
+usernodeconfigdir = $(configdir)/node.d
+dist_usernodeconfig_DATA = \
+ .keep \
+ $(NULL)
+
+nodeconfigdir = $(libconfigdir)/node.d
+dist_nodeconfig_DATA = \
+ $(NULL)
+
+
+# install these files
+
+# install these files
+
+# install these files
+
+# install these files
+
+# install these files
+dist_node_DATA = $(NULL) fronius/fronius.node.js named/named.node.js \
+ sma_webbox/sma_webbox.node.js snmp/snmp.node.js \
+ stiebeleltron/stiebeleltron.node.js
+nodemodulesdir = $(nodedir)/node_modules
+dist_nodemodules_DATA = \
+ node_modules/netdata.js \
+ node_modules/extend.js \
+ node_modules/pixl-xml.js \
+ node_modules/net-snmp.js \
+ node_modules/asn1-ber.js \
+ $(NULL)
+
+nodemoduleslibberdir = $(nodedir)/node_modules/lib/ber
+dist_nodemoduleslibber_DATA = \
+ node_modules/lib/ber/index.js \
+ node_modules/lib/ber/errors.js \
+ node_modules/lib/ber/reader.js \
+ node_modules/lib/ber/types.js \
+ node_modules/lib/ber/writer.js \
+ $(NULL)
+
+all: all-am
+
+.SUFFIXES:
+.SUFFIXES: .in
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(top_srcdir)/build/subst.inc $(srcdir)/fronius/Makefile.inc $(srcdir)/named/Makefile.inc $(srcdir)/sma_webbox/Makefile.inc $(srcdir)/snmp/Makefile.inc $(srcdir)/stiebeleltron/Makefile.inc $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/node.d.plugin/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu collectors/node.d.plugin/Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+$(top_srcdir)/build/subst.inc $(srcdir)/fronius/Makefile.inc $(srcdir)/named/Makefile.inc $(srcdir)/sma_webbox/Makefile.inc $(srcdir)/snmp/Makefile.inc $(srcdir)/stiebeleltron/Makefile.inc $(am__empty):
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+install-dist_pluginsSCRIPTS: $(dist_plugins_SCRIPTS)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(pluginsdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(pluginsdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \
+ done | \
+ sed -e 'p;s,.*/,,;n' \
+ -e 'h;s|.*|.|' \
+ -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \
+ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \
+ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \
+ if ($$2 == $$4) { files[d] = files[d] " " $$1; \
+ if (++n[d] == $(am__install_max)) { \
+ print "f", d, files[d]; n[d] = 0; files[d] = "" } } \
+ else { print "f", d "/" $$4, $$1 } } \
+ END { for (d in files) print "f", d, files[d] }' | \
+ while read type dir files; do \
+ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \
+ test -z "$$files" || { \
+ echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pluginsdir)$$dir'"; \
+ $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pluginsdir)$$dir" || exit $$?; \
+ } \
+ ; done
+
+uninstall-dist_pluginsSCRIPTS:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || exit 0; \
+ files=`for p in $$list; do echo "$$p"; done | \
+ sed -e 's,.*/,,;$(transform)'`; \
+ dir='$(DESTDIR)$(pluginsdir)'; $(am__uninstall_files_from_dir)
+install-dist_libconfigDATA: $(dist_libconfig_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_libconfig_DATA)'; test -n "$(libconfigdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(libconfigdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(libconfigdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(libconfigdir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(libconfigdir)" || exit $$?; \
+ done
+
+uninstall-dist_libconfigDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_libconfig_DATA)'; test -n "$(libconfigdir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(libconfigdir)'; $(am__uninstall_files_from_dir)
+install-dist_nodeDATA: $(dist_node_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_node_DATA)'; test -n "$(nodedir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(nodedir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(nodedir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(nodedir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(nodedir)" || exit $$?; \
+ done
+
+uninstall-dist_nodeDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_node_DATA)'; test -n "$(nodedir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(nodedir)'; $(am__uninstall_files_from_dir)
+install-dist_nodeconfigDATA: $(dist_nodeconfig_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_nodeconfig_DATA)'; test -n "$(nodeconfigdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(nodeconfigdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(nodeconfigdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(nodeconfigdir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(nodeconfigdir)" || exit $$?; \
+ done
+
+uninstall-dist_nodeconfigDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_nodeconfig_DATA)'; test -n "$(nodeconfigdir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(nodeconfigdir)'; $(am__uninstall_files_from_dir)
+install-dist_nodemodulesDATA: $(dist_nodemodules_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_nodemodules_DATA)'; test -n "$(nodemodulesdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(nodemodulesdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(nodemodulesdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(nodemodulesdir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(nodemodulesdir)" || exit $$?; \
+ done
+
+uninstall-dist_nodemodulesDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_nodemodules_DATA)'; test -n "$(nodemodulesdir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(nodemodulesdir)'; $(am__uninstall_files_from_dir)
+install-dist_nodemoduleslibberDATA: $(dist_nodemoduleslibber_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_nodemoduleslibber_DATA)'; test -n "$(nodemoduleslibberdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(nodemoduleslibberdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(nodemoduleslibberdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(nodemoduleslibberdir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(nodemoduleslibberdir)" || exit $$?; \
+ done
+
+uninstall-dist_nodemoduleslibberDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_nodemoduleslibber_DATA)'; test -n "$(nodemoduleslibberdir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(nodemoduleslibberdir)'; $(am__uninstall_files_from_dir)
+install-dist_usernodeconfigDATA: $(dist_usernodeconfig_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_usernodeconfig_DATA)'; test -n "$(usernodeconfigdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(usernodeconfigdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(usernodeconfigdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(usernodeconfigdir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(usernodeconfigdir)" || exit $$?; \
+ done
+
+uninstall-dist_usernodeconfigDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_usernodeconfig_DATA)'; test -n "$(usernodeconfigdir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(usernodeconfigdir)'; $(am__uninstall_files_from_dir)
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(SCRIPTS) $(DATA)
+installdirs:
+ for dir in "$(DESTDIR)$(pluginsdir)" "$(DESTDIR)$(libconfigdir)" "$(DESTDIR)$(nodedir)" "$(DESTDIR)$(nodeconfigdir)" "$(DESTDIR)$(nodemodulesdir)" "$(DESTDIR)$(nodemoduleslibberdir)" "$(DESTDIR)$(usernodeconfigdir)"; do \
+ test -z "$$dir" || $(MKDIR_P) "$$dir"; \
+ done
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+ -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES)
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am: install-dist_libconfigDATA install-dist_nodeDATA \
+ install-dist_nodeconfigDATA install-dist_nodemodulesDATA \
+ install-dist_nodemoduleslibberDATA install-dist_pluginsSCRIPTS \
+ install-dist_usernodeconfigDATA
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am: uninstall-dist_libconfigDATA uninstall-dist_nodeDATA \
+ uninstall-dist_nodeconfigDATA uninstall-dist_nodemodulesDATA \
+ uninstall-dist_nodemoduleslibberDATA \
+ uninstall-dist_pluginsSCRIPTS \
+ uninstall-dist_usernodeconfigDATA
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dist_libconfigDATA \
+ install-dist_nodeDATA install-dist_nodeconfigDATA \
+ install-dist_nodemodulesDATA \
+ install-dist_nodemoduleslibberDATA install-dist_pluginsSCRIPTS \
+ install-dist_usernodeconfigDATA install-dvi install-dvi-am \
+ install-exec install-exec-am install-html install-html-am \
+ install-info install-info-am install-man install-pdf \
+ install-pdf-am install-ps install-ps-am install-strip \
+ installcheck installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags-am uninstall uninstall-am \
+ uninstall-dist_libconfigDATA uninstall-dist_nodeDATA \
+ uninstall-dist_nodeconfigDATA uninstall-dist_nodemodulesDATA \
+ uninstall-dist_nodemoduleslibberDATA \
+ uninstall-dist_pluginsSCRIPTS \
+ uninstall-dist_usernodeconfigDATA
+
+.PRECIOUS: Makefile
+
+.in:
+ if sed \
+ -e 's#[@]localstatedir_POST@#$(localstatedir)#g' \
+ -e 's#[@]sbindir_POST@#$(sbindir)#g' \
+ -e 's#[@]pluginsdir_POST@#$(pluginsdir)#g' \
+ -e 's#[@]configdir_POST@#$(configdir)#g' \
+ -e 's#[@]libconfigdir_POST@#$(libconfigdir)#g' \
+ -e 's#[@]cachedir_POST@#$(cachedir)#g' \
+ -e 's#[@]registrydir_POST@#$(registrydir)#g' \
+ -e 's#[@]varlibdir_POST@#$(varlibdir)#g' \
+ $< > $@.tmp; then \
+ mv "$@.tmp" "$@"; \
+ else \
+ rm -f "$@.tmp"; \
+ false; \
+ fi
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/collectors/node.d.plugin/README.md b/collectors/node.d.plugin/README.md
index 265b1ac5..6d7c1f87 100644
--- a/collectors/node.d.plugin/README.md
+++ b/collectors/node.d.plugin/README.md
@@ -1,13 +1,13 @@
# node.d.plugin
-`node.d.plugin` is a netdata external plugin. It is an **orchestrator** for data collection modules written in `node.js`.
+`node.d.plugin` is a Netdata external plugin. It is an **orchestrator** for data collection modules written in `node.js`.
-1. It runs as an independent process `ps fax` shows it
-2. It is started and stopped automatically by netdata
-3. It communicates with netdata via a unidirectional pipe (sending data to the netdata daemon)
-4. Supports any number of data collection **modules**
-5. Allows each **module** to have one or more data collection **jobs**
-6. Each **job** is collecting one or more metrics from a single data source
+1. It runs as an independent process (`ps fax` shows it)
+2. It is started and stopped automatically by Netdata
+3. It communicates with Netdata via a unidirectional pipe (sending data to the `netdata` daemon)
+4. Supports any number of data collection **modules**
+5. Allows each **module** to have one or more data collection **jobs**
+6. Each **job** collects one or more metrics from a single data source
## Pull Request Checklist for Node.js Plugins
@@ -15,20 +15,20 @@ This is a generic checklist for submitting a new Node.js plugin for Netdata. It
At minimum, to be buildable and testable, the PR needs to include:
-* The module itself, following proper naming conventions: `node.d/<module_dir>/<module_name>.node.js`
-* A README.md file for the plugin.
-* The configuration file for the module
-* A basic configuration for the plugin in the appropriate global config file: `conf.d/node.d.conf`, which is also in JSON format. If the module should be enabled by default, add a section for it in the `modules` dictionary.
-* A line for the plugin in the appropriate `Makefile.am` file: `node.d/Makefile.am` under `dist_node_DATA`.
-* A line for the plugin configuration file in `conf.d/Makefile.am`: under `dist_nodeconfig_DATA`
-* Optionally, chart information in `web/dashboard_info.js`. This generally involves specifying a name and icon for the section, and may include descriptions for the section or individual charts.
+- The module itself, following proper naming conventions: `node.d/<module_dir>/<module_name>.node.js`
+- A README.md file for the plugin.
+- The configuration file for the module.
+- A basic configuration for the plugin in the appropriate global config file: `conf.d/node.d.conf`, which is also in JSON format. If the module should be enabled by default, add a section for it in the `modules` dictionary (see the sketch after this list).
+- A line for the plugin in the appropriate `Makefile.am` file: `node.d/Makefile.am` under `dist_node_DATA`.
+- A line for the plugin configuration file in `conf.d/Makefile.am`, under `dist_nodeconfig_DATA`.
+- Optionally, chart information in `web/dashboard_info.js`. This generally involves specifying a name and icon for the section, and may include descriptions for the section or individual charts.
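+
+As an illustration, here is a minimal sketch of such a `modules` entry in `node.d.conf`, assuming a hypothetical module named `mymodule`:
+
+```json
+{
+    "modules": {
+        "mymodule": {
+            "enabled": true
+        }
+    }
+}
+```
+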
## Motivation
Node.js is perfect for asynchronous operations. It is very fast and quite common (much of the web runs on it).
Since data collection is not a CPU-intensive task, node.js is an ideal solution for it.
-`node.d.plugin` is a netdata plugin that provides an abstraction layer to allow easy and quick development of data
+`node.d.plugin` is a Netdata plugin that provides an abstraction layer to allow easy and quick development of data
collectors in node.js. It also manages all its data collectors (placed in `/usr/libexec/netdata/node.d`) using a single
instance of node, thus lowering the memory footprint of data collection.
@@ -40,7 +40,7 @@ To run `node.js` plugins you need to have `node` installed in your system.
In some older systems, the package named `node` is not node.js. It is a terminal emulation program called `ax25-node`.
In this case the node.js package may be referred to as `nodejs`. Once you install `nodejs`, we suggest linking
`/usr/bin/nodejs` to `/usr/bin/node`, so that typing `node` in your terminal opens node.js.
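+
+For example, on such a system (assuming the standard paths above), the link can be created with:
+
+```sh
+sudo ln -s /usr/bin/nodejs /usr/bin/node
+```
+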
-For more information check the **[[Installation]]** guide.
+For more information check the **\[[Installation]]** guide.
## configuring `node.d.plugin`
@@ -54,12 +54,11 @@ For more information check the **[[Installation]]** guide.
Unfortunately, `JSON` files do not accept comments. So, the best way to describe them is to have markdown text files
with instructions.
-`JSON` has a very strict formatting. If you get errors from netdata at `/var/log/netdata/error.log` that a certain
-configuration file cannot be loaded, we suggest to verify it at [http://jsonlint.com/](http://jsonlint.com/).
+`JSON` has very strict formatting. If you get errors from Netdata at `/var/log/netdata/error.log` that a certain
+configuration file cannot be loaded, we suggest verifying it at <http://jsonlint.com/>.
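+
+For example, a module configuration can also be checked locally with any JSON validator (this assumes Python 3 is available):
+
+```sh
+python3 -m json.tool /etc/netdata/node.d/mymodule.conf
+```
+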
The files in this directory provide usable examples for configuring each `node.d.plugin` module.
-
## debugging modules written for node.d.plugin
To test `node.d.plugin` modules, which are placed in `/usr/libexec/netdata/node.d`, you can run `node.d.plugin` by hand,
@@ -89,15 +88,15 @@ export NETDATA_USER_CONFIG_DIR="/path/to/etc/netdata"
Your data collection module should be split into 3 parts:
- - a function to fetch the data from its source. `node.d.plugin` already can fetch data from web sources,
- so you don't need to do anything about it for http.
+- a function to fetch the data from its source. `node.d.plugin` can already fetch data from web sources,
+ so you don't need to do anything about it for http.
- - a function to process the fetched/manipulate the data fetched. This function will make a number of calls
- to create charts and dimensions and pass the collected values to netdata.
- This is the only function you need to write for collecting http JSON data.
+- a function to process/manipulate the fetched data. This function will make a number of calls
+ to create charts and dimensions and pass the collected values to Netdata.
+ This is the only function you need to write for collecting http JSON data.
- - a `configure` and an `update` function, which take care of your module configuration and data refresh
- respectively. You can use the supplied ones.
+- a `configure` and an `update` function, which take care of your module configuration and data refresh
+ respectively. You can use the supplied ones.
Your module will automatically be able to process any number of servers, with different settings (even different
data collection frequencies). You will write just the work needed for one and `node.d.plugin` will do the rest.
@@ -108,7 +107,6 @@ For each server you are going to fetch data from, you will have to create a `ser
To provide a module called `mymodule`, you have to create the file `/usr/libexec/netdata/node.d/mymodule.node.js`, with this structure:
```js
-
// the processor is needed only
// if you need a custom processor
// other than http
@@ -127,7 +125,7 @@ netdata.processors.myprocessor = {
var mymodule = {
processResponse: function(service, data) {
- /* send information to the netdata server here */
+ /* send information to the Netdata server here */
},
@@ -221,14 +219,14 @@ The configuration file `/etc/netdata/node.d/mymodule.conf` may contain whatever
`data` may be `null` or whatever the processor specified in the `service` returned.
-The `service` object defines a set of functions to allow you send information to the netdata core about:
+The `service` object defines a set of functions to allow you to send information to the Netdata core about:
-1. Charts and dimension definitions
-2. Updated values, from the collected values
+1. Charts and dimension definitions
+2. Updated values, from the collected values
---
-*FIXME: document an operational node.d.plugin data collector - the best example is the
-[snmp collector](snmp/snmp.node.js)*
+_FIXME: document an operational node.d.plugin data collector - the best example is the
+[snmp collector](snmp/snmp.node.js)_
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fnode.d.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fnode.d.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/node.d.plugin/fronius/README.md b/collectors/node.d.plugin/fronius/README.md
index 72522637..f109f799 100644
--- a/collectors/node.d.plugin/fronius/README.md
+++ b/collectors/node.d.plugin/fronius/README.md
@@ -3,32 +3,37 @@
This module collects metrics from the configured solar power installation from Fronius Symo.
**Requirements**
- * Configuration file `fronius.conf` in the node.d netdata config dir (default: `/etc/netdata/node.d/fronius.conf`)
- * Fronius Symo with network access (http)
+
+- Configuration file `fronius.conf` in the node.d Netdata config dir (default: `/etc/netdata/node.d/fronius.conf`)
+- Fronius Symo with network access (http)
It produces per server:
-1. **Power**
- * Current power input from the grid (positive values), output to the grid (negative values), in W
- * Current power input from the solar panels, in W
- * Current power stored in the accumulator (if present), in W (in theory, untested)
+1. **Power**
+
+- Current power input from the grid (positive values), output to the grid (negative values), in W
+- Current power input from the solar panels, in W
+- Current power stored in the accumulator (if present), in W (in theory, untested)
+
+2. **Consumption**
+
+- Local consumption in W
+
+3. **Autonomy**
-2. **Consumption**
- * Local consumption in W
+- Relative autonomy in %. 100 % autonomy means that the solar panels are delivering more power than is needed by local consumption.
+- Relative self consumption in %. The lower, the better.
-3. **Autonomy**
- * Relative autonomy in %. 100 % autonomy means that the solar panels are delivering more power than it is needed by local consumption.
- * Relative self consumption in %. The lower the better
+4. **Energy**
-4. **Energy**
- * The energy produced during the current day, in kWh
- * The energy produced during the current year, in kWh
+- The energy produced during the current day, in kWh
+- The energy produced during the current year, in kWh
-5. **Inverter**
- * The current power output from the connected inverters, in W, one dimension per inverter. At least one is always present.
-
-
-### configuration
+5. **Inverter**
+
+- The current power output from the connected inverters, in W, one dimension per inverter. At least one is always present.
+
+## configuration
Sample:
@@ -55,14 +60,15 @@ If no configuration is given, the module will be disabled. Each `update_every` i
The plugin has been tested with a single inverter, namely Fronius Symo 8.2-3-M:
-- Datalogger version: 240.162630
-- Software version: 3.7.4-6
-- Hardware version: 2.4D
+- Datalogger version: 240.162630
+- Software version: 3.7.4-6
+- Hardware version: 2.4D
Other products and versions may work, but without any guarantees.
-Example netdata configuration for node.d/fronius.conf. Copy this section to fronius.conf and change name/ip.
+Example Netdata configuration for `node.d/fronius.conf`. Copy this section to `fronius.conf` and change the name and IP address.
The module supports any number of servers. Sometimes there is a lag when collecting every 3 seconds, so 5 seconds should be okay too. You can modify this per server.
+
```json
{
"enable_autodetect": false,
@@ -79,6 +85,7 @@ The module supports any number of servers. Sometimes there is a lag when collect
```
The output of `/solar_api/v1/GetPowerFlowRealtimeData.fcgi` looks like this:
+
```json
{
"Head" : {
@@ -119,4 +126,4 @@ The output of /solar_api/v1/GetPowerFlowRealtimeData.fcgi looks like this:
}
```
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fnode.d.plugin%2Ffronius%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fnode.d.plugin%2Ffronius%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/node.d.plugin/named/README.md b/collectors/node.d.plugin/named/README.md
index 480cbc19..288292ba 100644
--- a/collectors/node.d.plugin/named/README.md
+++ b/collectors/node.d.plugin/named/README.md
@@ -1,344 +1,342 @@
-# ISC Bind Statistics
-
-Using this netdata collector, you can monitor one or more ISC Bind servers.
-
-## Example netdata charts
-
-Depending on the number of views your bind has, you may get a large number of charts.
-Here this is with just one view:
-
-![image](https://cloud.githubusercontent.com/assets/2662304/12765473/879b8e04-ca07-11e5-817d-b0651996c42b.png)
-![image](https://cloud.githubusercontent.com/assets/2662304/12766538/12b272fa-ca0d-11e5-81e1-6a9f8ff488ff.png)
-
-## How it works
-
-The plugin will execute (from within node.js) the equivalent of:
-
-```sh
-curl "http://localhost:8888/json/v1/server"
-```
-
-Here is a sample of the output this command produces.
-
-```js
-{
- "json-stats-version":"1.0",
- "boot-time":"2016-01-31T08:20:48Z",
- "config-time":"2016-01-31T09:28:03Z",
- "current-time":"2016-02-02T22:22:20Z",
- "opcodes":{
- "QUERY":247816,
- "IQUERY":0,
- "STATUS":0,
- "RESERVED3":0,
- "NOTIFY":0,
- "UPDATE":3813,
- "RESERVED6":0,
- "RESERVED7":0,
- "RESERVED8":0,
- "RESERVED9":0,
- "RESERVED10":0,
- "RESERVED11":0,
- "RESERVED12":0,
- "RESERVED13":0,
- "RESERVED14":0,
- "RESERVED15":0
- },
- "qtypes":{
- "A":89519,
- "NS":863,
- "CNAME":1,
- "SOA":1,
- "PTR":116779,
- "MX":276,
- "TXT":198,
- "AAAA":39324,
- "SRV":850,
- "ANY":5
- },
- "nsstats":{
- "Requestv4":251630,
- "ReqEdns0":1255,
- "ReqTSIG":3813,
- "ReqTCP":57,
- "AuthQryRej":1455,
- "RecQryRej":122,
- "Response":245918,
- "TruncatedResp":44,
- "RespEDNS0":1255,
- "RespTSIG":3813,
- "QrySuccess":205159,
- "QryAuthAns":119495,
- "QryNoauthAns":120770,
- "QryNxrrset":32711,
- "QrySERVFAIL":262,
- "QryNXDOMAIN":2395,
- "QryRecursion":40885,
- "QryDuplicate":5712,
- "QryFailure":1577,
- "UpdateDone":2514,
- "UpdateFail":1299,
- "UpdateBadPrereq":1276,
- "QryUDP":246194,
- "QryTCP":45,
- "OtherOpt":101
- },
- "views":{
- "local":{
- "resolver":{
- "stats":{
- "Queryv4":74577,
- "Responsev4":67032,
- "NXDOMAIN":601,
- "SERVFAIL":5,
- "FORMERR":7,
- "EDNS0Fail":7,
- "Truncated":3071,
- "Lame":4,
- "Retry":11826,
- "QueryTimeout":1838,
- "GlueFetchv4":6864,
- "GlueFetchv4Fail":30,
- "QryRTT10":112,
- "QryRTT100":42900,
- "QryRTT500":23275,
- "QryRTT800":534,
- "QryRTT1600":97,
- "QryRTT1600+":20,
- "BucketSize":31,
- "REFUSED":13
- },
- "qtypes":{
- "A":64931,
- "NS":870,
- "CNAME":185,
- "PTR":5,
- "MX":49,
- "TXT":149,
- "AAAA":7972,
- "SRV":416
- },
- "cache":{
- "A":40356,
- "NS":8032,
- "CNAME":14477,
- "PTR":2,
- "MX":21,
- "TXT":32,
- "AAAA":3301,
- "SRV":94,
- "DS":237,
- "RRSIG":2301,
- "NSEC":126,
- "!A":52,
- "!NS":4,
- "!TXT":1,
- "!AAAA":3797,
- "!SRV":9,
- "NXDOMAIN":590
- },
- "cachestats":{
- "CacheHits":1085188,
- "CacheMisses":109,
- "QueryHits":464755,
- "QueryMisses":55624,
- "DeleteLRU":0,
- "DeleteTTL":42615,
- "CacheNodes":5188,
- "CacheBuckets":2079,
- "TreeMemTotal":2326026,
- "TreeMemInUse":1508075,
- "HeapMemMax":132096,
- "HeapMemTotal":393216,
- "HeapMemInUse":132096
- },
- "adb":{
- "nentries":1021,
- "entriescnt":3157,
- "nnames":1021,
- "namescnt":3022
- }
- }
- },
- "public":{
- "resolver":{
- "stats":{
- "BucketSize":31
- },
- "qtypes":{
- },
- "cache":{
- },
- "cachestats":{
- "CacheHits":0,
- "CacheMisses":0,
- "QueryHits":0,
- "QueryMisses":0,
- "DeleteLRU":0,
- "DeleteTTL":0,
- "CacheNodes":0,
- "CacheBuckets":64,
- "TreeMemTotal":287392,
- "TreeMemInUse":29608,
- "HeapMemMax":1024,
- "HeapMemTotal":262144,
- "HeapMemInUse":1024
- },
- "adb":{
- "nentries":1021,
- "nnames":1021
- }
- }
- },
- "_bind":{
- "resolver":{
- "stats":{
- "BucketSize":31
- },
- "qtypes":{
- },
- "cache":{
- },
- "cachestats":{
- "CacheHits":0,
- "CacheMisses":0,
- "QueryHits":0,
- "QueryMisses":0,
- "DeleteLRU":0,
- "DeleteTTL":0,
- "CacheNodes":0,
- "CacheBuckets":64,
- "TreeMemTotal":287392,
- "TreeMemInUse":29608,
- "HeapMemMax":1024,
- "HeapMemTotal":262144,
- "HeapMemInUse":1024
- },
- "adb":{
- "nentries":1021,
- "nnames":1021
- }
- }
- }
- }
-}
-```
-
-
-From this output it collects:
-
-- Global Received Requests by IP version (IPv4, IPv6)
-- Global Successful Queries
-- Current Recursive Clients
-- Global Queries by IP Protocol (TCP, UDP)
-- Global Queries Analysis
-- Global Received Updates
-- Global Query Failures
-- Global Query Failures Analysis
-- Other Global Server Statistics
-- Global Incoming Requests by OpCode
-- Global Incoming Requests by Query Type
-- Global Socket Statistics (will only work if the url is `http://127.0.0.1:8888/json/v1`, i.e. without `/server`, but keep in mind this produces a very long output and probably will account for 0.5% CPU overhead alone, per bind server added)
-- Per View Statistics (the following set will be added for each bind view):
- - View, Resolver Active Queries
- - View, Resolver Statistics
- - View, Resolver Round Trip Timings
- - View, Requests by Query Type
-
-## Configuration
-
-The collector (optionally) reads a configuration file named `/etc/netdata/node.d/named.conf`, with the following contents:
-
-```js
-{
- "enable_autodetect": true,
- "update_every": 5,
- "servers": [
- {
- "name": "bind1",
- "url": "http://127.0.0.1:8888/json/v1/server",
- "update_every": 1
- },
- {
- "name": "bind2",
- "url": "http://10.1.2.3:8888/json/v1/server",
- "update_every": 2
- }
- ]
-}
-```
-
-You can add any number of bind servers.
-
-If the configuration file is missing, or the key `enable_autodetect` is `true`, the collector will also attempt to fetch `http://localhost:8888/json/v1/server` which, if successful will be added too.
-
-### XML instead of JSON, from bind
-
-The collector can also accept bind URLs that return XML output. This might required if you cannot have bind 9.10+ with JSON but you have an version of bind that supports XML statistics v3. Check [this](https://www.isc.org/blogs/bind-9-10-statistics-troubleshooting-and-zone-configuration/) for versions supported.
-
-In such cases, use a URL like this:
-
-```sh
-curl "http://localhost:8888/xml/v3/server"
-```
-
-Only `xml` and `v3` has been tested.
-
-Keep in mind though, that XML parsing is done using javascript code, which requires a triple conversion:
-
-1. from XML to JSON using a javascript XML parser (**CPU intensive**),
-2. which is then transformed to emulate the output of the JSON output of bind (**CPU intensive** - and yes the converted JSON from XML is different to the native JSON - even bind produces different names for various attributes),
-3. which is then processed to generate the data for the charts (this will happen even if bind is producing JSON).
-
-In general, expect XML parsing to be 2 to 3 times more CPU intensive than JSON.
-
-**So, if you can use the JSON output of bind, prefer it over XML**. Keep also in mind that even bind will use more CPU when generating XML instead of JSON.
-
-The XML interface of bind is not autodetected.
-You will have to provide the config file `/etc/netdata/node.d/named.conf`, like this:
-
-```js
-{
- "enable_autodetect": false,
- "update_every": 1,
- "servers": [
- {
- "name": "local",
- "url": "http://localhost:8888/xml/v3/server",
- "update_every": 1
- }
- ]
-}
-```
-
-Of course, you can monitor more than one bind servers. Each one can be configured with either JSON or XML output.
-
-## Auto-detection
-
-Auto-detection is controlled by `enable_autodetect` in the config file. The default is enabled, so that if the collector can connect to `http://localhost:8888/json/v1/server` to receive bind statistics, it will automatically enable it.
-
-## Bind (named) configuration
-
-To use this plugin, you have to have bind v9.10+ properly compiled to provide statistics in `JSON` format.
-
-For more information on how to get your bind installation ready, please refer to the [bind statistics channel developer comments](http://jpmens.net/2013/03/18/json-in-bind-9-s-statistics-server/) and to [bind documentation](https://ftp.isc.org/isc/bind/9.10.3/doc/arm/Bv9ARM.ch06.html#statistics) or [bind Knowledge Base article AA-01123](https://kb.isc.org/article/AA-01123/0).
-
-Normally, you will need something like this in your `named.conf`:
-
-```
-statistics-channels {
- inet 127.0.0.1 port 8888 allow { 127.0.0.1; };
- inet ::1 port 8888 allow { ::1; };
-};
-```
-
-(use the IPv4 or IPv6 line depending on what you are using, you can also use both)
-
-Verify it works by running the following command (the collector is written in node.js and will query your bind server directly, but if this command works, the collector should be able to work too):
-
-```sh
-curl "http://localhost:8888/json/v1/server"
-```
-
-
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fnode.d.plugin%2Fnamed%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+# ISC Bind Statistics
+
+Using this Netdata collector, you can monitor one or more ISC Bind servers.
+
+## Example Netdata charts
+
+Depending on the number of views your bind has, you may get a large number of charts.
+Here this is with just one view:
+
+![image](https://cloud.githubusercontent.com/assets/2662304/12765473/879b8e04-ca07-11e5-817d-b0651996c42b.png)
+![image](https://cloud.githubusercontent.com/assets/2662304/12766538/12b272fa-ca0d-11e5-81e1-6a9f8ff488ff.png)
+
+## How it works
+
+The plugin will execute (from within node.js) the equivalent of:
+
+```sh
+curl "http://localhost:8888/json/v1/server"
+```
+
+Here is a sample of the output this command produces.
+
+```js
+{
+ "json-stats-version":"1.0",
+ "boot-time":"2016-01-31T08:20:48Z",
+ "config-time":"2016-01-31T09:28:03Z",
+ "current-time":"2016-02-02T22:22:20Z",
+ "opcodes":{
+ "QUERY":247816,
+ "IQUERY":0,
+ "STATUS":0,
+ "RESERVED3":0,
+ "NOTIFY":0,
+ "UPDATE":3813,
+ "RESERVED6":0,
+ "RESERVED7":0,
+ "RESERVED8":0,
+ "RESERVED9":0,
+ "RESERVED10":0,
+ "RESERVED11":0,
+ "RESERVED12":0,
+ "RESERVED13":0,
+ "RESERVED14":0,
+ "RESERVED15":0
+ },
+ "qtypes":{
+ "A":89519,
+ "NS":863,
+ "CNAME":1,
+ "SOA":1,
+ "PTR":116779,
+ "MX":276,
+ "TXT":198,
+ "AAAA":39324,
+ "SRV":850,
+ "ANY":5
+ },
+ "nsstats":{
+ "Requestv4":251630,
+ "ReqEdns0":1255,
+ "ReqTSIG":3813,
+ "ReqTCP":57,
+ "AuthQryRej":1455,
+ "RecQryRej":122,
+ "Response":245918,
+ "TruncatedResp":44,
+ "RespEDNS0":1255,
+ "RespTSIG":3813,
+ "QrySuccess":205159,
+ "QryAuthAns":119495,
+ "QryNoauthAns":120770,
+ "QryNxrrset":32711,
+ "QrySERVFAIL":262,
+ "QryNXDOMAIN":2395,
+ "QryRecursion":40885,
+ "QryDuplicate":5712,
+ "QryFailure":1577,
+ "UpdateDone":2514,
+ "UpdateFail":1299,
+ "UpdateBadPrereq":1276,
+ "QryUDP":246194,
+ "QryTCP":45,
+ "OtherOpt":101
+ },
+ "views":{
+ "local":{
+ "resolver":{
+ "stats":{
+ "Queryv4":74577,
+ "Responsev4":67032,
+ "NXDOMAIN":601,
+ "SERVFAIL":5,
+ "FORMERR":7,
+ "EDNS0Fail":7,
+ "Truncated":3071,
+ "Lame":4,
+ "Retry":11826,
+ "QueryTimeout":1838,
+ "GlueFetchv4":6864,
+ "GlueFetchv4Fail":30,
+ "QryRTT10":112,
+ "QryRTT100":42900,
+ "QryRTT500":23275,
+ "QryRTT800":534,
+ "QryRTT1600":97,
+ "QryRTT1600+":20,
+ "BucketSize":31,
+ "REFUSED":13
+ },
+ "qtypes":{
+ "A":64931,
+ "NS":870,
+ "CNAME":185,
+ "PTR":5,
+ "MX":49,
+ "TXT":149,
+ "AAAA":7972,
+ "SRV":416
+ },
+ "cache":{
+ "A":40356,
+ "NS":8032,
+ "CNAME":14477,
+ "PTR":2,
+ "MX":21,
+ "TXT":32,
+ "AAAA":3301,
+ "SRV":94,
+ "DS":237,
+ "RRSIG":2301,
+ "NSEC":126,
+ "!A":52,
+ "!NS":4,
+ "!TXT":1,
+ "!AAAA":3797,
+ "!SRV":9,
+ "NXDOMAIN":590
+ },
+ "cachestats":{
+ "CacheHits":1085188,
+ "CacheMisses":109,
+ "QueryHits":464755,
+ "QueryMisses":55624,
+ "DeleteLRU":0,
+ "DeleteTTL":42615,
+ "CacheNodes":5188,
+ "CacheBuckets":2079,
+ "TreeMemTotal":2326026,
+ "TreeMemInUse":1508075,
+ "HeapMemMax":132096,
+ "HeapMemTotal":393216,
+ "HeapMemInUse":132096
+ },
+ "adb":{
+ "nentries":1021,
+ "entriescnt":3157,
+ "nnames":1021,
+ "namescnt":3022
+ }
+ }
+ },
+ "public":{
+ "resolver":{
+ "stats":{
+ "BucketSize":31
+ },
+ "qtypes":{
+ },
+ "cache":{
+ },
+ "cachestats":{
+ "CacheHits":0,
+ "CacheMisses":0,
+ "QueryHits":0,
+ "QueryMisses":0,
+ "DeleteLRU":0,
+ "DeleteTTL":0,
+ "CacheNodes":0,
+ "CacheBuckets":64,
+ "TreeMemTotal":287392,
+ "TreeMemInUse":29608,
+ "HeapMemMax":1024,
+ "HeapMemTotal":262144,
+ "HeapMemInUse":1024
+ },
+ "adb":{
+ "nentries":1021,
+ "nnames":1021
+ }
+ }
+ },
+ "_bind":{
+ "resolver":{
+ "stats":{
+ "BucketSize":31
+ },
+ "qtypes":{
+ },
+ "cache":{
+ },
+ "cachestats":{
+ "CacheHits":0,
+ "CacheMisses":0,
+ "QueryHits":0,
+ "QueryMisses":0,
+ "DeleteLRU":0,
+ "DeleteTTL":0,
+ "CacheNodes":0,
+ "CacheBuckets":64,
+ "TreeMemTotal":287392,
+ "TreeMemInUse":29608,
+ "HeapMemMax":1024,
+ "HeapMemTotal":262144,
+ "HeapMemInUse":1024
+ },
+ "adb":{
+ "nentries":1021,
+ "nnames":1021
+ }
+ }
+ }
+ }
+}
+```
+
+From this output it collects:
+
+- Global Received Requests by IP version (IPv4, IPv6)
+- Global Successful Queries
+- Current Recursive Clients
+- Global Queries by IP Protocol (TCP, UDP)
+- Global Queries Analysis
+- Global Received Updates
+- Global Query Failures
+- Global Query Failures Analysis
+- Other Global Server Statistics
+- Global Incoming Requests by OpCode
+- Global Incoming Requests by Query Type
+- Global Socket Statistics (will only work if the URL is `http://127.0.0.1:8888/json/v1`, i.e. without `/server`, but keep in mind this produces a very long output and probably will account for 0.5% CPU overhead alone, per bind server added)
+- Per View Statistics (the following set will be added for each bind view):
+ - View, Resolver Active Queries
+ - View, Resolver Statistics
+ - View, Resolver Round Trip Timings
+ - View, Requests by Query Type
+
+## Configuration
+
+The collector (optionally) reads a configuration file named `/etc/netdata/node.d/named.conf`, with the following contents:
+
+```js
+{
+ "enable_autodetect": true,
+ "update_every": 5,
+ "servers": [
+ {
+ "name": "bind1",
+ "url": "http://127.0.0.1:8888/json/v1/server",
+ "update_every": 1
+ },
+ {
+ "name": "bind2",
+ "url": "http://10.1.2.3:8888/json/v1/server",
+ "update_every": 2
+ }
+ ]
+}
+```
+
+You can add any number of bind servers.
+
+If the configuration file is missing, or the key `enable_autodetect` is `true`, the collector will also attempt to fetch `http://localhost:8888/json/v1/server`; if that succeeds, the local server is added too.
+
+### XML instead of JSON, from bind
+
+The collector can also accept bind URLs that return XML output. This might be required if you cannot run bind 9.10+ with JSON support, but have a version of bind that supports XML statistics v3. Check [this](https://www.isc.org/blogs/bind-9-10-statistics-troubleshooting-and-zone-configuration/) for the supported versions.
+
+In such cases, use a URL like this:
+
+```sh
+curl "http://localhost:8888/xml/v3/server"
+```
+
+Only `xml` and `v3` have been tested.
+
+Keep in mind, though, that XML parsing is done in javascript code, which requires a triple conversion:
+
+1. from XML to JSON using a javascript XML parser (**CPU intensive**),
+2. which is then transformed to emulate bind's native JSON output (**CPU intensive** - and yes, the JSON converted from XML is different from the native JSON - even bind produces different names for various attributes),
+3. which is then processed to generate the data for the charts (this will happen even if bind is producing JSON).
+
+In general, expect XML parsing to be 2 to 3 times more CPU intensive than JSON.
+
+**So, if you can use the JSON output of bind, prefer it over XML**. Also keep in mind that bind itself uses more CPU when generating XML instead of JSON.
+
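+To see what this triple conversion means in practice, here is a rough sketch of the steps - this is **not** the collector's actual code, and the `xml2js` npm package named here is only an assumption for illustration:
+
+```js
+// minimal sketch of the triple conversion described above - NOT the
+// collector's real implementation; assumes `npm install xml2js` for step 1
+var xml2js = require('xml2js');
+
+function xmlToBindJson(xmlText, callback) {
+    // 1. XML -> generic javascript object (CPU intensive)
+    xml2js.parseString(xmlText, function(err, doc) {
+        if (err) return callback(err);
+
+        // 2. remap the parsed object to the shape of bind's native JSON
+        //    output (CPU intensive - the two formats use different names
+        //    for various attributes)
+        var emulated = { nsstats: {}, opcodes: {}, qtypes: {} };
+        // ... copy the values from `doc` into `emulated` here ...
+
+        // 3. `emulated` is then processed to generate the chart data,
+        //    exactly as native JSON output would be
+        callback(null, emulated);
+    });
+}
+```
+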
+The XML interface of bind is not autodetected.
+You will have to provide the config file `/etc/netdata/node.d/named.conf`, like this:
+
+```js
+{
+ "enable_autodetect": false,
+ "update_every": 1,
+ "servers": [
+ {
+ "name": "local",
+ "url": "http://localhost:8888/xml/v3/server",
+ "update_every": 1
+ }
+ ]
+}
+```
+
+Of course, you can monitor more than one bind server. Each one can be configured with either JSON or XML output.
+
+## Auto-detection
+
+Auto-detection is controlled by `enable_autodetect` in the config file. It is enabled by default, so if the collector can connect to `http://localhost:8888/json/v1/server` and receive bind statistics, monitoring of that server is enabled automatically.
+
+## Bind (named) configuration
+
+To use this plugin, you need bind v9.10+ compiled to provide statistics in `JSON` format.
+
+For more information on how to get your bind installation ready, please refer to the [bind statistics channel developer comments](http://jpmens.net/2013/03/18/json-in-bind-9-s-statistics-server/) and to [bind documentation](https://ftp.isc.org/isc/bind/9.10.3/doc/arm/Bv9ARM.ch06.html#statistics) or [bind Knowledge Base article AA-01123](https://kb.isc.org/article/AA-01123/0).
+
+Normally, you will need something like this in your `named.conf`:
+
+```
+statistics-channels {
+ inet 127.0.0.1 port 8888 allow { 127.0.0.1; };
+ inet ::1 port 8888 allow { ::1; };
+};
+```
+
+(use the IPv4 or IPv6 line depending on what you are using; you can also use both)
+
+Verify it works by running the following command (the collector is written in node.js and will query your bind server directly, but if this command works, the collector should work too):
+
+```sh
+curl "http://localhost:8888/json/v1/server"
+```
+
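+If you also have `jq` installed, you can quickly confirm that the output contains the sections the collector needs - for example the `nsstats` object shown in the sample output above:
+
+```sh
+# quick sanity check - assumes jq is installed
+curl -s "http://localhost:8888/json/v1/server" | jq '.nsstats'
+```
+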
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fnode.d.plugin%2Fnamed%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/node.d.plugin/node.d.plugin b/collectors/node.d.plugin/node.d.plugin
new file mode 100644
index 00000000..57369d0d
--- /dev/null
+++ b/collectors/node.d.plugin/node.d.plugin
@@ -0,0 +1,303 @@
+#!/usr/bin/env bash
+':' //; exec "$(command -v nodejs || command -v node || echo "ERROR node IS NOT AVAILABLE IN THIS SYSTEM")" "$0" "$@"
+
+// shebang hack from:
+// http://unix.stackexchange.com/questions/65235/universal-node-js-shebang
+
+// Initially this is run as a shell script.
+// Then, the second line, finds nodejs or node or js in the system path
+// and executes it with the shell parameters.
+
+// netdata
+// real-time performance and health monitoring, done right!
+// (C) 2017 Costa Tsaousis <costa@tsaousis.gr>
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+// --------------------------------------------------------------------------------------------------------------------
+
+'use strict';
+
+// --------------------------------------------------------------------------------------------------------------------
+// get NETDATA environment variables
+
+var NETDATA_PLUGINS_DIR = process.env.NETDATA_PLUGINS_DIR || __dirname;
+var NETDATA_USER_CONFIG_DIR = process.env.NETDATA_USER_CONFIG_DIR || '/etc/netdata';
+var NETDATA_STOCK_CONFIG_DIR = process.env.NETDATA_STOCK_CONFIG_DIR || '/usr/lib/netdata/conf.d';
+var NETDATA_UPDATE_EVERY = process.env.NETDATA_UPDATE_EVERY || 1;
+var NODE_D_DIR = NETDATA_PLUGINS_DIR + '/../node.d';
+
+// make sure the modules are found
+process.mainModule.paths.unshift(NODE_D_DIR + '/node_modules');
+process.mainModule.paths.unshift(NODE_D_DIR);
+
+
+// --------------------------------------------------------------------------------------------------------------------
+// load required modules
+
+var fs = require('fs');
+var url = require('url');
+var util = require('util');
+var http = require('http');
+var path = require('path');
+var extend = require('extend');
+var netdata = require('netdata');
+
+
+// --------------------------------------------------------------------------------------------------------------------
+// configuration
+
+function netdata_read_json_config_file(module_filename) {
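+    // look for the module's .conf in the user config dir first, then fall
+    // back to the stock config dir; return {} if neither can be read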
+ var f = path.basename(module_filename);
+
+ var ufilename, sfilename;
+
+ var m = f.match('.plugin' + '$');
+ if(m !== null) {
+ ufilename = netdata.options.paths.config + '/' + f.substring(0, m.index) + '.conf';
+ sfilename = netdata.options.paths.stock_config + '/' + f.substring(0, m.index) + '.conf';
+ }
+
+ m = f.match('.node.js' + '$');
+ if(m !== null) {
+ ufilename = netdata.options.paths.config + '/node.d/' + f.substring(0, m.index) + '.conf';
+ sfilename = netdata.options.paths.stock_config + '/node.d/' + f.substring(0, m.index) + '.conf';
+ }
+
+ try {
+ netdata.debug('loading module\'s ' + module_filename + ' user-config ' + ufilename);
+ return JSON.parse(fs.readFileSync(ufilename, 'utf8'));
+ }
+ catch(e) {
+ netdata.error('Cannot read user-configuration file ' + ufilename + ': ' + e.message + '.');
+ dumpError(e);
+ }
+
+ try {
+ netdata.debug('loading module\'s ' + module_filename + ' stock-config ' + sfilename);
+ return JSON.parse(fs.readFileSync(sfilename, 'utf8'));
+ }
+ catch(e) {
+ netdata.error('Cannot read stock-configuration file ' + sfilename + ': ' + e.message + ', using internal defaults.');
+ dumpError(e);
+ }
+
+ return {};
+}
+
+// internal defaults
+extend(true, netdata.options, {
+ filename: path.basename(__filename),
+
+ update_every: NETDATA_UPDATE_EVERY,
+
+ paths: {
+ plugins: NETDATA_PLUGINS_DIR,
+ config: NETDATA_USER_CONFIG_DIR,
+ stock_config: NETDATA_STOCK_CONFIG_DIR,
+ modules: []
+ },
+
+ modules_enable_autodetect: true,
+ modules_enable_all: true,
+ modules: {}
+});
+
+// load configuration file
+netdata.options_loaded = netdata_read_json_config_file(__filename);
+extend(true, netdata.options, netdata.options_loaded);
+
+if(!netdata.options.paths.plugins)
+ netdata.options.paths.plugins = NETDATA_PLUGINS_DIR;
+
+if(!netdata.options.paths.config)
+ netdata.options.paths.config = NETDATA_USER_CONFIG_DIR;
+
+if(!netdata.options.paths.stock_config)
+ netdata.options.paths.stock_config = NETDATA_STOCK_CONFIG_DIR;
+
+// console.error('merged netdata object:');
+// console.error(util.inspect(netdata, {depth: 10}));
+
+
+// apply module paths to node.js process
+function applyModulePaths() {
+ var len = netdata.options.paths.modules.length;
+ while(len--)
+ process.mainModule.paths.unshift(netdata.options.paths.modules[len]);
+}
+applyModulePaths();
+
+
+// --------------------------------------------------------------------------------------------------------------------
+// tracing
+
+function dumpError(err) {
+ if (typeof err === 'object') {
+ if (err.stack) {
+ netdata.debug(err.stack);
+ }
+ }
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// get command line arguments
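+// the plugin is invoked as: node.d.plugin [update_every] [module ...]
+// ('debug' may be given anywhere to enable verbose output), e.g.:
+//   /usr/libexec/netdata/plugins.d/node.d.plugin 1 snmp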
+{
+ var found_myself = false;
+ var found_number = false;
+ var found_modules = false;
+ process.argv.forEach(function (val, index, array) {
+ netdata.debug('PARAM: ' + val);
+
+ if(!found_myself) {
+ if(val === __filename)
+ found_myself = true;
+ }
+ else {
+ switch(val) {
+ case 'debug':
+ netdata.options.DEBUG = true;
+ netdata.debug('DEBUG enabled');
+ break;
+
+ default:
+ if(found_number === true) {
+ if(found_modules === false) {
+ for(var i in netdata.options.modules)
+ netdata.options.modules[i].enabled = false;
+ }
+
+ if(typeof netdata.options.modules[val] === 'undefined')
+ netdata.options.modules[val] = {};
+
+ netdata.options.modules[val].enabled = true;
+ netdata.options.modules_enable_all = false;
+ netdata.debug('enabled module ' + val);
+ }
+ else {
+ try {
+ var x = parseInt(val);
+ if(x > 0) {
+ netdata.options.update_every = x;
+ if(netdata.options.update_every < NETDATA_UPDATE_EVERY) {
+ netdata.options.update_every = NETDATA_UPDATE_EVERY;
+ netdata.debug('Update frequency ' + x + 's is too low');
+ }
+
+ found_number = true;
+ netdata.debug('Update frequency set to ' + netdata.options.update_every + ' seconds');
+ }
+ else netdata.error('Ignoring parameter: ' + val);
+ }
+ catch(e) {
+ netdata.error('Cannot get value of parameter: ' + val);
+ dumpError(e);
+ }
+ }
+ break;
+ }
+ }
+ });
+}
+
+if(netdata.options.update_every < 1) {
+ netdata.debug('Adjusting update frequency to 1 second');
+ netdata.options.update_every = 1;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// find modules
+
+function findModules() {
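+    // scan NODE_D_DIR for *.node.js files, load every enabled module,
+    // run its auto-detection and return the number of modules loaded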
+ var found = 0;
+
+ var files = fs.readdirSync(NODE_D_DIR);
+ var len = files.length;
+ while(len--) {
+ var m = files[len].match('.node.js' + '$');
+ if(m !== null) {
+ var n = files[len].substring(0, m.index);
+
+ if(typeof(netdata.options.modules[n]) === 'undefined')
+ netdata.options.modules[n] = { name: n, enabled: netdata.options.modules_enable_all };
+
+ if(netdata.options.modules[n].enabled === true) {
+ netdata.options.modules[n].name = n;
+ netdata.options.modules[n].filename = NODE_D_DIR + '/' + files[len];
+ netdata.options.modules[n].loaded = false;
+
+ // load the module
+ try {
+ netdata.debug('loading module ' + netdata.options.modules[n].filename);
+ netdata.options.modules[n].module = require(netdata.options.modules[n].filename);
+ netdata.options.modules[n].module.name = n;
+ netdata.debug('loaded module ' + netdata.options.modules[n].name + ' from ' + netdata.options.modules[n].filename);
+ }
+ catch(e) {
+ netdata.options.modules[n].enabled = false;
+ netdata.error('Cannot load module: ' + netdata.options.modules[n].filename + ' exception: ' + e);
+ dumpError(e);
+ continue;
+ }
+
+ // load its configuration
+ var c = {
+ enable_autodetect: netdata.options.modules_enable_autodetect,
+ update_every: netdata.options.update_every
+ };
+
+ var c2 = netdata_read_json_config_file(files[len]);
+ extend(true, c, c2);
+
+ // call module auto-detection / configuration
+ try {
+ netdata.modules_configuring++;
+ netdata.debug('Configuring module ' + netdata.options.modules[n].name);
+ var serv = netdata.configure(netdata.options.modules[n].module, c, function() {
+ netdata.debug('Configured module ' + netdata.options.modules[n].name);
+ netdata.modules_configuring--;
+ });
+
+ netdata.debug('Configuring module ' + netdata.options.modules[n].name + ' reports ' + serv + ' eligible services.');
+ }
+ catch(e) {
+ netdata.modules_configuring--;
+ netdata.options.modules[n].enabled = false;
+ netdata.error('Failed module auto-detection: ' + netdata.options.modules[n].name + ' exception: ' + e + ', disabling module.');
+ dumpError(e);
+ continue;
+ }
+
+ netdata.options.modules[n].loaded = true;
+ found++;
+ }
+ }
+ }
+
+ // netdata.debug(netdata.options.modules);
+ return found;
+}
+
+if(findModules() === 0) {
+ netdata.error('Cannot load any .node.js module from: ' + NODE_D_DIR);
+ netdata.disableNodePlugin();
+ process.exit(1);
+}
+
+
+// --------------------------------------------------------------------------------------------------------------------
+// start
+
+function start_when_configuring_ends() {
+ if(netdata.modules_configuring > 0) {
+ netdata.debug('Waiting modules configuration, still running ' + netdata.modules_configuring);
+ setTimeout(start_when_configuring_ends, 500);
+ return;
+ }
+
+ netdata.modules_configuring = 0;
+ netdata.start();
+}
+start_when_configuring_ends();
+
+//netdata.debug('netdata object:')
+//netdata.debug(netdata);
diff --git a/collectors/node.d.plugin/sma_webbox/README.md b/collectors/node.d.plugin/sma_webbox/README.md
index cff7645d..29697562 100644
--- a/collectors/node.d.plugin/sma_webbox/README.md
+++ b/collectors/node.d.plugin/sma_webbox/README.md
@@ -1,29 +1,28 @@
-
-# SMA Sunny Webbox
-
-[SMA Sunny Webbox](http://files.sma.de/dl/4253/WEBBOX-DUS131916W.pdf)
-
-Example netdata configuration for node.d/sma_webbox.conf
-
-The module supports any number of name servers, like this:
-
-```json
-{
- "enable_autodetect": false,
- "update_every": 5,
- "servers": [
- {
- "name": "plant1",
- "hostname": "10.0.1.1",
- "update_every": 10
- },
- {
- "name": "plant2",
- "hostname": "10.0.2.1",
- "update_every": 15
- }
- ]
-}
-```
-
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fnode.d.plugin%2Fsma_webbox%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+# SMA Sunny Webbox
+
+[SMA Sunny Webbox](http://files.sma.de/dl/4253/WEBBOX-DUS131916W.pdf)
+
+Example Netdata configuration for node.d/sma_webbox.conf
+
+The module supports any number of servers, like this:
+
+```json
+{
+ "enable_autodetect": false,
+ "update_every": 5,
+ "servers": [
+ {
+ "name": "plant1",
+ "hostname": "10.0.1.1",
+ "update_every": 10
+ },
+ {
+ "name": "plant2",
+ "hostname": "10.0.2.1",
+ "update_every": 15
+ }
+ ]
+}
+```
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fnode.d.plugin%2Fsma_webbox%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/node.d.plugin/snmp/README.md b/collectors/node.d.plugin/snmp/README.md
index 832108b9..8fd66edb 100644
--- a/collectors/node.d.plugin/snmp/README.md
+++ b/collectors/node.d.plugin/snmp/README.md
@@ -1,14 +1,14 @@
# SNMP Data Collector
-Using this collector, netdata can collect data from any SNMP device.
+Using this collector, Netdata can collect data from any SNMP device.
This collector supports:
-- any number of SNMP devices
-- each SNMP device can be used to collect data for any number of charts
-- each chart may have any number of dimensions
-- each SNMP device may have a different update frequency
-- each SNMP device will accept one or more batches to report values (you can set `max_request_size` per SNMP server, to control the size of batches).
+- any number of SNMP devices
+- each SNMP device can be used to collect data for any number of charts
+- each chart may have any number of dimensions
+- each SNMP device may have a different update frequency
+- each SNMP device will accept one or more batches to report values (you can set `max_request_size` per SNMP server, to control the size of batches).
## Configuration
@@ -16,10 +16,10 @@ You will need to create the file `/etc/netdata/node.d/snmp.conf` with data like
In this example:
- - the SNMP device is `10.11.12.8`.
- - the SNMP community is `public`.
- - we will update the values every 10 seconds (`update_every: 10` under the server `10.11.12.8`).
- - we define 2 charts `snmp_switch.bandwidth_port1` and `snmp_switch.bandwidth_port2`, each having 2 dimensions: `in` and `out`.
+- the SNMP device is `10.11.12.8`.
+- the SNMP community is `public`.
+- we will update the values every 10 seconds (`update_every: 10` under the server `10.11.12.8`).
+- we define 2 charts `snmp_switch.bandwidth_port1` and `snmp_switch.bandwidth_port2`, each having 2 dimensions: `in` and `out`.
```json
{
@@ -88,7 +88,7 @@ In this example:
`update_every` is the update frequency for each server, in seconds.
-`max_request_size` limits the maximum number of OIDs that will be requested in a single call. The default is 50. Lower this number of you get `TooBig` errors in netdata error.log.
+`max_request_size` limits the maximum number of OIDs that will be requested in a single call. The default is 50. Lower this number if you get `TooBig` errors in Netdata's `error.log`.
`family` sets the name of the submenu of the dashboard each chart will appear under.
@@ -100,7 +100,6 @@ The SNMP plugin supports Counter64 metrics with the only limitation that the `of
<br>
If you need to define many charts using incremental OIDs, you can use something like this:
-
```json
{
"enable_autodetect": false,
@@ -146,19 +145,18 @@ This is like the previous, but the option `multiply_range` given, will multiply
Each of the 24 new charts will have its id (1-24) appended at:
-1. its chart unique id, i.e. `snmp_switch.bandwidth_port1` to `snmp_switch.bandwidth_port24`
-2. its `title`, i.e. `Switch Bandwidth for port 1` to `Switch Bandwidth for port 24`
-3. its `oid` (for all dimensions), i.e. dimension `in` will be `1.3.6.1.2.1.2.2.1.10.1` to `1.3.6.1.2.1.2.2.1.10.24`
-3. its priority (which will be incremented for each chart so that the charts will appear on the dashboard in this order)
-
+1. its chart unique id, i.e. `snmp_switch.bandwidth_port1` to `snmp_switch.bandwidth_port24`
+2. its `title`, i.e. `Switch Bandwidth for port 1` to `Switch Bandwidth for port 24`
+3. its `oid` (for all dimensions), i.e. dimension `in` will be `1.3.6.1.2.1.2.2.1.10.1` to `1.3.6.1.2.1.2.2.1.10.24`
+4. its priority (which will be incremented for each chart so that the charts will appear on the dashboard in this order)
The `options` given for each server, are:
- - `timeout`, the time to wait for the SNMP device to respond. The default is 5000 ms.
- - `version`, the SNMP version to use. `0` is Version 1, `1` is Version 2c. The default is Version 1 (`0`).
- - `transport`, the default is `udp4`.
- - `port`, the port of the SNMP device to connect to. The default is `161`.
- - `retries`, the number of attempts to make to fetch the data. The default is `1`.
+- `timeout`, the time to wait for the SNMP device to respond. The default is 5000 ms.
+- `version`, the SNMP version to use. `0` is Version 1, `1` is Version 2c. The default is Version 1 (`0`).
+- `transport`, the default is `udp4`.
+- `port`, the port of the SNMP device to connect to. The default is `161`.
+- `retries`, the number of attempts to make to fetch the data. The default is `1`.
## Retrieving names from snmp
@@ -168,7 +166,6 @@ You can set a dimension name to a value retrieved from SNMP, by adding `oidname`
Both of the above will participate in `multiply_range`.
-
## Testing the configuration
To test it, you can run:
@@ -177,9 +174,9 @@ To test it, you can run:
/usr/libexec/netdata/plugins.d/node.d.plugin 1 snmp
```
-The above will run it on your console and you will be able to see what netdata sees, but also errors. You can get a very detailed output by appending `debug` to the command line.
+The above runs the collector on your console, letting you see what Netdata sees, as well as any errors. You can get very detailed output by appending `debug` to the command line.
-If it works, restart netdata to activate the snmp collector and refresh the dashboard (if your SNMP device responds with a delay, you may need to refresh the dashboard in a few seconds).
+If it works, restart Netdata to activate the snmp collector and refresh the dashboard (if your SNMP device responds with a delay, you may need to refresh the dashboard in a few seconds).
## Data collection speed
@@ -195,11 +192,11 @@ Use `snmpwalk`, like this:
snmpwalk -t 20 -v 1 -O fn -c public 10.11.12.8
```
-- `-t 20` is the timeout in seconds
-- `-v 1` is the SNMP version
-- `-O fn` will display full OIDs in numeric format (you may want to run it also without this option to see human readable output of OIDs)
-- `-c public` is the SNMP community
-- `10.11.12.8` is the SNMP device
+- `-t 20` is the timeout in seconds
+- `-v 1` is the SNMP version
+- `-O fn` will display full OIDs in numeric format (you may also want to run it without this option to see human-readable OIDs)
+- `-c public` is the SNMP community
+- `10.11.12.8` is the SNMP device
Keep in mind that `snmpwalk` outputs the OIDs with a dot in front of them. You should remove this dot when adding OIDs to the configuration file of this collector.
@@ -207,10 +204,10 @@ Keep in mind that `snmpwalk` outputs the OIDs with a dot in front them. You shou
This is what I use for my Linksys SRW2024P. It creates:
-1. A chart for power consumption (it is a PoE switch)
-2. Two charts for packets received (total packets received and packets received with errors)
-3. One chart for packets output
-4. 24 charts, one for each port of the switch. It also appends the port names, as defined at the switch, to the chart titles.
+1. A chart for power consumption (it is a PoE switch)
+2. Two charts for packets received (total packets received and packets received with errors)
+3. One chart for packets output
+4. 24 charts, one for each port of the switch. It also appends the port names, as defined at the switch, to the chart titles.
This switch also reports various other metrics, like snmp, packets per port, etc. Unfortunately it does not report CPU utilization or backplane utilization.
@@ -364,4 +361,4 @@ This switch has a very slow SNMP processors. To respond, it needs about 8 second
}
```
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fnode.d.plugin%2Fsnmp%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fnode.d.plugin%2Fsnmp%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/node.d.plugin/stiebeleltron/README.md b/collectors/node.d.plugin/stiebeleltron/README.md
index 4aa5a43e..80adc86b 100644
--- a/collectors/node.d.plugin/stiebeleltron/README.md
+++ b/collectors/node.d.plugin/stiebeleltron/README.md
@@ -3,48 +3,56 @@
This module collects metrics from the configured heat pump and hot water installation from Stiebel Eltron ISG web.
**Requirements**
- * Configuration file `stiebeleltron.conf` in the node.d netdata config dir (default: `/etc/netdata/node.d/stiebeleltron.conf`)
- * Stiebel Eltron ISG web with network access (http), without password login
+
+- Configuration file `stiebeleltron.conf` in the node.d Netdata config dir (default: `/etc/netdata/node.d/stiebeleltron.conf`)
+- Stiebel Eltron ISG web with network access (http), without password login
The charts are configurable, however, the provided default configuration collects the following:
-1. **General**
- * Outside temperature in C
- * Condenser temperature in C
- * Heating circuit pressure in bar
- * Flow rate in l/min
- * Output of water and heat pumps in %
+1. **General**
+
+ - Outside temperature in C
+ - Condenser temperature in C
+ - Heating circuit pressure in bar
+ - Flow rate in l/min
+ - Output of water and heat pumps in %
+
+2. **Heating**
+
+ - Heat circuit 1 temperature in C (set/actual)
+ - Heat circuit 2 temperature in C (set/actual)
+ - Flow temperature in C (set/actual)
+ - Buffer temperature in C (set/actual)
+ - Pre-flow temperature in C
+
+3. **Hot Water**
+
+ - Hot water temperature in C (set/actual)
+
+4. **Room Temperature**
-2. **Heating**
- * Heat circuit 1 temperature in C (set/actual)
- * Heat circuit 2 temperature in C (set/actual)
- * Flow temperature in C (set/actual)
- * Buffer temperature in C (set/actual)
- * Pre-flow temperature in C
+ - Heat circuit 1 room temperature in C (set/actual)
+ - Heat circuit 2 room temperature in C (set/actual)
-3. **Hot Water**
- * Hot water temperature in C (set/actual)
+5. **Electric Reheating**
-4. **Room Temperature**
- * Heat circuit 1 room temperature in C (set/actual)
- * Heat circuit 2 room temperature in C (set/actual)
+ - Dual Mode Reheating temperature in C (hot water/heating)
-5. **Eletric Reheating**
- * Dual Mode Reheating temperature in C (hot water/heating)
+6. **Process Data**
-6. **Process Data**
- * Remaining compressor rest time in s
+ - Remaining compressor rest time in s
-7. **Runtime**
- * Compressor runtime hours (hot water/heating)
- * Reheating runtime hours (reheating 1/reheating 2)
+7. **Runtime**
-8. **Energy**
- * Compressor today in kWh (hot water/heating)
- * Compressor Total in kWh (hot water/heating)
-
-
-### configuration
+ - Compressor runtime hours (hot water/heating)
+ - Reheating runtime hours (reheating 1/reheating 2)
+
+8. **Energy**
+
+ - Compressor today in kWh (hot water/heating)
+ - Compressor Total in kWh (hot water/heating)
+
+## Configuration
If no configuration is given, the module will be disabled. Each `update_every` is optional, the default is `10`.
@@ -57,15 +65,17 @@ Original author: BrainDoctor (github)
The module supports any metrics that are parseable with RegEx. There is no API that gives direct access to the values (AFAIK), so the "workaround" is to parse the HTML output of the ISG.
### Testing
+
This plugin has been tested within the following environment:
- * ISG version: 8.5.6
- * MFG version: 12
- * Controller version: 9
- * July (summer time, not much activity)
- * Interface language: English
- * login- and password-less ISG web access (without HTTPS it's useless anyway)
- * Heatpump model: WPL 25 I-2
- * Hot water boiler model: 820 WT 1
+
+- ISG version: 8.5.6
+- MFG version: 12
+- Controller version: 9
+- July (summer time, not much activity)
+- Interface language: English
+- login- and password-less ISG web access (without HTTPS it's useless anyway)
+- Heatpump model: WPL 25 I-2
+- Hot water boiler model: 820 WT 1
So, if the language is set to english, copy the following configuration into `/etc/netdata/node.d/stiebeleltron.conf` and change the `url`s.
@@ -73,13 +83,15 @@ In my case, the ISG is relatively slow with responding (at least 1s, but also up
### How to update the config
-* The dimensions support variable digits, the default is `1`. Most of the values printed by ISG are using 1 digit, some use 2.
-* The dimensions also support the `multiplier` and `divisor` attributes, however the divisor gets overridden by `digits`, if specified. Default is `1`.
-* The test string for the regex is always the whole HTML output from the url. For each parameter you need to have a regular expression that extracts the value from the HTML source in the first capture group.
- Recommended: [regexr.com](https://regexr.com/) for testing and matching, [freeformatter.com](https://www.freeformatter.com/json-escape.html) for escaping the newly created regex for the JSON config.
+- The dimensions support variable digits, the default is `1`. Most of the values printed by ISG are using 1 digit, some use 2.
+- The dimensions also support the `multiplier` and `divisor` attributes, however the divisor gets overridden by `digits`, if specified. Default is `1`.
+- The test string for the regex is always the whole HTML output from the url. For each parameter you need to have a regular expression that extracts the value from the HTML source in the first capture group.
+ Recommended: [regexr.com](https://regexr.com/) for testing and matching, [freeformatter.com](https://www.freeformatter.com/json-escape.html) for escaping the newly created regex for the JSON config.
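+
+For illustration only, a hypothetical parameter entry might look like this (the key names and the regex here are assumptions based on the description above; your ISG's HTML will differ):
+
+```json
+{
+  "name": "outsidetemp",
+  "digits": 1,
+  "regex": "Outside temperature<\\/td>\\s*<td[^>]*>\\s*(-?\\d+[.,]\\d+)"
+}
+```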
The charts are being generated using the configuration below. So if your installation is in another language or has other metrics, just adapt the structure or regexes.
+
### Configuration template
+
```json
{
"enable_autodetect": false,
@@ -504,4 +516,4 @@ The charts are being generated using the configuration below. So if your install
}
```
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fnode.d.plugin%2Fstiebeleltron%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fnode.d.plugin%2Fstiebeleltron%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/perf.plugin/Makefile.in b/collectors/perf.plugin/Makefile.in
new file mode 100644
index 00000000..1e75ca1c
--- /dev/null
+++ b/collectors/perf.plugin/Makefile.in
@@ -0,0 +1,514 @@
+# Makefile.in generated by automake 1.15.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2017 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+VPATH = @srcdir@
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = collectors/perf.plugin
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(dist_noinst_DATA) \
+ $(am__DIST_COMMON)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_noinst_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+am__DIST_COMMON = $(srcdir)/Makefile.in
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CUPSCONFIG = @CUPSCONFIG@
+CXX = @CXX@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CXX_BINARY = @CXX_BINARY@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+JSON_CFLAGS = @JSON_CFLAGS@
+JSON_LIBS = @JSON_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
+LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
+LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
+LIBCURL_LIBS = @LIBCURL_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
+LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
+LIBSSL_LIBS = @LIBSSL_LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
+OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
+OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
+OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
+OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
+OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
+OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
+OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
+OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
+OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
+OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
+OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
+OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
+PROTOBUF_LIBS = @PROTOBUF_LIBS@
+PROTOC = @PROTOC@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
+XENLIGHT_LIBS = @XENLIGHT_LIBS@
+YAJL_CFLAGS = @YAJL_CFLAGS@
+YAJL_LIBS = @YAJL_LIBS@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
+
+all: all-am
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/perf.plugin/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu collectors/perf.plugin/Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(DATA)
+installdirs:
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags-am uninstall uninstall-am
+
+.PRECIOUS: Makefile
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/collectors/perf.plugin/README.md b/collectors/perf.plugin/README.md
index ce696b06..96405a84 100644
--- a/collectors/perf.plugin/README.md
+++ b/collectors/perf.plugin/README.md
@@ -15,6 +15,7 @@ event for every CPU core needs a separate file descriptor to be opened.
The plugin provides statistics for general hardware and software performance monitoring events:
Hardware events:
+
1. CPU cycles
2. Instructions
3. Branch instructions
@@ -23,11 +24,13 @@ Hardware events:
6. Stalled frontend and backend cycles
Software events:
+
1. CPU migrations
2. Alignment faults
3. Emulation faults
Hardware cache events:
+
1. L1D cache operations
2. L1D prefetch cache operations
3. L1I cache operations
@@ -69,4 +72,4 @@ sudo /usr/libexec/netdata/plugins.d/perf.plugin 1 all debug
You will get verbose output on what the plugin does.
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fperf.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fperf.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/plugins.d/Makefile.in b/collectors/plugins.d/Makefile.in
new file mode 100644
index 00000000..966535ae
--- /dev/null
+++ b/collectors/plugins.d/Makefile.in
@@ -0,0 +1,697 @@
+# Makefile.in generated by automake 1.15.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2017 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+VPATH = @srcdir@
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = collectors/plugins.d
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(dist_noinst_DATA) \
+ $(am__DIST_COMMON)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \
+ ctags-recursive dvi-recursive html-recursive info-recursive \
+ install-data-recursive install-dvi-recursive \
+ install-exec-recursive install-html-recursive \
+ install-info-recursive install-pdf-recursive \
+ install-ps-recursive install-recursive installcheck-recursive \
+ installdirs-recursive pdf-recursive ps-recursive \
+ tags-recursive uninstall-recursive
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_noinst_DATA)
+RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \
+ distclean-recursive maintainer-clean-recursive
+am__recursive_targets = \
+ $(RECURSIVE_TARGETS) \
+ $(RECURSIVE_CLEAN_TARGETS) \
+ $(am__extra_recursive_targets)
+AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \
+ distdir
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+# Read a list of newline-separated strings from the standard input,
+# and print each of them once, without duplicates. Input order is
+# *not* preserved.
+am__uniquify_input = $(AWK) '\
+ BEGIN { nonempty = 0; } \
+ { items[$$0] = 1; nonempty = 1; } \
+ END { if (nonempty) { for (i in items) print i; }; } \
+'
+# Make sure the list of sources is unique. This is necessary because,
+# e.g., the same source file might be shared among _SOURCES variables
+# for different programs/libraries.
+am__define_uniq_tagged_files = \
+ list='$(am__tagged_files)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | $(am__uniquify_input)`
+ETAGS = etags
+CTAGS = ctags
+DIST_SUBDIRS = $(SUBDIRS)
+am__DIST_COMMON = $(srcdir)/Makefile.in
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+am__relativize = \
+ dir0=`pwd`; \
+ sed_first='s,^\([^/]*\)/.*$$,\1,'; \
+ sed_rest='s,^[^/]*/*,,'; \
+ sed_last='s,^.*/\([^/]*\)$$,\1,'; \
+ sed_butlast='s,/*[^/]*$$,,'; \
+ while test -n "$$dir1"; do \
+ first=`echo "$$dir1" | sed -e "$$sed_first"`; \
+ if test "$$first" != "."; then \
+ if test "$$first" = ".."; then \
+ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \
+ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \
+ else \
+ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \
+ if test "$$first2" = "$$first"; then \
+ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \
+ else \
+ dir2="../$$dir2"; \
+ fi; \
+ dir0="$$dir0"/"$$first"; \
+ fi; \
+ fi; \
+ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \
+ done; \
+ reldir="$$dir2"
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CUPSCONFIG = @CUPSCONFIG@
+CXX = @CXX@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CXX_BINARY = @CXX_BINARY@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+JSON_CFLAGS = @JSON_CFLAGS@
+JSON_LIBS = @JSON_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
+LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
+LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
+LIBCURL_LIBS = @LIBCURL_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
+LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
+LIBSSL_LIBS = @LIBSSL_LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
+OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
+OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
+OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
+OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
+OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
+OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
+OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
+OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
+OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
+OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
+OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
+OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
+PROTOBUF_LIBS = @PROTOBUF_LIBS@
+PROTOC = @PROTOC@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
+XENLIGHT_LIBS = @XENLIGHT_LIBS@
+YAJL_CFLAGS = @YAJL_CFLAGS@
+YAJL_LIBS = @YAJL_LIBS@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+SUBDIRS = \
+ $(NULL)
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
+
+all: all-recursive
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/plugins.d/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu collectors/plugins.d/Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+
+# This directory's subdirectories are mostly independent; you can cd
+# into them and run 'make' without going through this Makefile.
+# To change the values of 'make' variables: instead of editing Makefiles,
+# (1) if the variable is set in 'config.status', edit 'config.status'
+# (which will cause the Makefiles to be regenerated when you run 'make');
+# (2) otherwise, pass the desired values on the 'make' command line.
+$(am__recursive_targets):
+ @fail=; \
+ if $(am__make_keepgoing); then \
+ failcom='fail=yes'; \
+ else \
+ failcom='exit 1'; \
+ fi; \
+ dot_seen=no; \
+ target=`echo $@ | sed s/-recursive//`; \
+ case "$@" in \
+ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \
+ *) list='$(SUBDIRS)' ;; \
+ esac; \
+ for subdir in $$list; do \
+ echo "Making $$target in $$subdir"; \
+ if test "$$subdir" = "."; then \
+ dot_seen=yes; \
+ local_target="$$target-am"; \
+ else \
+ local_target="$$target"; \
+ fi; \
+ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \
+ || eval $$failcom; \
+ done; \
+ if test "$$dot_seen" = "no"; then \
+ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \
+ fi; test -z "$$fail"
+
+ID: $(am__tagged_files)
+ $(am__define_uniq_tagged_files); mkid -fID $$unique
+tags: tags-recursive
+TAGS: tags
+
+tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+ set x; \
+ here=`pwd`; \
+ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \
+ include_option=--etags-include; \
+ empty_fix=.; \
+ else \
+ include_option=--include; \
+ empty_fix=; \
+ fi; \
+ list='$(SUBDIRS)'; for subdir in $$list; do \
+ if test "$$subdir" = .; then :; else \
+ test ! -f $$subdir/TAGS || \
+ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \
+ fi; \
+ done; \
+ $(am__define_uniq_tagged_files); \
+ shift; \
+ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
+ test -n "$$unique" || unique=$$empty_fix; \
+ if test $$# -gt 0; then \
+ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+ "$$@" $$unique; \
+ else \
+ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+ $$unique; \
+ fi; \
+ fi
+ctags: ctags-recursive
+
+CTAGS: ctags
+ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+ $(am__define_uniq_tagged_files); \
+ test -z "$(CTAGS_ARGS)$$unique" \
+ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
+ $$unique
+
+GTAGS:
+ here=`$(am__cd) $(top_builddir) && pwd` \
+ && $(am__cd) $(top_srcdir) \
+ && gtags -i $(GTAGS_ARGS) "$$here"
+cscopelist: cscopelist-recursive
+
+cscopelist-am: $(am__tagged_files)
+ list='$(am__tagged_files)'; \
+ case "$(srcdir)" in \
+ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \
+ *) sdir=$(subdir)/$(srcdir) ;; \
+ esac; \
+ for i in $$list; do \
+ if test -f "$$i"; then \
+ echo "$(subdir)/$$i"; \
+ else \
+ echo "$$sdir/$$i"; \
+ fi; \
+ done >> $(top_builddir)/cscope.files
+
+distclean-tags:
+ -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+ @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \
+ if test "$$subdir" = .; then :; else \
+ $(am__make_dryrun) \
+ || test -d "$(distdir)/$$subdir" \
+ || $(MKDIR_P) "$(distdir)/$$subdir" \
+ || exit 1; \
+ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \
+ $(am__relativize); \
+ new_distdir=$$reldir; \
+ dir1=$$subdir; dir2="$(top_distdir)"; \
+ $(am__relativize); \
+ new_top_distdir=$$reldir; \
+ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \
+ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \
+ ($(am__cd) $$subdir && \
+ $(MAKE) $(AM_MAKEFLAGS) \
+ top_distdir="$$new_top_distdir" \
+ distdir="$$new_distdir" \
+ am__remove_distdir=: \
+ am__skip_length_check=: \
+ am__skip_mode_fix=: \
+ distdir) \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-recursive
+all-am: Makefile $(DATA)
+installdirs: installdirs-recursive
+installdirs-am:
+install: install-recursive
+install-exec: install-exec-recursive
+install-data: install-data-recursive
+uninstall: uninstall-recursive
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-recursive
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-recursive
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-recursive
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic distclean-tags
+
+dvi: dvi-recursive
+
+dvi-am:
+
+html: html-recursive
+
+html-am:
+
+info: info-recursive
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-recursive
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-recursive
+
+install-html-am:
+
+install-info: install-info-recursive
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-recursive
+
+install-pdf-am:
+
+install-ps: install-ps-recursive
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-recursive
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-recursive
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-recursive
+
+pdf-am:
+
+ps: ps-recursive
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: $(am__recursive_targets) install-am install-strip
+
+.PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am check \
+ check-am clean clean-generic cscopelist-am ctags ctags-am \
+ distclean distclean-generic distclean-tags distdir dvi dvi-am \
+ html html-am info info-am install install-am install-data \
+ install-data-am install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs installdirs-am maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags tags-am uninstall uninstall-am
+
+.PRECIOUS: Makefile
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/collectors/plugins.d/README.md b/collectors/plugins.d/README.md
index 9134d516..225dfcc9 100644
--- a/collectors/plugins.d/README.md
+++ b/collectors/plugins.d/README.md
@@ -1,61 +1,62 @@
# External plugins overview
-`plugins.d` is the netdata internal plugin that collects metrics
-from external processes, thus allowing netdata to use **external plugins**.
+`plugins.d` is the Netdata internal plugin that collects metrics
+from external processes, thus allowing Netdata to use **external plugins**.
## Provided External Plugins
-plugin|language|O/S|description
-:---:|:---:|:---:|:---
-[apps.plugin](../apps.plugin/)|`C`|linux, freebsd|monitors the whole process tree on Linux and FreeBSD and breaks down system resource usage by **process**, **user** and **user group**.
-[charts.d.plugin](../charts.d.plugin/)|`BASH`|all|a **plugin orchestrator** for data collection modules written in `BASH` v4+.
-[cups.plugin](../cups.plugin/)|`C`|all|monitors **CUPS**
-[fping.plugin](../fping.plugin/)|`C`|all|measures network latency, jitter and packet loss between the monitored node and any number of remote network end points.
-[ioping.plugin](../ioping.plugin/)|`C`|all|measures disk latency.
-[freeipmi.plugin](../freeipmi.plugin/)|`C`|linux|collects metrics from enterprise hardware sensors, on Linux servers.
-[nfacct.plugin](../nfacct.plugin/)|`C`|linux|collects netfilter firewall, connection tracker and accounting metrics using `libmnl` and `libnetfilter_acct`.
-[xenstat.plugin](../xenstat.plugin/)|`C`|linux|collects XenServer and XCP-ng metrics using `lxenstat`.
-[perf.plugin](../perf.plugin/)|`C`|linux|collects CPU performance metrics using performance monitoring units (PMU).
-[node.d.plugin](../node.d.plugin/)|`node.js`|all|a **plugin orchestrator** for data collection modules written in `node.js`.
-[python.d.plugin](../python.d.plugin/)|`python`|all|a **plugin orchestrator** for data collection modules written in `python` v2 or v3 (both are supported).
-
-Plugin orchestrators may also be described as **modular plugins**. They are modular since they accept custom made modules to be included. Writing modules for these plugins is easier than accessing the native netdata API directly. You will find modules already available for each orchestrator under the directory of the particular modular plugin (e.g. under python.d.plugin for the python orchestrator).
+|plugin|language|O/S|description|
+|:----:|:------:|:-:|:----------|
+|[apps.plugin](../apps.plugin/)|`C`|linux, freebsd|monitors the whole process tree on Linux and FreeBSD and breaks down system resource usage by **process**, **user** and **user group**.|
+|[charts.d.plugin](../charts.d.plugin/)|`BASH`|all|a **plugin orchestrator** for data collection modules written in `BASH` v4+.|
+|[cups.plugin](../cups.plugin/)|`C`|all|monitors **CUPS**.|
+|[fping.plugin](../fping.plugin/)|`C`|all|measures network latency, jitter and packet loss between the monitored node and any number of remote network end points.|
+|[ioping.plugin](../ioping.plugin/)|`C`|all|measures disk latency.|
+|[freeipmi.plugin](../freeipmi.plugin/)|`C`|linux|collects metrics from enterprise hardware sensors on Linux servers.|
+|[nfacct.plugin](../nfacct.plugin/)|`C`|linux|collects netfilter firewall, connection tracker and accounting metrics using `libmnl` and `libnetfilter_acct`.|
+|[xenstat.plugin](../xenstat.plugin/)|`C`|linux|collects XenServer and XCP-ng metrics using `libxenstat`.|
+|[perf.plugin](../perf.plugin/)|`C`|linux|collects CPU performance metrics using performance monitoring units (PMU).|
+|[node.d.plugin](../node.d.plugin/)|`node.js`|all|a **plugin orchestrator** for data collection modules written in `node.js`.|
+|[python.d.plugin](../python.d.plugin/)|`python`|all|a **plugin orchestrator** for data collection modules written in `python` v2 or v3 (both are supported).|
+
+Plugin orchestrators may also be described as **modular plugins**. They are modular in that they accept custom-made modules. Writing modules for these plugins is easier than accessing the native Netdata API directly. You will find modules already available for each orchestrator under the directory of the particular modular plugin (e.g. under `python.d.plugin` for the python orchestrator).
Each of these modular plugins has its own method for defining modules. Please check the examples and their documentation.
## Motivation
-This plugin allows netdata to use **external plugins** for data collection:
+This plugin allows Netdata to use **external plugins** for data collection:
-1. external data collection plugins may be written in any computer language.
+1. external data collection plugins may be written in any computer language.
-2. external data collection plugins may use O/S capabilities or `setuid` to
- run with escalated privileges (compared to the netdata daemon).
- The communication between the external plugin and netdata is unidirectional
- (from the plugin to netdata), so that netdata cannot manipulate an external
- plugin running with escalated privileges.
+2. external data collection plugins may use O/S capabilities or `setuid` to
+ run with escalated privileges (compared to the `netdata` daemon).
+ The communication between the external plugin and Netdata is unidirectional
+ (from the plugin to Netdata), so that Netdata cannot manipulate an external
+ plugin running with escalated privileges.
## Operation
Each of the external plugins is expected to run forever.
Netdata starts them when it starts and stops them when it exits.
-If the external plugin exits or crashes, netdata will log an error.
-If the external plugin exits or crashes without pushing metrics to netdata, netdata will not start it again.
-- Plugins that exit with any value other than zero, will be disabled. Plugins that exit with zero, will be restarted after some time.
-- Plugins may also be disabled by netdata if they output things that netdata does not understand.
+If the external plugin exits or crashes, Netdata will log an error.
+If the external plugin exits or crashes without pushing metrics to Netdata, Netdata will not start it again.
-The `stdout` of external plugins is connected to netdata to receive metrics,
+- Plugins that exit with any value other than zero will be disabled. Plugins that exit with zero will be restarted after some time.
+- Plugins may also be disabled by Netdata if they output things that Netdata does not understand.
+
+The `stdout` of external plugins is connected to Netdata to receive metrics,
with the API defined below.
-The `stderr` of external plugins is connected to netdata `error.log`.
+The `stderr` of external plugins is connected to Netdata's `error.log`.
Plugins can create any number of charts with any number of dimensions each. Each chart can have its own characteristics independently of the others generated by the same plugin. For example, one chart may have an update frequency of 1 second, another may have 5 seconds and a third may have 10 seconds.
## Configuration
-Netdata will supply the environment variables `NETDATA_USER_CONFIG_DIR` (for user supplied) and `NETDATA_STOCK_CONFIG_DIR` (for netdata supplied) configuration files to identify the directory where configuration files are stored. It is up to the plugin to read the configuration it needs.
+Netdata will supply the environment variables `NETDATA_USER_CONFIG_DIR` (for user supplied) and `NETDATA_STOCK_CONFIG_DIR` (for Netdata supplied) configuration files to identify the directory where configuration files are stored. It is up to the plugin to read the configuration it needs.
-The `netdata.conf` section [plugins] section contains a list of all the plugins found at the system where netdata runs, with a boolean setting to enable them or not.
+The `[plugins]` section of `netdata.conf` contains a list of all the plugins found on the system where Netdata runs, with a boolean setting to enable or disable them.
Example:
@@ -90,9 +91,8 @@ For example, for `apps.plugin` the following section is available:
# command options =
```
-- `update every` controls the granularity of the external plugin.
-- `command options` allows giving additional command line options to the plugin.
-
+- `update every` controls the granularity of the external plugin.
+- `command options` allows giving additional command line options to the plugin.
Netdata will provide the external plugins with the environment variable `NETDATA_UPDATE_EVERY`, in seconds (the default is 1). This is the **minimum update frequency** for all charts. A plugin that updates values more frequently than this is just wasting resources.
@@ -100,27 +100,27 @@ Netdata will call the plugin with just one command line parameter: the number of
Other than the above, the plugin configuration is up to the plugin.
-Keep in mind, that the user may use netdata configuration to overwrite chart and dimension parameters. This is transparent to the plugin.
+Keep in mind that the user may use Netdata configuration to overwrite chart and dimension parameters. This is transparent to the plugin.
### Autoconfiguration
Plugins should attempt to autoconfigure themselves when possible.
-For example, if your plugin wants to monitor `squid`, you can search for it on port `3128` or `8080`. If any succeeds, you can proceed. If it fails you can output an error (on stderr) saying that you cannot find `squid` running and giving instructions about the plugin configuration. Then you can stop (exit with non-zero value), so that netdata will not attempt to start the plugin again.
+For example, if your plugin wants to monitor `squid`, you can search for it on ports `3128` and `8080`. If either probe succeeds, you can proceed. If both fail, you can output an error (on `stderr`) saying that you cannot find `squid` running and giving instructions about the plugin configuration, and then stop (exit with a non-zero value) so that Netdata will not attempt to start the plugin again. A sketch of such a probe follows.
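+
+A minimal sketch of such a probe, in BASH (the `squid` target and the two ports come from the example above; the rest is illustrative, not an official helper):
+
+```bash
+#!/usr/bin/env bash
+
+# Probe the two common squid ports; /dev/tcp is a bash built-in feature.
+squid_port=""
+for port in 3128 8080; do
+    if timeout 1 bash -c "exec 3<>/dev/tcp/127.0.0.1/${port}" 2>/dev/null; then
+        squid_port="${port}"
+        break
+    fi
+done
+
+if [ -z "${squid_port}" ]; then
+    echo "cannot find squid on ports 3128 or 8080; check the plugin configuration" >&2
+    exit 1 # non-zero exit: Netdata will not restart the plugin
+fi
+```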
## External Plugins API
-Any program that can print a few values to its standard output can become a netdata external plugin.
+Any program that can print a few values to its standard output can become a Netdata external plugin.
-There are 7 lines netdata parses. lines starting with:
+Netdata parses 7 kinds of lines, each starting with one of:
-- `CHART` - create or update a chart
-- `DIMENSION` - add or update a dimension to the chart just created
-- `BEGIN` - initialize data collection for a chart
-- `SET` - set the value of a dimension for the initialized chart
-- `END` - complete data collection for the initialized chart
-- `FLUSH` - ignore the last collected values
-- `DISABLE` - disable this plugin
+- `CHART` - create or update a chart
+- `DIMENSION` - add or update a dimension to the chart just created
+- `BEGIN` - initialize data collection for a chart
+- `SET` - set the value of a dimension for the initialized chart
+- `END` - complete data collection for the initialized chart
+- `FLUSH` - ignore the last collected values
+- `DISABLE` - disable this plugin
A single program can produce any number of charts with any number of dimensions each.
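+
+As a quick taste of the API described below, here is a minimal, hypothetical BASH plugin that emits one chart with one dimension (the `example.random` names are made up for illustration):
+
+```bash
+#!/usr/bin/env bash
+
+# Netdata passes the update frequency, in seconds, as the only parameter.
+update_every="${1:-1}"
+
+# Define the chart and its single dimension once, at startup.
+echo "CHART example.random '' 'A random number' 'value' random random line 90000 ${update_every}"
+echo "DIMENSION random '' absolute 1 1"
+
+# Collect and send one value per iteration, forever.
+while true; do
+    echo "BEGIN example.random"
+    echo "SET random = ${RANDOM}"
+    echo "END"
+    sleep "${update_every}"
+done
+```
+
+Each keyword used above (`CHART`, `DIMENSION`, `BEGIN`, `SET`, `END`) is described in detail in the following sections.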
@@ -129,7 +129,7 @@ Charts can be added any time (not just the beginning).
### command line parameters
The plugin **MUST** accept just **one** parameter: **the number of seconds it is
-expected to update the values for its charts**. The value passed by netdata
+expected to update the values for its charts**. The value passed by Netdata
to the plugin is controlled via its configuration file (so there is no need
for the plugin to handle this configuration option).
@@ -142,26 +142,25 @@ every 5 seconds.
There are a few environment variables that are set by `netdata` and are
available for the plugin to use.
-variable|description
-:------:|:----------
-`NETDATA_USER_CONFIG_DIR`|The directory where all netdata related user configuration should be stored. If the plugin requires custom user configuration, this is the place the user has saved it (normally under `/etc/netdata`).
-`NETDATA_STOCK_CONFIG_DIR`|The directory where all netdata related stock configuration should be stored. If the plugin is shipped with configuration files, this is the place they can be found (normally under `/usr/lib/netdata/conf.d`).
-`NETDATA_PLUGINS_DIR`|The directory where all netdata plugins are stored.
-`NETDATA_WEB_DIR`|The directory where the web files of netdata are saved.
-`NETDATA_CACHE_DIR`|The directory where the cache files of netdata are stored. Use this directory if the plugin requires a place to store data. A new directory should be created for the plugin for this purpose, inside this directory.
-`NETDATA_LOG_DIR`|The directory where the log files are stored. By default the `stderr` output of the plugin will be saved in the `error.log` file of netdata.
-`NETDATA_HOST_PREFIX`|This is used in environments where system directories like `/sys` and `/proc` have to be accessed at a different path.
-`NETDATA_DEBUG_FLAGS`|This is a number (probably in hex starting with `0x`), that enables certain netdata debugging features. Check **[[Tracing Options]]** for more information.
-`NETDATA_UPDATE_EVERY`|The minimum number of seconds between chart refreshes. This is like the **internal clock** of netdata (it is user configurable, defaulting to `1`). There is no meaning for a plugin to update its values more frequently than this number of seconds.
-
+|variable|description|
+|:------:|:----------|
+|`NETDATA_USER_CONFIG_DIR`|The directory where all Netdata-related user configuration should be stored. If the plugin requires custom user configuration, this is the place the user has saved it (normally under `/etc/netdata`).|
+|`NETDATA_STOCK_CONFIG_DIR`|The directory where all Netdata -related stock configuration should be stored. If the plugin is shipped with configuration files, this is the place they can be found (normally under `/usr/lib/netdata/conf.d`).|
+|`NETDATA_PLUGINS_DIR`|The directory where all Netdata plugins are stored.|
+|`NETDATA_WEB_DIR`|The directory where the web files of Netdata are saved.|
+|`NETDATA_CACHE_DIR`|The directory where the cache files of Netdata are stored. Use this directory if the plugin requires a place to store data. A new directory should be created for the plugin for this purpose, inside this directory.|
+|`NETDATA_LOG_DIR`|The directory where the log files are stored. By default the `stderr` output of the plugin will be saved in the `error.log` file of Netdata.|
+|`NETDATA_HOST_PREFIX`|This is used in environments where system directories like `/sys` and `/proc` have to be accessed at a different path.|
+|`NETDATA_DEBUG_FLAGS`|This is a number (probably in hex starting with `0x`), that enables certain Netdata debugging features. Check **\[[Tracing Options]]** for more information.|
+|`NETDATA_UPDATE_EVERY`|The minimum number of seconds between chart refreshes. This is like the **internal clock** of Netdata (it is user configurable, defaulting to `1`). There is no meaning for a plugin to update its values more frequently than this number of seconds.|
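+
+A plugin can read these directly from its environment. A short sketch in BASH (the `myplugin.conf` file name is hypothetical):
+
+```bash
+#!/usr/bin/env bash
+
+# Fall back to sane defaults when the plugin is run outside Netdata.
+config_dir="${NETDATA_USER_CONFIG_DIR:-/etc/netdata}"
+update_every="${NETDATA_UPDATE_EVERY:-1}"
+
+# Load user-supplied configuration, if any.
+[ -f "${config_dir}/myplugin.conf" ] && source "${config_dir}/myplugin.conf"
+```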
### The output of the plugin
-The plugin should output instructions for netdata to its output (`stdout`). Since this uses pipes, please make sure you flush stdout after every iteration.
+The plugin should output instructions for Netdata to its output (`stdout`). Since this uses pipes, please make sure you flush stdout after every iteration.
#### DISABLE
-`DISABLE` will disable this plugin. This will prevent netdata from restarting the plugin. You can also exit with the value `1` to have the same effect.
+`DISABLE` will disable this plugin. This will prevent Netdata from restarting the plugin. You can also exit with the value `1` to have the same effect.
#### CHART
@@ -169,31 +168,32 @@ The plugin should output instructions for netdata to its output (`stdout`). Sinc
the template is:
-> CHART type.id name title units [family [context [charttype [priority [update_every [options [plugin [module]]]]]]]]
+> CHART type.id name title units \[family \[context \[charttype \[priority \[update_every \[options \[plugin [module]]]]]]]]
where:
- - `type.id`
+
+- `type.id`
uniquely identifies the chart,
this is what will be needed to add values to the chart
the `type` part controls the menu the charts will appear in
- - `name`
+- `name`
is the name that will be presented to the user instead of `id` in `type.id`. This means that only the `id` part of `type.id` is changed. When a name has been given, the chart is indexed (and can be referred to) as both `type.id` and `type.name`. You can set name to `''`, or `null`, or `(null)` to disable it.
- - `title`
+- `title`
the text above the chart
- - `units`
+- `units`
the label of the vertical axis of the chart,
all dimensions added to a chart should have the same units
of measurement
- - `family`
+- `family`
is used to group charts together
(for example all eth0 charts should say: eth0),
@@ -201,36 +201,35 @@ the template is:
this controls the sub-menu on the dashboard
- - `context`
+- `context`
the context gives the template of the chart. For example, if multiple charts present the same information for a different family, they should have the same `context`
this is used for looking up rendering information for the chart (colors, sizes, informational texts) and also to apply alarms to it
- - `charttype`
+- `charttype`
one of `line`, `area` or `stacked`,
if empty or missing, `line` will be used
- - `priority`
+- `priority`
is the relative priority of the charts as rendered on the web page,
lower numbers make the charts appear before the ones with higher numbers,
if empty or missing, `1000` will be used
- - `update_every`
+- `update_every`
overrides the update frequency set by the server,
if empty or missing, the user-configured value will be used
- - `options`
+- `options`
- a space separated list of options, enclosed in quotes. 4 options are currently supported: `obsolete` to mark a chart as obsolete (netdata will hide it and delete it after some time), `detail` to mark a chart as insignificant (this may be used by dashboards to make the charts smaller, or somehow visualize properly a less important chart), `store_first` to make netdata store the first collected value, assuming there was an invisible previous value set to zero (this is used by statsd charts - if the first data collected value of incremental dimensions is not zero based, unrealistic spikes will appear with this option set) and `hidden` to perform all operations on a chart, but do not offer it on dashboards (the chart will be send to backends). `CHART` options have been added in netdata v1.7 and the `hidden` option was added in 1.10.
+ a space-separated list of options, enclosed in quotes. 4 options are currently supported: `obsolete` to mark a chart as obsolete (Netdata will hide it and delete it after some time), `detail` to mark a chart as insignificant (this may be used by dashboards to make the charts smaller, or somehow visualize properly a less important chart), `store_first` to make Netdata store the first collected value, assuming there was an invisible previous value set to zero (this is used by statsd charts - if the first collected value of incremental dimensions is not zero-based, unrealistic spikes will appear with this option set) and `hidden` to perform all operations on a chart, but not offer it on dashboards (the chart will still be sent to backends). `CHART` options were added in Netdata v1.7 and the `hidden` option was added in 1.10.
- - `plugin` and `module`
-
- both are just names that are used to let the user identify the plugin and the module that generated the chart. If `plugin` is unset or empty, netdata will automatically set the filename of the plugin that generated the chart. `module` has not default.
+- `plugin` and `module`
+ both are just names that are used to let the user identify the plugin and the module that generated the chart. If `plugin` is unset or empty, Netdata will automatically set it to the filename of the plugin that generated the chart. `module` has no default.
#### DIMENSION
@@ -238,60 +237,59 @@ the template is:
the template is:
-> DIMENSION id [name [algorithm [multiplier [divisor [options]]]]]
+> DIMENSION id \[name \[algorithm \[multiplier \[divisor [options]]]]]
where:
- - `id`
+- `id`
the `id` of this dimension (it is a text value, not numeric),
this will be needed later to add values to the dimension
We suggest avoiding the use of `.` in dimension ids, as backends expect metrics to be `.`-separated and people will get confused if a dimension id contains a dot.
- - `name`
+- `name`
the name of the dimension as it will appear in the legend of the chart,
if empty or missing, the `id` will be used
- - `algorithm`
+- `algorithm`
one of:
- * `absolute`
+ - `absolute`
- the value is to drawn as-is (interpolated to second boundary),
- if `algorithm` is empty, invalid or missing, `absolute` is used
+ the value is drawn as-is (interpolated to second boundary),
+ if `algorithm` is empty, invalid or missing, `absolute` is used
- * `incremental`
+ - `incremental`
- the value increases over time,
- the difference from the last value is presented in the chart,
- the server interpolates the value and calculates a per second figure
+ the value increases over time,
+ the difference from the last value is presented in the chart,
+ the server interpolates the value and calculates a per-second figure
- * `percentage-of-absolute-row`
+ - `percentage-of-absolute-row`
- the % of this value compared to the total of all dimensions
+ the % of this value compared to the total of all dimensions
- * `percentage-of-incremental-row`
+ - `percentage-of-incremental-row`
- the % of this value compared to the incremental total of
- all dimensions
+ the % of this value compared to the incremental total of
+ all dimensions
- - `multiplier`
+- `multiplier`
an integer value by which to multiply the collected value,
if empty or missing, `1` is used
- - `divisor`
+- `divisor`
an integer value by which to divide the collected value,
if empty or missing, `1` is used
- - `options`
-
- a space separated list of options, enclosed in quotes. Options supported: `obsolete` to mark a dimension as obsolete (netdata will delete it after some time) and `hidden` to make this dimension hidden, it will take part in the calculations but will not be presented in the chart.
+- `options`
+ a space-separated list of options, enclosed in quotes. Options supported: `obsolete` to mark a dimension as obsolete (Netdata will delete it after some time) and `hidden` to make this dimension hidden: it will take part in the calculations but will not be presented in the chart.
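+
+Putting the two templates together, the hypothetical plugin sketched earlier would define its chart and dimension like this (all values are illustrative):
+
+```
+CHART example.random '' 'A random number' 'value' random random line 90000 1
+DIMENSION random '' absolute 1 1
+```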
#### VARIABLE
@@ -301,8 +299,8 @@ the template is:
Variables support 2 scopes:
-- `GLOBAL` or `HOST` to define the variable at the host level.
-- `LOCAL` or `CHART` to define the variable at the chart level. Use chart-local variables when the same variable may exist for different charts (i.e. netdata monitors 2 mysql servers, and you need to set the `max_connections` each server accepts). Using chart-local variables is the ideal to build alarm templates.
+- `GLOBAL` or `HOST` to define the variable at the host level.
+- `LOCAL` or `CHART` to define the variable at the chart level. Use chart-local variables when the same variable may exist for different charts (e.g. Netdata monitors 2 mysql servers, and you need to set the `max_connections` each server accepts). Chart-local variables are ideal for building alarm templates.
The position of the `VARIABLE` line sets its default scope (in case you do not specify a scope). So, defining a `VARIABLE` before any `CHART`, or between `END` and `BEGIN` (outside any chart), sets `GLOBAL` scope, while defining a `VARIABLE` just after a `CHART` or a `DIMENSION`, or within the `BEGIN` - `END` block of a chart, sets `LOCAL` scope.
@@ -310,9 +308,9 @@ These variables can be set and updated at any point.
Variable names should use alphanumeric characters, the `.` and the `_`.
-The `value` is floating point (netdata used `long double`).
+The `value` is floating point (Netdata uses `long double`).
-Variables are transferred to upstream netdata servers (streaming and database replication).
+Variables are transferred to upstream Netdata servers (streaming and database replication).
## Data collection
@@ -320,31 +318,31 @@ data collection is defined as a series of `BEGIN` -> `SET` -> `END` lines
> BEGIN type.id [microseconds]
- - `type.id`
+- `type.id`
is the unique identification of the chart (as given in `CHART`)
- - `microseconds`
+- `microseconds`
is the number of microseconds since the last update of the chart. It is optional.
Under heavy system load, the system may have some latency transferring
- data from the plugins to netdata via the pipe. This number improves
+ data from the plugins to Netdata via the pipe. This number improves
accuracy significantly, since the plugin is able to calculate the
- duration between its iterations better than netdata.
+ duration between its iterations better than Netdata.
The first time the plugin is started, no microseconds should be given
- to netdata.
+ to Netdata.
> SET id = value
- - `id`
+- `id`
- is the unique identification of the dimension (of the chart just began)
+ is the unique identification of the dimension (of the chart that was just begun)
- - `value`
+- `value`
- is the collected value, only integer values are collected. If you want to push fractional values, multiply this value by 100 or 1000 and set the `DIMENSION` divider to 1000.
+ is the collected value; only integer values are collected. If you want to push fractional values, multiply the value by a power of 10 (e.g. `100` or `1000`) and set the `DIMENSION` divisor to the same number.
> END
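+
+For the hypothetical `example.random` chart defined earlier, one complete data collection iteration would be:
+
+```
+BEGIN example.random
+SET random = 42
+END
+```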
@@ -360,56 +358,56 @@ If more charts need to be updated, each chart should have its own
`BEGIN` -> `SET` -> `END` block.
If, for any reason, a plugin has issued a `BEGIN` but wants to cancel it,
-it can issue a `FLUSH`. The `FLUSH` command will instruct netdata to ignore
+it can issue a `FLUSH`. The `FLUSH` command will instruct Netdata to ignore
all the values collected since the last `BEGIN` command.
If a plugin does not behave properly (outputs invalid lines, or does not
-follow these guidelines), will be disabled by netdata.
+follow these guidelines), it will be disabled by Netdata.
### collected values
-netdata will collect any **signed** value in the 64bit range:
+Netdata will collect any **signed** value in the 64-bit range:
`-9.223.372.036.854.775.808` to `+9.223.372.036.854.775.807`
If a value is not collected, leave it empty, like this:
-`SET id = `
+`SET id =`
or do not output the line at all.
## Modular Plugins
-1. **python**, use `python.d.plugin`, there are many examples in the [python.d directory](../python.d.plugin/)
+1. **python**: use `python.d.plugin`; there are many examples in the [python.d directory](../python.d.plugin/)
- python is ideal for netdata plugins. It is a simple, yet powerful way to collect data, it has a very small memory footprint, although it is not the most CPU efficient way to do it.
+ python is ideal for Netdata plugins. It is a simple, yet powerful way to collect data; it has a very small memory footprint, although it is not the most CPU-efficient way to do it.
-2. **node.js**, use `node.d.plugin`, there are a few examples in the [node.d directory](../node.d.plugin/)
+2. **node.js**: use `node.d.plugin`; there are a few examples in the [node.d directory](../node.d.plugin/)
- node.js is the fastest scripting language for collecting data. If your plugin needs to do a lot of work, compute values, etc, node.js is probably the best choice before moving to compiled code. Keep in mind though that node.js is not memory efficient; it will probably need more RAM compared to python.
+ node.js is the fastest scripting language for collecting data. If your plugin needs to do a lot of work, compute values, etc., node.js is probably the best choice before moving to compiled code. Keep in mind though that node.js is not memory-efficient; it will probably need more RAM compared to python.
-3. **BASH**, use `charts.d.plugin`, there are many examples in the [charts.d directory](../charts.d.plugin/)
+3. **BASH**: use `charts.d.plugin`; there are many examples in the [charts.d directory](../charts.d.plugin/)
- BASH is the simplest scripting language for collecting values. It is the less efficient though in terms of CPU resources. You can use it to collect data quickly, but extensive use of it might use a lot of system resources.
+ BASH is the simplest scripting language for collecting values. It is, however, the least efficient in terms of CPU resources. You can use it to collect data quickly, but extensive use of it might use a lot of system resources.
-4. **C**
+4. **C**
- Of course, C is the most efficient way of collecting data. This is why netdata itself is written in C.
+ Of course, C is the most efficient way of collecting data. This is why Netdata itself is written in C.
## Writing Plugins Properly
There are a few rules for writing plugins properly:
-1. Respect system resources
+1. Respect system resources
- Pay special attention to efficiency:
+ Pay special attention to efficiency:
- - Initialize everything once, at the beginning. Initialization is not an expensive operation. Your plugin will most probably be started once and run forever. So, do whatever heavy operation is needed at the beginning, just once.
- - Do the absolutely minimum while iterating to collect values repeatedly.
- - If you need to connect to another server to collect values, avoid re-connects if possible. Connect just once, with keep-alive (for HTTP) enabled and collect values using the same connection.
- - Avoid any CPU or memory heavy operation while collecting data. If you control memory allocation, avoid any memory allocation while iterating to collect values.
- - Avoid running external commands when possible. If you are writing shell scripts avoid especially pipes (each pipe is another fork, a very expensive operation).
+ - Initialize everything once, at the beginning. Initialization is not an expensive operation. Your plugin will most probably be started once and run forever. So, do whatever heavy operation is needed at the beginning, just once.
+ - Do the absolutely minimum while iterating to collect values repeatedly.
+ - If you need to connect to another server to collect values, avoid re-connects if possible. Connect just once, with keep-alive (for HTTP) enabled and collect values using the same connection.
+ - Avoid any CPU or memory heavy operation while collecting data. If you control memory allocation, avoid any memory allocation while iterating to collect values.
+ - Avoid running external commands when possible. If you are writing shell scripts, especially avoid pipes (each pipe is another fork, a very expensive operation).
-2. The best way to iterate at a constant pace is this pseudo code:
+2. The best way to iterate at a constant pace is this pseudo code:
```js
var update_every = argv[1] * 1000; /* seconds * 1000 = milliseconds */
@@ -436,7 +434,7 @@ There are a few rules for writing plugins properly:
/*
* find the time of the next loop
* this makes sure we are always aligned
- * with the netdata daemon
+ * with the Netdata daemon
*/
next_run = now - (now % update_every) + update_every;
@@ -461,7 +459,7 @@ There are a few rules for writing plugins properly:
/* do your magic here to collect values */
collectValues();
- /* send the collected data to netdata */
+ /* send the collected data to Netdata */
printValues(dt_since_last_run); /* print BEGIN, SET, END statements */
}
```
@@ -470,9 +468,8 @@ There are a few rules for writing plugins properly:
Netdata interpolates values to second boundaries, so even if your plugin is not perfectly aligned it does not matter. Netdata will find out. When your plugin works in increments of `update_every`, there will be no gaps in the charts due to the possible cumulative micro-delays in data collection. Gaps will only appear if the data collection is really delayed.
-3. If you are not sure of memory leaks, exit every one hour. Netdata will re-start your process.
-
-4. If possible, try to autodetect if your plugin should be enabled, without any configuration.
+3. If you are not sure whether your plugin leaks memory, exit every hour; Netdata will restart your process.
+4. If possible, try to autodetect whether your plugin should be enabled, without any configuration.
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fplugins.d%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fplugins.d%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/plugins.d/plugins_d.c b/collectors/plugins.d/plugins_d.c
index aea2704a..85b670df 100644
--- a/collectors/plugins.d/plugins_d.c
+++ b/collectors/plugins.d/plugins_d.c
@@ -249,7 +249,7 @@ inline size_t pluginsd_process(RRDHOST *host, struct plugind *cd, FILE *fp, int
#ifdef ENABLE_HTTPS
int bytesleft = 0;
char tmpbuffer[PLUGINSD_LINE_MAX];
- char *readfrom;
+ char *readfrom = NULL;
#endif
char *r = NULL;
while(!ferror(fp)) {
diff --git a/collectors/proc.plugin/Makefile.in b/collectors/proc.plugin/Makefile.in
new file mode 100644
index 00000000..563e50ee
--- /dev/null
+++ b/collectors/proc.plugin/Makefile.in
@@ -0,0 +1,514 @@
+# Makefile.in generated by automake 1.15.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2017 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+VPATH = @srcdir@
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = collectors/proc.plugin
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(dist_noinst_DATA) \
+ $(am__DIST_COMMON)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_noinst_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+am__DIST_COMMON = $(srcdir)/Makefile.in
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CUPSCONFIG = @CUPSCONFIG@
+CXX = @CXX@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CXX_BINARY = @CXX_BINARY@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+JSON_CFLAGS = @JSON_CFLAGS@
+JSON_LIBS = @JSON_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
+LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
+LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
+LIBCURL_LIBS = @LIBCURL_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
+LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
+LIBSSL_LIBS = @LIBSSL_LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
+OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
+OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
+OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
+OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
+OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
+OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
+OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
+OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
+OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
+OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
+OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
+OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
+PROTOBUF_LIBS = @PROTOBUF_LIBS@
+PROTOC = @PROTOC@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
+XENLIGHT_LIBS = @XENLIGHT_LIBS@
+YAJL_CFLAGS = @YAJL_CFLAGS@
+YAJL_LIBS = @YAJL_LIBS@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
+
+all: all-am
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/proc.plugin/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu collectors/proc.plugin/Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(DATA)
+installdirs:
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags-am uninstall uninstall-am
+
+.PRECIOUS: Makefile
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/collectors/proc.plugin/README.md b/collectors/proc.plugin/README.md
index 9513877d..7e2aa109 100644
--- a/collectors/proc.plugin/README.md
+++ b/collectors/proc.plugin/README.md
@@ -1,31 +1,30 @@
# proc.plugin
- - `/proc/net/dev` (all network interfaces for all their values)
- - `/proc/diskstats` (all disks for all their values)
- - `/proc/mdstat` (status of RAID arrays)
- - `/proc/net/snmp` (total IPv4, TCP and UDP usage)
- - `/proc/net/snmp6` (total IPv6 usage)
- - `/proc/net/netstat` (more IPv4 usage)
- - `/proc/net/stat/nf_conntrack` (connection tracking performance)
- - `/proc/net/stat/synproxy` (synproxy performance)
- - `/proc/net/ip_vs/stats` (IPVS connection statistics)
- - `/proc/stat` (CPU utilization and attributes)
- - `/proc/meminfo` (memory information)
- - `/proc/vmstat` (system performance)
- - `/proc/net/rpc/nfsd` (NFS server statistics for both v3 and v4 NFS servers)
- - `/sys/fs/cgroup` (Control Groups - Linux Containers)
- - `/proc/self/mountinfo` (mount points)
- - `/proc/interrupts` (total and per core hardware interrupts)
- - `/proc/softirqs` (total and per core software interrupts)
- - `/proc/loadavg` (system load and total processes running)
- - `/proc/sys/kernel/random/entropy_avail` (random numbers pool availability - used in cryptography)
- - `/sys/class/power_supply` (power supply properties)
- - `ipc` (IPC semaphores and message queues)
- - `ksm` Kernel Same-Page Merging performance (several files under `/sys/kernel/mm/ksm`).
- - `netdata` (internal netdata resources utilization)
-
-
----
+- `/proc/net/dev` (all network interfaces for all their values)
+- `/proc/diskstats` (all disks for all their values)
+- `/proc/mdstat` (status of RAID arrays)
+- `/proc/net/snmp` (total IPv4, TCP and UDP usage)
+- `/proc/net/snmp6` (total IPv6 usage)
+- `/proc/net/netstat` (more IPv4 usage)
+- `/proc/net/stat/nf_conntrack` (connection tracking performance)
+- `/proc/net/stat/synproxy` (synproxy performance)
+- `/proc/net/ip_vs/stats` (IPVS connection statistics)
+- `/proc/stat` (CPU utilization and attributes)
+- `/proc/meminfo` (memory information)
+- `/proc/vmstat` (system performance)
+- `/proc/net/rpc/nfsd` (NFS server statistics for both v3 and v4 NFS servers)
+- `/sys/fs/cgroup` (Control Groups - Linux Containers)
+- `/proc/self/mountinfo` (mount points)
+- `/proc/interrupts` (total and per core hardware interrupts)
+- `/proc/softirqs` (total and per core software interrupts)
+- `/proc/loadavg` (system load and total processes running)
+- `/proc/sys/kernel/random/entropy_avail` (random numbers pool availability - used in cryptography)
+- `/sys/class/power_supply` (power supply properties)
+- `ipc` (IPC semaphores and message queues)
+- `ksm` Kernel Same-Page Merging performance (several files under `/sys/kernel/mm/ksm`).
+- `netdata` (internal Netdata resources utilization)
+
+- - -
## Monitoring Disks
@@ -37,57 +36,57 @@ Hopefully, the Linux kernel provides many metrics that can provide deep insights
### Monitored disk metrics
-- **I/O bandwidth/s (kb/s)**
- The amount of data transferred from and to the disk.
-- **I/O operations/s**
- The number of I/O operations completed.
-- **Queued I/O operations**
- The number of currently queued I/O operations. For traditional disks that execute commands one after another, one of them is being run by the disk and the rest are just waiting in a queue.
-- **Backlog size (time in ms)**
- The expected duration of the currently queued I/O operations.
-- **Utilization (time percentage)**
- The percentage of time the disk was busy with something. This is a very interesting metric, since for most disks, that execute commands sequentially, **this is the key indication of congestion**. A sequential disk that is 100% of the available time busy, has no time to do anything more, so even if the bandwidth or the number of operations executed by the disk is low, its capacity has been reached.
- Of course, for newer disk technologies (like fusion cards) that are capable to execute multiple commands in parallel, this metric is just meaningless.
-- **Average I/O operation time (ms)**
- The average time for I/O requests issued to the device to be served. This includes the time spent by the requests in queue and the time spent servicing them.
-- **Average I/O operation size (kb)**
- The average amount of data of the completed I/O operations.
-- **Average Service Time (ms)**
- The average service time for completed I/O operations. This metric is calculated using the total busy time of the disk and the number of completed operations. If the disk is able to execute multiple parallel operations the reporting average service time will be misleading.
-- **Merged I/O operations/s**
- The Linux kernel is capable of merging I/O operations. So, if two requests to read data from the disk are adjacent, the Linux kernel may merge them to one before giving them to disk. This metric measures the number of operations that have been merged by the Linux kernel.
-- **Total I/O time**
- The sum of the duration of all completed I/O operations. This number can exceed the interval if the disk is able to execute multiple I/O operations in parallel.
-- **Space usage**
- For mounted disks, netdata will provide a chart for their space, with 3 dimensions:
- 1. free
- 2. used
- 3. reserved for root
-- **inode usage**
- For mounted disks, netdata will provide a chart for their inodes (number of file and directories), with 3 dimensions:
- 1. free
- 2. used
- 3. reserved for root
+- **I/O bandwidth/s (kb/s)**
+ The amount of data transferred from and to the disk.
+- **I/O operations/s**
+ The number of I/O operations completed.
+- **Queued I/O operations**
+ The number of currently queued I/O operations. For traditional disks that execute commands one after another, one of them is being run by the disk and the rest are just waiting in a queue.
+- **Backlog size (time in ms)**
+ The expected duration of the currently queued I/O operations.
+- **Utilization (time percentage)**
+ The percentage of time the disk was busy with something. This is a very interesting metric, because for most disks that execute commands sequentially **it is the key indication of congestion**. A sequential disk that is busy 100% of the available time has no room to do anything more, so even if the bandwidth or the number of operations it executes is low, its capacity has been reached.
+ Of course, for newer disk technologies (like fusion cards) that are capable of executing multiple commands in parallel, this metric is meaningless.
+- **Average I/O operation time (ms)**
+ The average time for I/O requests issued to the device to be served. This includes the time spent by the requests in queue and the time spent servicing them.
+- **Average I/O operation size (kb)**
+ The average amount of data of the completed I/O operations.
+- **Average Service Time (ms)**
+ The average service time for completed I/O operations. This metric is calculated using the total busy time of the disk and the number of completed operations. If the disk is able to execute multiple operations in parallel, the reported average service time will be misleading.
+- **Merged I/O operations/s**
+ The Linux kernel is capable of merging I/O operations. So, if two requests to read data from the disk are adjacent, the Linux kernel may merge them into one before issuing them to the disk. This metric measures the number of operations merged by the Linux kernel.
+- **Total I/O time**
+ The sum of the duration of all completed I/O operations. This number can exceed the interval if the disk is able to execute multiple I/O operations in parallel.
+- **Space usage**
+ For mounted disks, Netdata will provide a chart for their space, with 3 dimensions:
+ 1. free
+ 2. used
+ 3. reserved for root
+- **inode usage**
+ For mounted disks, Netdata will provide a chart for their inodes (number of files and directories), with 3 dimensions:
+ 1. free
+ 2. used
+ 3. reserved for root
### disk names
-netdata will automatically set the name of disks on the dashboard, from the mount point they are mounted, of course only when they are mounted. Changes in mount points are not currently detected (you will have to restart netdata to change the name of the disk). To use disk IDs provided by `/dev/disk/by-id`, the `name disks by id` option should be enabled. The `preferred disk ids` simple pattern allows choosing disk IDs to be used in the first place.
+Netdata will automatically set the names of disks on the dashboard from the mount points where they are mounted (only while they are mounted, of course). Changes in mount points are not currently detected (you will have to restart Netdata to rename a disk). To use the disk IDs provided by `/dev/disk/by-id`, enable the `name disks by id` option. The `preferred disk ids` simple pattern allows choosing which disk IDs are used first.
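+
+For example, a minimal sketch (assuming these two options belong to the `[plugin:proc:/proc/diskstats]` section used throughout this page; the `*nvme*` pattern is purely illustrative):
+
+```
+[plugin:proc:/proc/diskstats]
+    name disks by id = yes
+    preferred disk ids = *nvme*
+```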
### performance metrics
-By default, Netdata will enable monitoring metrics only when they are not zero. If they are constantly zero they are ignored. Metrics that will start having values, after netdata is started, will be detected and charts will be automatically added to the dashboard (a refresh of the dashboard is needed for them to appear though). Set `yes` for a chart instead of `auto` to enable it permanently. You can also set the `enable zero metrics` option to `yes` in the `[global]` section which enables charts with zero metrics for all internal Netdata plugins.
+By default, Netdata will enable monitoring metrics only when they are not zero; if they are constantly zero they are ignored. Metrics that start having values after Netdata is started are detected automatically, and charts are added to the dashboard (a refresh of the dashboard is needed for them to appear). Set a chart to `yes` instead of `auto` to enable it permanently. You can also set the `enable zero metrics` option to `yes` in the `[global]` section, which enables charts with zero metrics for all internal Netdata plugins.
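+
+For example, to permanently keep zero-valued charts for every internal plugin (a minimal sketch of the `[global]` option described above):
+
+```
+[global]
+    enable zero metrics = yes
+```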
-netdata categorizes all block devices in 3 categories:
+Netdata categorizes all block devices in 3 categories:
-1. physical disks (i.e. block devices that does not have slaves and are not partitions)
-2. virtual disks (i.e. block devices that have slaves - like RAID devices)
-3. disk partitions (i.e. block devices that are part of a physical disk)
+1. physical disks (i.e. block devices that do not have slaves and are not partitions)
+2. virtual disks (i.e. block devices that have slaves - like RAID devices)
+3. disk partitions (i.e. block devices that are part of a physical disk)
-Performance metrics are enabled by default for all disk devices, except partitions and not-mounted virtual disks. Of course, you can enable/disable monitoring any block device by editing the netdata configuration file.
+Performance metrics are enabled by default for all disk devices, except partitions and unmounted virtual disks. Of course, you can enable/disable monitoring for any block device by editing the Netdata configuration file.
-### netdata configuration
+### Netdata configuration
-You can get the running netdata configuration using this:
+You can get the running Netdata configuration using this:
```sh
cd /etc/netdata
@@ -144,21 +143,26 @@ For each virtual disk, physical disk and partition you will have a section like
```
For all configuration options:
-- `auto` = enable monitoring if the collected values are not zero
-- `yes` = enable monitoring
-- `no` = disable monitoring
+
+- `auto` = enable monitoring if the collected values are not zero
+- `yes` = enable monitoring
+- `no` = disable monitoring
Of course, to set options, you will have to uncomment them. The comments show the internal defaults.
-After saving `/etc/netdata/netdata.conf`, restart your netdata to apply them.
+After saving `/etc/netdata/netdata.conf`, restart Netdata to apply the changes.
#### Disabling performance metrics for an individual device or for multiple devices by device type
+
You can easily disable performance metrics for an individual device, for example:
+
```
[plugin:proc:/proc/diskstats:sda]
enable performance metrics = no
```
+
Sometimes, though, you need to disable performance metrics for all devices of the same type; to do that, first figure out the device type (its major number) from `/proc/diskstats`, for example:
+
```
7 0 loop0 1651 0 3452 168 0 0 0 0 0 8 168
7 1 loop1 4955 0 11924 880 0 0 0 0 0 64 880
@@ -168,8 +172,10 @@ But sometimes you need disable performance metrics for all devices with the same
251 2 zram2 27487 0 219896 188 79953 0 639624 1640 0 1828 1828
251 3 zram3 27348 0 218784 152 79952 0 639616 1960 0 2060 2104
```
+
All zram devices start with major number `251` and all loop devices start with `7`.
So, to disable performance metrics for all loop devices, you could add `performance metrics for disks with major 7 = no` to the `[plugin:proc:/proc/diskstats]` section.
+
```
[plugin:proc:/proc/diskstats]
performance metrics for disks with major 7 = no
@@ -179,26 +185,30 @@ So, to disable performance metrics for all loop devices you could add `performan
### Monitored RAID array metrics
-1. **Health** Number of failed disks in every array (aggregate chart).
+1. **Health**: Number of failed disks in every array (aggregate chart).
+
+2. **Disks stats**
+
+- total (number of devices the array would ideally have)
+- inuse (number of devices currently in use)
+
+3. **Mismatch count**
+
+- unsynchronized blocks
-2. **Disks stats**
- * total (number of devices array ideally would have)
- * inuse (number of devices currently are in use)
+4. **Current status**
-3. **Mismatch count**
- * unsynchronized blocks
+- resync in percent
+- recovery in percent
+- reshape in percent
+- check in percent
-4. **Current status**
- * resync in percent
- * recovery in percent
- * reshape in percent
- * check in percent
+5. **Operation status** (if resync/recovery/reshape/check is active)
-5. **Operation status** (if resync/recovery/reshape/check is active)
- * finish in minutes
- * speed in megabytes/s
+- finish in minutes
+- speed in megabytes/s
-6. **Nonredundant array availability**
+6. **Nonredundant array availability**
#### configuration
@@ -259,26 +269,31 @@ each state.
### Monitored network interface metrics
-- **Physical Network Interfaces Aggregated Bandwidth (kilobits/s)**
- The amount of data received and sent through all physical interfaces in the system. This is the source of data for the Net Inbound and Net Outbound dials in the System Overview section.
+- **Physical Network Interfaces Aggregated Bandwidth (kilobits/s)**
+ The amount of data received and sent through all physical interfaces in the system. This is the source of data for the Net Inbound and Net Outbound dials in the System Overview section.
-- **Bandwidth (kilobits/s)**
- The amount of data received and sent through the interface.
-- **Packets (packets/s)**
- The number of packets received, packets sent, and multicast packets transmitted through the interface.
+- **Bandwidth (kilobits/s)**
+ The amount of data received and sent through the interface.
-- **Interface Errors (errors/s)**
- The number of errors for the inbound and outbound traffic on the interface.
-- **Interface Drops (drops/s)**
- The number of packets dropped for the inbound and outbound traffic on the interface.
-- **Interface FIFO Buffer Errors (errors/s)**
- The number of FIFO buffer errors encountered while receiving and transmitting data through the interface.
-- **Compressed Packets (packets/s)**
- The number of compressed packets transmitted or received by the device driver.
-- **Network Interface Events (events/s)**
- The number of packet framing errors, collisions detected on the interface, and carrier losses detected by the device driver.
+- **Packets (packets/s)**
+ The number of packets received, packets sent, and multicast packets transmitted through the interface.
-By default netdata will enable monitoring metrics only when they are not zero. If they are constantly zero they are ignored. Metrics that will start having values, after netdata is started, will be detected and charts will be automatically added to the dashboard (a refresh of the dashboard is needed for them to appear though).
+- **Interface Errors (errors/s)**
+ The number of errors for the inbound and outbound traffic on the interface.
+
+- **Interface Drops (drops/s)**
+ The number of packets dropped for the inbound and outbound traffic on the interface.
+
+- **Interface FIFO Buffer Errors (errors/s)**
+ The number of FIFO buffer errors encountered while receiving and transmitting data through the interface.
+
+- **Compressed Packets (packets/s)**
+ The number of compressed packets transmitted or received by the device driver.
+
+- **Network Interface Events (events/s)**
+ The number of packet framing errors, collisions detected on the interface, and carrier losses detected by the device driver.
+
+By default, Netdata will enable monitoring metrics only when they are not zero; if they are constantly zero they are ignored. Metrics that start having values after Netdata is started are detected automatically, and charts are added to the dashboard (a refresh of the dashboard is needed for them to appear).
#### alarms
@@ -327,6 +342,7 @@ Per interface configuration:
![image6](https://cloud.githubusercontent.com/assets/2662304/14253733/53550b16-fa95-11e5-8d9d-4ed171df4735.gif)
---
+
SYNPROXY is a TCP SYN packet proxy. It can be used to protect any TCP server (like a web server) from SYN floods and similar DDoS attacks.
SYNPROXY is a netfilter module in the Linux kernel (since version 3.12). It is optimized to handle millions of packets per second, utilizing all available CPUs without any concurrency locking between connections.
@@ -335,25 +351,25 @@ The net effect of this, is that the real servers will not notice any change duri
Netdata does not enable SYNPROXY. It just uses the SYNPROXY metrics exposed by your kernel, so you will first need to configure it. The hard way is to run iptables SYNPROXY commands directly on the console. An easier way is to use [FireHOL](https://firehol.org/), which is a firewall manager for iptables. FireHOL can configure SYNPROXY using the following setup guides:
- - **[Working with SYNPROXY](https://github.com/firehol/firehol/wiki/Working-with-SYNPROXY)**
- - **[Working with SYNPROXY and traps](https://github.com/firehol/firehol/wiki/Working-with-SYNPROXY-and-traps)**
+- **[Working with SYNPROXY](https://github.com/firehol/firehol/wiki/Working-with-SYNPROXY)**
+- **[Working with SYNPROXY and traps](https://github.com/firehol/firehol/wiki/Working-with-SYNPROXY-and-traps)**
### Real-time monitoring of Linux Anti-DDoS
-netdata is able to monitor in real-time (per second updates) the operation of the Linux Anti-DDoS protection.
+Netdata is able to monitor the operation of the Linux Anti-DDoS protection in real time (per-second updates).
It visualizes 4 charts:
-1. TCP SYN Packets received on ports operated by SYNPROXY
-2. TCP Cookies (valid, invalid, retransmits)
-3. Connections Reopened
-4. Entries used
+1. TCP SYN Packets received on ports operated by SYNPROXY
+2. TCP Cookies (valid, invalid, retransmits)
+3. Connections Reopened
+4. Entries used
Example image:
![ddos](https://cloud.githubusercontent.com/assets/2662304/14398891/6016e3fc-fdf0-11e5-942b-55de6a52cb66.gif)
-See Linux Anti-DDoS in action at: **[netdata demo site (with SYNPROXY enabled)](https://registry.my-netdata.io/#menu_netfilter_submenu_synproxy)**
+See Linux Anti-DDoS in action at: **[Netdata demo site (with SYNPROXY enabled)](https://registry.my-netdata.io/#menu_netfilter_submenu_synproxy)**
## Linux power supply
@@ -364,29 +380,33 @@ battery capacity.
Depending on the underlying driver, it may provide the following charts
and metrics:
-1. Capacity: The power supply capacity expressed as a percentage.
- * capacity\_now
-
-2. Charge: The charge for the power supply, expressed as amphours.
- * charge\_full\_design
- * charge\_full
- * charge\_now
- * charge\_empty
- * charge\_empty\_design
-
-3. Energy: The energy for the power supply, expressed as watthours.
- * energy\_full\_design
- * energy\_full
- * energy\_now
- * energy\_empty
- * energy\_empty\_design
-
-2. Voltage: The voltage for the power supply, expressed as volts.
- * voltage\_max\_design
- * voltage\_max
- * voltage\_now
- * voltage\_min
- * voltage\_min\_design
+1. Capacity: The power supply capacity expressed as a percentage.
+
+ - capacity_now
+
+2. Charge: The charge for the power supply, expressed as ampere-hours.
+
+ - charge_full_design
+ - charge_full
+ - charge_now
+ - charge_empty
+ - charge_empty_design
+
+3. Energy: The energy for the power supply, expressed as watt-hours.
+
+ - energy_full_design
+ - energy_full
+ - energy_now
+ - energy_empty
+ - energy_empty_design
+
+4. Voltage: The voltage for the power supply, expressed as volts.
+
+ - voltage_max_design
+ - voltage_max
+ - voltage_now
+ - voltage_min
+ - voltage_min_design
#### configuration
@@ -402,30 +422,30 @@ and metrics:
#### notes
-* Most drivers provide at least the first chart. Battery powered ACPI
-compliant systems (like most laptops) provide all but the third, but do
-not provide all of the metrics for each chart.
+- Most drivers provide at least the first chart. Battery powered ACPI
+ compliant systems (like most laptops) provide all but the third, but do
+ not provide all of the metrics for each chart.
-* Current, energy, and voltages are reported with a _very_ high precision
-by the power\_supply framework. Usually, this is far higher than the
-actual hardware supports reporting, so expect to see changes in these
-charts jump instead of scaling smoothly.
+- Current, energy, and voltages are reported with a *very* high precision
+ by the power_supply framework. Usually, this is far higher than the
+ actual hardware supports reporting, so expect to see changes in these
+ charts jump instead of scaling smoothly.
-* If `max` or `full` attribute is defined by the driver, but not a
-corresponding `min` or `empty` attribute, then Netdata will still provide
-the corresponding `min` or `empty`, which will then always read as zero.
-This way, alerts which match on these will still work.
+- If `max` or `full` attribute is defined by the driver, but not a
+ corresponding `min` or `empty` attribute, then Netdata will still provide
+ the corresponding `min` or `empty`, which will then always read as zero.
+ This way, alerts which match on these will still work.
## IPC
### Monitored IPC metrics
-- **number of messages in message queues**
-- **amount of memory used by message queues**
-- **number of semaphores**
-- **number of semaphore arrays**
-- **number of shared memory segments**
-- **amount of memory used by shared memory segments**
+- **number of messages in message queues**
+- **amount of memory used by message queues**
+- **number of semaphores**
+- **number of semaphore arrays**
+- **number of shared memory segments**
+- **amount of memory used by shared memory segments**
Since the message queue charts are dynamic, sane limits are applied to the number of dimensions per chart (the limit is configurable).
@@ -441,4 +461,4 @@ As far as the message queue charts are dynamic, sane limits are applied for the
# max dimensions in memory allowed = 50
```
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fproc.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fproc.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/proc.plugin/plugin_proc.c b/collectors/proc.plugin/plugin_proc.c
index 343acfa3..62e974cf 100644
--- a/collectors/proc.plugin/plugin_proc.c
+++ b/collectors/proc.plugin/plugin_proc.c
@@ -29,6 +29,7 @@ static struct proc_module {
{ .name = "/proc/vmstat", .dim = "vmstat", .func = do_proc_vmstat },
{ .name = "/proc/meminfo", .dim = "meminfo", .func = do_proc_meminfo },
{ .name = "/sys/kernel/mm/ksm", .dim = "ksm", .func = do_sys_kernel_mm_ksm },
+ { .name = "/sys/block/zram", .dim = "zram", .func = do_sys_block_zram },
{ .name = "/sys/devices/system/edac/mc", .dim = "ecc", .func = do_proc_sys_devices_system_edac_mc },
{ .name = "/sys/devices/system/node", .dim = "numa", .func = do_proc_sys_devices_system_node },
diff --git a/collectors/proc.plugin/plugin_proc.h b/collectors/proc.plugin/plugin_proc.h
index 0c2afe77..40a0e82d 100644
--- a/collectors/proc.plugin/plugin_proc.h
+++ b/collectors/proc.plugin/plugin_proc.h
@@ -41,6 +41,7 @@ extern int do_proc_sys_kernel_random_entropy_avail(int update_every, usec_t dt);
extern int do_proc_interrupts(int update_every, usec_t dt);
extern int do_proc_softirqs(int update_every, usec_t dt);
extern int do_sys_kernel_mm_ksm(int update_every, usec_t dt);
+extern int do_sys_block_zram(int update_every, usec_t dt);
extern int do_proc_loadavg(int update_every, usec_t dt);
extern int do_proc_net_stat_synproxy(int update_every, usec_t dt);
extern int do_proc_net_softnet_stat(int update_every, usec_t dt);
diff --git a/collectors/proc.plugin/proc_uptime.c b/collectors/proc.plugin/proc_uptime.c
index 142ae2d0..28b00e0d 100644
--- a/collectors/proc.plugin/proc_uptime.c
+++ b/collectors/proc.plugin/proc_uptime.c
@@ -2,76 +2,17 @@
#include "plugin_proc.h"
-static inline collected_number uptime_from_boottime(void) {
-#ifdef CLOCK_BOOTTIME_IS_AVAILABLE
- return now_boottime_usec() / 1000;
-#else
- error("uptime cannot be read from CLOCK_BOOTTIME on this system.");
- return 0;
-#endif
-}
-
-static procfile *read_proc_uptime_ff = NULL;
-static inline collected_number read_proc_uptime(void) {
- if(unlikely(!read_proc_uptime_ff)) {
- char filename[FILENAME_MAX + 1];
- snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/uptime");
-
- read_proc_uptime_ff = procfile_open(config_get("plugin:proc:/proc/uptime", "filename to monitor", filename), " \t", PROCFILE_FLAG_DEFAULT);
- if(unlikely(!read_proc_uptime_ff)) return 0;
- }
-
- read_proc_uptime_ff = procfile_readall(read_proc_uptime_ff);
- if(unlikely(!read_proc_uptime_ff)) return 0;
-
- if(unlikely(procfile_lines(read_proc_uptime_ff) < 1)) {
- error("/proc/uptime has no lines.");
- return 0;
- }
- if(unlikely(procfile_linewords(read_proc_uptime_ff, 0) < 1)) {
- error("/proc/uptime has less than 1 word in it.");
- return 0;
- }
-
- return (collected_number)(strtold(procfile_lineword(read_proc_uptime_ff, 0, 0), NULL) * 1000.0);
-}
-
int do_proc_uptime(int update_every, usec_t dt) {
(void)dt;
- static int use_boottime = -1;
-
- if(unlikely(use_boottime == -1)) {
- collected_number uptime_boottime = uptime_from_boottime();
- collected_number uptime_proc = read_proc_uptime();
-
- long long delta = (long long)uptime_boottime - (long long)uptime_proc;
- if(delta < 0) delta = -delta;
+ static char *uptime_filename = NULL;
+ if(!uptime_filename) {
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/uptime");
- if(delta <= 1000 && uptime_boottime != 0) {
- procfile_close(read_proc_uptime_ff);
- info("Using now_boottime_usec() for uptime (dt is %lld ms)", delta);
- use_boottime = 1;
- }
- else if(uptime_proc != 0) {
- info("Using /proc/uptime for uptime (dt is %lld ms)", delta);
- use_boottime = 0;
- }
- else {
- error("Cannot find any way to read uptime on this system.");
- return 1;
- }
+ uptime_filename = config_get("plugin:proc:/proc/uptime", "filename to monitor", filename);
}
- collected_number uptime;
- if(use_boottime)
- uptime = uptime_from_boottime();
- else
- uptime = read_proc_uptime();
-
-
- // --------------------------------------------------------------------
-
static RRDSET *st = NULL;
static RRDDIM *rd = NULL;
@@ -97,7 +38,7 @@ int do_proc_uptime(int update_every, usec_t dt) {
else
rrdset_next(st);
- rrddim_set_by_pointer(st, rd, uptime);
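+	// uptime_msec() (libnetdata) presumably encapsulates the CLOCK_BOOTTIME
+	// vs /proc/uptime selection that previously lived in this file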
+ rrddim_set_by_pointer(st, rd, uptime_msec(uptime_filename));
rrdset_done(st);
diff --git a/collectors/proc.plugin/sys_block_zram.c b/collectors/proc.plugin/sys_block_zram.c
new file mode 100644
index 00000000..a68a405d
--- /dev/null
+++ b/collectors/proc.plugin/sys_block_zram.c
@@ -0,0 +1,288 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_proc.h"
+
+#define PLUGIN_PROC_MODULE_ZRAM_NAME "/sys/block/zram"
+#define rrdset_obsolete_and_pointer_null(st) do { if(st) { rrdset_is_obsolete(st); (st) = NULL; } } while(st)
+
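+// Field order mirrors the columns of /sys/block/zram<id>/mm_stat, which
+// read_mm_stat() below parses positionally.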
+typedef struct mm_stat {
+ unsigned long long orig_data_size;
+ unsigned long long compr_data_size;
+ unsigned long long mem_used_total;
+ unsigned long long mem_limit;
+ unsigned long long mem_used_max;
+ unsigned long long same_pages;
+ unsigned long long pages_compacted;
+} MM_STAT;
+
+typedef struct zram_device {
+ procfile *file;
+
+ RRDSET *st_usage;
+ RRDDIM *rd_compr_data_size;
+ RRDDIM *rd_metadata_size;
+
+ RRDSET *st_savings;
+ RRDDIM *rd_original_size;
+ RRDDIM *rd_savings_size;
+
+ RRDSET *st_comp_ratio;
+ RRDDIM *rd_comp_ratio;
+
+ RRDSET *st_alloc_efficiency;
+ RRDDIM *rd_alloc_efficiency;
+} ZRAM_DEVICE;
+
+ // --------------------------------------------------------------------
+
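+// Scan /proc/devices (already read into `file`) for the "zram" entry and
+// return its major device number, or -1 if zram is not present.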
+static int try_get_zram_major_number(procfile *file) {
+ size_t i;
+ unsigned int lines = procfile_lines(file);
+ int id = -1;
+ char *name = NULL;
+ for (i = 0; i < lines; i++)
+ {
+ if (procfile_linewords(file, i) < 2)
+ continue;
+ name = procfile_lineword(file, i, 1);
+ if (strcmp(name, "zram") == 0)
+ {
+ id = str2i(procfile_lineword(file, i, 0));
+ if (id == 0)
+ return -1;
+ return id;
+ }
+ }
+ return -1;
+}
+
+static inline void init_rrd(const char *name, ZRAM_DEVICE *d, int update_every) {
+ char chart_name[RRD_ID_LENGTH_MAX + 1];
+
+ snprintfz(chart_name, RRD_ID_LENGTH_MAX, "zram_usage.%s", name);
+ d->st_usage = rrdset_create_localhost(
+ "mem"
+ , chart_name
+ , chart_name
+ , name
+ , "mem.zram_usage"
+ , "ZRAM Memory Usage"
+ , "MiB"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_ZRAM_NAME
+ , NETDATA_CHART_PRIO_MEM_ZRAM
+ , update_every
+ , RRDSET_TYPE_AREA);
+ d->rd_compr_data_size = rrddim_add(d->st_usage, "compressed", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ d->rd_metadata_size = rrddim_add(d->st_usage, "metadata", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+
+ snprintfz(chart_name, RRD_ID_LENGTH_MAX, "zram_savings.%s", name);
+ d->st_savings = rrdset_create_localhost(
+ "mem"
+ , chart_name
+ , chart_name
+ , name
+ , "mem.zram_savings"
+ , "ZRAM Memory Savings"
+ , "MiB"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_ZRAM_NAME
+ , NETDATA_CHART_PRIO_MEM_ZRAM_SAVINGS
+ , update_every
+ , RRDSET_TYPE_AREA);
+ d->rd_savings_size = rrddim_add(d->st_savings, "savings", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ d->rd_original_size = rrddim_add(d->st_savings, "original", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+
+ snprintfz(chart_name, RRD_ID_LENGTH_MAX, "zram_ratio.%s", name);
+ d->st_comp_ratio = rrdset_create_localhost(
+ "mem"
+ , chart_name
+ , chart_name
+ , name
+ , "mem.zram_ratio"
+ , "ZRAM Compression Ratio (original to compressed)"
+ , "ratio"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_ZRAM_NAME
+ , NETDATA_CHART_PRIO_MEM_ZRAM_RATIO
+ , update_every
+ , RRDSET_TYPE_LINE);
+ d->rd_comp_ratio = rrddim_add(d->st_comp_ratio, "ratio", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
+
+ snprintfz(chart_name, RRD_ID_LENGTH_MAX, "zram_efficiency.%s", name);
+ d->st_alloc_efficiency = rrdset_create_localhost(
+ "mem"
+ , chart_name
+ , chart_name
+ , name
+ , "mem.zram_efficiency"
+ , "ZRAM Efficiency"
+ , "percentage"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_ZRAM_NAME
+ , NETDATA_CHART_PRIO_MEM_ZRAM_EFFICIENCY
+ , update_every
+ , RRDSET_TYPE_LINE);
+ d->rd_alloc_efficiency = rrddim_add(d->st_alloc_efficiency, "percent", NULL, 1, 10000, RRD_ALGORITHM_ABSOLUTE);
+}
+
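+// Walk /dev and, for each block device whose major number matches zram_id,
+// open its /sys/block/<name>/mm_stat and create its charts; returns the
+// number of zram devices found.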
+static int init_devices(DICTIONARY *devices, unsigned int zram_id, int update_every) {
+ int count = 0;
+ DIR *dir = opendir("/dev");
+ struct dirent *de;
+ struct stat st;
+ char filename[FILENAME_MAX + 1];
+ procfile *ff = NULL;
+ ZRAM_DEVICE device;
+
+ if (unlikely(!dir))
+ return 0;
+ while ((de = readdir(dir)))
+ {
+ snprintfz(filename, FILENAME_MAX, "/dev/%s", de->d_name);
+ if (unlikely(stat(filename, &st) != 0))
+ {
+ error("ZRAM : Unable to stat %s: %s", filename, strerror(errno));
+ continue;
+ }
+ if (major(st.st_rdev) == zram_id)
+ {
+ info("ZRAM : Found device %s", filename);
+ snprintfz(filename, FILENAME_MAX, "/sys/block/%s/mm_stat", de->d_name);
+ ff = procfile_open(filename, " \t:", PROCFILE_FLAG_DEFAULT);
+ if (ff == NULL)
+ {
+ error("ZRAM : Failed to open %s: %s", filename, strerror(errno));
+ continue;
+ }
+ device.file = ff;
+ init_rrd(de->d_name, &device, update_every);
+ dictionary_set(devices, de->d_name, &device, sizeof(ZRAM_DEVICE));
+ count++;
+ }
+ }
+ closedir(dir);
+ return count;
+}
+
+static void free_device(DICTIONARY *dict, char *name)
+{
+ ZRAM_DEVICE *d = (ZRAM_DEVICE*)dictionary_get(dict, name);
+ info("ZRAM : Disabling monitoring of device %s", name);
+ rrdset_obsolete_and_pointer_null(d->st_usage);
+ rrdset_obsolete_and_pointer_null(d->st_savings);
+ rrdset_obsolete_and_pointer_null(d->st_alloc_efficiency);
+ rrdset_obsolete_and_pointer_null(d->st_comp_ratio);
+ dictionary_del(dict, name);
+}
+ // --------------------------------------------------------------------
+
+static inline int read_mm_stat(procfile *ff, MM_STAT *stats) {
+ ff = procfile_readall(ff);
+ if (!ff)
+ return -1;
+ if (procfile_lines(ff) < 1)
+ return -1;
+ if (procfile_linewords(ff, 0) < 7)
+ return -1;
+
+ stats->orig_data_size = str2ull(procfile_word(ff, 0));
+ stats->compr_data_size = str2ull(procfile_word(ff, 1));
+ stats->mem_used_total = str2ull(procfile_word(ff, 2));
+ stats->mem_limit = str2ull(procfile_word(ff, 3));
+ stats->mem_used_max = str2ull(procfile_word(ff, 4));
+ stats->same_pages = str2ull(procfile_word(ff, 5));
+ stats->pages_compacted = str2ull(procfile_word(ff, 6));
+ return 0;
+}
+
+static inline int _collect_zram_metrics(char* name, ZRAM_DEVICE *d, int advance, DICTIONARY* dict) {
+ MM_STAT mm;
+ int value;
+ if (unlikely(read_mm_stat(d->file, &mm) < 0))
+ {
+ free_device(dict, name);
+ return -1;
+ }
+
+ if (likely(advance))
+ {
+ rrdset_next(d->st_usage);
+ rrdset_next(d->st_savings);
+ rrdset_next(d->st_comp_ratio);
+ rrdset_next(d->st_alloc_efficiency);
+ }
+ // zram_usage
+ rrddim_set_by_pointer(d->st_usage, d->rd_compr_data_size, mm.compr_data_size);
+ rrddim_set_by_pointer(d->st_usage, d->rd_metadata_size, mm.mem_used_total - mm.compr_data_size);
+ rrdset_done(d->st_usage);
+ // zram_savings
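+ // note: compr_data_size - orig_data_size underflows here (both fields are
+ // unsigned), and the wrapped value converts to a negative collected_number,
+ // so the savings dimension plots below zero opposite the original size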
+ rrddim_set_by_pointer(d->st_savings, d->rd_savings_size, mm.compr_data_size - mm.orig_data_size);
+ rrddim_set_by_pointer(d->st_savings, d->rd_original_size, mm.orig_data_size);
+ rrdset_done(d->st_savings);
+ // zram_ratio
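+ // ratio is collected as orig/compr scaled by 100; the dimension divisor of
+ // 100 (see init_rrd) renders it back as a ratio with two decimals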
+ value = mm.compr_data_size == 0 ? 1 : mm.orig_data_size * 100 / mm.compr_data_size;
+ rrddim_set_by_pointer(d->st_comp_ratio, d->rd_comp_ratio, value);
+ rrdset_done(d->st_comp_ratio);
+ // zram_efficiency
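+ // efficiency is collected as compr/mem_used_total in parts per million; the
+ // dimension divisor of 10000 (see init_rrd) renders it as a percentage with
+ // two decimals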
+ value = mm.mem_used_total == 0 ? 100 : (mm.compr_data_size * 1000000 / mm.mem_used_total);
+ rrddim_set_by_pointer(d->st_alloc_efficiency, d->rd_alloc_efficiency, value);
+ rrdset_done(d->st_alloc_efficiency);
+ return 0;
+}
+
+static int collect_first_zram_metrics(char *name, void *entry, void *data) {
+ // collect without calling rrdset_next (init only)
+ return _collect_zram_metrics(name, (ZRAM_DEVICE *)entry, 0, (DICTIONARY *)data);
+}
+
+static int collect_zram_metrics(char *name, void *entry, void *data) {
+ // collect, calling rrdset_next() to advance the charts
+ return _collect_zram_metrics(name, (ZRAM_DEVICE *)entry, 1, (DICTIONARY *)data);
+}
+
+ // --------------------------------------------------------------------
+
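+// One-time lazy setup: the first call resolves the zram major number from
+// /proc/devices, builds the device dictionary and collects once without
+// advancing the charts; subsequent calls just iterate the dictionary.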
+int do_sys_block_zram(int update_every, usec_t dt) {
+ (void)dt;
+ static procfile *ff = NULL;
+ static DICTIONARY *devices = NULL;
+ static int initialized = 0;
+ static int device_count = 0;
+ int zram_id = -1;
+ if (unlikely(!initialized))
+ {
+ initialized = 1;
+ ff = procfile_open("/proc/devices", " \t:", PROCFILE_FLAG_DEFAULT);
+ if (ff == NULL)
+ {
+ error("Cannot read /proc/devices");
+ return 1;
+ }
+ ff = procfile_readall(ff);
+ if (!ff)
+ return 1;
+ zram_id = try_get_zram_major_number(ff);
+ if (zram_id == -1)
+ {
+ if (ff != NULL)
+ procfile_close(ff);
+ return 1;
+ }
+ procfile_close(ff);
+
+ devices = dictionary_create(DICTIONARY_FLAG_SINGLE_THREADED);
+ device_count = init_devices(devices, (unsigned int)zram_id, update_every);
+ if (device_count < 1)
+ return 1;
+ dictionary_get_all_name_value(devices, collect_first_zram_metrics, devices);
+ }
+ else
+ {
+ if (unlikely(device_count < 1))
+ return 1;
+ dictionary_get_all_name_value(devices, collect_zram_metrics, devices);
+ }
+ return 0;
+}
\ No newline at end of file
diff --git a/collectors/python.d.plugin/Makefile.in b/collectors/python.d.plugin/Makefile.in
new file mode 100644
index 00000000..d39e4a05
--- /dev/null
+++ b/collectors/python.d.plugin/Makefile.in
@@ -0,0 +1,2063 @@
+# Makefile.in generated by automake 1.15.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2017 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+VPATH = @srcdir@
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = collectors/python.d.plugin
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(dist_plugins_SCRIPTS) \
+ $(dist_python_SCRIPTS) $(dist_bases_DATA) \
+ $(dist_bases_framework_services_DATA) $(dist_libconfig_DATA) \
+ $(dist_noinst_DATA) $(dist_python_DATA) \
+ $(dist_python_urllib3_DATA) \
+ $(dist_python_urllib3_backports_DATA) \
+ $(dist_python_urllib3_contrib_DATA) \
+ $(dist_python_urllib3_packages_DATA) \
+ $(dist_python_urllib3_securetransport_DATA) \
+ $(dist_python_urllib3_ssl_match_hostname_DATA) \
+ $(dist_python_urllib3_util_DATA) $(dist_pythonconfig_DATA) \
+ $(dist_pythonmodules_DATA) $(dist_pythonyaml2_DATA) \
+ $(dist_pythonyaml3_DATA) $(dist_third_party_DATA) \
+ $(dist_userpythonconfig_DATA) $(am__DIST_COMMON)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
+am__vpath_adj = case $$p in \
+ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
+ *) f=$$p;; \
+ esac;
+am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
+am__install_max = 40
+am__nobase_strip_setup = \
+ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
+am__nobase_strip = \
+ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
+am__nobase_list = $(am__nobase_strip_setup); \
+ for p in $$list; do echo "$$p $$p"; done | \
+ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
+ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
+ if (++n[$$2] == $(am__install_max)) \
+ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
+ END { for (dir in files) print dir, files[dir] }'
+am__base_list = \
+ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
+ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
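+# (The two sed passes above join newline-separated file names into lines of
+# 8 x 5 = 40 entries, matching am__install_max, so each install command
+# below receives batches of files rather than one file per invocation.)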
+am__uninstall_files_from_dir = { \
+ test -z "$$files" \
+ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \
+ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \
+ $(am__cd) "$$dir" && rm -f $$files; }; \
+ }
+am__installdirs = "$(DESTDIR)$(pluginsdir)" "$(DESTDIR)$(pythondir)" \
+ "$(DESTDIR)$(basesdir)" \
+ "$(DESTDIR)$(bases_framework_servicesdir)" \
+ "$(DESTDIR)$(libconfigdir)" "$(DESTDIR)$(pythondir)" \
+ "$(DESTDIR)$(python_urllib3dir)" \
+ "$(DESTDIR)$(python_urllib3_backportsdir)" \
+ "$(DESTDIR)$(python_urllib3_contribdir)" \
+ "$(DESTDIR)$(python_urllib3_packagesdir)" \
+ "$(DESTDIR)$(python_urllib3_securetransportdir)" \
+ "$(DESTDIR)$(python_urllib3_ssl_match_hostnamedir)" \
+ "$(DESTDIR)$(python_urllib3_utildir)" \
+ "$(DESTDIR)$(pythonconfigdir)" "$(DESTDIR)$(pythonmodulesdir)" \
+ "$(DESTDIR)$(pythonyaml2dir)" "$(DESTDIR)$(pythonyaml3dir)" \
+ "$(DESTDIR)$(third_partydir)" \
+ "$(DESTDIR)$(userpythonconfigdir)"
+SCRIPTS = $(dist_plugins_SCRIPTS) $(dist_python_SCRIPTS)
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_bases_DATA) $(dist_bases_framework_services_DATA) \
+ $(dist_libconfig_DATA) $(dist_noinst_DATA) $(dist_python_DATA) \
+ $(dist_python_urllib3_DATA) \
+ $(dist_python_urllib3_backports_DATA) \
+ $(dist_python_urllib3_contrib_DATA) \
+ $(dist_python_urllib3_packages_DATA) \
+ $(dist_python_urllib3_securetransport_DATA) \
+ $(dist_python_urllib3_ssl_match_hostname_DATA) \
+ $(dist_python_urllib3_util_DATA) $(dist_pythonconfig_DATA) \
+ $(dist_pythonmodules_DATA) $(dist_pythonyaml2_DATA) \
+ $(dist_pythonyaml3_DATA) $(dist_third_party_DATA) \
+ $(dist_userpythonconfig_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+am__DIST_COMMON = $(srcdir)/Makefile.in \
+ $(srcdir)/adaptec_raid/Makefile.inc \
+ $(srcdir)/apache/Makefile.inc $(srcdir)/beanstalk/Makefile.inc \
+ $(srcdir)/bind_rndc/Makefile.inc $(srcdir)/boinc/Makefile.inc \
+ $(srcdir)/ceph/Makefile.inc $(srcdir)/chrony/Makefile.inc \
+ $(srcdir)/couchdb/Makefile.inc \
+ $(srcdir)/dns_query_time/Makefile.inc \
+ $(srcdir)/dnsdist/Makefile.inc $(srcdir)/dockerd/Makefile.inc \
+ $(srcdir)/dovecot/Makefile.inc \
+ $(srcdir)/elasticsearch/Makefile.inc \
+ $(srcdir)/energid/Makefile.inc $(srcdir)/example/Makefile.inc \
+ $(srcdir)/exim/Makefile.inc $(srcdir)/fail2ban/Makefile.inc \
+ $(srcdir)/freeradius/Makefile.inc \
+ $(srcdir)/go_expvar/Makefile.inc \
+ $(srcdir)/haproxy/Makefile.inc $(srcdir)/hddtemp/Makefile.inc \
+ $(srcdir)/httpcheck/Makefile.inc \
+ $(srcdir)/icecast/Makefile.inc $(srcdir)/ipfs/Makefile.inc \
+ $(srcdir)/isc_dhcpd/Makefile.inc \
+ $(srcdir)/litespeed/Makefile.inc $(srcdir)/logind/Makefile.inc \
+ $(srcdir)/megacli/Makefile.inc \
+ $(srcdir)/memcached/Makefile.inc \
+ $(srcdir)/mongodb/Makefile.inc $(srcdir)/monit/Makefile.inc \
+ $(srcdir)/mysql/Makefile.inc $(srcdir)/nginx/Makefile.inc \
+ $(srcdir)/nginx_plus/Makefile.inc $(srcdir)/nsd/Makefile.inc \
+ $(srcdir)/ntpd/Makefile.inc $(srcdir)/nvidia_smi/Makefile.inc \
+ $(srcdir)/openldap/Makefile.inc \
+ $(srcdir)/oracledb/Makefile.inc \
+ $(srcdir)/ovpn_status_log/Makefile.inc \
+ $(srcdir)/phpfpm/Makefile.inc $(srcdir)/portcheck/Makefile.inc \
+ $(srcdir)/postfix/Makefile.inc $(srcdir)/postgres/Makefile.inc \
+ $(srcdir)/powerdns/Makefile.inc \
+ $(srcdir)/proxysql/Makefile.inc $(srcdir)/puppet/Makefile.inc \
+ $(srcdir)/rabbitmq/Makefile.inc $(srcdir)/redis/Makefile.inc \
+ $(srcdir)/rethinkdbs/Makefile.inc \
+ $(srcdir)/retroshare/Makefile.inc \
+ $(srcdir)/riakkv/Makefile.inc $(srcdir)/samba/Makefile.inc \
+ $(srcdir)/sensors/Makefile.inc \
+ $(srcdir)/smartd_log/Makefile.inc \
+ $(srcdir)/spigotmc/Makefile.inc \
+ $(srcdir)/springboot/Makefile.inc $(srcdir)/squid/Makefile.inc \
+ $(srcdir)/tomcat/Makefile.inc $(srcdir)/tor/Makefile.inc \
+ $(srcdir)/traefik/Makefile.inc $(srcdir)/unbound/Makefile.inc \
+ $(srcdir)/uwsgi/Makefile.inc $(srcdir)/varnish/Makefile.inc \
+ $(srcdir)/w1sensor/Makefile.inc $(srcdir)/web_log/Makefile.inc \
+ $(top_srcdir)/build/subst.inc
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CUPSCONFIG = @CUPSCONFIG@
+CXX = @CXX@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CXX_BINARY = @CXX_BINARY@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+JSON_CFLAGS = @JSON_CFLAGS@
+JSON_LIBS = @JSON_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
+LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
+LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
+LIBCURL_LIBS = @LIBCURL_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
+LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
+LIBSSL_LIBS = @LIBSSL_LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
+OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
+OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
+OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
+OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
+OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
+OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
+OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
+OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
+OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
+OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
+OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
+OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
+PROTOBUF_LIBS = @PROTOBUF_LIBS@
+PROTOC = @PROTOC@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
+XENLIGHT_LIBS = @XENLIGHT_LIBS@
+YAJL_CFLAGS = @YAJL_CFLAGS@
+YAJL_LIBS = @YAJL_LIBS@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+CLEANFILES = \
+ python.d.plugin \
+ $(NULL)
+
+SUFFIXES = .in
+dist_libconfig_DATA = \
+ python.d.conf \
+ $(NULL)
+
+dist_plugins_SCRIPTS = \
+ python.d.plugin \
+ $(NULL)
+
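+# How python.d.plugin is produced (a sketch under assumptions; the real
+# recipe lives in $(top_srcdir)/build/subst.inc and may substitute
+# different tokens): the SUFFIXES entry above routes python.d.plugin.in
+# through a sed substitution of roughly this shape, which is why the
+# generated script is listed in CLEANFILES above:
+#
+#   .in:
+#           sed -e 's#[@]pythondir[@]#$(pythondir)#g' \
+#               -e 's#[@]configdir[@]#$(configdir)#g' \
+#               < $< > $@
+#           chmod +x $@
+#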
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA = python.d.plugin.in README.md $(NULL) \
+ adaptec_raid/README.md adaptec_raid/Makefile.inc \
+ apache/README.md apache/Makefile.inc beanstalk/README.md \
+ beanstalk/Makefile.inc bind_rndc/README.md \
+ bind_rndc/Makefile.inc boinc/README.md boinc/Makefile.inc \
+ ceph/README.md ceph/Makefile.inc chrony/README.md \
+ chrony/Makefile.inc couchdb/README.md couchdb/Makefile.inc \
+ dnsdist/README.md dnsdist/Makefile.inc \
+ dns_query_time/README.md dns_query_time/Makefile.inc \
+ dockerd/README.md dockerd/Makefile.inc dovecot/README.md \
+ dovecot/Makefile.inc elasticsearch/README.md \
+ elasticsearch/Makefile.inc energid/README.md \
+ energid/Makefile.inc example/README.md example/Makefile.inc \
+ exim/README.md exim/Makefile.inc fail2ban/README.md \
+ fail2ban/Makefile.inc freeradius/README.md \
+ freeradius/Makefile.inc go_expvar/README.md \
+ go_expvar/Makefile.inc haproxy/README.md haproxy/Makefile.inc \
+ hddtemp/README.md hddtemp/Makefile.inc httpcheck/README.md \
+ httpcheck/Makefile.inc icecast/README.md icecast/Makefile.inc \
+ ipfs/README.md ipfs/Makefile.inc isc_dhcpd/README.md \
+ isc_dhcpd/Makefile.inc litespeed/README.md \
+ litespeed/Makefile.inc logind/README.md logind/Makefile.inc \
+ megacli/README.md megacli/Makefile.inc memcached/README.md \
+ memcached/Makefile.inc mongodb/README.md mongodb/Makefile.inc \
+ monit/README.md monit/Makefile.inc mysql/README.md \
+ mysql/Makefile.inc nginx/README.md nginx/Makefile.inc \
+ nginx_plus/README.md nginx_plus/Makefile.inc \
+ nvidia_smi/README.md nvidia_smi/Makefile.inc nsd/README.md \
+ nsd/Makefile.inc ntpd/README.md ntpd/Makefile.inc \
+ ovpn_status_log/README.md ovpn_status_log/Makefile.inc \
+ openldap/README.md openldap/Makefile.inc oracledb/README.md \
+ oracledb/Makefile.inc phpfpm/README.md phpfpm/Makefile.inc \
+ portcheck/README.md portcheck/Makefile.inc postfix/README.md \
+ postfix/Makefile.inc postgres/README.md postgres/Makefile.inc \
+ powerdns/README.md powerdns/Makefile.inc proxysql/README.md \
+ proxysql/Makefile.inc puppet/README.md puppet/Makefile.inc \
+ rabbitmq/README.md rabbitmq/Makefile.inc redis/README.md \
+ redis/Makefile.inc rethinkdbs/README.md \
+ rethinkdbs/Makefile.inc retroshare/README.md \
+ retroshare/Makefile.inc riakkv/README.md riakkv/Makefile.inc \
+ samba/README.md samba/Makefile.inc sensors/README.md \
+ sensors/Makefile.inc smartd_log/README.md \
+ smartd_log/Makefile.inc spigotmc/README.md \
+ spigotmc/Makefile.inc springboot/README.md \
+ springboot/Makefile.inc squid/README.md squid/Makefile.inc \
+ tomcat/README.md tomcat/Makefile.inc tor/README.md \
+ tor/Makefile.inc traefik/README.md traefik/Makefile.inc \
+ unbound/README.md unbound/Makefile.inc uwsgi/README.md \
+ uwsgi/Makefile.inc varnish/README.md varnish/Makefile.inc \
+ w1sensor/README.md w1sensor/Makefile.inc web_log/README.md \
+ web_log/Makefile.inc
+dist_python_SCRIPTS = \
+ $(NULL)
+
+
+# install these files
+dist_python_DATA = $(NULL) adaptec_raid/adaptec_raid.chart.py \
+ apache/apache.chart.py beanstalk/beanstalk.chart.py \
+ bind_rndc/bind_rndc.chart.py boinc/boinc.chart.py \
+ ceph/ceph.chart.py chrony/chrony.chart.py \
+ couchdb/couchdb.chart.py dnsdist/dnsdist.chart.py \
+ dns_query_time/dns_query_time.chart.py \
+ dockerd/dockerd.chart.py dovecot/dovecot.chart.py \
+ elasticsearch/elasticsearch.chart.py energid/energid.chart.py \
+ example/example.chart.py exim/exim.chart.py \
+ fail2ban/fail2ban.chart.py freeradius/freeradius.chart.py \
+ go_expvar/go_expvar.chart.py haproxy/haproxy.chart.py \
+ hddtemp/hddtemp.chart.py httpcheck/httpcheck.chart.py \
+ icecast/icecast.chart.py ipfs/ipfs.chart.py \
+ isc_dhcpd/isc_dhcpd.chart.py litespeed/litespeed.chart.py \
+ logind/logind.chart.py megacli/megacli.chart.py \
+ memcached/memcached.chart.py mongodb/mongodb.chart.py \
+ monit/monit.chart.py mysql/mysql.chart.py nginx/nginx.chart.py \
+ nginx_plus/nginx_plus.chart.py nvidia_smi/nvidia_smi.chart.py \
+ nsd/nsd.chart.py ntpd/ntpd.chart.py \
+ ovpn_status_log/ovpn_status_log.chart.py \
+ openldap/openldap.chart.py oracledb/oracledb.chart.py \
+ phpfpm/phpfpm.chart.py portcheck/portcheck.chart.py \
+ postfix/postfix.chart.py postgres/postgres.chart.py \
+ powerdns/powerdns.chart.py proxysql/proxysql.chart.py \
+ puppet/puppet.chart.py rabbitmq/rabbitmq.chart.py \
+ redis/redis.chart.py rethinkdbs/rethinkdbs.chart.py \
+ retroshare/retroshare.chart.py riakkv/riakkv.chart.py \
+ samba/samba.chart.py sensors/sensors.chart.py \
+ smartd_log/smartd_log.chart.py spigotmc/spigotmc.chart.py \
+ springboot/springboot.chart.py squid/squid.chart.py \
+ tomcat/tomcat.chart.py tor/tor.chart.py \
+ traefik/traefik.chart.py unbound/unbound.chart.py \
+ uwsgi/uwsgi.chart.py varnish/varnish.chart.py \
+ w1sensor/w1sensor.chart.py web_log/web_log.chart.py
+userpythonconfigdir = $(configdir)/python.d
+dist_userpythonconfig_DATA = \
+ .keep \
+ $(NULL)
+
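+# Note: .keep is an empty placeholder; installing it guarantees that the
+# user override directory $(configdir)/python.d exists, while the stock
+# module configs below install under $(libconfigdir)/python.d.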
+pythonconfigdir = $(libconfigdir)/python.d
+dist_pythonconfig_DATA = $(NULL) adaptec_raid/adaptec_raid.conf \
+ apache/apache.conf beanstalk/beanstalk.conf \
+ bind_rndc/bind_rndc.conf boinc/boinc.conf ceph/ceph.conf \
+ chrony/chrony.conf couchdb/couchdb.conf dnsdist/dnsdist.conf \
+ dns_query_time/dns_query_time.conf dockerd/dockerd.conf \
+ dovecot/dovecot.conf elasticsearch/elasticsearch.conf \
+ energid/energid.conf example/example.conf exim/exim.conf \
+ fail2ban/fail2ban.conf freeradius/freeradius.conf \
+ go_expvar/go_expvar.conf haproxy/haproxy.conf \
+ hddtemp/hddtemp.conf httpcheck/httpcheck.conf \
+ icecast/icecast.conf ipfs/ipfs.conf isc_dhcpd/isc_dhcpd.conf \
+ litespeed/litespeed.conf logind/logind.conf \
+ megacli/megacli.conf memcached/memcached.conf \
+ mongodb/mongodb.conf monit/monit.conf mysql/mysql.conf \
+ nginx/nginx.conf nginx_plus/nginx_plus.conf \
+ nvidia_smi/nvidia_smi.conf nsd/nsd.conf ntpd/ntpd.conf \
+ ovpn_status_log/ovpn_status_log.conf openldap/openldap.conf \
+ oracledb/oracledb.conf phpfpm/phpfpm.conf \
+ portcheck/portcheck.conf postfix/postfix.conf \
+ postgres/postgres.conf powerdns/powerdns.conf \
+ proxysql/proxysql.conf puppet/puppet.conf \
+ rabbitmq/rabbitmq.conf redis/redis.conf \
+ rethinkdbs/rethinkdbs.conf retroshare/retroshare.conf \
+ riakkv/riakkv.conf samba/samba.conf sensors/sensors.conf \
+ smartd_log/smartd_log.conf spigotmc/spigotmc.conf \
+ springboot/springboot.conf squid/squid.conf tomcat/tomcat.conf \
+ tor/tor.conf traefik/traefik.conf unbound/unbound.conf \
+ uwsgi/uwsgi.conf varnish/varnish.conf w1sensor/w1sensor.conf \
+ web_log/web_log.conf
+pythonmodulesdir = $(pythondir)/python_modules
+dist_pythonmodules_DATA = \
+ python_modules/__init__.py \
+ $(NULL)
+
+basesdir = $(pythonmodulesdir)/bases
+dist_bases_DATA = \
+ python_modules/bases/__init__.py \
+ python_modules/bases/charts.py \
+ python_modules/bases/collection.py \
+ python_modules/bases/loaders.py \
+ python_modules/bases/loggers.py \
+ $(NULL)
+
+bases_framework_servicesdir = $(basesdir)/FrameworkServices
+dist_bases_framework_services_DATA = \
+ python_modules/bases/FrameworkServices/__init__.py \
+ python_modules/bases/FrameworkServices/ExecutableService.py \
+ python_modules/bases/FrameworkServices/LogService.py \
+ python_modules/bases/FrameworkServices/MySQLService.py \
+ python_modules/bases/FrameworkServices/SimpleService.py \
+ python_modules/bases/FrameworkServices/SocketService.py \
+ python_modules/bases/FrameworkServices/UrlService.py \
+ $(NULL)
+
+third_partydir = $(pythonmodulesdir)/third_party
+dist_third_party_DATA = \
+ python_modules/third_party/__init__.py \
+ python_modules/third_party/ordereddict.py \
+ python_modules/third_party/lm_sensors.py \
+ python_modules/third_party/mcrcon.py \
+ python_modules/third_party/boinc_client.py \
+ python_modules/third_party/monotonic.py \
+ $(NULL)
+
+pythonyaml2dir = $(pythonmodulesdir)/pyyaml2
+dist_pythonyaml2_DATA = \
+ python_modules/pyyaml2/__init__.py \
+ python_modules/pyyaml2/composer.py \
+ python_modules/pyyaml2/constructor.py \
+ python_modules/pyyaml2/cyaml.py \
+ python_modules/pyyaml2/dumper.py \
+ python_modules/pyyaml2/emitter.py \
+ python_modules/pyyaml2/error.py \
+ python_modules/pyyaml2/events.py \
+ python_modules/pyyaml2/loader.py \
+ python_modules/pyyaml2/nodes.py \
+ python_modules/pyyaml2/parser.py \
+ python_modules/pyyaml2/reader.py \
+ python_modules/pyyaml2/representer.py \
+ python_modules/pyyaml2/resolver.py \
+ python_modules/pyyaml2/scanner.py \
+ python_modules/pyyaml2/serializer.py \
+ python_modules/pyyaml2/tokens.py \
+ $(NULL)
+
+pythonyaml3dir = $(pythonmodulesdir)/pyyaml3
+dist_pythonyaml3_DATA = \
+ python_modules/pyyaml3/__init__.py \
+ python_modules/pyyaml3/composer.py \
+ python_modules/pyyaml3/constructor.py \
+ python_modules/pyyaml3/cyaml.py \
+ python_modules/pyyaml3/dumper.py \
+ python_modules/pyyaml3/emitter.py \
+ python_modules/pyyaml3/error.py \
+ python_modules/pyyaml3/events.py \
+ python_modules/pyyaml3/loader.py \
+ python_modules/pyyaml3/nodes.py \
+ python_modules/pyyaml3/parser.py \
+ python_modules/pyyaml3/reader.py \
+ python_modules/pyyaml3/representer.py \
+ python_modules/pyyaml3/resolver.py \
+ python_modules/pyyaml3/scanner.py \
+ python_modules/pyyaml3/serializer.py \
+ python_modules/pyyaml3/tokens.py \
+ $(NULL)
+
+python_urllib3dir = $(pythonmodulesdir)/urllib3
+dist_python_urllib3_DATA = \
+ python_modules/urllib3/__init__.py \
+ python_modules/urllib3/_collections.py \
+ python_modules/urllib3/connection.py \
+ python_modules/urllib3/connectionpool.py \
+ python_modules/urllib3/exceptions.py \
+ python_modules/urllib3/fields.py \
+ python_modules/urllib3/filepost.py \
+ python_modules/urllib3/response.py \
+ python_modules/urllib3/poolmanager.py \
+ python_modules/urllib3/request.py \
+ $(NULL)
+
+python_urllib3_utildir = $(python_urllib3dir)/util
+dist_python_urllib3_util_DATA = \
+ python_modules/urllib3/util/__init__.py \
+ python_modules/urllib3/util/connection.py \
+ python_modules/urllib3/util/request.py \
+ python_modules/urllib3/util/response.py \
+ python_modules/urllib3/util/retry.py \
+ python_modules/urllib3/util/selectors.py \
+ python_modules/urllib3/util/ssl_.py \
+ python_modules/urllib3/util/timeout.py \
+ python_modules/urllib3/util/url.py \
+ python_modules/urllib3/util/wait.py \
+ $(NULL)
+
+python_urllib3_packagesdir = $(python_urllib3dir)/packages
+dist_python_urllib3_packages_DATA = \
+ python_modules/urllib3/packages/__init__.py \
+ python_modules/urllib3/packages/ordered_dict.py \
+ python_modules/urllib3/packages/six.py \
+ $(NULL)
+
+python_urllib3_backportsdir = $(python_urllib3_packagesdir)/backports
+dist_python_urllib3_backports_DATA = \
+ python_modules/urllib3/packages/backports/__init__.py \
+ python_modules/urllib3/packages/backports/makefile.py \
+ $(NULL)
+
+python_urllib3_ssl_match_hostnamedir = $(python_urllib3_packagesdir)/ssl_match_hostname
+dist_python_urllib3_ssl_match_hostname_DATA = \
+ python_modules/urllib3/packages/ssl_match_hostname/__init__.py \
+ python_modules/urllib3/packages/ssl_match_hostname/_implementation.py \
+ $(NULL)
+
+python_urllib3_contribdir = $(python_urllib3dir)/contrib
+dist_python_urllib3_contrib_DATA = \
+ python_modules/urllib3/contrib/__init__.py \
+ python_modules/urllib3/contrib/appengine.py \
+ python_modules/urllib3/contrib/ntlmpool.py \
+ python_modules/urllib3/contrib/pyopenssl.py \
+ python_modules/urllib3/contrib/securetransport.py \
+ python_modules/urllib3/contrib/socks.py \
+ $(NULL)
+
+python_urllib3_securetransportdir = $(python_urllib3_contribdir)/_securetransport
+dist_python_urllib3_securetransport_DATA = \
+ python_modules/urllib3/contrib/_securetransport/__init__.py \
+ python_modules/urllib3/contrib/_securetransport/bindings.py \
+ python_modules/urllib3/contrib/_securetransport/low_level.py \
+ $(NULL)
+
+all: all-am
+
+.SUFFIXES:
+.SUFFIXES: .in
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(top_srcdir)/build/subst.inc $(srcdir)/adaptec_raid/Makefile.inc $(srcdir)/apache/Makefile.inc $(srcdir)/beanstalk/Makefile.inc $(srcdir)/bind_rndc/Makefile.inc $(srcdir)/boinc/Makefile.inc $(srcdir)/ceph/Makefile.inc $(srcdir)/chrony/Makefile.inc $(srcdir)/couchdb/Makefile.inc $(srcdir)/dnsdist/Makefile.inc $(srcdir)/dns_query_time/Makefile.inc $(srcdir)/dockerd/Makefile.inc $(srcdir)/dovecot/Makefile.inc $(srcdir)/elasticsearch/Makefile.inc $(srcdir)/energid/Makefile.inc $(srcdir)/example/Makefile.inc $(srcdir)/exim/Makefile.inc $(srcdir)/fail2ban/Makefile.inc $(srcdir)/freeradius/Makefile.inc $(srcdir)/go_expvar/Makefile.inc $(srcdir)/haproxy/Makefile.inc $(srcdir)/hddtemp/Makefile.inc $(srcdir)/httpcheck/Makefile.inc $(srcdir)/icecast/Makefile.inc $(srcdir)/ipfs/Makefile.inc $(srcdir)/isc_dhcpd/Makefile.inc $(srcdir)/litespeed/Makefile.inc $(srcdir)/logind/Makefile.inc $(srcdir)/megacli/Makefile.inc $(srcdir)/memcached/Makefile.inc $(srcdir)/mongodb/Makefile.inc $(srcdir)/monit/Makefile.inc $(srcdir)/mysql/Makefile.inc $(srcdir)/nginx/Makefile.inc $(srcdir)/nginx_plus/Makefile.inc $(srcdir)/nvidia_smi/Makefile.inc $(srcdir)/nsd/Makefile.inc $(srcdir)/ntpd/Makefile.inc $(srcdir)/ovpn_status_log/Makefile.inc $(srcdir)/openldap/Makefile.inc $(srcdir)/oracledb/Makefile.inc $(srcdir)/phpfpm/Makefile.inc $(srcdir)/portcheck/Makefile.inc $(srcdir)/postfix/Makefile.inc $(srcdir)/postgres/Makefile.inc $(srcdir)/powerdns/Makefile.inc $(srcdir)/proxysql/Makefile.inc $(srcdir)/puppet/Makefile.inc $(srcdir)/rabbitmq/Makefile.inc $(srcdir)/redis/Makefile.inc $(srcdir)/rethinkdbs/Makefile.inc $(srcdir)/retroshare/Makefile.inc $(srcdir)/riakkv/Makefile.inc $(srcdir)/samba/Makefile.inc $(srcdir)/sensors/Makefile.inc $(srcdir)/smartd_log/Makefile.inc $(srcdir)/spigotmc/Makefile.inc $(srcdir)/springboot/Makefile.inc $(srcdir)/squid/Makefile.inc $(srcdir)/tomcat/Makefile.inc $(srcdir)/tor/Makefile.inc $(srcdir)/traefik/Makefile.inc $(srcdir)/unbound/Makefile.inc $(srcdir)/uwsgi/Makefile.inc $(srcdir)/varnish/Makefile.inc $(srcdir)/w1sensor/Makefile.inc $(srcdir)/web_log/Makefile.inc $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/python.d.plugin/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu collectors/python.d.plugin/Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+$(top_srcdir)/build/subst.inc $(srcdir)/adaptec_raid/Makefile.inc $(srcdir)/apache/Makefile.inc $(srcdir)/beanstalk/Makefile.inc $(srcdir)/bind_rndc/Makefile.inc $(srcdir)/boinc/Makefile.inc $(srcdir)/ceph/Makefile.inc $(srcdir)/chrony/Makefile.inc $(srcdir)/couchdb/Makefile.inc $(srcdir)/dnsdist/Makefile.inc $(srcdir)/dns_query_time/Makefile.inc $(srcdir)/dockerd/Makefile.inc $(srcdir)/dovecot/Makefile.inc $(srcdir)/elasticsearch/Makefile.inc $(srcdir)/energid/Makefile.inc $(srcdir)/example/Makefile.inc $(srcdir)/exim/Makefile.inc $(srcdir)/fail2ban/Makefile.inc $(srcdir)/freeradius/Makefile.inc $(srcdir)/go_expvar/Makefile.inc $(srcdir)/haproxy/Makefile.inc $(srcdir)/hddtemp/Makefile.inc $(srcdir)/httpcheck/Makefile.inc $(srcdir)/icecast/Makefile.inc $(srcdir)/ipfs/Makefile.inc $(srcdir)/isc_dhcpd/Makefile.inc $(srcdir)/litespeed/Makefile.inc $(srcdir)/logind/Makefile.inc $(srcdir)/megacli/Makefile.inc $(srcdir)/memcached/Makefile.inc $(srcdir)/mongodb/Makefile.inc $(srcdir)/monit/Makefile.inc $(srcdir)/mysql/Makefile.inc $(srcdir)/nginx/Makefile.inc $(srcdir)/nginx_plus/Makefile.inc $(srcdir)/nvidia_smi/Makefile.inc $(srcdir)/nsd/Makefile.inc $(srcdir)/ntpd/Makefile.inc $(srcdir)/ovpn_status_log/Makefile.inc $(srcdir)/openldap/Makefile.inc $(srcdir)/oracledb/Makefile.inc $(srcdir)/phpfpm/Makefile.inc $(srcdir)/portcheck/Makefile.inc $(srcdir)/postfix/Makefile.inc $(srcdir)/postgres/Makefile.inc $(srcdir)/powerdns/Makefile.inc $(srcdir)/proxysql/Makefile.inc $(srcdir)/puppet/Makefile.inc $(srcdir)/rabbitmq/Makefile.inc $(srcdir)/redis/Makefile.inc $(srcdir)/rethinkdbs/Makefile.inc $(srcdir)/retroshare/Makefile.inc $(srcdir)/riakkv/Makefile.inc $(srcdir)/samba/Makefile.inc $(srcdir)/sensors/Makefile.inc $(srcdir)/smartd_log/Makefile.inc $(srcdir)/spigotmc/Makefile.inc $(srcdir)/springboot/Makefile.inc $(srcdir)/squid/Makefile.inc $(srcdir)/tomcat/Makefile.inc $(srcdir)/tor/Makefile.inc $(srcdir)/traefik/Makefile.inc $(srcdir)/unbound/Makefile.inc $(srcdir)/uwsgi/Makefile.inc $(srcdir)/varnish/Makefile.inc $(srcdir)/w1sensor/Makefile.inc $(srcdir)/web_log/Makefile.inc $(am__empty):
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+install-dist_pluginsSCRIPTS: $(dist_plugins_SCRIPTS)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(pluginsdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(pluginsdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \
+ done | \
+ sed -e 'p;s,.*/,,;n' \
+ -e 'h;s|.*|.|' \
+ -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \
+ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \
+ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \
+ if ($$2 == $$4) { files[d] = files[d] " " $$1; \
+ if (++n[d] == $(am__install_max)) { \
+ print "f", d, files[d]; n[d] = 0; files[d] = "" } } \
+ else { print "f", d "/" $$4, $$1 } } \
+ END { for (d in files) print "f", d, files[d] }' | \
+ while read type dir files; do \
+ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \
+ test -z "$$files" || { \
+ echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pluginsdir)$$dir'"; \
+ $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pluginsdir)$$dir" || exit $$?; \
+ } \
+ ; done
+
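+# Note (illustrative): the pipeline above applies $(transform), configure's
+# --program-prefix/--program-suffix/--program-transform-name rename rule.
+# For instance, configuring with --program-prefix=nd- makes $(transform)
+# roughly 's,^,nd-,', so python.d.plugin would install as
+# nd-python.d.plugin under $(pluginsdir).
+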
+uninstall-dist_pluginsSCRIPTS:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || exit 0; \
+ files=`for p in $$list; do echo "$$p"; done | \
+ sed -e 's,.*/,,;$(transform)'`; \
+ dir='$(DESTDIR)$(pluginsdir)'; $(am__uninstall_files_from_dir)
+install-dist_pythonSCRIPTS: $(dist_python_SCRIPTS)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_python_SCRIPTS)'; test -n "$(pythondir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(pythondir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(pythondir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \
+ done | \
+ sed -e 'p;s,.*/,,;n' \
+ -e 'h;s|.*|.|' \
+ -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \
+ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \
+ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \
+ if ($$2 == $$4) { files[d] = files[d] " " $$1; \
+ if (++n[d] == $(am__install_max)) { \
+ print "f", d, files[d]; n[d] = 0; files[d] = "" } } \
+ else { print "f", d "/" $$4, $$1 } } \
+ END { for (d in files) print "f", d, files[d] }' | \
+ while read type dir files; do \
+ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \
+ test -z "$$files" || { \
+ echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pythondir)$$dir'"; \
+ $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pythondir)$$dir" || exit $$?; \
+ } \
+ ; done
+
+uninstall-dist_pythonSCRIPTS:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_python_SCRIPTS)'; test -n "$(pythondir)" || exit 0; \
+ files=`for p in $$list; do echo "$$p"; done | \
+ sed -e 's,.*/,,;$(transform)'`; \
+ dir='$(DESTDIR)$(pythondir)'; $(am__uninstall_files_from_dir)
+install-dist_basesDATA: $(dist_bases_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_bases_DATA)'; test -n "$(basesdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(basesdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(basesdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(basesdir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(basesdir)" || exit $$?; \
+ done
+
+uninstall-dist_basesDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_bases_DATA)'; test -n "$(basesdir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(basesdir)'; $(am__uninstall_files_from_dir)
+install-dist_bases_framework_servicesDATA: $(dist_bases_framework_services_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_bases_framework_services_DATA)'; test -n "$(bases_framework_servicesdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(bases_framework_servicesdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(bases_framework_servicesdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(bases_framework_servicesdir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(bases_framework_servicesdir)" || exit $$?; \
+ done
+
+uninstall-dist_bases_framework_servicesDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_bases_framework_services_DATA)'; test -n "$(bases_framework_servicesdir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(bases_framework_servicesdir)'; $(am__uninstall_files_from_dir)
+install-dist_libconfigDATA: $(dist_libconfig_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_libconfig_DATA)'; test -n "$(libconfigdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(libconfigdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(libconfigdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(libconfigdir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(libconfigdir)" || exit $$?; \
+ done
+
+uninstall-dist_libconfigDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_libconfig_DATA)'; test -n "$(libconfigdir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(libconfigdir)'; $(am__uninstall_files_from_dir)
+install-dist_pythonDATA: $(dist_python_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_python_DATA)'; test -n "$(pythondir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(pythondir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(pythondir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pythondir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(pythondir)" || exit $$?; \
+ done
+
+uninstall-dist_pythonDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_python_DATA)'; test -n "$(pythondir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(pythondir)'; $(am__uninstall_files_from_dir)
+install-dist_python_urllib3DATA: $(dist_python_urllib3_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_python_urllib3_DATA)'; test -n "$(python_urllib3dir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(python_urllib3dir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(python_urllib3dir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(python_urllib3dir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(python_urllib3dir)" || exit $$?; \
+ done
+
+uninstall-dist_python_urllib3DATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_python_urllib3_DATA)'; test -n "$(python_urllib3dir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(python_urllib3dir)'; $(am__uninstall_files_from_dir)
+install-dist_python_urllib3_backportsDATA: $(dist_python_urllib3_backports_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_python_urllib3_backports_DATA)'; test -n "$(python_urllib3_backportsdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(python_urllib3_backportsdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(python_urllib3_backportsdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(python_urllib3_backportsdir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(python_urllib3_backportsdir)" || exit $$?; \
+ done
+
+uninstall-dist_python_urllib3_backportsDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_python_urllib3_backports_DATA)'; test -n "$(python_urllib3_backportsdir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(python_urllib3_backportsdir)'; $(am__uninstall_files_from_dir)
+install-dist_python_urllib3_contribDATA: $(dist_python_urllib3_contrib_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_python_urllib3_contrib_DATA)'; test -n "$(python_urllib3_contribdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(python_urllib3_contribdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(python_urllib3_contribdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(python_urllib3_contribdir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(python_urllib3_contribdir)" || exit $$?; \
+ done
+
+uninstall-dist_python_urllib3_contribDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_python_urllib3_contrib_DATA)'; test -n "$(python_urllib3_contribdir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(python_urllib3_contribdir)'; $(am__uninstall_files_from_dir)
+install-dist_python_urllib3_packagesDATA: $(dist_python_urllib3_packages_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_python_urllib3_packages_DATA)'; test -n "$(python_urllib3_packagesdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(python_urllib3_packagesdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(python_urllib3_packagesdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(python_urllib3_packagesdir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(python_urllib3_packagesdir)" || exit $$?; \
+ done
+
+uninstall-dist_python_urllib3_packagesDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_python_urllib3_packages_DATA)'; test -n "$(python_urllib3_packagesdir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(python_urllib3_packagesdir)'; $(am__uninstall_files_from_dir)
+install-dist_python_urllib3_securetransportDATA: $(dist_python_urllib3_securetransport_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_python_urllib3_securetransport_DATA)'; test -n "$(python_urllib3_securetransportdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(python_urllib3_securetransportdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(python_urllib3_securetransportdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(python_urllib3_securetransportdir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(python_urllib3_securetransportdir)" || exit $$?; \
+ done
+
+uninstall-dist_python_urllib3_securetransportDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_python_urllib3_securetransport_DATA)'; test -n "$(python_urllib3_securetransportdir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(python_urllib3_securetransportdir)'; $(am__uninstall_files_from_dir)
+install-dist_python_urllib3_ssl_match_hostnameDATA: $(dist_python_urllib3_ssl_match_hostname_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_python_urllib3_ssl_match_hostname_DATA)'; test -n "$(python_urllib3_ssl_match_hostnamedir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(python_urllib3_ssl_match_hostnamedir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(python_urllib3_ssl_match_hostnamedir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(python_urllib3_ssl_match_hostnamedir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(python_urllib3_ssl_match_hostnamedir)" || exit $$?; \
+ done
+
+uninstall-dist_python_urllib3_ssl_match_hostnameDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_python_urllib3_ssl_match_hostname_DATA)'; test -n "$(python_urllib3_ssl_match_hostnamedir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(python_urllib3_ssl_match_hostnamedir)'; $(am__uninstall_files_from_dir)
+install-dist_python_urllib3_utilDATA: $(dist_python_urllib3_util_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_python_urllib3_util_DATA)'; test -n "$(python_urllib3_utildir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(python_urllib3_utildir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(python_urllib3_utildir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(python_urllib3_utildir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(python_urllib3_utildir)" || exit $$?; \
+ done
+
+uninstall-dist_python_urllib3_utilDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_python_urllib3_util_DATA)'; test -n "$(python_urllib3_utildir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(python_urllib3_utildir)'; $(am__uninstall_files_from_dir)
+install-dist_pythonconfigDATA: $(dist_pythonconfig_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_pythonconfig_DATA)'; test -n "$(pythonconfigdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(pythonconfigdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(pythonconfigdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pythonconfigdir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(pythonconfigdir)" || exit $$?; \
+ done
+
+uninstall-dist_pythonconfigDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_pythonconfig_DATA)'; test -n "$(pythonconfigdir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(pythonconfigdir)'; $(am__uninstall_files_from_dir)
+install-dist_pythonmodulesDATA: $(dist_pythonmodules_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_pythonmodules_DATA)'; test -n "$(pythonmodulesdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(pythonmodulesdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(pythonmodulesdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pythonmodulesdir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(pythonmodulesdir)" || exit $$?; \
+ done
+
+uninstall-dist_pythonmodulesDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_pythonmodules_DATA)'; test -n "$(pythonmodulesdir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(pythonmodulesdir)'; $(am__uninstall_files_from_dir)
+install-dist_pythonyaml2DATA: $(dist_pythonyaml2_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_pythonyaml2_DATA)'; test -n "$(pythonyaml2dir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(pythonyaml2dir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(pythonyaml2dir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pythonyaml2dir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(pythonyaml2dir)" || exit $$?; \
+ done
+
+uninstall-dist_pythonyaml2DATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_pythonyaml2_DATA)'; test -n "$(pythonyaml2dir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(pythonyaml2dir)'; $(am__uninstall_files_from_dir)
+install-dist_pythonyaml3DATA: $(dist_pythonyaml3_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_pythonyaml3_DATA)'; test -n "$(pythonyaml3dir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(pythonyaml3dir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(pythonyaml3dir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pythonyaml3dir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(pythonyaml3dir)" || exit $$?; \
+ done
+
+uninstall-dist_pythonyaml3DATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_pythonyaml3_DATA)'; test -n "$(pythonyaml3dir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(pythonyaml3dir)'; $(am__uninstall_files_from_dir)
+install-dist_third_partyDATA: $(dist_third_party_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_third_party_DATA)'; test -n "$(third_partydir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(third_partydir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(third_partydir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(third_partydir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(third_partydir)" || exit $$?; \
+ done
+
+uninstall-dist_third_partyDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_third_party_DATA)'; test -n "$(third_partydir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(third_partydir)'; $(am__uninstall_files_from_dir)
+install-dist_userpythonconfigDATA: $(dist_userpythonconfig_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_userpythonconfig_DATA)'; test -n "$(userpythonconfigdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(userpythonconfigdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(userpythonconfigdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(userpythonconfigdir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(userpythonconfigdir)" || exit $$?; \
+ done
+
+uninstall-dist_userpythonconfigDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_userpythonconfig_DATA)'; test -n "$(userpythonconfigdir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(userpythonconfigdir)'; $(am__uninstall_files_from_dir)
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(SCRIPTS) $(DATA)
+installdirs:
+ for dir in "$(DESTDIR)$(pluginsdir)" "$(DESTDIR)$(pythondir)" "$(DESTDIR)$(basesdir)" "$(DESTDIR)$(bases_framework_servicesdir)" "$(DESTDIR)$(libconfigdir)" "$(DESTDIR)$(pythondir)" "$(DESTDIR)$(python_urllib3dir)" "$(DESTDIR)$(python_urllib3_backportsdir)" "$(DESTDIR)$(python_urllib3_contribdir)" "$(DESTDIR)$(python_urllib3_packagesdir)" "$(DESTDIR)$(python_urllib3_securetransportdir)" "$(DESTDIR)$(python_urllib3_ssl_match_hostnamedir)" "$(DESTDIR)$(python_urllib3_utildir)" "$(DESTDIR)$(pythonconfigdir)" "$(DESTDIR)$(pythonmodulesdir)" "$(DESTDIR)$(pythonyaml2dir)" "$(DESTDIR)$(pythonyaml3dir)" "$(DESTDIR)$(third_partydir)" "$(DESTDIR)$(userpythonconfigdir)"; do \
+ test -z "$$dir" || $(MKDIR_P) "$$dir"; \
+ done
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+ -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES)
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am: install-dist_basesDATA \
+ install-dist_bases_framework_servicesDATA \
+ install-dist_libconfigDATA install-dist_pluginsSCRIPTS \
+ install-dist_pythonDATA install-dist_pythonSCRIPTS \
+ install-dist_python_urllib3DATA \
+ install-dist_python_urllib3_backportsDATA \
+ install-dist_python_urllib3_contribDATA \
+ install-dist_python_urllib3_packagesDATA \
+ install-dist_python_urllib3_securetransportDATA \
+ install-dist_python_urllib3_ssl_match_hostnameDATA \
+ install-dist_python_urllib3_utilDATA \
+ install-dist_pythonconfigDATA install-dist_pythonmodulesDATA \
+ install-dist_pythonyaml2DATA install-dist_pythonyaml3DATA \
+ install-dist_third_partyDATA install-dist_userpythonconfigDATA
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am: uninstall-dist_basesDATA \
+ uninstall-dist_bases_framework_servicesDATA \
+ uninstall-dist_libconfigDATA uninstall-dist_pluginsSCRIPTS \
+ uninstall-dist_pythonDATA uninstall-dist_pythonSCRIPTS \
+ uninstall-dist_python_urllib3DATA \
+ uninstall-dist_python_urllib3_backportsDATA \
+ uninstall-dist_python_urllib3_contribDATA \
+ uninstall-dist_python_urllib3_packagesDATA \
+ uninstall-dist_python_urllib3_securetransportDATA \
+ uninstall-dist_python_urllib3_ssl_match_hostnameDATA \
+ uninstall-dist_python_urllib3_utilDATA \
+ uninstall-dist_pythonconfigDATA \
+ uninstall-dist_pythonmodulesDATA \
+ uninstall-dist_pythonyaml2DATA uninstall-dist_pythonyaml3DATA \
+ uninstall-dist_third_partyDATA \
+ uninstall-dist_userpythonconfigDATA
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dist_basesDATA \
+ install-dist_bases_framework_servicesDATA \
+ install-dist_libconfigDATA install-dist_pluginsSCRIPTS \
+ install-dist_pythonDATA install-dist_pythonSCRIPTS \
+ install-dist_python_urllib3DATA \
+ install-dist_python_urllib3_backportsDATA \
+ install-dist_python_urllib3_contribDATA \
+ install-dist_python_urllib3_packagesDATA \
+ install-dist_python_urllib3_securetransportDATA \
+ install-dist_python_urllib3_ssl_match_hostnameDATA \
+ install-dist_python_urllib3_utilDATA \
+ install-dist_pythonconfigDATA install-dist_pythonmodulesDATA \
+ install-dist_pythonyaml2DATA install-dist_pythonyaml3DATA \
+ install-dist_third_partyDATA install-dist_userpythonconfigDATA \
+ install-dvi install-dvi-am install-exec install-exec-am \
+ install-html install-html-am install-info install-info-am \
+ install-man install-pdf install-pdf-am install-ps \
+ install-ps-am install-strip installcheck installcheck-am \
+ installdirs maintainer-clean maintainer-clean-generic \
+ mostlyclean mostlyclean-generic pdf pdf-am ps ps-am tags-am \
+ uninstall uninstall-am uninstall-dist_basesDATA \
+ uninstall-dist_bases_framework_servicesDATA \
+ uninstall-dist_libconfigDATA uninstall-dist_pluginsSCRIPTS \
+ uninstall-dist_pythonDATA uninstall-dist_pythonSCRIPTS \
+ uninstall-dist_python_urllib3DATA \
+ uninstall-dist_python_urllib3_backportsDATA \
+ uninstall-dist_python_urllib3_contribDATA \
+ uninstall-dist_python_urllib3_packagesDATA \
+ uninstall-dist_python_urllib3_securetransportDATA \
+ uninstall-dist_python_urllib3_ssl_match_hostnameDATA \
+ uninstall-dist_python_urllib3_utilDATA \
+ uninstall-dist_pythonconfigDATA \
+ uninstall-dist_pythonmodulesDATA \
+ uninstall-dist_pythonyaml2DATA uninstall-dist_pythonyaml3DATA \
+ uninstall-dist_third_partyDATA \
+ uninstall-dist_userpythonconfigDATA
+
+.PRECIOUS: Makefile
+
+.in:
+ if sed \
+ -e 's#[@]localstatedir_POST@#$(localstatedir)#g' \
+ -e 's#[@]sbindir_POST@#$(sbindir)#g' \
+ -e 's#[@]pluginsdir_POST@#$(pluginsdir)#g' \
+ -e 's#[@]configdir_POST@#$(configdir)#g' \
+ -e 's#[@]libconfigdir_POST@#$(libconfigdir)#g' \
+ -e 's#[@]cachedir_POST@#$(cachedir)#g' \
+ -e 's#[@]registrydir_POST@#$(registrydir)#g' \
+ -e 's#[@]varlibdir_POST@#$(varlibdir)#g' \
+ $< > $@.tmp; then \
+ mv "$@.tmp" "$@"; \
+ else \
+ rm -f "$@.tmp"; \
+ false; \
+ fi
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/collectors/python.d.plugin/README.md b/collectors/python.d.plugin/README.md
index 32437c6d..d0074a12 100644
--- a/collectors/python.d.plugin/README.md
+++ b/collectors/python.d.plugin/README.md
@@ -1,13 +1,13 @@
# python.d.plugin
-`python.d.plugin` is a netdata external plugin. It is an **orchestrator** for data collection modules written in `python`.
+`python.d.plugin` is a Netdata external plugin. It is an **orchestrator** for data collection modules written in `python`.
-1. It runs as an independent process `ps fax` shows it
-2. It is started and stopped automatically by netdata
-3. It communicates with netdata via a unidirectional pipe (sending data to the netdata daemon)
-4. Supports any number of data collection **modules**
-5. Allows each **module** to have one or more data collection **jobs**
-6. Each **job** is collecting one or more metrics from a single data source
+1. It runs as an independent process (`ps fax` shows it)
+2. It is started and stopped automatically by Netdata
+3. It communicates with Netdata via a unidirectional pipe (sending data to the `netdata` daemon)
+4. Supports any number of data collection **modules**
+5. Allows each **module** to have one or more data collection **jobs**
+6. Each **job** collects one or more metrics from a single data source
## Disclaimer
@@ -17,7 +17,7 @@ Module configurations are written in YAML and **pyYAML is required**.
Every configuration file must have one of two formats:
-- Configuration for only one job:
+- Configuration for only one job:
```yaml
update_every : 2 # update frequency
@@ -27,7 +27,7 @@ other_var1 : bla # variables passed to module
other_var2 : alb
```
-- Configuration for many jobs (ex. mysql):
+- Configuration for many jobs (ex. mysql):
```yaml
# module defaults:
@@ -51,6 +51,7 @@ other_job:
# become user netdata
sudo su -s /bin/bash netdata
```
+
Depending on where Netdata was installed, execute one of the following commands to trace the execution of a python module:
```
@@ -58,16 +59,18 @@ Depending on where Netdata was installed, execute one of the following commands
/opt/netdata/usr/libexec/netdata/plugins.d/python.d.plugin <module> debug trace
/usr/libexec/netdata/plugins.d/python.d.plugin <module> debug trace
```
-Where `[module]` is the directory name under https://github.com/netdata/netdata/tree/master/collectors/python.d.plugin
+
+Where `<module>` is the directory name under <https://github.com/netdata/netdata/tree/master/collectors/python.d.plugin>
## How to write a new module
Writing a new Python module is simple. You just need to remember to include five major things:
-- **ORDER** global list
-- **CHART** global dictionary
-- **Service** class
-- **_get_data** method
-- all code needs to be compatible with Python 2 (**≥ 2.7**) *and* 3 (**≥ 3.1**)
+
+- **ORDER** global list
+- **CHART** global dictionary
+- **Service** class
+- **\_get_data** method
+- all code needs to be compatible with Python 2 (**≥ 2.7**) *and* 3 (**≥ 3.1**)
If you plan to submit the module in a PR, make sure to go through the [PR checklist for new modules](#pull-request-checklist-for-python-plugins) beforehand, so that you have updated all the files you need to.
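Taken together, a skeleton touching all five points might look like the sketch below. It is illustrative only: the module name, URL, and chart ids are placeholders, and the exact return type of `_get_raw_data` depends on the framework class used.

```py
# skeleton of a hypothetical module combining the five required pieces
from base import UrlService

ORDER = ['requests']

CHART = {
    'requests': {
        # options: [name, title, units, family, context, chart type]
        'options': [None, 'Requests', 'requests/s', 'requests', 'example.requests', 'line'],
        'lines': [['requests', None, 'incremental']],
    }
}


class Service(UrlService):
    def __init__(self, configuration=None, name=None):
        UrlService.__init__(self, configuration=configuration, name=name)
        self.order = ORDER
        self.definitions = CHART
        self.url = self.configuration.get('url', 'http://127.0.0.1:8080/status')

    def _get_data(self):
        raw = self._get_raw_data()  # assumed here to be the response body as a string
        if raw is None:
            return None
        return {'requests': int(raw.strip())}
```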
@@ -76,11 +79,13 @@ For a quick start, you can look at the [example plugin](example/example.chart.py
### Global variables `ORDER` and `CHART`
The `ORDER` list should contain the order of chart ids. Example:
+
```py
ORDER = ['first_chart', 'second_chart', 'third_chart']
```
The `CHART` dictionary is a little bit trickier. It should contain the chart definition in the following format:
+
```py
CHART = {
id: {
@@ -97,15 +102,16 @@ Parameters like `priority` and `update_every` are handled by `python.d.plugin`.
Every module needs to implement its own `Service` class. This class should inherit from one of the framework classes:
-- `SimpleService`
-- `UrlService`
-- `SocketService`
-- `LogService`
-- `ExecutableService`
+- `SimpleService`
+- `UrlService`
+- `SocketService`
+- `LogService`
+- `ExecutableService`
It also needs to invoke the parent class constructor in a specific way, as well as assign global variables to class variables.
Simple example:
+
```py
from base import UrlService
class Service(UrlService):
@@ -120,6 +126,7 @@ class Service(UrlService):
This method should grab raw data from `_get_raw_data`, parse it, and return a dictionary whose keys are unique dimension names, or `None` if no data is collected.
Example:
+
```py
def _get_data(self):
try:
@@ -129,12 +136,12 @@ def _get_data(self):
return None
```
-More about framework classes
-============================
+# More about framework classes
Every framework class has some user-configurable variables which are specific to this particular class. Those variables should have default values initialized in the child class constructor.
If a module needs additional user-configurable variables, they can be accessed from the `self.configuration` list and assigned in the constructor or a custom `check` method. Example:
+
```py
def __init__(self, configuration=None, name=None):
UrlService.__init__(self, configuration=configuration, name=name)
@@ -153,11 +160,12 @@ _This is last resort class, if a new module cannot be written by using other fra
_Example: `ceph`, `sensors`_
It is the lowest-level class which implements most of module logic, like:
-- threading
-- handling run times
-- chart formatting
-- logging
-- chart creation and updating
+
+- threading
+- handling run times
+- chart formatting
+- logging
+- chart creation and updating
### `LogService`
@@ -174,11 +182,12 @@ _Examples: `exim`, `postfix`_
_Variable from config file_: `command`.
This allows executing a shell command in a secure way. It checks for invalid characters in the `command` variable and won't proceed if the command contains any of:
-- '&'
-- '|'
-- ';'
-- '>'
-- '<'
+
+- '&'
+- '|'
+- ';'
+- '>'
+- '\<'
For additional security it uses Python's `subprocess.Popen` (without the `shell=True` option) to execute the command. The command can be specified with an absolute or relative name. When using a relative name, it will try to find `command` in the `PATH` environment variable, as well as in `/sbin` and `/usr/sbin`.
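As a rough illustration of this approach (a simplified sketch, not the actual `ExecutableService` source), the check-then-execute logic amounts to something like:

```py
# simplified sketch of rejecting shell metacharacters and executing without a shell
from subprocess import PIPE, Popen

BAD_CHARACTERS = ['&', '|', ';', '>', '<']


def run_command(command):
    if any(char in command for char in BAD_CHARACTERS):
        return None  # refuse commands containing shell metacharacters
    # shell=False (the default): no shell is involved, so no injection is possible
    process = Popen(command.split(), stdout=PIPE, stderr=PIPE)
    stdout, _ = process.communicate()
    return stdout.decode().splitlines()
```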
@@ -214,12 +223,12 @@ This is a generic checklist for submitting a new Python plugin for Netdata. It
At minimum, to be buildable and testable, the PR needs to include:
-* The module itself, following proper naming conventions: `python.d/<module_dir>/<module_name>.chart.py`
-* A README.md file for the plugin under `python.d/<module_dir>`.
-* The configuration file for the module: `conf.d/python.d/<module_name>.conf`. Python config files are in YAML format, and should include comments describing what options are present. The instructions are also needed in the configuration section of the README.md
-* A basic configuration for the plugin in the appropriate global config file: `conf.d/python.d.conf`, which is also in YAML format. Either add a line that reads `# <module_name>: yes` if the module is to be enabled by default, or one that reads `<module_name>: no` if it is to be disabled by default.
-* A line for the plugin in `python.d/Makefile.am` under `dist_python_DATA`.
-* A line for the plugin configuration file in `conf.d/Makefile.am`, under `dist_pythonconfig_DATA`
-* Optionally, chart information in `web/dashboard_info.js`. This generally involves specifying a name and icon for the section, and may include descriptions for the section or individual charts.
+- The module itself, following proper naming conventions: `python.d/<module_dir>/<module_name>.chart.py`
+- A README.md file for the plugin under `python.d/<module_dir>`.
+- The configuration file for the module: `conf.d/python.d/<module_name>.conf`. Python config files are in YAML format, and should include comments describing what options are present. The instructions are also needed in the configuration section of the README.md.
+- A basic configuration for the plugin in the appropriate global config file: `conf.d/python.d.conf`, which is also in YAML format. Either add a line that reads `# <module_name>: yes` if the module is to be enabled by default, or one that reads `<module_name>: no` if it is to be disabled by default.
+- A line for the plugin in `python.d/Makefile.am` under `dist_python_DATA`.
+- A line for the plugin configuration file in `conf.d/Makefile.am`, under `dist_pythonconfig_DATA`.
+- Optionally, chart information in `web/dashboard_info.js`. This generally involves specifying a name and icon for the section, and may include descriptions for the section or individual charts.
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/adaptec_raid/README.md b/collectors/python.d.plugin/adaptec_raid/README.md
index 682280f2..127d595b 100644
--- a/collectors/python.d.plugin/adaptec_raid/README.md
+++ b/collectors/python.d.plugin/adaptec_raid/README.md
@@ -3,35 +3,39 @@
The module collects logical and physical device health metrics.
**Requirements:**
-* `arcconf` program
-* `sudo` program
-* `netdata` user needs to be able to sudo the `arcconf` program without password
+
+- `arcconf` program
+- `sudo` program
+- `netdata` user needs to be able to sudo the `arcconf` program without password
To grab stats it executes:
- * `sudo -n arcconf GETCONFIG 1 LD`
- * `sudo -n arcconf GETCONFIG 1 PD`
+- `sudo -n arcconf GETCONFIG 1 LD`
+- `sudo -n arcconf GETCONFIG 1 PD`
It produces:
-1. **Logical Device Status**
+1. **Logical Device Status**
+
+2. **Physical Device State**
-2. **Physical Device State**
+3. **Physical Device S.M.A.R.T warnings**
-3. **Physical Device S.M.A.R.T warnings**
+4. **Physical Device Temperature**
-4. **Physical Device Temperature**
+## prerequisite
-### prerequisite
This module uses `arcconf`, which can only be executed by root. It uses
`sudo` and assumes that it is configured such that the `netdata` user can
execute `arcconf` as root without a password.
Add to `sudoers`:
- netdata ALL=(root) NOPASSWD: /path/to/arcconf
+```
+netdata ALL=(root) NOPASSWD: /path/to/arcconf
+```
-### configuration
+## configuration
**adaptec_raid** is disabled by default. It should be explicitly enabled in `python.d.conf`.
@@ -39,10 +43,10 @@ Add to `sudoers`:
adaptec_raid: yes
```
-#### Screenshot:
+### Screenshot:
![image](https://user-images.githubusercontent.com/22274335/47278133-6d306680-d601-11e8-87c2-cc9c0f42d686.png)
---
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fadaptec_raid%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fadaptec_raid%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/apache/README.md b/collectors/python.d.plugin/apache/README.md
index 090feb07..8f0ec0c1 100644
--- a/collectors/python.d.plugin/apache/README.md
+++ b/collectors/python.d.plugin/apache/README.md
@@ -3,38 +3,47 @@
This module will monitor one or more Apache servers depending on configuration.
**Requirements:**
- * apache with enabled `mod_status`
+
+- Apache with `mod_status` enabled
It produces the following charts:
-1. **Requests** in requests/s
- * requests
+1. **Requests** in requests/s
+
+ - requests
+
+2. **Connections**
+
+ - connections
+
+3. **Async Connections**
+
+ - keepalive
+ - closing
+ - writing
+
+4. **Bandwidth** in kilobytes/s
+
+ - sent
+
+5. **Workers**
-2. **Connections**
- * connections
+ - idle
+ - busy
-3. **Async Connections**
- * keepalive
- * closing
- * writing
+6. **Lifetime Avg. Requests/s** in requests/s
-4. **Bandwidth** in kilobytes/s
- * sent
+ - requests_sec
-5. **Workers**
- * idle
- * busy
+7. **Lifetime Avg. Bandwidth/s** in kilobytes/s
-6. **Lifetime Avg. Requests/s** in requests/s
- * requests_sec
+ - size_sec
-7. **Lifetime Avg. Bandwidth/s** in kilobytes/s
- * size_sec
+8. **Lifetime Avg. Response Size** in bytes/request
-8. **Lifetime Avg. Response Size** in bytes/request
- * size_req
+ - size_req
-### configuration
+## configuration
Needs only the `url` of the server's `server-status?auto` page.
@@ -56,4 +65,4 @@ Without configuration, module attempts to connect to `http://localhost/server-st
---
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fapache%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fapache%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/beanstalk/README.md b/collectors/python.d.plugin/beanstalk/README.md
index 8daa3660..c93dfa0d 100644
--- a/collectors/python.d.plugin/beanstalk/README.md
+++ b/collectors/python.d.plugin/beanstalk/README.md
@@ -3,93 +3,107 @@
The module provides server and tube-level statistics:
**Requirements:**
- * `python-beanstalkc`
+
+- `python-beanstalkc`
**Server statistics:**
-1. **Cpu usage** in cpu time
- * user
- * system
-
-2. **Jobs rate** in jobs/s
- * total
- * timeouts
-
-3. **Connections rate** in connections/s
- * connections
-
-4. **Commands rate** in commands/s
- * put
- * peek
- * peek-ready
- * peek-delayed
- * peek-buried
- * reserve
- * use
- * watch
- * ignore
- * delete
- * release
- * bury
- * kick
- * stats
- * stats-job
- * stats-tube
- * list-tubes
- * list-tube-used
- * list-tubes-watched
- * pause-tube
-
-5. **Current tubes** in tubes
- * tubes
-
-6. **Current jobs** in jobs
- * urgent
- * ready
- * reserved
- * delayed
- * buried
-
-7. **Current connections** in connections
- * written
- * producers
- * workers
- * waiting
-
-8. **Binlog** in records/s
- * written
- * migrated
-
-9. **Uptime** in seconds
- * uptime
+1. **Cpu usage** in cpu time
+
+ - user
+ - system
+
+2. **Jobs rate** in jobs/s
+
+ - total
+ - timeouts
+
+3. **Connections rate** in connections/s
+
+ - connections
+
+4. **Commands rate** in commands/s
+
+ - put
+ - peek
+ - peek-ready
+ - peek-delayed
+ - peek-buried
+ - reserve
+ - use
+ - watch
+ - ignore
+ - delete
+ - release
+ - bury
+ - kick
+ - stats
+ - stats-job
+ - stats-tube
+ - list-tubes
+ - list-tube-used
+ - list-tubes-watched
+ - pause-tube
+
+5. **Current tubes** in tubes
+
+ - tubes
+
+6. **Current jobs** in jobs
+
+ - urgent
+ - ready
+ - reserved
+ - delayed
+ - buried
+
+7. **Current connections** in connections
+
+ - written
+ - producers
+ - workers
+ - waiting
+
+8. **Binlog** in records/s
+
+ - written
+ - migrated
+
+9. **Uptime** in seconds
+
+ - uptime
**Per tube statistics:**
-1. **Jobs rate** in jobs/s
- * jobs
+1. **Jobs rate** in jobs/s
+
+ - jobs
+
+2. **Jobs** in jobs
+
+ - using
+ - ready
+ - reserved
+ - delayed
+ - buried
+
+3. **Connections** in connections
-2. **Jobs** in jobs
- * using
- * ready
- * reserved
- * delayed
- * buried
+ - using
+ - waiting
+ - watching
-3. **Connections** in connections
- * using
- * waiting
- * watching
+4. **Commands** in commands/s
-4. **Commands** in commands/s
- * deletes
- * pauses
+ - deletes
+ - pauses
-5. **Pause** in seconds
- * since
- * left
+5. **Pause** in seconds
+ - since
+ - left
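The counters above come straight from beanstalkd's `stats` and `stats-tube` output; to inspect the raw values the module reads, a quick check with `python-beanstalkc` (host and port here are examples) looks like:

```py
# print the raw beanstalkd statistics that back the charts above
import beanstalkc

conn = beanstalkc.Connection(host='127.0.0.1', port=11300)
print(conn.stats())                 # server-level statistics
for tube in conn.tubes():           # per-tube statistics
    print(tube, conn.stats_tube(tube))
```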
-### configuration
+## configuration
Sample:
@@ -102,4 +116,4 @@ If no configuration is given, module will attempt to connect to beanstalkd on `1
---
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fbeanstalk%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fbeanstalk%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/bind_rndc/README.md b/collectors/python.d.plugin/bind_rndc/README.md
index fefe7493..021a5d66 100644
--- a/collectors/python.d.plugin/bind_rndc/README.md
+++ b/collectors/python.d.plugin/bind_rndc/README.md
@@ -3,50 +3,53 @@
The module parses the bind dump file to collect real-time performance metrics.
**Requirements:**
- * Version of bind must be 9.6 +
- * Netdata must have permissions to run `rndc stats`
+
+- Version of bind must be 9.6+
+- Netdata must have permissions to run `rndc stats`
It produces:
-1. **Name server statistics**
- * requests
- * responses
- * success
- * auth_answer
- * nonauth_answer
- * nxrrset
- * failure
- * nxdomain
- * recursion
- * duplicate
- * rejections
-
-2. **Incoming queries**
- * RESERVED0
- * A
- * NS
- * CNAME
- * SOA
- * PTR
- * MX
- * TXT
- * X25
- * AAAA
- * SRV
- * NAPTR
- * A6
- * DS
- * RSIG
- * DNSKEY
- * SPF
- * ANY
- * DLV
-
-3. **Outgoing queries**
- * Same as Incoming queries
-
-
-### configuration
+1. **Name server statistics**
+
+ - requests
+ - responses
+ - success
+ - auth_answer
+ - nonauth_answer
+ - nxrrset
+ - failure
+ - nxdomain
+ - recursion
+ - duplicate
+ - rejections
+
+2. **Incoming queries**
+
+ - RESERVED0
+ - A
+ - NS
+ - CNAME
+ - SOA
+ - PTR
+ - MX
+ - TXT
+ - X25
+ - AAAA
+ - SRV
+ - NAPTR
+ - A6
+ - DS
+ - RSIG
+ - DNSKEY
+ - SPF
+ - ANY
+ - DLV
+
+3. **Outgoing queries**
+
+ - Same as Incoming queries
+
+## configuration
Sample:
@@ -59,4 +62,4 @@ If no configuration is given, module will attempt to read named.stats file at `
---
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fbind_rndc%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fbind_rndc%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/boinc/README.md b/collectors/python.d.plugin/boinc/README.md
index 0f0aa1c6..260ae54b 100644
--- a/collectors/python.d.plugin/boinc/README.md
+++ b/collectors/python.d.plugin/boinc/README.md
@@ -7,7 +7,7 @@ RPC interface that the BOINC monitoring GUI does.
It provides charts tracking the total number of tasks and active tasks,
as well as ones tracking each of the possible states for tasks.
-### configuration
+## configuration
BOINC requires the use of a password to access its RPC interface. You can
find this password in the `gui_rpc_auth.cfg` file in your BOINC directory.
@@ -27,4 +27,4 @@ remote:
---
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fboinc%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fboinc%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/ceph/README.md b/collectors/python.d.plugin/ceph/README.md
index 9eecb223..f5b36e14 100644
--- a/collectors/python.d.plugin/ceph/README.md
+++ b/collectors/python.d.plugin/ceph/README.md
@@ -4,25 +4,27 @@ This module monitors the ceph cluster usage and consumption data of a server.
It produces:
-* Cluster statistics (usage, available, latency, objects, read/write rate)
-* OSD usage
-* OSD latency
-* Pool usage
-* Pool read/write operations
-* Pool read/write rate
-* number of objects per pool
+- Cluster statistics (usage, available, latency, objects, read/write rate)
+- OSD usage
+- OSD latency
+- Pool usage
+- Pool read/write operations
+- Pool read/write rate
+- number of objects per pool
**Requirements:**
-- `rados` python module
-- Granting read permissions to ceph group from keyring file
+- `rados` python module
+- Granting read permissions to ceph group from keyring file
+
```shell
# chmod 640 /etc/ceph/ceph.client.admin.keyring
```
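Before enabling the collector, it can help to verify that the `rados` bindings and the keyring permissions actually work; a minimal connectivity check (using the same default paths as the sample below) might look like:

```py
# quick check that the rados bindings can reach the cluster
import rados

cluster = rados.Rados(conffile='/etc/ceph/ceph.conf',
                      conf=dict(keyring='/etc/ceph/ceph.client.admin.keyring'))
cluster.connect()
print(cluster.get_cluster_stats())  # kb, kb_used, kb_avail, num_objects
cluster.shutdown()
```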
-### Configuration
+## Configuration
Sample:
+
```yaml
local:
config_file: '/etc/ceph/ceph.conf'
@@ -31,4 +33,4 @@ local:
---
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fceph%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fceph%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/chrony/README.md b/collectors/python.d.plugin/chrony/README.md
index 67ed1a05..a45adb33 100644
--- a/collectors/python.d.plugin/chrony/README.md
+++ b/collectors/python.d.plugin/chrony/README.md
@@ -4,21 +4,22 @@ This module monitors the precision and statistics of a local chronyd server.
It produces:
-* frequency
-* last offset
-* RMS offset
-* residual freq
-* root delay
-* root dispersion
-* skew
-* system time
+- frequency
+- last offset
+- RMS offset
+- residual freq
+- root delay
+- root dispersion
+- skew
+- system time
**Requirements:**
-Verify that user netdata can execute `chronyc tracking`. If necessary, update `/etc/chrony.conf`, `cmdallow`.
+Verify that the `netdata` user can execute `chronyc tracking`. If necessary, update `/etc/chrony.conf` (`cmdallow`).
-### Configuration
+## Configuration
Sample:
+
```yaml
# data collection frequency:
update_every: 1
@@ -30,4 +31,4 @@ local:
---
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fchrony%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fchrony%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/couchdb/README.md b/collectors/python.d.plugin/couchdb/README.md
index 2cc353ed..28897067 100644
--- a/collectors/python.d.plugin/couchdb/README.md
+++ b/collectors/python.d.plugin/couchdb/README.md
@@ -2,18 +2,19 @@
This module monitors vital statistics of a local Apache CouchDB 2.x server, including:
-* Overall server reads/writes
-* HTTP traffic breakdown
- * Request methods (`GET`, `PUT`, `POST`, etc.)
- * Response status codes (`200`, `201`, `4xx`, etc.)
-* Active server tasks
-* Replication status (CouchDB 2.1 and up only)
-* Erlang VM stats
-* Optional per-database statistics: sizes, # of docs, # of deleted docs
+- Overall server reads/writes
+- HTTP traffic breakdown
+ - Request methods (`GET`, `PUT`, `POST`, etc.)
+ - Response status codes (`200`, `201`, `4xx`, etc.)
+- Active server tasks
+- Replication status (CouchDB 2.1 and up only)
+- Erlang VM stats
+- Optional per-database statistics: sizes, # of docs, # of deleted docs
-### Configuration
+## Configuration
Sample for a local server running on port 5984:
+
```yaml
local:
user: 'admin'
@@ -26,6 +27,7 @@ Be sure to specify a correct admin-level username and password.
You may also need to change the `node` name; this should match the value of `-name NODENAME` in your CouchDB's `etc/vm.args` file. Typically this is of the form `couchdb@fully.qualified.domain.name` in a cluster, or `couchdb@127.0.0.1` / `couchdb@localhost` for a single-node server.
If you want per-database statistics, these need to be added to the configuration, separated by spaces:
+
```yaml
local:
...
@@ -34,4 +36,4 @@ local:
---
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fcouchdb%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fcouchdb%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/dns_query_time/README.md b/collectors/python.d.plugin/dns_query_time/README.md
index 73d70d3a..ebf34a3d 100644
--- a/collectors/python.d.plugin/dns_query_time/README.md
+++ b/collectors/python.d.plugin/dns_query_time/README.md
@@ -3,10 +3,11 @@
This module provides DNS query time statistics.
**Requirement:**
-* `python-dnspython` package
+
+- `python-dnspython` package
It produces one aggregate chart or one chart per DNS server, showing the query time.
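A hand-rolled sketch of the kind of measurement involved (server and domain are examples; the `dnspython` 1.x API is assumed):

```py
# time a single DNS query, roughly as the chart values are derived
import time
import dns.resolver

resolver = dns.resolver.Resolver(configure=False)
resolver.nameservers = ['8.8.8.8']  # example DNS server

started = time.time()
resolver.query('example.com', 'A')  # dnspython < 2.0; on 2.x use resolver.resolve()
elapsed_ms = (time.time() - started) * 1000
print('query time: %.2f ms' % elapsed_ms)
```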
---
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fdns_query_time%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fdns_query_time%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/dnsdist/README.md b/collectors/python.d.plugin/dnsdist/README.md
index c7647a11..fecf4a5d 100644
--- a/collectors/python.d.plugin/dnsdist/README.md
+++ b/collectors/python.d.plugin/dnsdist/README.md
@@ -4,42 +4,48 @@ Module monitor dnsdist performance and health metrics.
The following charts are drawn:
-1. **Response latency**
- * latency-slow
- * latency100-1000
- * latency50-100
- * latency10-50
- * latency1-10
- * latency0-1
-
-2. **Cache performance**
- * cache-hits
- * cache-misses
-
-3. **ACL events**
- * acl-drops
- * rule-drop
- * rule-nxdomain
- * rule-refused
-
-4. **Noncompliant data**
- * empty-queries
- * no-policy
- * noncompliant-queries
- * noncompliant-responses
-
-5. **Queries**
- * queries
- * rdqueries
- * rdqueries
-
-6. **Health**
- * downstream-send-errors
- * downstream-timeouts
- * servfail-responses
- * trunc-failures
-
-### configuration
+1. **Response latency**
+
+ - latency-slow
+ - latency100-1000
+ - latency50-100
+ - latency10-50
+ - latency1-10
+ - latency0-1
+
+2. **Cache performance**
+
+ - cache-hits
+ - cache-misses
+
+3. **ACL events**
+
+ - acl-drops
+ - rule-drop
+ - rule-nxdomain
+ - rule-refused
+
+4. **Noncompliant data**
+
+ - empty-queries
+ - no-policy
+ - noncompliant-queries
+ - noncompliant-responses
+
+5. **Queries**
+
+ - queries
+ - rdqueries
+
+6. **Health**
+
+ - downstream-send-errors
+ - downstream-timeouts
+ - servfail-responses
+ - trunc-failures
+
+## configuration
```yaml
localhost:
@@ -53,4 +59,4 @@ localhost:
---
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fdnsdist%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fdnsdist%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/dockerd/README.md b/collectors/python.d.plugin/dockerd/README.md
index b09a5d59..ec69262f 100644
--- a/collectors/python.d.plugin/dockerd/README.md
+++ b/collectors/python.d.plugin/dockerd/README.md
@@ -3,26 +3,30 @@
The module monitors Docker health metrics.
**Requirement:**
-* `docker` package, required version 3.2.0+
+
+- `docker` package, required version 3.2.0+
The following charts are drawn:
-1. **running containers**
- * count
+1. **running containers**
+
+ - count
+
+2. **healthy containers**
+
+ - count
-2. **healthy containers**
- * count
+3. **unhealthy containers**
-3. **unhealthy containers**
- * count
+ - count
-### configuration
+## configuration
```yaml
update_every : 1
priority : 60000
- ```
+```
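For reference, the three counts above can be reproduced by hand with the `docker` Python package (an illustrative sketch, not the module's code):

```py
# count running / healthy / unhealthy containers with the docker SDK
import docker

client = docker.from_env()
containers = client.containers.list()  # running containers only, by default
health = [c.attrs['State'].get('Health', {}).get('Status') for c in containers]
print('running:  ', len(containers))
print('healthy:  ', health.count('healthy'))
print('unhealthy:', health.count('unhealthy'))
```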
---
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fdockerd%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fdockerd%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/dovecot/README.md b/collectors/python.d.plugin/dovecot/README.md
index de8788b3..6048f1a6 100644
--- a/collectors/python.d.plugin/dovecot/README.md
+++ b/collectors/python.d.plugin/dovecot/README.md
@@ -9,55 +9,67 @@ Module isn't compatible with new statistic api (v2.3), but you are still able to
by following the [upgrading steps](https://wiki2.dovecot.org/Upgrading/2.3).
**Requirement:**
-Dovecot UNIX socket with R/W permissions for user netdata or Dovecot with configured TCP/IP socket.
+Dovecot UNIX socket with R/W permissions for user `netdata` or Dovecot with configured TCP/IP socket.
The module provides information through the following charts:
-1. **sessions**
- * active sessions
+1. **sessions**
-2. **logins**
- * logins
+ - active sessions
-3. **commands** - number of IMAP commands
- * commands
+2. **logins**
-4. **Faults**
- * minor
- * major
+ - logins
-5. **Context Switches**
- * volountary
- * involountary
+3. **commands** - number of IMAP commands
-6. **disk** in bytes/s
- * read
- * write
+ - commands
-7. **bytes** in bytes/s
- * read
- * write
+4. **Faults**
-8. **number of syscalls** in syscalls/s
- * read
- * write
+ - minor
+ - major
-9. **lookups** - number of lookups per second
- * path
- * attr
+5. **Context Switches**
+
+ - voluntary
+ - involuntary
+
+6. **disk** in bytes/s
+
+ - read
+ - write
+
+7. **bytes** in bytes/s
+
+ - read
+ - write
+
+8. **number of syscalls** in syscalls/s
+
+ - read
+ - write
+
+9. **lookups** - number of lookups per second
+
+ - path
+ - attr
10. **hits** - number of cache hits
- * hits
+
+ - hits
11. **attempts** - authorization attempts
- * success
- * failure
+
+ - success
+ - failure
12. **cache** - cached authorization hits
- * hit
- * miss
-### configuration
+ - hit
+ - miss
+
+## configuration
Sample:
@@ -76,4 +88,4 @@ If no configuration is given, module will attempt to connect to dovecot using un
---
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fdovecot%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fdovecot%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/elasticsearch/README.md b/collectors/python.d.plugin/elasticsearch/README.md
index 6d25b02d..a719a943 100644
--- a/collectors/python.d.plugin/elasticsearch/README.md
+++ b/collectors/python.d.plugin/elasticsearch/README.md
@@ -4,46 +4,54 @@ This module monitors Elasticsearch performance and health metrics.
It produces:
-1. **Search performance** charts:
- * Number of queries, fetches
- * Time spent on queries, fetches
- * Query and fetch latency
-
-2. **Indexing performance** charts:
- * Number of documents indexed, index refreshes, flushes
- * Time spent on indexing, refreshing, flushing
- * Indexing and flushing latency
-
-3. **Memory usage and garbace collection** charts:
- * JVM heap currently in use, committed
- * Count of garbage collections
- * Time spent on garbage collections
-
-4. **Host metrics** charts:
- * Available file descriptors in percent
- * Opened HTTP connections
- * Cluster communication transport metrics
-
-5. **Queues and rejections** charts:
- * Number of queued/rejected threads in thread pool
-
-6. **Fielddata cache** charts:
- * Fielddata cache size
- * Fielddata evictions and circuit breaker tripped count
-
-7. **Cluster health API** charts:
- * Cluster status
- * Nodes and tasks statistics
- * Shards statistics
-
-8. **Cluster stats API** charts:
- * Nodes statistics
- * Query cache statistics
- * Docs statistics
- * Store statistics
- * Indices and shards statistics
-
-### configuration
+1. **Search performance** charts:
+
+ - Number of queries, fetches
+ - Time spent on queries, fetches
+ - Query and fetch latency
+
+2. **Indexing performance** charts:
+
+ - Number of documents indexed, index refreshes, flushes
+ - Time spent on indexing, refreshing, flushing
+ - Indexing and flushing latency
+
+3. **Memory usage and garbage collection** charts:
+
+ - JVM heap currently in use, committed
+ - Count of garbage collections
+ - Time spent on garbage collections
+
+4. **Host metrics** charts:
+
+ - Available file descriptors in percent
+ - Opened HTTP connections
+ - Cluster communication transport metrics
+
+5. **Queues and rejections** charts:
+
+ - Number of queued/rejected threads in thread pool
+
+6. **Fielddata cache** charts:
+
+ - Fielddata cache size
+ - Fielddata evictions and circuit breaker tripped count
+
+7. **Cluster health API** charts:
+
+ - Cluster status
+ - Nodes and tasks statistics
+ - Shards statistics
+
+8. **Cluster stats API** charts:
+
+ - Nodes statistics
+ - Query cache statistics
+ - Docs statistics
+ - Store statistics
+ - Indices and shards statistics
+
+## configuration
Sample:
@@ -59,4 +67,4 @@ If no configuration is given, module will fail to run.
---
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Felasticsearch%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Felasticsearch%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/energid/README.md b/collectors/python.d.plugin/energid/README.md
index 645170e8..fc510159 100644
--- a/collectors/python.d.plugin/energid/README.md
+++ b/collectors/python.d.plugin/energid/README.md
@@ -43,6 +43,7 @@ long daemon startup.
## Configuration
Sample:
+
```yaml
energi:
host: '127.0.0.1'
@@ -59,4 +60,4 @@ bitcoin:
---
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fenergid%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fenergid%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/example/README.md b/collectors/python.d.plugin/example/README.md
index 552f2e41..699ebe69 100644
--- a/collectors/python.d.plugin/example/README.md
+++ b/collectors/python.d.plugin/example/README.md
@@ -3,4 +3,4 @@
An example python data collection module.
You can use this example to help you [write a new Python module](../#how-to-write-a-new-module).
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fexample%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fexample%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/exim/README.md b/collectors/python.d.plugin/exim/README.md
index 1cebb27f..985bd6e3 100644
--- a/collectors/python.d.plugin/exim/README.md
+++ b/collectors/python.d.plugin/exim/README.md
@@ -5,11 +5,12 @@ This command can take a lot of time to finish its execution thus it is not recom
It produces only one chart:
-1. **Exim Queue Emails**
- * emails
+1. **Exim Queue Emails**
+
+ - emails
Configuration is not needed.
---
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fexim%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fexim%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/fail2ban/README.md b/collectors/python.d.plugin/fail2ban/README.md
index 26511986..1ab0f6f6 100644
--- a/collectors/python.d.plugin/fail2ban/README.md
+++ b/collectors/python.d.plugin/fail2ban/README.md
@@ -3,11 +3,12 @@
The module monitors the fail2ban log file to show all bans for all active jails.
**Requirements:**
- * fail2ban.log file MUST BE readable by netdata (A good idea is to add **create 0640 root netdata** to fail2ban conf at logrotate.d)
+
+- The fail2ban.log file MUST BE readable by Netdata (a good idea is to add **create 0640 root netdata** to the fail2ban conf at logrotate.d)
It produces one chart with multiple lines (one line per jail).
-### configuration
+## configuration
Sample:
@@ -17,9 +18,10 @@ local:
conf_path: '/etc/fail2ban/jail.local'
exclude: 'dropbear apache'
```
+
If no configuration is given, the module will attempt to read the log file at `/var/log/fail2ban.log` and the conf file at `/etc/fail2ban/jail.local`.
If the conf file is not found, the default jail is `ssh`.
---
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Ffail2ban%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Ffail2ban%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/freeradius/README.md b/collectors/python.d.plugin/freeradius/README.md
index 00eb50df..3a2cdf9b 100644
--- a/collectors/python.d.plugin/freeradius/README.md
+++ b/collectors/python.d.plugin/freeradius/README.md
@@ -4,44 +4,47 @@ Uses the `radclient` command to provide freeradius statistics. It is not recomme
It produces:
-1. **Authentication counters:**
- * access-accepts
- * access-rejects
- * auth-dropped-requests
- * auth-duplicate-requests
- * auth-invalid-requests
- * auth-malformed-requests
- * auth-unknown-types
-
-2. **Accounting counters:** [optional]
- * accounting-requests
- * accounting-responses
- * acct-dropped-requests
- * acct-duplicate-requests
- * acct-invalid-requests
- * acct-malformed-requests
- * acct-unknown-types
-
-3. **Proxy authentication counters:** [optional]
- * proxy-access-accepts
- * proxy-access-rejects
- * proxy-auth-dropped-requests
- * proxy-auth-duplicate-requests
- * proxy-auth-invalid-requests
- * proxy-auth-malformed-requests
- * proxy-auth-unknown-types
-
-4. **Proxy accounting counters:** [optional]
- * proxy-accounting-requests
- * proxy-accounting-responses
- * proxy-acct-dropped-requests
- * proxy-acct-duplicate-requests
- * proxy-acct-invalid-requests
- * proxy-acct-malformed-requests
- * proxy-acct-unknown-typesa
-
-
-### configuration
+1. **Authentication counters:**
+
+ - access-accepts
+ - access-rejects
+ - auth-dropped-requests
+ - auth-duplicate-requests
+ - auth-invalid-requests
+ - auth-malformed-requests
+ - auth-unknown-types
+
+2. **Accounting counters:** [optional]
+
+ - accounting-requests
+ - accounting-responses
+ - acct-dropped-requests
+ - acct-duplicate-requests
+ - acct-invalid-requests
+ - acct-malformed-requests
+ - acct-unknown-types
+
+3. **Proxy authentication counters:** [optional]
+
+ - proxy-access-accepts
+ - proxy-access-rejects
+ - proxy-auth-dropped-requests
+ - proxy-auth-duplicate-requests
+ - proxy-auth-invalid-requests
+ - proxy-auth-malformed-requests
+ - proxy-auth-unknown-types
+
+4. **Proxy accounting counters:** [optional]
+
+ - proxy-accounting-requests
+ - proxy-accounting-responses
+ - proxy-acct-dropped-requests
+ - proxy-acct-duplicate-requests
+ - proxy-acct-invalid-requests
+ - proxy-acct-malformed-requests
+ - proxy-acct-unknown-types
+
+## configuration
Sample:
By default, the server is enabled and can be queried from every client.
FreeRADIUS will only respond to status-server messages if the status-server virtual server has been enabled.
To do this, create a link from the sites-enabled directory to the status file in the sites-available directory:
- * cd sites-enabled
- * ln -s ../sites-available/status status
+
+- cd sites-enabled
+- ln -s ../sites-available/status status
and restart/reload your FreeRADIUS server.
---
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Ffreeradius%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Ffreeradius%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/go_expvar/README.md b/collectors/python.d.plugin/go_expvar/README.md
index 3942a7be..7d78fabd 100644
--- a/collectors/python.d.plugin/go_expvar/README.md
+++ b/collectors/python.d.plugin/go_expvar/README.md
@@ -7,28 +7,34 @@ The `go_expvar` module can monitor any Go application that exposes its metrics w
For the memory statistics, it produces the following charts:
-1. **Heap allocations** in kB
- * alloc: size of objects allocated on the heap
- * inuse: size of allocated heap spans
+1. **Heap allocations** in kB
-2. **Stack allocations** in kB
- * inuse: size of allocated stack spans
+ - alloc: size of objects allocated on the heap
+ - inuse: size of allocated heap spans
-3. **MSpan allocations** in kB
- * inuse: size of allocated mspan structures
+2. **Stack allocations** in kB
-4. **MCache allocations** in kB
- * inuse: size of allocated mcache structures
+ - inuse: size of allocated stack spans
-5. **Virtual memory** in kB
- * sys: size of reserved virtual address space
+3. **MSpan allocations** in kB
-6. **Live objects**
- * live: number of live objects in memory
+ - inuse: size of allocated mspan structures
-7. **GC pauses average** in ns
- * avg: average duration of all GC stop-the-world pauses
+4. **MCache allocations** in kB
+ - inuse: size of allocated mcache structures
+
+5. **Virtual memory** in kB
+
+ - sys: size of reserved virtual address space
+
+6. **Live objects**
+
+ - live: number of live objects in memory
+
+7. **GC pauses average** in ns
+
+ - avg: average duration of all GC stop-the-world pauses
## Monitoring Go Applications
@@ -102,9 +108,9 @@ Apart from the runtime memory stats, this application publishes two counters and
number of currently running Goroutines and updates these stats every second.
In the next section, we will cover how to monitor and chart these exposed stats with
-the use of `netdata`s ```go_expvar``` module.
+the use of Netdata's `go_expvar` module.
-### Using netdata go_expvar module
+### Using Netdata go_expvar module
The `go_expvar` module is disabled by default. To enable it, edit [`python.d.conf`](../python.d.conf)
(to edit it on your system run `/etc/netdata/edit-config python.d.conf`), and change the `go_expvar`
@@ -141,22 +147,30 @@ app1:
Let's go over each of the defined options:
- name: 'app1'
+```
+name: 'app1'
+```
-This is the job name that will appear at the netdata dashboard.
+This is the job name that will appear on the Netdata dashboard.
If not defined, the job_name (top level key) will be used.
- url: 'http://127.0.0.1:8080/debug/vars'
+```
+url: 'http://127.0.0.1:8080/debug/vars'
+```
This is the URL of the expvar endpoint. As the expvar handler can be installed
in a custom path, the whole URL has to be specified. This value is mandatory.
- collect_memstats: true
+```
+collect_memstats: true
+```
Whether to enable collecting stats about Go runtime's memory. You can find more
information about the exposed values at the [runtime package docs](https://golang.org/pkg/runtime/#MemStats).
- extra_charts: {}
+```
+extra_charts: {}
+```
Enables the user to specify custom expvars to monitor and chart.
Will be explained in more detail below.
@@ -164,52 +178,58 @@ Will be explained in more detail below.
**Note: if `collect_memstats` is disabled and no `extra_charts` are defined, the plugin will
disable itself, as there will be no data to collect!**
-Apart from these options, each job supports options inherited from netdata's `python.d.plugin`
+Apart from these options, each job supports options inherited from Netdata's `python.d.plugin`
and its base `UrlService` class. These are:
- update_every: 1 # the job's data collection frequency
- priority: 60000 # the job's order on the dashboard
- user: admin # use when the expvar endpoint is protected by HTTP Basic Auth
- password: sekret # use when the expvar endpoint is protected by HTTP Basic Auth
+```
+update_every: 1 # the job's data collection frequency
+priority: 60000 # the job's order on the dashboard
+user: admin # use when the expvar endpoint is protected by HTTP Basic Auth
+password: sekret # use when the expvar endpoint is protected by HTTP Basic Auth
+```
### Monitoring custom vars with go_expvar
-Now, memory stats might be useful, but what if you want netdata to monitor some custom values
+Now, memory stats might be useful, but what if you want Netdata to monitor some custom values
that your Go application exposes? The `go_expvar` module can do that as well with the use of
the `extra_charts` configuration variable.
-The `extra_charts` variable is a YaML list of netdata chart definitions.
+The `extra_charts` variable is a YAML list of Netdata chart definitions.
Each chart definition has the following keys:
- id: netdata chart ID
- options: a key-value mapping of chart options
- lines: a list of line definitions
+```
+id: Netdata chart ID
+options: a key-value mapping of chart options
+lines: a list of line definitions
+```
**Note: please do not use dots in the chart or line ID field.
See [this issue](https://github.com/netdata/netdata/pull/1902#issuecomment-284494195) for explanation.**
-Please see these two links to the official netdata documentation for more information about the values:
+Please see these two links to the official Netdata documentation for more information about the values:
-- [External plugins - charts](../../plugins.d/#chart)
-- [Chart variables](../#global-variables-order-and-chart)
+- [External plugins - charts](../../plugins.d/#chart)
+- [Chart variables](../#global-variables-order-and-chart)
**Line definitions**
Each chart can define multiple lines (dimensions).
A line definition is a key-value mapping of line options.
Each line can have the following options:
-
- # mandatory
- expvar_key: the name of the expvar as present in the JSON output of /debug/vars endpoint
- expvar_type: value type; supported are "float" or "int"
- id: the id of this line/dimension in netdata
-
- # optional - netdata defaults are used if these options are not defined
- name: ''
- algorithm: absolute
- multiplier: 1
- divisor: 100 if expvar_type == float, 1 if expvar_type == int
- hidden: False
+
+```
+# mandatory
+expvar_key: the name of the expvar as present in the JSON output of /debug/vars endpoint
+expvar_type: value type; supported are "float" or "int"
+id: the id of this line/dimension in Netdata
+
+# optional - Netdata defaults are used if these options are not defined
+name: ''
+algorithm: absolute
+multiplier: 1
+divisor: 100 if expvar_type == float, 1 if expvar_type == int
+hidden: False
+```
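+
+Putting these pieces together, a single chart definition might look like the
+minimal sketch below (the `expvar_key` and all chart option values are
+made-up examples, not defaults):
+
+```yaml
+extra_charts:
+  - id: "goroutine_count"
+    options:
+      name: goroutines
+      title: "Number of goroutines"
+      units: goroutines
+      family: expvar_example
+      context: expvar.goroutines
+      chart_type: line
+    lines:
+      - {expvar_key: 'runtime.goroutines', expvar_type: int, id: 'goroutines'}
+```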
Please see the following link for more information about the options and their default values:
[External plugins - dimensions](../../plugins.d/#dimension)
@@ -219,6 +239,7 @@ All dicts in the resulting JSON document are then flattened to one level.
Expvar names are joined together with '.' when flattening.
Example:
+
```
{
"counters": {"cnt1": 1042, "cnt2": 1512.9839999999983},
@@ -267,11 +288,10 @@ app1:
**Netdata charts example**
-The images below show how do the final charts in netdata look.
+The images below show how the final charts in Netdata look.
![Memory stats charts](https://cloud.githubusercontent.com/assets/15180106/26762052/62b4af58-493b-11e7-9e69-146705acfc2c.png)
![Custom charts](https://cloud.githubusercontent.com/assets/15180106/26762051/62ae915e-493b-11e7-8518-bd25a3886650.png)
-
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fgo_expvar%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fgo_expvar%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/haproxy/README.md b/collectors/python.d.plugin/haproxy/README.md
index 4bd80a23..c4bb0447 100644
--- a/collectors/python.d.plugin/haproxy/README.md
+++ b/collectors/python.d.plugin/haproxy/README.md
@@ -6,27 +6,29 @@ And health metrics such as backend servers status (server check should be used).
Plugin can obtain data from url **OR** unix socket.
**Requirement:**
-Socket MUST be readable AND writable by netdata user.
+Socket MUST be readable AND writable by the `netdata` user.
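+
+For reference, a job defines either a `url` or a `socket` key; a minimal
+sketch (job names, URL and socket path below are assumptions, not defaults):
+
+```yaml
+via_url:
+  url: 'http://127.0.0.1:7000/haproxy_stats;csv;norefresh'
+
+via_socket:
+  socket: '/var/run/haproxy/admin.sock'
+```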
It produces:
-1. **Frontend** family charts
- * Kilobytes in/s
- * Kilobytes out/s
- * Sessions current
- * Sessions in queue current
+1. **Frontend** family charts
-2. **Backend** family charts
- * Kilobytes in/s
- * Kilobytes out/s
- * Sessions current
- * Sessions in queue current
+ - Kilobytes in/s
+ - Kilobytes out/s
+ - Sessions current
+ - Sessions in queue current
-3. **Health** chart
- * number of failed servers for every backend (in DOWN state)
+2. **Backend** family charts
+ - Kilobytes in/s
+ - Kilobytes out/s
+ - Sessions current
+ - Sessions in queue current
-### configuration
+3. **Health** chart
+
+ - number of failed servers for every backend (in DOWN state)
+
+## configuration
Sample:
@@ -48,4 +50,4 @@ If no configuration is given, module will fail to run.
---
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fhaproxy%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fhaproxy%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/hddtemp/README.md b/collectors/python.d.plugin/hddtemp/README.md
index d9f254d5..03474c89 100644
--- a/collectors/python.d.plugin/hddtemp/README.md
+++ b/collectors/python.d.plugin/hddtemp/README.md
@@ -7,7 +7,7 @@ Running `hddtemp` in daemonized mode with access on tcp port
It produces one chart **Temperature** with dynamic number of dimensions (one per disk)
-### configuration
+## configuration
Sample:
@@ -21,4 +21,4 @@ If no configuration is given, module will attempt to connect to hddtemp daemon o
---
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fhddtemp%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fhddtemp%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/httpcheck/README.md b/collectors/python.d.plugin/httpcheck/README.md
index 4cd024d1..99b28cfe 100644
--- a/collectors/python.d.plugin/httpcheck/README.md
+++ b/collectors/python.d.plugin/httpcheck/README.md
@@ -4,18 +4,20 @@ Module monitors remote http server for availability and response time.
Following charts are drawn per job:
-1. **Response time** ms
- * Time in 0.1 ms resolution in which the server responds.
- If the connection failed, the value is missing.
+1. **Response time** ms
-2. **Status** boolean
- * Connection successful
- * Unexpected content: No Regex match found in the response
- * Unexpected status code: Do we get 500 errors?
- * Connection failed: port not listening or blocked
- * Connection timed out: host or port unreachable
+ - Time in 0.1 ms resolution in which the server responds.
+ If the connection failed, the value is missing.
-### configuration
+2. **Status** boolean
+
+ - Connection successful
+ - Unexpected content: No Regex match found in the response
+ - Unexpected status code: Do we get 500 errors?
+ - Connection failed: port not listening or blocked
+ - Connection timed out: host or port unreachable
+
+## configuration
Sample configuration and their default values.
@@ -32,12 +34,12 @@ server:
### notes
- * The status chart is primarily intended for alarms, badges or for access via API.
- * A system/service/firewall might block netdata's access if a portscan or
- similar is detected.
- * This plugin is meant for simple use cases. Currently, the accuracy of the
- response time is low and should be used as reference only.
+- The status chart is primarily intended for alarms, badges or for access via API.
+- A system/service/firewall might block Netdata's access if a portscan or
+ similar is detected.
+- This plugin is meant for simple use cases. Currently, the accuracy of the
+ response time is low and should be used as reference only.
---
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fhttpcheck%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fhttpcheck%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/icecast/README.md b/collectors/python.d.plugin/icecast/README.md
index 068da6a0..eabfee0a 100644
--- a/collectors/python.d.plugin/icecast/README.md
+++ b/collectors/python.d.plugin/icecast/README.md
@@ -3,14 +3,16 @@
This module will monitor number of listeners for active sources.
**Requirements:**
- * icecast version >= 2.4.0
+
+- icecast version >= 2.4.0
It produces the following charts:
-1. **Listeners** in listeners
- * source number
+1. **Listeners** in listeners
+
+ - source number
-### configuration
+## configuration
Needs only `url` to server's `/status-json.xsl`
@@ -25,4 +27,4 @@ Without configuration, module attempts to connect to `http://localhost:8443/stat
---
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Ficecast%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Ficecast%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/ipfs/README.md b/collectors/python.d.plugin/ipfs/README.md
index a8392037..63963150 100644
--- a/collectors/python.d.plugin/ipfs/README.md
+++ b/collectors/python.d.plugin/ipfs/README.md
@@ -2,14 +2,16 @@
Module monitors [IPFS](https://ipfs.io) basic information.
-1. **Bandwidth** in kbits/s
- * in
- * out
+1. **Bandwidth** in kbits/s
-2. **Peers**
- * peers
+ - in
+ - out
-### configuration
+2. **Peers**
+
+ - peers
+
+## configuration
Only url to IPFS server is needed.
@@ -23,5 +25,4 @@ localhost:
---
-
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fipfs%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fipfs%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/isc_dhcpd/README.md b/collectors/python.d.plugin/isc_dhcpd/README.md
index 67547e2f..f90cd041 100644
--- a/collectors/python.d.plugin/isc_dhcpd/README.md
+++ b/collectors/python.d.plugin/isc_dhcpd/README.md
@@ -3,22 +3,25 @@
Module monitor leases database to show all active leases for given pools.
**Requirements:**
- * dhcpd leases file MUST BE readable by netdata
- * pools MUST BE in CIDR format
+
+- dhcpd leases file MUST BE readable by Netdata
+- pools MUST BE in CIDR format
It produces:
-1. **Pools utilization** Aggregate chart for all pools.
- * utilization in percent
+1. **Pools utilization** Aggregate chart for all pools.
+
+ - utilization in percent
+
+2. **Total leases**
-2. **Total leases**
- * leases (overall number of leases for all pools)
+ - leases (overall number of leases for all pools)
-3. **Active leases** for every pools
- * leases (number of active leases in pool)
+3. **Active leases** for every pools
+ - leases (number of active leases in pool)
-### configuration
+## configuration
Sample:
@@ -33,4 +36,4 @@ The module will not work If no configuration is given.
---
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fisc_dhcpd%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fisc_dhcpd%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/litespeed/README.md b/collectors/python.d.plugin/litespeed/README.md
index 88b67253..586973bf 100644
--- a/collectors/python.d.plugin/litespeed/README.md
+++ b/collectors/python.d.plugin/litespeed/README.md
@@ -4,39 +4,48 @@ Module monitor litespeed web server performance metrics.
It produces:
-1. **Network Throughput HTTP** in kilobits/s
- * in
- * out
+1. **Network Throughput HTTP** in kilobits/s
-2. **Network Throughput HTTPS** in kilobits/s
- * in
- * out
+ - in
+ - out
-3. **Connections HTTP** in connections
- * free
- * used
+2. **Network Throughput HTTPS** in kilobits/s
-4. **Connections HTTPS** in connections
- * free
- * used
+ - in
+ - out
-5. **Requests** in requests/s
- * requests
+3. **Connections HTTP** in connections
-6. **Requests In Processing** in requests
- * processing
+ - free
+ - used
-7. **Public Cache Hits** in hits/s
- * hits
+4. **Connections HTTPS** in connections
-8. **Private Cache Hits** in hits/s
- * hits
+ - free
+ - used
-9. **Static Hits** in hits/s
- * hits
+5. **Requests** in requests/s
+ - requests
+
+6. **Requests In Processing** in requests
+
+ - processing
+
+7. **Public Cache Hits** in hits/s
+
+ - hits
+
+8. **Private Cache Hits** in hits/s
+
+ - hits
+
+9. **Static Hits** in hits/s
+
+ - hits
+
+## configuration
-### configuration
```yaml
local:
path : 'PATH'
@@ -46,4 +55,4 @@ If no configuration is given, module will use "/tmp/lshttpd/".
---
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Flitespeed%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Flitespeed%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/logind/README.md b/collectors/python.d.plugin/logind/README.md
index c35630c8..5aa1fa62 100644
--- a/collectors/python.d.plugin/logind/README.md
+++ b/collectors/python.d.plugin/logind/README.md
@@ -4,22 +4,25 @@ This module monitors active sessions, users, and seats tracked by systemd-logind
It provides the following charts:
-1. **Sessions** Tracks the total number of sessions.
- * Graphical: Local graphical sessions (running X11, or Wayland, or something else).
- * Console: Local console sessions.
- * Remote: Remote sessions.
+1. **Sessions** Tracks the total number of sessions.
-2. **Users** Tracks total number of unique user logins of each type.
- * Graphical
- * Console
- * Remote
+ - Graphical: Local graphical sessions (running X11, or Wayland, or something else).
+ - Console: Local console sessions.
+ - Remote: Remote sessions.
-3. **Seats** Total number of seats in use.
- * Seats
+2. **Users** Tracks total number of unique user logins of each type.
-### configuration
+ - Graphical
+ - Console
+ - Remote
-This module needs no configuration. Just make sure the netdata user
+3. **Seats** Total number of seats in use.
+
+ - Seats
+
+## configuration
+
+This module needs no configuration. Just make sure the `netdata` user
can run the `loginctl` command and get a session list without having to
specify a path.
@@ -32,25 +35,25 @@ specify it using the `command` key like so:
command: '/path/to/other/command'
```
-### notes
+## notes
-* This module's ability to track logins is dependent on what PAM services
-are configured to register sessions with logind. In particular, for
-most systems, it will only track TTY logins, local desktop logins,
-and logins through remote shell connections.
+- This module's ability to track logins is dependent on what PAM services
+ are configured to register sessions with logind. In particular, for
+ most systems, it will only track TTY logins, local desktop logins,
+ and logins through remote shell connections.
-* The users chart counts _usernames_ not UID's. This is potentially
-important in configurations where multiple users have the same UID.
+- The users chart counts _usernames_, not UIDs. This is potentially
+ important in configurations where multiple users have the same UID.
-* The users chart counts any given user name up to once for _each_ type
-of login. So if the same user has a graphical and a console login on a
-system, they will show up once in the graphical count, and once in the
-console count.
+- The users chart counts any given user name up to once for _each_ type
+ of login. So if the same user has a graphical and a console login on a
+ system, they will show up once in the graphical count, and once in the
+ console count.
-* Because the data collection process is rather expensive, this plugin
-is currently disabled by default, and needs to be explicitly enabled in
-`/etc/netdata/python.d.conf` before it will run.
+- Because the data collection process is rather expensive, this plugin
+ is currently disabled by default, and needs to be explicitly enabled in
+ `/etc/netdata/python.d.conf` before it will run (see the sketch below).
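+
+A minimal sketch of that change (the module is enabled the same way other
+python.d modules are):
+
+```yaml
+# /etc/netdata/python.d.conf
+logind: yes
+```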
---
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Flogind%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Flogind%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/megacli/README.md b/collectors/python.d.plugin/megacli/README.md
index e96015dd..afc8cbda 100644
--- a/collectors/python.d.plugin/megacli/README.md
+++ b/collectors/python.d.plugin/megacli/README.md
@@ -3,48 +3,54 @@
Module collects adapter, physical drives and battery stats.
**Requirements:**
- * `megacli` program
- * `sudo` program
- * `netdata` user needs to be able to be able to sudo the `megacli` program without password
+
+- `megacli` program
+- `sudo` program
+- `netdata` user needs to be able to sudo the `megacli` program without a password
To grab stats it executes:
- * `sudo -n megacli -LDPDInfo -aAll`
- * `sudo -n megacli -AdpBbuCmd -a0`
+- `sudo -n megacli -LDPDInfo -aAll`
+- `sudo -n megacli -AdpBbuCmd -a0`
It produces:
-1. **Adapter State**
+1. **Adapter State**
+
+2. **Physical Drives Media Errors**
-2. **Physical Drives Media Errors**
+3. **Physical Drives Predictive Failures**
-3. **Physical Drives Predictive Failures**
+4. **Battery Relative State of Charge**
-4. **Battery Relative State of Charge**
+5. **Battery Cycle Count**
-5. **Battery Cycle Count**
+## prerequisite
-### prerequisite
This module uses `megacli` which can only be executed by root. It uses
`sudo` and assumes that it is configured such that the `netdata` user can
execute `megacli` as root without password.
Add to `sudoers`:
- netdata ALL=(root) NOPASSWD: /path/to/megacli
+```
+netdata ALL=(root) NOPASSWD: /path/to/megacli
+```
### configuration
**megacli** is disabled by default. Should be explicitly enabled in `python.d.conf`.
+
```yaml
megacli: yes
```
Battery stats disabled by default. To enable them modify `megacli.conf`.
+
```yaml
do_battery: yes
```
---
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fmegacli%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fmegacli%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/memcached/README.md b/collectors/python.d.plugin/memcached/README.md
index 98627c4a..169a5f7b 100644
--- a/collectors/python.d.plugin/memcached/README.md
+++ b/collectors/python.d.plugin/memcached/README.md
@@ -2,58 +2,71 @@
Memcached monitoring module. Data grabbed from [stats interface](https://github.com/memcached/memcached/wiki/Commands#stats).
-1. **Network** in kilobytes/s
- * read
- * written
+1. **Network** in kilobytes/s
-2. **Connections** per second
- * current
- * rejected
- * total
+ - read
+ - written
-3. **Items** in cluster
- * current
- * total
+2. **Connections** per second
-4. **Evicted and Reclaimed** items
- * evicted
- * reclaimed
+ - current
+ - rejected
+ - total
-5. **GET** requests/s
- * hits
- * misses
+3. **Items** in cluster
-6. **GET rate** rate in requests/s
- * rate
+ - current
+ - total
-7. **SET rate** rate in requests/s
- * rate
+4. **Evicted and Reclaimed** items
-8. **DELETE** requests/s
- * hits
- * misses
+ - evicted
+ - reclaimed
-9. **CAS** requests/s
- * hits
- * misses
- * bad value
+5. **GET** requests/s
+
+ - hits
+ - misses
+
+6. **GET rate** rate in requests/s
+
+ - rate
+
+7. **SET rate** rate in requests/s
+
+ - rate
+
+8. **DELETE** requests/s
+
+ - hits
+ - misses
+
+9. **CAS** requests/s
+
+ - hits
+ - misses
+ - bad value
10. **Increment** requests/s
- * hits
- * misses
+
+ - hits
+ - misses
11. **Decrement** requests/s
- * hits
- * misses
+
+ - hits
+ - misses
12. **Touch** requests/s
- * hits
- * misses
+
+ - hits
+ - misses
13. **Touch rate** rate in requests/s
- * rate
-### configuration
+ - rate
+
+## configuration
Sample:
@@ -68,4 +81,4 @@ If no configuration is given, module will attempt to connect to memcached instan
---
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fmemcached%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fmemcached%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/mongodb/README.md b/collectors/python.d.plugin/mongodb/README.md
index 3ddf137d..fd694c1e 100644
--- a/collectors/python.d.plugin/mongodb/README.md
+++ b/collectors/python.d.plugin/mongodb/README.md
@@ -3,135 +3,161 @@
Module monitor mongodb performance and health metrics
**Requirements:**
- * `python-pymongo` package v2.4+.
-You need to install it manually.
+- `python-pymongo` package v2.4+.
+You need to install it manually.
Number of charts depends on mongodb version, storage engine and other features (replication):
-1. **Read requests**:
- * query
- * getmore (operation the cursor executes to get additional data from query)
-
-2. **Write requests**:
- * insert
- * delete
- * update
-
-3. **Active clients**:
- * readers (number of clients with read operations in progress or queued)
- * writers (number of clients with write operations in progress or queued)
-
-4. **Journal transactions**:
- * commits (count of transactions that have been written to the journal)
-
-5. **Data written to the journal**:
- * volume (volume of data)
-
-6. **Background flush** (MMAPv1):
- * average ms (average time taken by flushes to execute)
- * last ms (time taken by the last flush)
-
-8. **Read tickets** (WiredTiger):
- * in use (number of read tickets in use)
- * available (number of available read tickets remaining)
-
-9. **Write tickets** (WiredTiger):
- * in use (number of write tickets in use)
- * available (number of available write tickets remaining)
-
-10. **Cursors**:
- * opened (number of cursors currently opened by MongoDB for clients)
- * timedOut (number of cursors that have timed)
- * noTimeout (number of open cursors with timeout disabled)
-
-11. **Connections**:
- * connected (number of clients currently connected to the database server)
- * unused (number of unused connections available for new clients)
-
-12. **Memory usage metrics**:
- * virtual
- * resident (amount of memory used by the database process)
- * mapped
- * non mapped
-
-13. **Page faults**:
- * page faults (number of times MongoDB had to request from disk)
-
-14. **Cache metrics** (WiredTiger):
- * percentage of bytes currently in the cache (amount of space taken by cached data)
- * percantage of tracked dirty bytes in the cache (amount of space taken by dirty data)
-
-15. **Pages evicted from cache** (WiredTiger):
- * modified
- * unmodified
-
-16. **Queued requests**:
- * readers (number of read request currently queued)
- * writers (number of write request currently queued)
-
-17. **Errors**:
- * msg (number of message assertions raised)
- * warning (number of warning assertions raised)
- * regular (number of regular assertions raised)
- * user (number of assertions corresponding to errors generated by users)
-
-18. **Storage metrics** (one chart for every database)
- * dataSize (size of all documents + padding in the database)
- * indexSize (size of all indexes in the database)
- * storageSize (size of all extents in the database)
-
-19. **Documents in the database** (one chart for all databases)
- * documents (number of objects in the database among all the collections)
-
-20. **tcmalloc metrics**
- * central cache free
- * current total thread cache
- * pageheap free
- * pageheap unmapped
- * thread cache free
- * transfer cache free
- * heap size
-
-21. **Commands total/failed rate**
- * count
- * createIndex
- * delete
- * eval
- * findAndModify
- * insert
-
-22. **Locks metrics** (acquireCount metrics - number of times the lock was acquired in the specified mode)
- * Global lock
- * Database lock
- * Collection lock
- * Metadata lock
- * oplog lock
-
-23. **Replica set members state**
- * state
-
-24. **Oplog window**
- * window (interval of time between the oldest and the latest entries in the oplog)
-
-25. **Replication lag**
- * member (time when last entry from the oplog was applied for every member)
-
-26. **Replication set member heartbeat latency**
- * member (time when last heartbeat was received from replica set member)
-
-### prerequisite
-Create a read-only user for the netdata in the admin database.
-
-1. Authenticate as the admin user.
+1. **Read requests**:
+
+ - query
+ - getmore (operation the cursor executes to get additional data from query)
+
+2. **Write requests**:
+
+ - insert
+ - delete
+ - update
+
+3. **Active clients**:
+
+ - readers (number of clients with read operations in progress or queued)
+ - writers (number of clients with write operations in progress or queued)
+
+4. **Journal transactions**:
+
+ - commits (count of transactions that have been written to the journal)
+
+5. **Data written to the journal**:
+
+ - volume (volume of data)
+
+6. **Background flush** (MMAPv1):
+
+ - average ms (average time taken by flushes to execute)
+ - last ms (time taken by the last flush)
+
+7. **Read tickets** (WiredTiger):
+
+ - in use (number of read tickets in use)
+ - available (number of available read tickets remaining)
+
+8. **Write tickets** (WiredTiger):
+
+ - in use (number of write tickets in use)
+ - available (number of available write tickets remaining)
+
+9. **Cursors**:
+
+ - opened (number of cursors currently opened by MongoDB for clients)
+ - timedOut (number of cursors that have timed out)
+ - noTimeout (number of open cursors with timeout disabled)
+
+10. **Connections**:
+
+ - connected (number of clients currently connected to the database server)
+ - unused (number of unused connections available for new clients)
+
+11. **Memory usage metrics**:
+
+ - virtual
+ - resident (amount of memory used by the database process)
+ - mapped
+ - non mapped
+
+12. **Page faults**:
+
+ - page faults (number of times MongoDB had to request from disk)
+
+13. **Cache metrics** (WiredTiger):
+
+ - percentage of bytes currently in the cache (amount of space taken by cached data)
+ - percentage of tracked dirty bytes in the cache (amount of space taken by dirty data)
+
+14. **Pages evicted from cache** (WiredTiger):
+
+ - modified
+ - unmodified
+
+15. **Queued requests**:
+
+ - readers (number of read requests currently queued)
+ - writers (number of write requests currently queued)
+
+16. **Errors**:
+
+ - msg (number of message assertions raised)
+ - warning (number of warning assertions raised)
+ - regular (number of regular assertions raised)
+ - user (number of assertions corresponding to errors generated by users)
+
+17. **Storage metrics** (one chart for every database)
+
+ - dataSize (size of all documents + padding in the database)
+ - indexSize (size of all indexes in the database)
+ - storageSize (size of all extents in the database)
+
+18. **Documents in the database** (one chart for all databases)
+
+ - documents (number of objects in the database among all the collections)
+
+19. **tcmalloc metrics**
+
+ - central cache free
+ - current total thread cache
+ - pageheap free
+ - pageheap unmapped
+ - thread cache free
+ - transfer cache free
+ - heap size
+
+20. **Commands total/failed rate**
+
+ - count
+ - createIndex
+ - delete
+ - eval
+ - findAndModify
+ - insert
+
+21. **Locks metrics** (acquireCount metrics - number of times the lock was acquired in the specified mode)
+
+ - Global lock
+ - Database lock
+ - Collection lock
+ - Metadata lock
+ - oplog lock
+
+22. **Replica set members state**
+
+ - state
+
+23. **Oplog window**
+
+ - window (interval of time between the oldest and the latest entries in the oplog)
+
+24. **Replication lag**
+
+ - member (time when last entry from the oplog was applied for every member)
+
+25. **Replica set member heartbeat latency**
+
+ - member (time when last heartbeat was received from replica set member)
+
+## prerequisite
+
+Create a read-only user for Netdata in the admin database.
+
+1. Authenticate as the admin user.
```
use admin
db.auth("admin", "<MONGODB_ADMIN_PASSWORD>")
```
-2. Create a user.
+2. Create a user.
```
# MongoDB 2.x.
@@ -161,11 +187,10 @@ local:
port : 27017
user : 'netdata'
pass : 'netdata'
-
```
If no configuration is given, module will attempt to connect to mongodb daemon on `127.0.0.1:27017` address
---
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fmongodb%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fmongodb%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/mongodb/mongodb.chart.py b/collectors/python.d.plugin/mongodb/mongodb.chart.py
index 5db48cb1..0dbe82ff 100644
--- a/collectors/python.d.plugin/mongodb/mongodb.chart.py
+++ b/collectors/python.d.plugin/mongodb/mongodb.chart.py
@@ -3,6 +3,8 @@
# Author: ilyam8
# SPDX-License-Identifier: GPL-3.0-or-later
+import ssl
+
from copy import deepcopy
from datetime import datetime
from sys import exc_info
@@ -418,18 +420,31 @@ CHARTS = {
}
}
+DEFAULT_HOST = '127.0.0.1'
+DEFAULT_PORT = 27017
+DEFAULT_TIMEOUT = 100
+DEFAULT_AUTHDB = 'admin'
+
+CONN_PARAM_HOST = 'host'
+CONN_PARAM_PORT = 'port'
+CONN_PARAM_SERVER_SELECTION_TIMEOUT_MS = 'serverselectiontimeoutms'
+CONN_PARAM_SSL_SSL = 'ssl'
+CONN_PARAM_SSL_CERT_REQS = 'ssl_cert_reqs'
+CONN_PARAM_SSL_CA_CERTS = 'ssl_ca_certs'
+CONN_PARAM_SSL_CRL_FILE = 'ssl_crlfile'
+CONN_PARAM_SSL_CERT_FILE = 'ssl_certfile'
+CONN_PARAM_SSL_KEY_FILE = 'ssl_keyfile'
+CONN_PARAM_SSL_PEM_PASSPHRASE = 'ssl_pem_passphrase'
+
class Service(SimpleService):
def __init__(self, configuration=None, name=None):
SimpleService.__init__(self, configuration=configuration, name=name)
self.order = ORDER[:]
self.definitions = deepcopy(CHARTS)
- self.authdb = self.configuration.get('authdb', 'admin')
+ self.authdb = self.configuration.get('authdb', DEFAULT_AUTHDB)
self.user = self.configuration.get('user')
self.password = self.configuration.get('pass')
- self.host = self.configuration.get('host', '127.0.0.1')
- self.port = self.configuration.get('port', 27017)
- self.timeout = self.configuration.get('timeout', 100)
self.metrics_to_collect = deepcopy(DEFAULT_METRICS)
self.connection = None
self.do_replica = None
@@ -705,14 +720,53 @@ class Service(SimpleService):
return data
- def _create_connection(self):
- conn_vars = {'host': self.host, 'port': self.port}
+ def build_ssl_connection_params(self):
+ conf = self.configuration
+
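+ # map the user-supplied 'ssl_cert_reqs' value onto the ssl module's
+ # constants: unset -> omit the parameter, falsy -> CERT_NONE,
+ # anything else -> CERT_REQUIRED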
+ def cert_req(v):
+ if v is None:
+ return None
+ if not v:
+ return ssl.CERT_NONE
+ return ssl.CERT_REQUIRED
+
+ ssl_params = {
+ CONN_PARAM_SSL_SSL: conf.get(CONN_PARAM_SSL_SSL),
+ CONN_PARAM_SSL_CERT_REQS: cert_req(conf.get(CONN_PARAM_SSL_CERT_REQS)),
+ CONN_PARAM_SSL_CA_CERTS: conf.get(CONN_PARAM_SSL_CA_CERTS),
+ CONN_PARAM_SSL_CRL_FILE: conf.get(CONN_PARAM_SSL_CRL_FILE),
+ CONN_PARAM_SSL_CERT_FILE: conf.get(CONN_PARAM_SSL_CERT_FILE),
+ CONN_PARAM_SSL_KEY_FILE: conf.get(CONN_PARAM_SSL_KEY_FILE),
+ CONN_PARAM_SSL_PEM_PASSPHRASE: conf.get(CONN_PARAM_SSL_PEM_PASSPHRASE),
+ }
+
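+ # drop unset options so MongoClient falls back to its own defaults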
+ ssl_params = dict((k, v) for k, v in ssl_params.items() if v is not None)
+
+ return ssl_params
+
+ def build_connection_params(self):
+ conf = self.configuration
+ params = {
+ CONN_PARAM_HOST: conf.get(CONN_PARAM_HOST, DEFAULT_HOST),
+ CONN_PARAM_PORT: conf.get(CONN_PARAM_PORT, DEFAULT_PORT),
+ }
if hasattr(MongoClient, 'server_selection_timeout'):
- conn_vars.update({'serverselectiontimeoutms': self.timeout})
+ params[CONN_PARAM_SERVER_SELECTION_TIMEOUT_MS] = conf.get('timeout', DEFAULT_TIMEOUT)
+
+ params.update(self.build_ssl_connection_params())
+ return params
+
+ def _create_connection(self):
+ params = self.build_connection_params()
+ self.debug('creating connection, connection params: {0}'.format(sorted(params)))
+
try:
- connection = MongoClient(**conn_vars)
+ connection = MongoClient(**params)
if self.user and self.password:
+ self.debug('authenticating, user: {0}, password: {1}'.format(self.user, self.password))
getattr(connection, self.authdb).authenticate(name=self.user, password=self.password)
+ else:
+ self.debug('skip authenticating, user and password are not set')
# elif self.user:
# connection.admin.authenticate(name=self.user, mechanism='MONGODB-X509')
server_status = connection.admin.command('serverStatus')
diff --git a/collectors/python.d.plugin/mongodb/mongodb.conf b/collectors/python.d.plugin/mongodb/mongodb.conf
index 2dded40a..9f660f59 100644
--- a/collectors/python.d.plugin/mongodb/mongodb.conf
+++ b/collectors/python.d.plugin/mongodb/mongodb.conf
@@ -71,6 +71,16 @@
# user: 'username' # the mongodb username to use
# pass: 'password' # the mongodb password to use
#
+# SSL connection parameters (https://api.mongodb.com/python/current/examples/tls.html):
+#
+# ssl: yes # connect to the server using TLS
+# ssl_cert_reqs: yes # require a certificate from the server when TLS is enabled
+# ssl_ca_certs: '/path/to/ca.pem' # use a specific set of CA certificates
+# ssl_crlfile: '/path/to/crl.pem' # use a certificate revocation lists
+# ssl_certfile: '/path/to/client.pem' # use a client certificate
+# ssl_keyfile: '/path/to/key.pem' # use a specific client certificate key
+# ssl_pem_passphrase: 'passphrase' # use a passphrase to decrypt encrypted private keys
+#
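+# For example, a hypothetical TLS-enabled job could combine these options
+# (every path below is a placeholder):
+#
+# tls_local:
+#   name: 'tls_local'
+#   host: '127.0.0.1'
+#   port: 27017
+#   ssl: yes
+#   ssl_ca_certs: '/path/to/ca.pem'
+#   ssl_certfile: '/path/to/client.pem'
+#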
# ----------------------------------------------------------------------
# to connect to the mongodb on localhost, without a password:
diff --git a/collectors/python.d.plugin/monit/README.md b/collectors/python.d.plugin/monit/README.md
index 0f69aff2..a54b9d67 100644
--- a/collectors/python.d.plugin/monit/README.md
+++ b/collectors/python.d.plugin/monit/README.md
@@ -2,21 +2,24 @@
Monit monitoring module. Data is grabbed from stats XML interface (exists for a long time, but not mentioned in official documentation). Mostly this plugin shows statuses of monit targets, i.e. [statuses of specified checks](https://mmonit.com/monit/documentation/monit.html#Service-checks).
-1. **Filesystems**
- * Filesystems
- * Directories
- * Files
- * Pipes
+1. **Filesystems**
-2. **Applications**
- * Processes (+threads/childs)
- * Programs
+ - Filesystems
+ - Directories
+ - Files
+ - Pipes
-3. **Network**
- * Hosts (+latency)
- * Network interfaces
+2. **Applications**
-### configuration
+ - Processes (+threads/children)
+ - Programs
+
+3. **Network**
+
+ - Hosts (+latency)
+ - Network interfaces
+
+## configuration
Sample:
@@ -32,4 +35,4 @@ If no configuration is given, module will attempt to connect to monit as `http:/
---
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fmonit%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fmonit%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/mysql/README.md b/collectors/python.d.plugin/mysql/README.md
index f7028ab6..45f842d4 100644
--- a/collectors/python.d.plugin/mysql/README.md
+++ b/collectors/python.d.plugin/mysql/README.md
@@ -3,255 +3,305 @@
Module monitors one or more mysql servers
**Requirements:**
- * python library [MySQLdb](https://github.com/PyMySQL/mysqlclient-python) (faster) or [PyMySQL](https://github.com/PyMySQL/PyMySQL) (slower)
+
+- python library [MySQLdb](https://github.com/PyMySQL/mysqlclient-python) (faster) or [PyMySQL](https://github.com/PyMySQL/PyMySQL) (slower)
It will produce following charts (if data is available):
-1. **Bandwidth** in kilobits/s
- * in
- * out
-
-2. **Queries** in queries/sec
- * queries
- * questions
- * slow queries
-
-3. **Queries By Type** in queries/s
- * select
- * delete
- * update
- * insert
- * cache hits
- * replace
-
-4. **Handlerse** in handlers/s
- * commit
- * delete
- * prepare
- * read first
- * read key
- * read next
- * read prev
- * read rnd
- * read rnd next
- * rollback
- * savepoint
- * savepoint rollback
- * update
- * write
-
-4. **Table Locks** in locks/s
- * immediate
- * waited
-
-5. **Table Select Join Issuess** in joins/s
- * full join
- * full range join
- * range
- * range check
- * scan
-
-6. **Table Sort Issuess** in joins/s
- * merge passes
- * range
- * scan
-
-7. **Tmp Operations** in created/s
- * disk tables
- * files
- * tables
-
-8. **Connections** in connections/s
- * all
- * aborted
-
-9. **Connections Active** in connections/s
- * active
- * limit
- * max active
-
-10. **Binlog Cache** in threads
- * disk
- * all
-
-11. **Threads** in transactions/s
- * connected
- * cached
- * running
-
-12. **Threads Creation Rate** in threads/s
- * created
-
-13. **Threads Cache Misses** in misses
- * misses
-
-14. **InnoDB I/O Bandwidth** in KiB/s
- * read
- * write
-
-15. **InnoDB I/O Operations** in operations/s
- * reads
- * writes
- * fsyncs
-
-16. **InnoDB Pending I/O Operations** in operations/s
- * reads
- * writes
- * fsyncs
-
-17. **InnoDB Log Operations** in operations/s
- * waits
- * write requests
- * writes
-
-18. **InnoDB OS Log Pending Operations** in operations
- * fsyncs
- * writes
-
-19. **InnoDB OS Log Operations** in operations/s
- * fsyncs
-
-20. **InnoDB OS Log Bandwidth** in KiB/s
- * write
-
-21. **InnoDB Current Row Locks** in operations
- * current waits
-
-22. **InnoDB Row Operations** in operations/s
- * inserted
- * read
- * updated
- * deleted
-
-23. **InnoDB Buffer Pool Pagess** in pages
- * data
- * dirty
- * free
- * misc
- * total
-
-24. **InnoDB Buffer Pool Flush Pages Requests** in requests/s
- * flush pages
-
-25. **InnoDB Buffer Pool Bytes** in MiB
- * data
- * dirty
-
-26. **InnoDB Buffer Pool Operations** in operations/s
- * disk reads
- * wait free
-
-27. **QCache Operations** in queries/s
- * hits
- * lowmem prunes
- * inserts
- * no caches
-
-28. **QCache Queries in Cache** in queries
- * queries
-
-29. **QCache Free Memory** in MiB
- * free
-
-30. **QCache Memory Blocks** in blocks
- * free
- * total
-
-31. **MyISAM Key Cache Blocks** in blocks
- * unused
- * used
- * not flushed
-
-32. **MyISAM Key Cache Requests** in requests/s
- * reads
- * writes
+1. **Bandwidth** in kilobits/s
+
+ - in
+ - out
+
+2. **Queries** in queries/sec
+
+ - queries
+ - questions
+ - slow queries
+
+3. **Queries By Type** in queries/s
+
+ - select
+ - delete
+ - update
+ - insert
+ - cache hits
+ - replace
+
+4. **Handlers** in handlers/s
+
+ - commit
+ - delete
+ - prepare
+ - read first
+ - read key
+ - read next
+ - read prev
+ - read rnd
+ - read rnd next
+ - rollback
+ - savepoint
+ - savepoint rollback
+ - update
+ - write
+
+5. **Table Locks** in locks/s
+
+ - immediate
+ - waited
+
+6. **Table Select Join Issues** in joins/s
+
+ - full join
+ - full range join
+ - range
+ - range check
+ - scan
+
+7. **Table Sort Issues** in joins/s
+
+ - merge passes
+ - range
+ - scan
+
+8. **Tmp Operations** in created/s
+
+ - disk tables
+ - files
+ - tables
+
+9. **Connections** in connections/s
+
+ - all
+ - aborted
+
+10. **Connections Active** in connections/s
+
+ - active
+ - limit
+ - max active
+
+11. **Binlog Cache** in threads
+
+ - disk
+ - all
+
+12. **Threads** in transactions/s
+
+ - connected
+ - cached
+ - running
+
+13. **Threads Creation Rate** in threads/s
+
+ - created
+
+14. **Threads Cache Misses** in misses
+
+ - misses
+
+15. **InnoDB I/O Bandwidth** in KiB/s
+
+ - read
+ - write
+
+16. **InnoDB I/O Operations** in operations/s
+
+ - reads
+ - writes
+ - fsyncs
+
+17. **InnoDB Pending I/O Operations** in operations/s
+
+ - reads
+ - writes
+ - fsyncs
+
+18. **InnoDB Log Operations** in operations/s
+
+ - waits
+ - write requests
+ - writes
+
+19. **InnoDB OS Log Pending Operations** in operations
+
+ - fsyncs
+ - writes
+
+20. **InnoDB OS Log Operations** in operations/s
+
+ - fsyncs
+
+21. **InnoDB OS Log Bandwidth** in KiB/s
+
+ - write
+
+22. **InnoDB Current Row Locks** in operations
+
+ - current waits
+
+23. **InnoDB Row Operations** in operations/s
+
+ - inserted
+ - read
+ - updated
+ - deleted
+
+24. **InnoDB Buffer Pool Pages** in pages
+
+ - data
+ - dirty
+ - free
+ - misc
+ - total
+
+25. **InnoDB Buffer Pool Flush Pages Requests** in requests/s
+
+ - flush pages
+
+26. **InnoDB Buffer Pool Bytes** in MiB
+
+ - data
+ - dirty
+
+27. **InnoDB Buffer Pool Operations** in operations/s
+
+ - disk reads
+ - wait free
+
+28. **QCache Operations** in queries/s
+
+ - hits
+ - lowmem prunes
+ - inserts
+ - no caches
+
+29. **QCache Queries in Cache** in queries
+
+ - queries
+
+30. **QCache Free Memory** in MiB
+
+ - free
+
+31. **QCache Memory Blocks** in blocks
+
+ - free
+ - total
+
+32. **MyISAM Key Cache Blocks** in blocks
+
+ - unused
+ - used
+ - not flushed
33. **MyISAM Key Cache Requests** in requests/s
- * reads
- * writes
-34. **MyISAM Key Cache Disk Operations** in operations/s
- * reads
- * writes
+ - reads
+ - writes
+
+34. **MyISAM Key Cache Disk Operations** in operations/s
+
+ - reads
+ - writes
+
+35. **Open Files** in files
+
+ - files
+
+36. **Opened Files Rate** in files/s
+
+ - files
-35. **Open Files** in files
- * files
+37. **Binlog Statement Cache** in statements/s
-36. **Opened Files Rate** in files/s
- * files
+ - disk
+ - all
-37. **Binlog Statement Cache** in statements/s
- * disk
- * all
+38. **Connection Errors** in errors/s
-38. **Connection Errors** in errors/s
- * accept
- * internal
- * max
- * peer addr
- * select
- * tcpwrap
+ - accept
+ - internal
+ - max
+ - peer addr
+ - select
+ - tcpwrap
-39. **Slave Behind Seconds** in seconds
- * time
+39. **Slave Behind Seconds** in seconds
-40. **I/O / SQL Thread Running State** in bool
- * sql
- * io
+ - time
-41. **Replicated Writesets** in writesets/s
- * rx
- * tx
+40. **I/O / SQL Thread Running State** in bool
-42. **Replicated Bytes** in KiB/s
- * rx
- * tx
+ - sql
+ - io
-43. **Galera Queue** in writesets
- * rx
- * tx
+41. **Replicated Writesets** in writesets/s
-44. **Replication Conflicts** in transactions
- * bf aborts
- * cert fails
+ - rx
+ - tx
-45. **Flow Control** in ms
- * paused
+42. **Replicated Bytes** in KiB/s
-46. **Users CPU time** in percentage
- * users
+ - rx
+ - tx
+
+43. **Galera Queue** in writesets
+
+ - rx
+ - tx
+
+44. **Replication Conflicts** in transactions
+
+ - bf aborts
+ - cert fails
+
+45. **Flow Control** in ms
+
+ - paused
+
+46. **Users CPU time** in percentage
+
+ - users
**Per user statistics:**
-1. **Rows Operations** in operations/s
- * read
- * send
- * updated
- * inserted
- * deleted
+1. **Rows Operations** in operations/s
-2. **Commands** in commands/s
- * select
- * update
- * other
+ - read
+ - send
+ - updated
+ - inserted
+ - deleted
+2. **Commands** in commands/s
-### configuration
+ - select
+ - update
+ - other
+
+## configuration
You can provide, per server, the following:
-1. username which have access to database (defaults to 'root')
-2. password (defaults to none)
-3. mysql my.cnf configuration file
-4. mysql socket (optional)
-5. mysql host (ip or hostname)
-6. mysql port (defaults to 3306)
-7. ssl connection parameters
- - key: the path name of the client private key file.
- - cert: the path name of the client public key certificate file.
- - ca: the path name of the Certificate Authority (CA) certificate file. This option, if used, must specify the same certificate used by the server.
- - capath: the path name of the directory that contains trusted SSL CA certificate files.
- - cipher: the list of permitted ciphers for SSL encryption.
+1. username which has access to the database (defaults to 'root')
+2. password (defaults to none)
+3. mysql my.cnf configuration file
+4. mysql socket (optional)
+5. mysql host (ip or hostname)
+6. mysql port (defaults to 3306)
+7. ssl connection parameters (see the sketch after this list)
+
+ - key: the path name of the client private key file.
+ - cert: the path name of the client public key certificate file.
+ - ca: the path name of the Certificate Authority (CA) certificate file. This option, if used, must specify the same certificate used by the server.
+ - capath: the path name of the directory that contains trusted SSL CA certificate files.
+ - cipher: the list of permitted ciphers for SSL encryption.
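+
+A hypothetical job using the ssl parameters (the job name and every path
+below are placeholders, not defaults):
+
+```yaml
+tls_server:
+  host: 'my.db.host'
+  port: 3306
+  user: 'netdata'
+  ssl:
+    key: '/etc/ssl/mysql/client-key.pem'
+    cert: '/etc/ssl/mysql/client-cert.pem'
+    ca: '/etc/ssl/mysql/ca.pem'
+```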
Here is an example for 3 servers:
@@ -282,4 +332,4 @@ If no configuration is given, module will attempt to connect to mysql server via
---
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fmysql%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fmysql%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/mysql/mysql.chart.py b/collectors/python.d.plugin/mysql/mysql.chart.py
index 82bd9079..46d0712f 100644
--- a/collectors/python.d.plugin/mysql/mysql.chart.py
+++ b/collectors/python.d.plugin/mysql/mysql.chart.py
@@ -601,6 +601,33 @@ CHARTS = {
}
+def slave_status_chart_template(channel_name):
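+ # build the chart order and definitions for one replication channel;
+ # chart names and dimension ids are suffixed with the channel name
+ # (the default, unnamed channel uses an empty suffix)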
+ order = [
+ 'slave_behind_{0}'.format(channel_name),
+ 'slave_status_{0}'.format(channel_name)
+ ]
+
+ charts = {
+ order[0]: {
+ 'options': [None, 'Slave Behind Seconds Channel {0}'.format(channel_name),
+ 'seconds', 'slave', 'mysql.slave_behind', 'line'],
+ 'lines': [
+ ['Seconds_Behind_Master_{0}'.format(channel_name), 'seconds', 'absolute']
+ ]
+ },
+ order[1]: {
+ 'options': [None, 'Slave Status Channel {0}'.format(channel_name),
+ 'status', 'slave', 'mysql.slave_status', 'line'],
+ 'lines': [
+ ['Slave_SQL_Running_{0}'.format(channel_name), 'sql_running', 'absolute'],
+ ['Slave_IO_Running_{0}'.format(channel_name), 'io_running', 'absolute']
+ ]
+ },
+ }
+
+ return order, charts
+
+
def userstats_chart_template(name):
order = [
'userstats_rows_{0}'.format(name),
@@ -632,6 +659,10 @@ def userstats_chart_template(name):
return order, charts
+# https://dev.mysql.com/doc/refman/8.0/en/replication-channels.html
+DEFAULT_REPL_CHANNEL = ''
+
+
class Service(MySQLService):
def __init__(self, configuration=None, name=None):
MySQLService.__init__(self, configuration=configuration, name=name)
@@ -643,6 +674,7 @@ class Service(MySQLService):
variables=QUERY_VARIABLES,
user_statistics=QUERY_USER_STATISTICS,
)
+ self.repl_channels = [DEFAULT_REPL_CHANNEL]
def _get_data(self):
@@ -651,29 +683,24 @@ class Service(MySQLService):
if not raw_data:
return None
- to_netdata = dict()
+ data = dict()
if 'global_status' in raw_data:
global_status = dict(raw_data['global_status'][0])
for key in GLOBAL_STATS:
if key in global_status:
- to_netdata[key] = global_status[key]
- if 'Threads_created' in to_netdata and 'Connections' in to_netdata:
- to_netdata['Thread_cache_misses'] = round(int(to_netdata['Threads_created'])
- / float(to_netdata['Connections']) * 10000)
+ data[key] = global_status[key]
+ if 'Threads_created' in data and 'Connections' in data:
+ data['Thread_cache_misses'] = round(int(data['Threads_created']) / float(data['Connections']) * 10000)
if 'slave_status' in raw_data:
- if raw_data['slave_status'][0]:
- slave_raw_data = dict(zip([e[0] for e in raw_data['slave_status'][1]], raw_data['slave_status'][0][0]))
- for key, func in SLAVE_STATS:
- if key in slave_raw_data:
- to_netdata[key] = func(slave_raw_data[key])
- else:
- self.queries.pop('slave_status')
+ status = self.get_slave_status(raw_data['slave_status'])
+ if status:
+ data.update(status)
if 'user_statistics' in raw_data:
if raw_data['user_statistics'][0]:
- to_netdata.update(self.get_userstats(raw_data))
+ data.update(self.get_userstats(raw_data))
else:
self.queries.pop('user_statistics')
@@ -681,46 +708,75 @@ class Service(MySQLService):
variables = dict(raw_data['variables'][0])
for key in VARIABLES:
if key in variables:
- to_netdata[key] = variables[key]
-
- return to_netdata or None
-
- # raw_data['user_statistics'] contains the following data structure:
- # (
- # (
- # ('netdata', 42L, 0L, 1264L, 3.111252999999968, 2.968510299999994, 110267L, 19741424L, 0L, 0L, 1265L, 0L,
- # 0L, 0L, 3L, 0L, 1301L, 0L, 0L, 7633L, 0L, 83L, 44L, 0L, 0L),
- # ('root', 60L, 0L, 184L, 0.22856499999999966, 0.1601419999999998, 11605L, 1516513L, 0L, 9L, 220L, 0L, 2L, 1L,
- # 6L, 4L,127L, 0L, 0L, 45L, 0L, 45L, 0L, 0L, 0L)
- # ),
- # (
- # ('User', 253, 9, 128, 128, 0, 0),
- # ('Total_connections', 3, 2, 11, 11, 0, 0),
- # ('Concurrent_connections', 3, 1, 11, 11, 0, 0),
- # ('Connected_time', 3, 4, 11, 11, 0, 0),
- # ('Busy_time', 5, 21, 21, 21, 31, 0),
- # ('Cpu_time', 5, 18, 21, 21, 31, 0),
- # ('Bytes_received', 8, 6, 21, 21, 0, 0),
- # ('Bytes_sent', 8, 8, 21, 21, 0, 0),
- # ('Binlog_bytes_written', 8, 1, 21, 21, 0, 0),
- # ('Rows_read', 8, 1, 21, 21, 0, 0),
- # ('Rows_sent', 8, 4, 21, 21, 0, 0),
- # ('Rows_deleted', 8, 1, 21, 21, 0, 0),
- # ('Rows_inserted', 8, 1, 21, 21, 0, 0),
- # ('Rows_updated', 8, 1, 21, 21, 0, 0),
- # ('Select_commands', 8, 1, 21, 21, 0, 0),
- # ('Update_commands', 8, 1, 21, 21, 0, 0),
- # ('Other_commands', 8, 4, 21, 21, 0, 0),
- # ('Commit_transactions', 8, 1, 21, 21, 0, 0),
- # ('Rollback_transactions', 8, 1, 21, 21, 0, 0),
- # ('Denied_connections', 8, 4, 21, 21, 0, 0),
- # ('Lost_connections', 8, 1, 21, 21, 0, 0),
- # ('Access_denied', 8, 2, 21, 21, 0, 0),
- # ('Empty_queries', 8, 2, 21, 21, 0, 0),
- # ('Total_ssl_connections', 8, 1, 21, 21, 0, 0),
- # ('Max_statement_time_exceeded', 8, 1, 21, 21, 0, 0)),
- # )
+ data[key] = variables[key]
+
+ return data or None
+
+ def get_slave_status(self, slave_status_data):
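+ # 'slave_status_data' is a (rows, description) pair: one row per
+ # replication channel plus the cursor's column metadata; charts for
+ # channels seen for the first time are added on the fly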
+ rows, description = slave_status_data[0], slave_status_data[1]
+ description_keys = [v[0] for v in description]
+ if not rows:
+ return
+
+ data = dict()
+ for row in rows:
+ slave_data = dict(zip(description_keys, row))
+ channel_name = slave_data.get('Channel_Name', DEFAULT_REPL_CHANNEL)
+
+ if channel_name not in self.repl_channels and len(self.charts) > 0:
+ self.add_repl_channel_charts(channel_name)
+ self.repl_channels.append(channel_name)
+
+ for key, func in SLAVE_STATS:
+ if key not in slave_data:
+ continue
+
+ value = slave_data[key]
+ if channel_name:
+ key = '{0}_{1}'.format(key, channel_name)
+ data[key] = func(value)
+
+ return data
+
+ def add_repl_channel_charts(self, name):
+ self.add_new_charts(slave_status_chart_template, name)
+
def get_userstats(self, raw_data):
+ # raw_data['user_statistics'] contains the following data structure:
+ # (
+ # (
+ # ('netdata', 42L, 0L, 1264L, 3.111252999999968, 2.968510299999994, 110267L, 19741424L, 0L, 0L, 1265L, 0L,
+ # 0L, 0L, 3L, 0L, 1301L, 0L, 0L, 7633L, 0L, 83L, 44L, 0L, 0L),
+ # ('root', 60L, 0L, 184L, 0.22856499999999966, 0.1601419999999998, 11605L, 1516513L, 0L, 9L, 220L, 0L, 2L, 1L,
+ # 6L, 4L,127L, 0L, 0L, 45L, 0L, 45L, 0L, 0L, 0L)
+ # ),
+ # (
+ # ('User', 253, 9, 128, 128, 0, 0),
+ # ('Total_connections', 3, 2, 11, 11, 0, 0),
+ # ('Concurrent_connections', 3, 1, 11, 11, 0, 0),
+ # ('Connected_time', 3, 4, 11, 11, 0, 0),
+ # ('Busy_time', 5, 21, 21, 21, 31, 0),
+ # ('Cpu_time', 5, 18, 21, 21, 31, 0),
+ # ('Bytes_received', 8, 6, 21, 21, 0, 0),
+ # ('Bytes_sent', 8, 8, 21, 21, 0, 0),
+ # ('Binlog_bytes_written', 8, 1, 21, 21, 0, 0),
+ # ('Rows_read', 8, 1, 21, 21, 0, 0),
+ # ('Rows_sent', 8, 4, 21, 21, 0, 0),
+ # ('Rows_deleted', 8, 1, 21, 21, 0, 0),
+ # ('Rows_inserted', 8, 1, 21, 21, 0, 0),
+ # ('Rows_updated', 8, 1, 21, 21, 0, 0),
+ # ('Select_commands', 8, 1, 21, 21, 0, 0),
+ # ('Update_commands', 8, 1, 21, 21, 0, 0),
+ # ('Other_commands', 8, 4, 21, 21, 0, 0),
+ # ('Commit_transactions', 8, 1, 21, 21, 0, 0),
+ # ('Rollback_transactions', 8, 1, 21, 21, 0, 0),
+ # ('Denied_connections', 8, 4, 21, 21, 0, 0),
+ # ('Lost_connections', 8, 1, 21, 21, 0, 0),
+ # ('Access_denied', 8, 2, 21, 21, 0, 0),
+ # ('Empty_queries', 8, 2, 21, 21, 0, 0),
+ # ('Total_ssl_connections', 8, 1, 21, 21, 0, 0),
+ # ('Max_statement_time_exceeded', 8, 1, 21, 21, 0, 0)),
+ # )
data = dict()
userstats_vars = [e[0] for e in raw_data['user_statistics'][1]]
for i, _ in enumerate(raw_data['user_statistics'][0]):
@@ -742,7 +798,10 @@ class Service(MySQLService):
self.charts['userstats_cpu'].add_dimension(['userstats_{0}_Cpu_time'.format(name), name, 'incremental', 100, 1])
def create_new_userstats_charts(self, tube):
- order, charts = userstats_chart_template(tube)
+ self.add_new_charts(userstats_chart_template, tube)
+
+ def add_new_charts(self, template, *params):
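+ # shared helper: render a chart template and register the resulting
+ # charts with the service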
+ order, charts = template(*params)
for chart_name in order:
params = [chart_name] + charts[chart_name]['options']
diff --git a/collectors/python.d.plugin/nginx/README.md b/collectors/python.d.plugin/nginx/README.md
index 7854105b..ebbdb0f2 100644
--- a/collectors/python.d.plugin/nginx/README.md
+++ b/collectors/python.d.plugin/nginx/README.md
@@ -3,29 +3,34 @@
This module will monitor one or more nginx servers depending on configuration. Servers can be either local or remote.
**Requirements:**
- * nginx with configured 'ngx_http_stub_status_module'
- * 'location /stub_status'
+
+- nginx with configured 'ngx_http_stub_status_module'
+- 'location /stub_status'
Example nginx configuration can be found in 'python.d/nginx.conf'
It produces following charts:
-1. **Active Connections**
- * active
+1. **Active Connections**
+
+ - active
+
+2. **Requests** in requests/s
+
+ - requests
+
+3. **Active Connections by Status**
-2. **Requests** in requests/s
- * requests
+ - reading
+ - writing
+ - waiting
-3. **Active Connections by Status**
- * reading
- * writing
- * waiting
+4. **Connections Rate** in connections/s
-4. **Connections Rate** in connections/s
- * accepts
- * handled
+ - accepts
+ - handled
-### configuration
+## configuration
Needs only `url` to server's `stub_status`
@@ -43,4 +48,4 @@ Without configuration, module attempts to connect to `http://localhost/stub_stat
---
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fnginx%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fnginx%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/nginx_plus/README.md b/collectors/python.d.plugin/nginx_plus/README.md
index c20ce30a..1110c552 100644
--- a/collectors/python.d.plugin/nginx_plus/README.md
+++ b/collectors/python.d.plugin/nginx_plus/README.md
@@ -7,109 +7,134 @@ Example nginx_plus configuration can be found in 'python.d/nginx_plus.conf'
It produces following charts:
-1. **Requests total** in requests/s
- * total
+1. **Requests total** in requests/s
-2. **Requests current** in requests
- * current
+ - total
-3. **Connection Statistics** in connections/s
- * accepted
- * dropped
+2. **Requests current** in requests
-4. **Workers Statistics** in workers
- * idle
- * active
+ - current
-5. **SSL Handshakes** in handshakes/s
- * successful
- * failed
+3. **Connection Statistics** in connections/s
-6. **SSL Session Reuses** in sessions/s
- * reused
+ - accepted
+ - dropped
-7. **SSL Memory Usage** in percent
- * usage
+4. **Workers Statistics** in workers
-8. **Processes** in processes
- * respawned
+ - idle
+ - active
+
+5. **SSL Handshakes** in handshakes/s
+
+ - successful
+ - failed
+
+6. **SSL Session Reuses** in sessions/s
+
+ - reused
+
+7. **SSL Memory Usage** in percent
+
+ - usage
+
+8. **Processes** in processes
+
+ - respawned
For every server zone:
-1. **Processing** in requests
- * processing
+1. **Processing** in requests
+
+- processing
+
+2. **Requests** in requests/s
-2. **Requests** in requests/s
- * requests
+ - requests
-3. **Responses** in requests/s
- * 1xx
- * 2xx
- * 3xx
- * 4xx
- * 5xx
+3. **Responses** in requests/s
-4. **Traffic** in kilobits/s
- * received
- * sent
+ - 1xx
+ - 2xx
+ - 3xx
+ - 4xx
+ - 5xx
+
+4. **Traffic** in kilobits/s
+
+ - received
+ - sent
For every upstream:
-1. **Peers Requests** in requests/s
- * peer name (dimension per peer)
+1. **Peers Requests** in requests/s
+
+ - peer name (dimension per peer)
-2. **All Peers Responses** in responses/s
- * 1xx
- * 2xx
- * 3xx
- * 4xx
- * 5xx
+2. **All Peers Responses** in responses/s
-3. **Peer Responses** in requests/s (for every peer)
- * 1xx
- * 2xx
- * 3xx
- * 4xx
- * 5xx
+ - 1xx
+ - 2xx
+ - 3xx
+ - 4xx
+ - 5xx
-4. **Peers Connections** in active
- * peer name (dimension per peer)
+3. **Peer Responses** in requests/s (for every peer)
-5. **Peers Connections Usage** in percent
- * peer name (dimension per peer)
+ - 1xx
+ - 2xx
+ - 3xx
+ - 4xx
+ - 5xx
-6. **All Peers Traffic** in KB
- * received
- * sent
+4. **Peers Connections** in active
-7. **Peer Traffic** in KB/s (for every peer)
- * received
- * sent
+ - peer name (dimension per peer)
-8. **Peer Timings** in ms (for every peer)
- * header
- * response
+5. **Peers Connections Usage** in percent
-9. **Memory Usage** in percent
- * usage
+ - peer name (dimension per peer)
+
+6. **All Peers Traffic** in KB
+
+ - received
+ - sent
+
+7. **Peer Traffic** in KB/s (for every peer)
+
+ - received
+ - sent
+
+8. **Peer Timings** in ms (for every peer)
+
+ - header
+ - response
+
+9. **Memory Usage** in percent
+
+ - usage
10. **Peers Status** in state
- * peer name (dimension per peer)
+
+ - peer name (dimension per peer)
11. **Peers Total Downtime** in seconds
- * peer name (dimension per peer)
+
+ - peer name (dimension per peer)
For every cache:
-1. **Traffic** in KB
- * served
- * written
- * bypass
+1. **Traffic** in KB
+
+ - served
+ - written
+ - bypass
+
+2. **Memory Usage** in percent
-2. **Memory Usage** in percent
- * usage
+ - usage
-### configuration
+## configuration
Needs only `url` to server's `status`
@@ -124,4 +149,4 @@ Without configuration, module fail to start.
---
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fnginx_plus%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fnginx_plus%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/nsd/README.md b/collectors/python.d.plugin/nsd/README.md
index b118657d..61bc5c45 100644
--- a/collectors/python.d.plugin/nsd/README.md
+++ b/collectors/python.d.plugin/nsd/README.md
@@ -3,54 +3,60 @@
Module uses the `nsd-control stats_noreset` command to provide `nsd` statistics.
**Requirements:**
- * Version of `nsd` must be 4.0+
- * Netdata must have permissions to run `nsd-control stats_noreset`
+
+- Version of `nsd` must be 4.0+
+- Netdata must have permissions to run `nsd-control stats_noreset`
It produces:
-1. **Queries**
- * queries
-
-2. **Zones**
- * master
- * slave
-
-3. **Protocol**
- * udp
- * udp6
- * tcp
- * tcp6
-
-4. **Query Type**
- * A
- * NS
- * CNAME
- * SOA
- * PTR
- * HINFO
- * MX
- * NAPTR
- * TXT
- * AAAA
- * SRV
- * ANY
-
-5. **Transfer**
- * NOTIFY
- * AXFR
-
-6. **Return Code**
- * NOERROR
- * FORMERR
- * SERVFAIL
- * NXDOMAIN
- * NOTIMP
- * REFUSED
- * YXDOMAIN
+1. **Queries**
+
+ - queries
+
+2. **Zones**
+
+ - master
+ - slave
+
+3. **Protocol**
+
+ - udp
+ - udp6
+ - tcp
+ - tcp6
+
+4. **Query Type**
+
+ - A
+ - NS
+ - CNAME
+ - SOA
+ - PTR
+ - HINFO
+ - MX
+ - NAPTR
+ - TXT
+ - AAAA
+ - SRV
+ - ANY
+
+5. **Transfer**
+
+ - NOTIFY
+ - AXFR
+
+6. **Return Code**
+
+   - NOERROR
+ - FORMERR
+ - SERVFAIL
+ - NXDOMAIN
+ - NOTIMP
+ - REFUSED
+ - YXDOMAIN
Configuration is not needed.
---
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fnsd%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fnsd%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/ntpd/README.md b/collectors/python.d.plugin/ntpd/README.md
index d33fd877..d4d0dc60 100644
--- a/collectors/python.d.plugin/ntpd/README.md
+++ b/collectors/python.d.plugin/ntpd/README.md
@@ -3,8 +3,9 @@
Module monitors the system variables of the local `ntpd` daemon (optional incl. variables of the polled peers) using the NTP Control Message Protocol via UDP socket, similar to `ntpq`, the [standard NTP query program](http://doc.ntp.org/current-stable/ntpq.html).
**Requirements:**
- * Version: `NTPv4`
- * Local interrogation allowed in `/etc/ntp.conf` (default):
+
+- Version: `NTPv4`
+- Local interrogation allowed in `/etc/ntp.conf` (default):
```
# Local users may interrogate the ntp server more closely.
@@ -14,31 +15,33 @@ restrict ::1
It produces:
-1. system
- * offset
- * jitter
- * frequency
- * delay
- * dispersion
- * stratum
- * tc
- * precision
-
-2. peers
- * offset
- * delay
- * dispersion
- * jitter
- * rootdelay
- * rootdispersion
- * stratum
- * hmode
- * pmode
- * hpoll
- * ppoll
- * precision
-
-**configuration**
+1. system
+
+ - offset
+ - jitter
+ - frequency
+ - delay
+ - dispersion
+ - stratum
+ - tc
+ - precision
+
+2. peers
+
+ - offset
+ - delay
+ - dispersion
+ - jitter
+ - rootdelay
+ - rootdispersion
+ - stratum
+ - hmode
+ - pmode
+ - hpoll
+ - ppoll
+ - precision
+
+## configuration
Sample:
@@ -70,4 +73,4 @@ If no configuration is given, module will attempt to connect to `ntpd` on `::1:1
---
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fntpd%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fntpd%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
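A hedged sketch of the kind of NTP Control Message (mode 6) exchange described above, similar to `ntpq -c rv`; it assumes the `restrict` defaults shown earlier allow local interrogation, and the packet layout follows the NTPv4 control protocol:

```python
# Minimal NTP mode-6 READVAR request against the local ntpd, for illustration.
import socket
import struct

# leap/version/mode byte 0x16 = version 2, mode 6 (control); opcode 2 = READVAR;
# then sequence, status, association id, offset and count (12 bytes in total)
header = struct.pack('!BBHHHHH', 0x16, 0x02, 1, 0, 0, 0, 0)

sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.settimeout(2)
sock.sendto(header, ('127.0.0.1', 123))
data, _ = sock.recvfrom(1024)
sock.close()

# the payload after the header is a "name=value, name=value, ..." variable list
print(data[12:].decode('ascii', 'replace'))
```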
diff --git a/collectors/python.d.plugin/nvidia_smi/README.md b/collectors/python.d.plugin/nvidia_smi/README.md
index 48b61195..71e3e288 100644
--- a/collectors/python.d.plugin/nvidia_smi/README.md
+++ b/collectors/python.d.plugin/nvidia_smi/README.md
@@ -4,37 +4,39 @@ This module monitors the `nvidia-smi` cli tool.
**Requirements and Notes:**
- * You must have the `nvidia-smi` tool installed and your NVIDIA GPU(s) must support the tool. Mostly the newer high end models used for AI / ML and Crypto or Pro range, read more about [nvidia_smi](https://developer.nvidia.com/nvidia-system-management-interface).
+- You must have the `nvidia-smi` tool installed and your NVIDIA GPU(s) must support the tool (mostly the newer high-end models used for AI/ML, crypto, or the Pro range). Read more about [nvidia_smi](https://developer.nvidia.com/nvidia-system-management-interface).
- * You must enable this plugin as its disabled by default due to minor performance issues.
+- You must enable this plugin, as it's disabled by default due to minor performance issues.
- * On some systems when the GPU is idle the `nvidia-smi` tool unloads and there is added latency again when it is next queried. If you are running GPUs under constant workload this isn't likely to be an issue.
+- On some systems when the GPU is idle the `nvidia-smi` tool unloads and there is added latency again when it is next queried. If you are running GPUs under constant workload this isn't likely to be an issue.
- * Currently the `nvidia-smi` tool is being queried via cli. Updating the plugin to use the nvidia c/c++ API directly should resolve this issue. See discussion here: https://github.com/netdata/netdata/pull/4357
+- Currently the `nvidia-smi` tool is being queried via cli. Updating the plugin to use the nvidia c/c++ API directly should resolve this issue. See discussion here: <https://github.com/netdata/netdata/pull/4357>
- * Contributions are welcome.
+- Contributions are welcome.
- * Make sure `netdata` user can execute `/usr/bin/nvidia-smi` or wherever your binary is.
+- Make sure the `netdata` user can execute `/usr/bin/nvidia-smi` (or wherever your binary is).
- * `poll_seconds` is how often in seconds the tool is polled for as an integer.
+- `poll_seconds` is how often, in seconds (an integer), the tool is polled.
It produces:
-1. Per GPU
- * GPU utilization
- * memory allocation
- * memory utilization
- * fan speed
- * power usage
- * temperature
- * clock speed
- * PCI bandwidth
+1. Per GPU
-### configuration
+ - GPU utilization
+ - memory allocation
+ - memory utilization
+ - fan speed
+ - power usage
+ - temperature
+ - clock speed
+ - PCI bandwidth
+
+## configuration
Sample:
```yaml
poll_seconds: 1
```
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fnvidia_smi%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fnvidia_smi%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/openldap/README.md b/collectors/python.d.plugin/openldap/README.md
index 629cc153..f1f9de58 100644
--- a/collectors/python.d.plugin/openldap/README.md
+++ b/collectors/python.d.plugin/openldap/README.md
@@ -4,42 +4,48 @@ This module provides statistics information from openldap (slapd) server.
Statistics are taken from LDAP monitoring interface. Manual page, slapd-monitor(5) is available.
**Requirement:**
-* Follow instructions from https://www.openldap.org/doc/admin24/monitoringslapd.html to activate monitoring interface.
-* Install python ldap module `pip install ldap` or `yum install python-ldap`
-* Modify openldap.conf with your credentials
+
+- Follow instructions from <https://www.openldap.org/doc/admin24/monitoringslapd.html> to activate monitoring interface.
+- Install the python ldap module: `pip install python-ldap` or `yum install python-ldap`
+- Modify openldap.conf with your credentials
### Module gives information with following charts:
-1. **connections**
- * total connections number
+1. **connections**
+
+ - total connections number
+
+2. **Bytes**
+
+ - sent
+
+3. **operations**
+
+ - completed
+ - initiated
-2. **Bytes**
- * sent
+4. **referrals**
-3. **operations**
- * completed
- * initiated
+ - sent
-4. **referrals**
- * sent
+5. **entries**
-5. **entries**
- * sent
+ - sent
-6. **ldap operations**
- * bind
- * search
- * unbind
- * add
- * delete
- * modify
- * compare
+6. **ldap operations**
-7. **waiters**
- * read
- * write
+ - bind
+ - search
+ - unbind
+ - add
+ - delete
+ - modify
+ - compare
+7. **waiters**
+
+   - read
+ - write
### configuration
@@ -56,4 +62,4 @@ openldap:
---
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fopenldap%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fopenldap%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
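As an illustration of the monitoring interface the requirements point at, a hedged sketch that reads one counter from slapd's `cn=Monitor` backend with python-ldap; the bind DN and password are placeholders for the credentials you put in `openldap.conf`:

```python
# Read the total-connections counter from slapd's monitor backend.
import ldap

conn = ldap.initialize('ldap://127.0.0.1:389')
conn.simple_bind_s('cn=netdata,dc=example,dc=com', 'password')  # placeholder credentials
result = conn.search_s(
    'cn=Total,cn=Connections,cn=Monitor',  # one of the monitor counters
    ldap.SCOPE_BASE,
    attrlist=['monitorCounter'],
)
dn, attrs = result[0]
print(dn, attrs['monitorCounter'][0])
conn.unbind_s()
```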
diff --git a/collectors/python.d.plugin/oracledb/README.md b/collectors/python.d.plugin/oracledb/README.md
index 2e5972a0..708f261d 100644
--- a/collectors/python.d.plugin/oracledb/README.md
+++ b/collectors/python.d.plugin/oracledb/README.md
@@ -3,46 +3,48 @@
Module monitor oracledb performance and health metrics.
**Requirements:**
- - `cx_Oracle` package.
- - Oracle Client (using `cx_Oracle` requires Oracle Client libraries to be installed).
+
+- `cx_Oracle` package.
+- Oracle Client (using `cx_Oracle` requires Oracle Client libraries to be installed).
It produces following charts:
- - session activity
- - Session Count
- - Session Limit Usage
- - Logons
- - disk activity
- - Physical Disk Reads/Writes
- - Sorts On Disk
- - Full Table Scans
- - database and buffer activity
- - Database Wait Time Ratio
- - Shared Pool Free Memory
- - In-Memory Sorts Ratio
- - SQL Service Response Time
- - User Rollbacks
- - Enqueue Timeouts
- - cache
- - Cache Hit Ratio
- - Global Cache Blocks Events
- - activities
- - Activities
- - wait time
- - Wait Time
- - tablespace
- - Size
- - Usage
- - Usage In Percent
-
-### prerequisite
+
+- session activity
+ - Session Count
+ - Session Limit Usage
+ - Logons
+- disk activity
+ - Physical Disk Reads/Writes
+ - Sorts On Disk
+ - Full Table Scans
+- database and buffer activity
+ - Database Wait Time Ratio
+ - Shared Pool Free Memory
+ - In-Memory Sorts Ratio
+ - SQL Service Response Time
+ - User Rollbacks
+ - Enqueue Timeouts
+- cache
+ - Cache Hit Ratio
+ - Global Cache Blocks Events
+- activities
+ - Activities
+- wait time
+ - Wait Time
+- tablespace
+ - Size
+ - Usage
+ - Usage In Percent
+
+## prerequisite
To use the Oracle module do the following:
-1. Install `cx_Oracle` package ([link](https://cx-oracle.readthedocs.io/en/latest/installation.html#install-cx-oracle)).
+1. Install `cx_Oracle` package ([link](https://cx-oracle.readthedocs.io/en/latest/installation.html#install-cx-oracle)).
-2. Install Oracle Client libraries ([link](https://cx-oracle.readthedocs.io/en/latest/installation.html#install-oracle-client)).
+2. Install Oracle Client libraries ([link](https://cx-oracle.readthedocs.io/en/latest/installation.html#install-oracle-client)).
-3. Create a read-only netdata user with proper access to your Oracle Database Server.
+3. Create a read-only `netdata` user with proper access to your Oracle Database Server.
Connect to your Oracle database with an administrative user and execute:
@@ -53,8 +55,7 @@ CREATE USER netdata IDENTIFIED BY <PASSWORD>;
GRANT CONNECT TO netdata;
GRANT SELECT_CATALOG_ROLE TO netdata;
-
-```
+```
### configuration
@@ -74,4 +75,4 @@ remote:
All parameters are required. Without them module will fail to start.
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Foracledb%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Foracledb%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
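A hedged sketch of a `cx_Oracle` session like the one the module opens with the read-only `netdata` user created above; the DSN values and the `gv$sysmetric` query are illustrative, not the module's exact query set:

```python
# Connect as the read-only netdata user and read current system metrics.
import cx_Oracle

dsn = cx_Oracle.makedsn('localhost', 1521, service_name='XE')  # placeholder DSN
conn = cx_Oracle.connect(user='netdata', password='secret', dsn=dsn)  # placeholder password
cursor = conn.cursor()
cursor.execute('SELECT metric_name, value FROM gv$sysmetric WHERE group_id = 2')
for metric_name, value in cursor:
    print(metric_name, value)
conn.close()
```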
diff --git a/collectors/python.d.plugin/ovpn_status_log/README.md b/collectors/python.d.plugin/ovpn_status_log/README.md
index bcd1f00e..053e3f0d 100644
--- a/collectors/python.d.plugin/ovpn_status_log/README.md
+++ b/collectors/python.d.plugin/ovpn_status_log/README.md
@@ -4,23 +4,25 @@ Module monitor openvpn-status log file.
**Requirements:**
- * If you are running multiple OpenVPN instances out of the same directory, MAKE SURE TO EDIT DIRECTIVES which create output files
- so that multiple instances do not overwrite each other's output files.
+- If you are running multiple OpenVPN instances out of the same directory, MAKE SURE TO EDIT DIRECTIVES which create output files
+ so that multiple instances do not overwrite each other's output files.
- * Make sure NETDATA USER CAN READ openvpn-status.log
+- Make sure NETDATA USER CAN READ openvpn-status.log
- * Update_every interval MUST MATCH interval on which OpenVPN writes operational status to log file.
+- Update_every interval MUST MATCH interval on which OpenVPN writes operational status to log file.
It produces:
-1. **Users** OpenVPN active users
- * users
+1. **Users** OpenVPN active users
-2. **Traffic** OpenVPN overall bandwidth usage in kilobit/s
- * in
- * out
+ - users
-### configuration
+2. **Traffic** OpenVPN overall bandwidth usage in kilobit/s
+
+ - in
+ - out
+
+## configuration
Sample:
@@ -31,4 +33,4 @@ default
---
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fovpn_status_log%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fovpn_status_log%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/phpfpm/README.md b/collectors/python.d.plugin/phpfpm/README.md
index d3aa85a7..5f828433 100644
--- a/collectors/python.d.plugin/phpfpm/README.md
+++ b/collectors/python.d.plugin/phpfpm/README.md
@@ -3,24 +3,28 @@
This module will monitor one or more php-fpm instances depending on configuration.
**Requirements:**
- * php-fpm with enabled `status` page
- * access to `status` page via web server
+
+- php-fpm with enabled `status` page
+- access to `status` page via web server
It produces following charts:
-1. **Active Connections**
- * active
- * maxActive
- * idle
+1. **Active Connections**
+
+ - active
+ - maxActive
+ - idle
+
+2. **Requests** in requests/s
+
+ - requests
-2. **Requests** in requests/s
- * requests
+3. **Performance**
-3. **Performance**
- * reached
- * slow
+ - reached
+ - slow
-### configuration
+## configuration
Needs only `url` to server's `status`
@@ -38,4 +42,4 @@ Without configuration, module attempts to connect to `http://localhost/status`
---
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fphpfpm%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fphpfpm%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/portcheck/README.md b/collectors/python.d.plugin/portcheck/README.md
index 8f289c8d..2bbea10c 100644
--- a/collectors/python.d.plugin/portcheck/README.md
+++ b/collectors/python.d.plugin/portcheck/README.md
@@ -4,18 +4,19 @@ Module monitors a remote TCP service.
Following charts are drawn per host:
-1. **Latency** ms
- * Time required to connect to a TCP port.
- Displays latency in 0.1 ms resolution. If the connection failed, the value is missing.
+1. **Latency** ms
-2. **Status** boolean
- * Connection successful
- * Could not create socket: possible DNS problems
- * Connection refused: port not listening or blocked
- * Connection timed out: host or port unreachable
+ - Time required to connect to a TCP port.
+ Displays latency in 0.1 ms resolution. If the connection failed, the value is missing.
+2. **Status** boolean
-### configuration
+ - Connection successful
+ - Could not create socket: possible DNS problems
+ - Connection refused: port not listening or blocked
+ - Connection timed out: host or port unreachable
+
+## configuration
```yaml
server:
@@ -27,11 +28,11 @@ server:
### notes
- * The error chart is intended for alarms, badges or for access via API.
- * A system/service/firewall might block netdata's access if a portscan or
- similar is detected.
- * Currently, the accuracy of the latency is low and should be used as reference only.
+- The error chart is intended for alarms, badges or for access via API.
+- A system/service/firewall might block Netdata's access if a portscan or
+ similar is detected.
+- Currently, the accuracy of the latency is low and should be used as reference only.
---
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fportcheck%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fportcheck%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
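The latency and status dimensions above boil down to timing a plain TCP connect; a hedged sketch, with host, port, and timeout as placeholders:

```python
# Time a TCP connect and classify the outcome like the Status chart does.
import socket
import time

host, port, timeout = '127.0.0.1', 22, 1.0  # placeholders
start = time.time()
try:
    sock = socket.create_connection((host, port), timeout=timeout)
    sock.close()
    print('success: connected in {0:.1f} ms'.format((time.time() - start) * 1000))
except socket.timeout:
    print('timeout: host or port unreachable')
except socket.error as error:
    print('failed: port not listening, blocked, or DNS problem ({0})'.format(error))
```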
diff --git a/collectors/python.d.plugin/postfix/README.md b/collectors/python.d.plugin/postfix/README.md
index e2147ac9..5d2822c1 100644
--- a/collectors/python.d.plugin/postfix/README.md
+++ b/collectors/python.d.plugin/postfix/README.md
@@ -4,14 +4,16 @@ Simple module executing `postfix -p` to grab postfix queue.
It produces only two charts:
-1. **Postfix Queue Emails**
- * emails
+1. **Postfix Queue Emails**
-2. **Postfix Queue Emails Size** in KB
- * size
+ - emails
+
+2. **Postfix Queue Emails Size** in KB
+
+ - size
Configuration is not needed.
---
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fpostfix%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fpostfix%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/postgres/README.md b/collectors/python.d.plugin/postgres/README.md
index 052de94f..29dd85a5 100644
--- a/collectors/python.d.plugin/postgres/README.md
+++ b/collectors/python.d.plugin/postgres/README.md
@@ -4,50 +4,60 @@ Module monitors one or more postgres servers.
**Requirements:**
- * `python-psycopg2` package. You have to install it manually.
+- `python-psycopg2` package. You have to install it manually.
Following charts are drawn:
-1. **Database size** MB
- * size
+1. **Database size** MB
-2. **Current Backend Processes** processes
- * active
+ - size
-3. **Write-Ahead Logging Statistics** files/s
- * total
- * ready
- * done
+2. **Current Backend Processes** processes
-4. **Checkpoints** writes/s
- * scheduled
- * requested
+ - active
-5. **Current connections to db** count
- * connections
+3. **Write-Ahead Logging Statistics** files/s
-6. **Tuples returned from db** tuples/s
- * sequential
- * bitmap
+ - total
+ - ready
+ - done
-7. **Tuple reads from db** reads/s
- * disk
- * cache
+4. **Checkpoints** writes/s
-8. **Transactions on db** transactions/s
- * committed
- * rolled back
+ - scheduled
+ - requested
-9. **Tuples written to db** writes/s
- * inserted
- * updated
- * deleted
- * conflicts
+5. **Current connections to db** count
+
+ - connections
+
+6. **Tuples returned from db** tuples/s
+
+ - sequential
+ - bitmap
+
+7. **Tuple reads from db** reads/s
+
+ - disk
+ - cache
+
+8. **Transactions on db** transactions/s
+
+ - committed
+ - rolled back
+
+9. **Tuples written to db** writes/s
+
+ - inserted
+ - updated
+ - deleted
+ - conflicts
10. **Locks on db** count per type
- * locks
-### configuration
+ - locks
+
+## configuration
For all available options please see module [configuration file](postgres.conf).
@@ -69,4 +79,4 @@ When no configuration file is found, module tries to connect to TCP/IP socket: `
---
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fpostgres%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fpostgres%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
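A hedged sketch of the sort of query behind the **Database size** chart, using the `python-psycopg2` package from the requirements; the connection parameters are placeholders:

```python
# Fetch per-database sizes, roughly what the 'Database size' chart plots.
import psycopg2

conn = psycopg2.connect(host='127.0.0.1', port=5432, user='postgres', dbname='postgres')
cur = conn.cursor()
cur.execute('SELECT datname, pg_database_size(datname) FROM pg_database WHERE datallowconn')
for datname, size_bytes in cur.fetchall():
    print(datname, size_bytes / 1024.0 / 1024.0, 'MB')
cur.close()
conn.close()
```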
diff --git a/collectors/python.d.plugin/powerdns/README.md b/collectors/python.d.plugin/powerdns/README.md
index 61aa5f6b..ac6e12f3 100644
--- a/collectors/python.d.plugin/powerdns/README.md
+++ b/collectors/python.d.plugin/powerdns/README.md
@@ -4,67 +4,78 @@ Module monitor powerdns performance and health metrics.
Powerdns charts:
-1. **Queries and Answers**
- * udp-queries
- * udp-answers
- * tcp-queries
- * tcp-answers
-
-2. **Cache Usage**
- * query-cache-hit
- * query-cache-miss
- * packetcache-hit
- * packetcache-miss
-
-3. **Cache Size**
- * query-cache-size
- * packetcache-size
- * key-cache-size
- * meta-cache-size
-
-4. **Latency**
- * latency
+1. **Queries and Answers**
+
+ - udp-queries
+ - udp-answers
+ - tcp-queries
+ - tcp-answers
+
+2. **Cache Usage**
+
+ - query-cache-hit
+ - query-cache-miss
+ - packetcache-hit
+ - packetcache-miss
+
+3. **Cache Size**
+
+ - query-cache-size
+ - packetcache-size
+ - key-cache-size
+ - meta-cache-size
+
+4. **Latency**
+
+ - latency
Powerdns Recursor charts:
- 1. **Questions In**
- * questions
- * ipv6-questions
- * tcp-queries
-
-2. **Questions Out**
- * all-outqueries
- * ipv6-outqueries
- * tcp-outqueries
- * throttled-outqueries
-
-3. **Answer Times**
- * answers-slow
- * answers0-1
- * answers1-10
- * answers10-100
- * answers100-1000
-
-4. **Timeouts**
- * outgoing-timeouts
- * outgoing4-timeouts
- * outgoing6-timeouts
-
-5. **Drops**
- * over-capacity-drops
-
-6. **Cache Usage**
- * cache-hits
- * cache-misses
- * packetcache-hits
- * packetcache-misses
-
-7. **Cache Size**
- * cache-entries
- * packetcache-entries
- * negcache-entries
-
-### configuration
+1. **Questions In**
+
+ - questions
+ - ipv6-questions
+ - tcp-queries
+
+2. **Questions Out**
+
+ - all-outqueries
+ - ipv6-outqueries
+ - tcp-outqueries
+ - throttled-outqueries
+
+3. **Answer Times**
+
+ - answers-slow
+ - answers0-1
+ - answers1-10
+ - answers10-100
+ - answers100-1000
+
+4. **Timeouts**
+
+ - outgoing-timeouts
+ - outgoing4-timeouts
+ - outgoing6-timeouts
+
+5. **Drops**
+
+ - over-capacity-drops
+
+6. **Cache Usage**
+
+ - cache-hits
+ - cache-misses
+ - packetcache-hits
+ - packetcache-misses
+
+7. **Cache Size**
+
+ - cache-entries
+ - packetcache-entries
+ - negcache-entries
+
+## configuration
```yaml
local:
@@ -76,4 +87,4 @@ local:
---
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fpowerdns%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fpowerdns%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/proxysql/README.md b/collectors/python.d.plugin/proxysql/README.md
index 6e5a2127..23a67751 100644
--- a/collectors/python.d.plugin/proxysql/README.md
+++ b/collectors/python.d.plugin/proxysql/README.md
@@ -4,49 +4,68 @@ This module monitors proxysql backend and frontend performance metrics.
It produces:
-1. **Connections (frontend)**
- * connected: number of frontend connections currently connected
- * aborted: number of frontend connections aborted due to invalid credential or max_connections reached
- * non_idle: number of frontend connections that are not currently idle
- * created: number of frontend connections created
-2. **Questions (frontend)**
- * questions: total number of queries sent from frontends
- * slow_queries: number of queries that ran for longer than the threshold in milliseconds defined in global variable `mysql-long_query_time`
-3. **Overall Bandwith (backends)**
- * in
- * out
-4. **Status (backends)**
- * Backends
- * `1=ONLINE`: backend server is fully operational
- * `2=SHUNNED`: backend sever is temporarily taken out of use because of either too many connection errors in a time that was too short, or replication lag exceeded the allowed threshold
- * `3=OFFLINE_SOFT`: when a server is put into OFFLINE_SOFT mode, new incoming connections aren't accepted anymore, while the existing connections are kept until they became inactive. In other words, connections are kept in use until the current transaction is completed. This allows to gracefully detach a backend
- * `4=OFFLINE_HARD`: when a server is put into OFFLINE_HARD mode, the existing connections are dropped, while new incoming connections aren't accepted either. This is equivalent to deleting the server from a hostgroup, or temporarily taking it out of the hostgroup for maintenance work
- * `-1`: Unknown status
-5. **Bandwith (backends)**
- * Backends
- * in
- * out
-6. **Queries (backends)**
- * Backends
- * queries
-7. **Latency (backends)**
- * Backends
- * ping time
-8. **Pool connections (backends)**
- * Backends
- * Used: The number of connections are currently used by ProxySQL for sending queries to the backend server.
- * Free: The number of connections are currently free.
- * Established/OK: The number of connections were established successfully.
- * Error: The number of connections weren't established successfully.
-9. **Commands**
- * Commands
- * Count
- * Duration (Total duration for each command)
+1. **Connections (frontend)**
+
+ - connected: number of frontend connections currently connected
+   - aborted: number of frontend connections aborted due to invalid credentials or max_connections reached
+ - non_idle: number of frontend connections that are not currently idle
+ - created: number of frontend connections created
+
+2. **Questions (frontend)**
+
+ - questions: total number of queries sent from frontends
+ - slow_queries: number of queries that ran for longer than the threshold in milliseconds defined in global variable `mysql-long_query_time`
+
+3. **Overall Bandwidth (backends)**
+
+ - in
+ - out
+
+4. **Status (backends)**
+
+ - Backends
+ - `1=ONLINE`: backend server is fully operational
+     - `2=SHUNNED`: backend server is temporarily taken out of use, either because of too many connection errors in too short a time, or because replication lag exceeded the allowed threshold
+     - `3=OFFLINE_SOFT`: when a server is put into OFFLINE_SOFT mode, new incoming connections aren't accepted anymore, while the existing connections are kept until they become inactive. In other words, connections are kept in use until the current transaction is completed. This allows a backend to be detached gracefully
+ - `4=OFFLINE_HARD`: when a server is put into OFFLINE_HARD mode, the existing connections are dropped, while new incoming connections aren't accepted either. This is equivalent to deleting the server from a hostgroup, or temporarily taking it out of the hostgroup for maintenance work
+ - `-1`: Unknown status
+
+5. **Bandwidth (backends)**
+
+ - Backends
+ - in
+ - out
+
+6. **Queries (backends)**
+
+ - Backends
+ - queries
+
+7. **Latency (backends)**
+
+ - Backends
+ - ping time
+
+8. **Pool connections (backends)**
+
+ - Backends
+     - Used: The number of connections currently used by ProxySQL for sending queries to the backend server.
+     - Free: The number of connections currently free.
+     - Established/OK: The number of connections that were established successfully.
+     - Error: The number of connections that weren't established successfully.
+
+9. **Commands**
+
+ - Commands
+ - Count
+ - Duration (Total duration for each command)
+
10. **Commands Histogram**
- * Commands
- * 100us, 500us, ..., 10s, inf: the total number of commands of the given type which executed within the specified time limit and the previous one.
-### configuration
+ - Commands
+      - 100us, 500us, ..., 10s, inf: the total number of commands of the given type that executed within the specified time limit but above the previous one.
+
+## configuration
```yaml
tcpipv4:
@@ -61,4 +80,4 @@ If no configuration is given, module will fail to run.
---
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fproxysql%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fproxysql%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
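ProxySQL exposes these statistics over its MySQL-protocol admin interface; a hedged sketch of reading the global counters, assuming the common `stats`/`stats` credentials on port 6032 (yours may differ):

```python
# Query ProxySQL's stats schema over the admin interface.
import pymysql  # any MySQL client library works the same way

conn = pymysql.connect(host='127.0.0.1', port=6032, user='stats', password='stats')
cursor = conn.cursor()
cursor.execute('SELECT Variable_Name, Variable_Value FROM stats_mysql_global')
for name, value in cursor.fetchall():
    print(name, value)
conn.close()
```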
diff --git a/collectors/python.d.plugin/puppet/README.md b/collectors/python.d.plugin/puppet/README.md
index b97eb70c..295db014 100644
--- a/collectors/python.d.plugin/puppet/README.md
+++ b/collectors/python.d.plugin/puppet/README.md
@@ -4,21 +4,27 @@ Monitor status of Puppet Server and Puppet DB.
Following charts are drawn:
-1. **JVM Heap**
- * committed (allocated from OS)
- * used (actual use)
-2. **JVM Non-Heap**
- * committed (allocated from OS)
- * used (actual use)
-3. **CPU Usage**
- * execution
- * GC (taken by garbage collection)
-4. **File Descriptors**
- * max
- * used
-
-
-### configuration
+1. **JVM Heap**
+
+ - committed (allocated from OS)
+ - used (actual use)
+
+2. **JVM Non-Heap**
+
+ - committed (allocated from OS)
+ - used (actual use)
+
+3. **CPU Usage**
+
+ - execution
+ - GC (taken by garbage collection)
+
+4. **File Descriptors**
+
+ - max
+ - used
+
+## configuration
```yaml
puppetdb:
@@ -36,12 +42,12 @@ When no configuration is given, module uses `https://fqdn.example.com:8140`.
### notes
-* Exact Fully Qualified Domain Name of the node should be used.
-* Usually Puppet Server/DB startup time is VERY long. So, there should
- be quite reasonable retry count.
-* Secure PuppetDB config may require client certificate. Not applies
- to default PuppetDB configuration though.
+- The exact fully qualified domain name of the node should be used.
+- Puppet Server/DB startup time is usually VERY long, so the retry count
+  should be set reasonably high.
+- A secure PuppetDB config may require a client certificate. This does not
+  apply to the default PuppetDB configuration, though.
---
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fpuppet%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fpuppet%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/python.d.plugin b/collectors/python.d.plugin/python.d.plugin
new file mode 100644
index 00000000..468df13a
--- /dev/null
+++ b/collectors/python.d.plugin/python.d.plugin
@@ -0,0 +1,733 @@
+#!/usr/bin/env bash
+'''':;
+if [[ "$OSTYPE" == "darwin"* ]]; then
+ export OBJC_DISABLE_INITIALIZE_FORK_SAFETY=YES
+fi
+exec "$(command -v python || command -v python3 || command -v python2 ||
+echo "ERROR python IS NOT AVAILABLE IN THIS SYSTEM")" "$0" "$@" # '''
+
+# -*- coding: utf-8 -*-
+# Description:
+# Author: Pawel Krupa (paulfantom)
+# Author: Ilya Mashchenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+
+import collections
+import copy
+import gc
+import multiprocessing
+import os
+import re
+import sys
+import time
+import threading
+import types
+
+PY_VERSION = sys.version_info[:2]
+
+if PY_VERSION > (3, 1):
+ from importlib.machinery import SourceFileLoader
+else:
+ from imp import load_source as SourceFileLoader
+
+
+ENV_NETDATA_USER_CONFIG_DIR = 'NETDATA_USER_CONFIG_DIR'
+ENV_NETDATA_STOCK_CONFIG_DIR = 'NETDATA_STOCK_CONFIG_DIR'
+ENV_NETDATA_PLUGINS_DIR = 'NETDATA_PLUGINS_DIR'
+ENV_NETDATA_UPDATE_EVERY = 'NETDATA_UPDATE_EVERY'
+
+
+def dirs():
+ user_config = os.getenv(
+ ENV_NETDATA_USER_CONFIG_DIR,
+ '/etc/netdata',
+ )
+ stock_config = os.getenv(
+ ENV_NETDATA_STOCK_CONFIG_DIR,
+ '/usr/lib/netdata/conf.d',
+ )
+ modules_user_config = os.path.join(user_config, 'python.d')
+ modules_stock_config = os.path.join(stock_config, 'python.d')
+
+ modules = os.path.abspath(
+ os.getenv(
+ ENV_NETDATA_PLUGINS_DIR,
+ os.path.dirname(__file__),
+ ) + '/../python.d'
+ )
+ pythond_packages = os.path.join(modules, 'python_modules')
+
+ return collections.namedtuple(
+ 'Dirs',
+ [
+ 'user_config',
+ 'stock_config',
+ 'modules_user_config',
+ 'modules_stock_config',
+ 'modules',
+ 'pythond_packages',
+ ]
+ )(
+ user_config,
+ stock_config,
+ modules_user_config,
+ modules_stock_config,
+ modules,
+ pythond_packages,
+ )
+
+
+DIRS = dirs()
+
+sys.path.append(DIRS.pythond_packages)
+
+
+from bases.collection import safe_print
+from bases.loggers import PythonDLogger
+from bases.loaders import load_config
+
+try:
+ from collections import OrderedDict
+except ImportError:
+ from third_party.ordereddict import OrderedDict
+
+
+END_TASK_MARKER = None
+
+IS_ATTY = sys.stdout.isatty()
+
+PLUGIN_CONF_FILE = 'python.d.conf'
+
+MODULE_SUFFIX = '.chart.py'
+
+OBSOLETED_MODULES = (
+ 'apache_cache', # replaced by web_log
+ 'cpuidle', # rewritten in C
+ 'cpufreq', # rewritten in C
+ 'gunicorn_log', # replaced by web_log
+ 'linux_power_supply', # rewritten in C
+ 'nginx_log', # replaced by web_log
+ 'mdstat', # rewritten in C
+ 'sslcheck', # memory leak bug https://github.com/netdata/netdata/issues/5624
+)
+
+
+AVAILABLE_MODULES = [
+ m[:-len(MODULE_SUFFIX)] for m in sorted(os.listdir(DIRS.modules))
+ if m.endswith(MODULE_SUFFIX) and m[:-len(MODULE_SUFFIX)] not in OBSOLETED_MODULES
+]
+
+PLUGIN_BASE_CONF = {
+ 'enabled': True,
+ 'default_run': True,
+ 'gc_run': True,
+ 'gc_interval': 300,
+}
+
+JOB_BASE_CONF = {
+ 'update_every': os.getenv(ENV_NETDATA_UPDATE_EVERY, 1),
+ 'priority': 60000,
+ 'autodetection_retry': 0,
+ 'chart_cleanup': 10,
+ 'penalty': True,
+ 'name': str(),
+}
+
+
+def heartbeat():
+ if IS_ATTY:
+ return
+ safe_print('\n')
+
+
+class HeartBeat(threading.Thread):
+ def __init__(self, every):
+ threading.Thread.__init__(self)
+ self.daemon = True
+ self.every = every
+
+ def run(self):
+ while True:
+ time.sleep(self.every)
+ heartbeat()
+
+
+def load_module(name):
+ abs_path = os.path.join(DIRS.modules, '{0}{1}'.format(name, MODULE_SUFFIX))
+ module = SourceFileLoader(name, abs_path)
+ if isinstance(module, types.ModuleType):
+ return module
+ return module.load_module()
+
+
+def multi_path_find(name, paths):
+ for path in paths:
+ abs_name = os.path.join(path, name)
+ if os.path.isfile(abs_name):
+ return abs_name
+ return ''
+
+
+Task = collections.namedtuple(
+ 'Task',
+ [
+ 'module_name',
+ 'explicitly_enabled',
+ ],
+)
+
+Result = collections.namedtuple(
+ 'Result',
+ [
+ 'module_name',
+ 'jobs_configs',
+ ],
+)
+
+
+class ModuleChecker(multiprocessing.Process):
+ def __init__(
+ self,
+ task_queue,
+ result_queue,
+ ):
+ multiprocessing.Process.__init__(self)
+ self.log = PythonDLogger()
+ self.log.job_name = 'checker'
+ self.task_queue = task_queue
+ self.result_queue = result_queue
+
+ def run(self):
+ self.log.info('starting...')
+ HeartBeat(1).start()
+ while self.run_once():
+ pass
+ self.log.info('terminating...')
+
+ def run_once(self):
+ task = self.task_queue.get()
+
+ if task is END_TASK_MARKER:
+ # TODO: find better solution, understand why heartbeat thread doesn't work
+ heartbeat()
+ self.task_queue.task_done()
+ self.result_queue.put(END_TASK_MARKER)
+ return False
+
+ result = self.do_task(task)
+ if result:
+ self.result_queue.put(result)
+ self.task_queue.task_done()
+
+ return True
+
+ def do_task(self, task):
+ self.log.info("{0} : checking".format(task.module_name))
+
+ # LOAD SOURCE
+ module = Module(task.module_name)
+ try:
+ module.load_source()
+ except Exception as error:
+ self.log.warning("{0} : error on loading source : {1}, skipping module".format(
+ task.module_name,
+ error,
+ ))
+ return None
+ else:
+ self.log.info("{0} : source successfully loaded".format(task.module_name))
+
+ if module.is_disabled_by_default() and not task.explicitly_enabled:
+ self.log.info("{0} : disabled by default".format(task.module_name))
+ return None
+
+ # LOAD CONFIG
+ paths = [
+ DIRS.modules_user_config,
+ DIRS.modules_stock_config,
+ ]
+
+ conf_abs_path = multi_path_find(
+ name='{0}.conf'.format(task.module_name),
+ paths=paths,
+ )
+
+ if conf_abs_path:
+ self.log.info("{0} : found config file '{1}'".format(task.module_name, conf_abs_path))
+ try:
+ module.load_config(conf_abs_path)
+ except Exception as error:
+ self.log.warning("{0} : error on loading config : {1}, skipping module".format(
+ task.module_name, error))
+ return None
+ else:
+ self.log.info("{0} : config was not found in '{1}', using default 1 job config".format(
+ task.module_name, paths))
+
+ # CHECK JOBS
+ jobs = module.create_jobs()
+ self.log.info("{0} : created {1} job(s) from the config".format(task.module_name, len(jobs)))
+
+ successful_jobs_configs = list()
+ for job in jobs:
+ if job.autodetection_retry() > 0:
+ successful_jobs_configs.append(job.config)
+ self.log.info("{0}[{1}]: autodetection job, will be checked in main".format(task.module_name, job.name))
+ continue
+
+ try:
+ job.init()
+ except Exception as error:
+                self.log.warning("{0}[{1}] : unhandled exception on init : {2}, skipping the job".format(
+ task.module_name, job.name, error))
+ continue
+
+ try:
+ ok = job.check()
+ except Exception as error:
+ self.log.warning("{0}[{1}] : unhandled exception on check : {2}, skipping the job".format(
+ task.module_name, job.name, error))
+ continue
+
+ if not ok:
+ self.log.info("{0}[{1}] : check failed, skipping the job".format(task.module_name, job.name))
+ continue
+
+ self.log.info("{0}[{1}] : check successful".format(task.module_name, job.name))
+
+ job.config['autodetection_retry'] = job.config['update_every']
+ successful_jobs_configs.append(job.config)
+
+ if not successful_jobs_configs:
+ self.log.info("{0} : all jobs failed, skipping module".format(task.module_name))
+ return None
+
+ return Result(module.source.__name__, successful_jobs_configs)
+
+
+class JobConf(OrderedDict):
+ def __init__(self, *args):
+ OrderedDict.__init__(self, *args)
+
+ def set_defaults_from_module(self, module):
+ for k in [k for k in JOB_BASE_CONF if hasattr(module, k)]:
+ self[k] = getattr(module, k)
+
+ def set_defaults_from_config(self, module_config):
+ for k in [k for k in JOB_BASE_CONF if k in module_config]:
+ self[k] = module_config[k]
+
+ def set_job_name(self, name):
+ self['job_name'] = re.sub(r'\s+', '_', name)
+
+ def set_override_name(self, name):
+ self['override_name'] = re.sub(r'\s+', '_', name)
+
+ def as_dict(self):
+ return copy.deepcopy(OrderedDict(self))
+
+
+class Job:
+ def __init__(
+ self,
+ service,
+ module_name,
+ config,
+ ):
+ self.service = service
+ self.config = config
+ self.module_name = module_name
+ self.name = config['job_name']
+ self.override_name = config['override_name']
+ self.wrapped = None
+
+ def init(self):
+ self.wrapped = self.service(configuration=self.config.as_dict())
+
+ def check(self):
+ return self.wrapped.check()
+
+ def post_check(self, min_update_every):
+ if self.wrapped.update_every < min_update_every:
+ self.wrapped.update_every = min_update_every
+
+ def create(self):
+ return self.wrapped.create()
+
+ def autodetection_retry(self):
+ return self.config['autodetection_retry']
+
+ def run(self):
+ self.wrapped.run()
+
+
+class Module:
+ def __init__(self, name):
+ self.name = name
+ self.source = None
+ self.config = dict()
+
+ def is_disabled_by_default(self):
+ return bool(getattr(self.source, 'disabled_by_default', False))
+
+ def load_source(self):
+ self.source = load_module(self.name)
+
+ def load_config(self, abs_path):
+ self.config = load_config(abs_path) or dict()
+
+ def gather_jobs_configs(self):
+ job_names = [v for v in self.config if isinstance(self.config[v], dict)]
+
+ if len(job_names) == 0:
+ job_conf = JobConf(JOB_BASE_CONF)
+ job_conf.set_defaults_from_module(self.source)
+ job_conf.update(self.config)
+ job_conf.set_job_name(self.name)
+ job_conf.set_override_name(job_conf.pop('name'))
+ return [job_conf]
+
+ configs = list()
+ for job_name in job_names:
+ raw_job_conf = self.config[job_name]
+ job_conf = JobConf(JOB_BASE_CONF)
+ job_conf.set_defaults_from_module(self.source)
+ job_conf.set_defaults_from_config(self.config)
+ job_conf.update(raw_job_conf)
+ job_conf.set_job_name(job_name)
+ job_conf.set_override_name(job_conf.pop('name'))
+ configs.append(job_conf)
+
+ return configs
+
+ def create_jobs(self, jobs_conf=None):
+ return [Job(self.source.Service, self.name, conf) for conf in jobs_conf or self.gather_jobs_configs()]
+
+
+class JobRunner(threading.Thread):
+ def __init__(self, job):
+ threading.Thread.__init__(self)
+ self.daemon = True
+ self.wrapped = job
+
+ def run(self):
+ self.wrapped.run()
+
+
+class PluginConf(dict):
+ def __init__(self, *args):
+ dict.__init__(self, *args)
+
+ def is_module_enabled(self, module_name, explicit):
+ if module_name in self:
+ return self[module_name]
+ if explicit:
+ return False
+ return self['default_run']
+
+
+class Plugin:
+ def __init__(
+ self,
+ min_update_every=1,
+ modules_to_run=tuple(AVAILABLE_MODULES),
+ ):
+ self.log = PythonDLogger()
+ self.config = PluginConf(PLUGIN_BASE_CONF)
+ self.task_queue = multiprocessing.JoinableQueue()
+ self.result_queue = multiprocessing.JoinableQueue()
+ self.min_update_every = min_update_every
+ self.modules_to_run = modules_to_run
+ self.auto_detection_jobs = list()
+ self.tasks = list()
+ self.results = list()
+ self.checked_jobs = collections.defaultdict(list)
+ self.runs = 0
+
+ @staticmethod
+ def shutdown():
+ safe_print('DISABLE')
+ exit(0)
+
+ def run(self):
+ jobs = self.create_jobs()
+ if not jobs:
+ return
+
+ for job in self.prepare_jobs(jobs):
+ self.log.info('{0}[{1}] : started in thread'.format(job.module_name, job.name))
+ JobRunner(job).start()
+
+ self.serve()
+
+ def enqueue_tasks(self):
+ for task in self.tasks:
+ self.task_queue.put(task)
+ self.task_queue.put(END_TASK_MARKER)
+
+ def dequeue_results(self):
+ while True:
+ result = self.result_queue.get()
+ self.result_queue.task_done()
+ if result is END_TASK_MARKER:
+ break
+ self.results.append(result)
+
+ def load_config(self):
+ paths = [
+ DIRS.user_config,
+ DIRS.stock_config,
+ ]
+
+ self.log.info("checking for config in {0}".format(paths))
+ abs_path = multi_path_find(name=PLUGIN_CONF_FILE, paths=paths)
+ if not abs_path:
+ self.log.warning('config was not found, using defaults')
+ return True
+
+ self.log.info("config found, loading config '{0}'".format(abs_path))
+ try:
+ config = load_config(abs_path) or dict()
+ except Exception as error:
+ self.log.error('error on loading config : {0}'.format(error))
+ return False
+
+ self.log.info('config successfully loaded')
+ self.config.update(config)
+ return True
+
+ def setup(self):
+ self.log.info('starting setup')
+ if not self.load_config():
+ return False
+
+ if not self.config['enabled']:
+ self.log.info('disabled in configuration file')
+ return False
+
+ for mod in self.modules_to_run:
+ if self.config.is_module_enabled(mod, False):
+ task = Task(mod, self.config.is_module_enabled(mod, True))
+ self.tasks.append(task)
+ else:
+ self.log.info("{0} : disabled in configuration file".format(mod))
+
+ if not self.tasks:
+ self.log.info('no modules to run')
+ return False
+
+ worker = ModuleChecker(self.task_queue, self.result_queue)
+ self.log.info('starting checker process ({0} module(s) to check)'.format(len(self.tasks)))
+ worker.start()
+
+ # TODO: timeouts?
+ self.enqueue_tasks()
+ self.task_queue.join()
+ self.dequeue_results()
+ self.result_queue.join()
+ self.task_queue.close()
+ self.result_queue.close()
+ self.log.info('stopping checker process')
+ worker.join()
+
+ if not self.results:
+ self.log.info('no modules to run')
+ return False
+
+ self.log.info("setup complete, {0} active module(s) : '{1}'".format(
+ len(self.results),
+ [v.module_name for v in self.results])
+ )
+
+ return True
+
+ def create_jobs(self):
+ jobs = list()
+ for result in self.results:
+ module = Module(result.module_name)
+ try:
+ module.load_source()
+ except Exception as error:
+ self.log.warning("{0} : error on loading module source : {1}, skipping module".format(
+ result.module_name, error))
+ continue
+
+ module_jobs = module.create_jobs(result.jobs_configs)
+ self.log.info("{0} : created {1} job(s)".format(module.name, len(module_jobs)))
+ jobs.extend(module_jobs)
+
+ return jobs
+
+ def prepare_jobs(self, jobs):
+ prepared = list()
+
+ for job in jobs:
+ check_name = job.override_name or job.name
+ if check_name in self.checked_jobs[job.module_name]:
+ self.log.info('{0}[{1}] : already served by another job, skipping the job'.format(
+ job.module_name, job.name))
+ continue
+
+ try:
+ job.init()
+ except Exception as error:
+ self.log.warning("{0}[{1}] : unhandled exception on init : {2}, skipping the job".format(
+ job.module_name, job.name, error))
+ continue
+
+ self.log.info("{0}[{1}] : init successful".format(job.module_name, job.name))
+
+ try:
+ ok = job.check()
+ except Exception as error:
+ self.log.warning("{0}[{1}] : unhandled exception on check : {2}, skipping the job".format(
+ job.module_name, job.name, error))
+ continue
+
+ if not ok:
+ self.log.info('{0}[{1}] : check failed'.format(job.module_name, job.name))
+ if job.autodetection_retry() > 0:
+ self.log.info('{0}[{1}] : will recheck every {2} second(s)'.format(
+ job.module_name, job.name, job.autodetection_retry()))
+ self.auto_detection_jobs.append(job)
+ continue
+
+ self.log.info('{0}[{1}] : check successful'.format(job.module_name, job.name))
+
+ job.post_check(int(self.min_update_every))
+
+ if not job.create():
+ self.log.info('{0}[{1}] : create failed'.format(job.module_name, job.name))
+
+ self.checked_jobs[job.module_name].append(check_name)
+ prepared.append(job)
+
+ return prepared
+
+ def serve(self):
+ gc_run = self.config['gc_run']
+ gc_interval = self.config['gc_interval']
+
+ while True:
+ self.runs += 1
+
+ # threads: main + heartbeat
+ if threading.active_count() <= 2 and not self.auto_detection_jobs:
+ return
+
+ time.sleep(1)
+
+ if gc_run and self.runs % gc_interval == 0:
+ v = gc.collect()
+ self.log.debug('GC collection run result: {0}'.format(v))
+
+ self.auto_detection_jobs = [job for job in self.auto_detection_jobs if not self.retry_job(job)]
+
+ def retry_job(self, job):
+ stop_retrying = True
+ retry_later = False
+
+ if self.runs % job.autodetection_retry() != 0:
+ return retry_later
+
+ check_name = job.override_name or job.name
+ if check_name in self.checked_jobs[job.module_name]:
+ self.log.info("{0}[{1}]: already served by another job, give up on retrying".format(
+ job.module_name, job.name))
+ return stop_retrying
+
+ try:
+ ok = job.check()
+ except Exception as error:
+ self.log.warning("{0}[{1}] : unhandled exception on recheck : {2}, give up on retrying".format(
+ job.module_name, job.name, error))
+ return stop_retrying
+
+ if not ok:
+ self.log.info('{0}[{1}] : recheck failed, will retry in {2} second(s)'.format(
+ job.module_name, job.name, job.autodetection_retry()))
+ return retry_later
+ self.log.info('{0}[{1}] : recheck successful'.format(job.module_name, job.name))
+
+ if not job.create():
+ return stop_retrying
+
+ job.post_check(int(self.min_update_every))
+ self.checked_jobs[job.module_name].append(check_name)
+ JobRunner(job).start()
+
+ return stop_retrying
+
+
+def parse_cmd():
+ opts = sys.argv[:][1:]
+ debug = False
+ trace = False
+ update_every = 1
+ modules_to_run = list()
+
+ v = next((opt for opt in opts if opt.isdigit() and int(opt) >= 1), None)
+ if v:
+ update_every = v
+ opts.remove(v)
+ if 'debug' in opts:
+ debug = True
+ opts.remove('debug')
+ if 'trace' in opts:
+ trace = True
+ opts.remove('trace')
+ if opts:
+ modules_to_run = list(opts)
+
+ return collections.namedtuple(
+ 'CMD',
+ [
+ 'update_every',
+ 'debug',
+ 'trace',
+ 'modules_to_run',
+ ],
+ )(
+ update_every,
+ debug,
+ trace,
+ modules_to_run,
+ )
+
+
+def main():
+ cmd = parse_cmd()
+ logger = PythonDLogger()
+
+ if cmd.debug:
+ logger.logger.severity = 'DEBUG'
+ if cmd.trace:
+ logger.log_traceback = True
+
+ logger.info('using python v{0}'.format(PY_VERSION[0]))
+
+ unknown_modules = set(cmd.modules_to_run) - set(AVAILABLE_MODULES)
+ if unknown_modules:
+ logger.error('unknown modules : {0}'.format(sorted(list(unknown_modules))))
+ safe_print('DISABLE')
+ return
+
+ plugin = Plugin(
+ cmd.update_every,
+ cmd.modules_to_run or AVAILABLE_MODULES,
+ )
+
+ HeartBeat(1).start()
+
+ if not plugin.setup():
+ safe_print('DISABLE')
+ return
+
+ plugin.run()
+ logger.info('exiting from main...')
+ plugin.shutdown()
+
+
+if __name__ == '__main__':
+ main()
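The script above ultimately drives netdata's external-plugin protocol over stdout (the `safe_print('DISABLE')` calls and the heartbeat newlines are part of it). A hedged sketch of the minimal chart/value stream a job ends up emitting; the chart and dimension names are illustrative:

```python
# Minimal illustration of the stdout protocol an external plugin speaks.
from __future__ import print_function
import sys
import time

# declare a chart once: type.id name title units family context charttype priority update_every
print("CHART example.random '' 'A random number' value random '' line 60000 1")
print('DIMENSION rand rand absolute 1 1')
sys.stdout.flush()

for _ in range(3):  # then stream values every update_every seconds
    print('BEGIN example.random')
    print('SET rand = {0}'.format(int(time.time()) % 100))
    print('END')
    sys.stdout.flush()
    time.sleep(1)
```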
diff --git a/collectors/python.d.plugin/rabbitmq/README.md b/collectors/python.d.plugin/rabbitmq/README.md
index 4ac60605..346cc23f 100644
--- a/collectors/python.d.plugin/rabbitmq/README.md
+++ b/collectors/python.d.plugin/rabbitmq/README.md
@@ -4,42 +4,51 @@ Module monitor rabbitmq performance and health metrics.
Following charts are drawn:
-1. **Queued Messages**
- * ready
- * unacknowledged
+1. **Queued Messages**
-2. **Message Rates**
- * ack
- * redelivered
- * deliver
- * publish
+ - ready
+ - unacknowledged
-3. **Global Counts**
- * channels
- * consumers
- * connections
- * queues
- * exchanges
+2. **Message Rates**
-4. **File Descriptors**
- * used descriptors
+ - ack
+ - redelivered
+ - deliver
+ - publish
-5. **Socket Descriptors**
- * used descriptors
+3. **Global Counts**
-6. **Erlang processes**
- * used processes
+ - channels
+ - consumers
+ - connections
+ - queues
+ - exchanges
-7. **Erlang run queue**
- * Erlang run queue
+4. **File Descriptors**
-8. **Memory**
- * free memory in megabytes
+ - used descriptors
-9. **Disk Space**
- * free disk space in gigabytes
+5. **Socket Descriptors**
-### configuration
+ - used descriptors
+
+6. **Erlang processes**
+
+ - used processes
+
+7. **Erlang run queue**
+
+ - Erlang run queue
+
+8. **Memory**
+
+ - free memory in megabytes
+
+9. **Disk Space**
+
+ - free disk space in gigabytes
+
+## configuration
```yaml
socket:
@@ -48,11 +57,10 @@ socket:
port : 15672
user : 'guest'
pass : 'guest'
-
```
When no configuration file is found, the module tries to connect to `localhost:15672`.
---
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Frabbitmq%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Frabbitmq%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/redis/README.md b/collectors/python.d.plugin/redis/README.md
index 0bea0376..e7ddd382 100644
--- a/collectors/python.d.plugin/redis/README.md
+++ b/collectors/python.d.plugin/redis/README.md
@@ -4,27 +4,33 @@ Get INFO data from redis instance.
The following charts are drawn:
-1. **Operations** per second
- * operations
+1. **Operations** per second
-2. **Hit rate** in percent
- * rate
+ - operations
-3. **Memory utilization** in kilobytes
- * total
- * lua
+2. **Hit rate** in percent
-4. **Database keys**
- * lines are creates dynamically based on how many databases are there
+ - rate
-5. **Clients**
- * connected
- * blocked
+3. **Memory utilization** in kilobytes
-6. **Slaves**
- * connected
+ - total
+ - lua
-### configuration
+4. **Database keys**
+
+  - lines are created dynamically based on how many databases there are
+
+5. **Clients**
+
+ - connected
+ - blocked
+
+6. **Slaves**
+
+ - connected
+
+## configuration
```yaml
socket:
@@ -41,4 +47,4 @@ When no configuration file is found, module tries to connect to TCP/IP socket: `
---
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fredis%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fredis%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/rethinkdbs/README.md b/collectors/python.d.plugin/rethinkdbs/README.md
index 183c7f73..27715433 100644
--- a/collectors/python.d.plugin/rethinkdbs/README.md
+++ b/collectors/python.d.plugin/rethinkdbs/README.md
@@ -4,23 +4,26 @@ Module monitor rethinkdb health metrics.
The following charts are drawn:
-1. **Connected Servers**
- * connected
- * missing
+1. **Connected Servers**
-2. **Active Clients**
- * active
+ - connected
+ - missing
-3. **Queries** per second
- * queries
+2. **Active Clients**
-4. **Documents** per second
- * documents
+ - active
-### configuration
+3. **Queries** per second
-```yaml
+ - queries
+
+4. **Documents** per second
+
+  - documents
+
+## configuration
+
+```yaml
localhost:
name : 'local'
host : '127.0.0.1'
@@ -33,4 +36,4 @@ When no configuration file is found, module tries to connect to `127.0.0.1:28015
---
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Frethinkdbs%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Frethinkdbs%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/retroshare/README.md b/collectors/python.d.plugin/retroshare/README.md
index a8a58880..9a82f2ff 100644
--- a/collectors/python.d.plugin/retroshare/README.md
+++ b/collectors/python.d.plugin/retroshare/README.md
@@ -1,3 +1,3 @@
# retroshare
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fretroshare%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fretroshare%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/riakkv/README.md b/collectors/python.d.plugin/riakkv/README.md
index 0bcf22c5..04343dd9 100644
--- a/collectors/python.d.plugin/riakkv/README.md
+++ b/collectors/python.d.plugin/riakkv/README.md
@@ -4,95 +4,97 @@ Monitors one or more Riak KV servers.
**Requirements:**
-* An accessible `/stats` endpoint. See [the Riak KV configuration reference]
- documentation](https://docs.riak.com/riak/kv/2.2.3/configuring/reference/#client-interfaces)
- for how to enable this.
+- An accessible `/stats` endpoint. See [the Riak KV configuration reference documentation](https://docs.riak.com/riak/kv/2.2.3/configuring/reference/#client-interfaces)
+ for how to enable this.
The following charts are included, which are mostly derived from the metrics
listed
[here](https://docs.riak.com/riak/kv/latest/using/reference/statistics-monitoring/index.html#riak-metrics-to-graph).
-1. **Throughput** in operations/s
- * **KV operations**
- * gets
- * puts
+1. **Throughput** in operations/s
- * **Data type updates**
- * counters
- * sets
- * maps
+- **KV operations**
+ - gets
+ - puts
- * **Search queries**
- * queries
+- **Data type updates**
+ - counters
+ - sets
+ - maps
- * **Search documents**
- * indexed
+- **Search queries**
+ - queries
- * **Strong consistency operations**
- * gets
- * puts
+- **Search documents**
+ - indexed
-2. **Latency** in milliseconds
- * **KV latency** of the past minute
- * get (mean, median, 95th / 99th / 100th percentile)
- * put (mean, median, 95th / 99th / 100th percentile)
+- **Strong consistency operations**
+ - gets
+ - puts
- * **Data type latency** of the past minute
- * counter_merge (mean, median, 95th / 99th / 100th percentile)
- * set_merge (mean, median, 95th / 99th / 100th percentile)
- * map_merge (mean, median, 95th / 99th / 100th percentile)
+2. **Latency** in milliseconds
- * **Search latency** of the past minute
- * query (median, min, max, 95th / 99th percentile)
- * index (median, min, max, 95th / 99th percentile)
+- **KV latency** of the past minute
+ - get (mean, median, 95th / 99th / 100th percentile)
+ - put (mean, median, 95th / 99th / 100th percentile)
- * **Strong consistency latency** of the past minute
- * get (mean, median, 95th / 99th / 100th percentile)
- * put (mean, median, 95th / 99th / 100th percentile)
+- **Data type latency** of the past minute
+ - counter_merge (mean, median, 95th / 99th / 100th percentile)
+ - set_merge (mean, median, 95th / 99th / 100th percentile)
+ - map_merge (mean, median, 95th / 99th / 100th percentile)
-3. **Erlang VM metrics**
- * **System counters**
- * processes
+- **Search latency** of the past minute
+ - query (median, min, max, 95th / 99th percentile)
+ - index (median, min, max, 95th / 99th percentile)
- * **Memory allocation** in MB
- * processes.allocated
- * processes.used
+- **Strong consistency latency** of the past minute
+ - get (mean, median, 95th / 99th / 100th percentile)
+ - put (mean, median, 95th / 99th / 100th percentile)
-4. **General load / health metrics**
- * **Siblings encountered in KV operations** during the past minute
- * get (mean, median, 95th / 99th / 100th percentile)
+3. **Erlang VM metrics**
- * **Object size in KV operations** during the past minute in KB
- * get (mean, median, 95th / 99th / 100th percentile)
+- **System counters**
+ - processes
- * **Message queue length** in unprocessed messages
- * vnodeq_size (mean, median, 95th / 99th / 100th percentile)
+- **Memory allocation** in MB
+ - processes.allocated
+ - processes.used
- * **Index operations** encountered by Search
- * errors
+4. **General load / health metrics**
- * **Protocol buffer connections**
- * active
+- **Siblings encountered in KV operations** during the past minute
+ - get (mean, median, 95th / 99th / 100th percentile)
- * **Repair operations coordinated by this node**
- * read
+- **Object size in KV operations** during the past minute in KB
+ - get (mean, median, 95th / 99th / 100th percentile)
- * **Active finite state machines by kind**
- * get
- * put
- * secondary_index
- * list_keys
+- **Message queue length** in unprocessed messages
+ - vnodeq_size (mean, median, 95th / 99th / 100th percentile)
- * **Rejected finite state machines**
- * get
- * put
+- **Index operations** encountered by Search
+ - errors
- * **Number of writes to Search failed due to bad data format by reason**
- * bad_entry
- * extract_fail
+- **Protocol buffer connections**
+ - active
+- **Repair operations coordinated by this node**
+ - read
-### configuration
+- **Active finite state machines by kind**
+ - get
+ - put
+ - secondary_index
+ - list_keys
+
+- **Rejected finite state machines**
+ - get
+ - put
+
+- **Number of writes to Search failed due to bad data format by reason**
+ - bad_entry
+ - extract_fail
+
+## configuration
The module needs to be passed the full URL to Riak's stats endpoint.
For example:
diff --git a/collectors/python.d.plugin/samba/README.md b/collectors/python.d.plugin/samba/README.md
index 97f2e3d3..ad99dead 100644
--- a/collectors/python.d.plugin/samba/README.md
+++ b/collectors/python.d.plugin/samba/README.md
@@ -3,60 +3,71 @@
Performance metrics of Samba file sharing.
**Requirements:**
-* `smbstatus` program
-* `sudo` program
-* `smbd` must be compiled with profiling enabled
-* `smbd` must be started either with the `-P 1` option or inside `smb.conf` using `smbd profiling level`
-* `netdata` user needs to be able to sudo the `smbstatus` program without password
+
+- `smbstatus` program
+- `sudo` program
+- `smbd` must be compiled with profiling enabled
+- `smbd` must be started either with the `-P 1` option or inside `smb.conf` using `smbd profiling level`
+- the `netdata` user needs to be able to run the `smbstatus` program via `sudo` without a password
It produces the following charts:
-1. **Syscall R/Ws** in kilobytes/s
- * sendfile
- * recvfle
-
-2. **Smb2 R/Ws** in kilobytes/s
- * readout
- * writein
- * readin
- * writeout
-
-3. **Smb2 Create/Close** in operations/s
- * create
- * close
-
-4. **Smb2 Info** in operations/s
- * getinfo
- * setinfo
-
-5. **Smb2 Find** in operations/s
- * find
-
-6. **Smb2 Notify** in operations/s
- * notify
-
-7. **Smb2 Lesser Ops** as counters
- * tcon
- * negprot
- * tdis
- * cancel
- * logoff
- * flush
- * lock
- * keepalive
- * break
- * sessetup
-
-### prerequisite
+1. **Syscall R/Ws** in kilobytes/s
+
+ - sendfile
+ - recvfle
+
+2. **Smb2 R/Ws** in kilobytes/s
+
+ - readout
+ - writein
+ - readin
+ - writeout
+
+3. **Smb2 Create/Close** in operations/s
+
+ - create
+ - close
+
+4. **Smb2 Info** in operations/s
+
+ - getinfo
+ - setinfo
+
+5. **Smb2 Find** in operations/s
+
+ - find
+
+6. **Smb2 Notify** in operations/s
+
+ - notify
+
+7. **Smb2 Lesser Ops** as counters
+
+ - tcon
+ - negprot
+ - tdis
+ - cancel
+ - logoff
+ - flush
+ - lock
+ - keepalive
+ - break
+ - sessetup
+
+## prerequisite
+
This module uses `smbstatus`, which can only be executed by root. It uses
`sudo` and assumes that it is configured such that the `netdata` user can
execute `smbstatus` as root without a password.
Add to `sudoers`:
- netdata ALL=(root) NOPASSWD: /path/to/smbstatus
+```
+netdata ALL=(root) NOPASSWD: /path/to/smbstatus
+```
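+
+As a quick check (the path is illustrative; adjust it to your system), the following should print profiling output without prompting for a password:
+
+```sh
+sudo -u netdata sudo -n /path/to/smbstatus -P
+```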
-### configuration
+## configuration
**samba** is disabled by default. It must be explicitly enabled in `python.d.conf`.
@@ -66,4 +77,4 @@ samba: yes
---
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fsamba%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fsamba%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/sensors/README.md b/collectors/python.d.plugin/sensors/README.md
index e3f956f1..1c0613c7 100644
--- a/collectors/python.d.plugin/sensors/README.md
+++ b/collectors/python.d.plugin/sensors/README.md
@@ -4,7 +4,7 @@ System sensors information.
Charts are created dynamically.
-### configuration
+## configuration
For detailed configuration information please read the [`sensors.conf`](sensors.conf) file.
@@ -16,4 +16,4 @@ Please join this discussion for help.
---
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fsensors%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fsensors%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/smartd_log/README.md b/collectors/python.d.plugin/smartd_log/README.md
index f6584be7..6f4dda50 100644
--- a/collectors/python.d.plugin/smartd_log/README.md
+++ b/collectors/python.d.plugin/smartd_log/README.md
@@ -3,43 +3,44 @@
This module monitors `smartd` log files to collect HDD/SSD S.M.A.R.T. attributes.
**Requirements:**
-* `smartmontools`
-It produces following charts for SCSI devices:
+- `smartmontools`
-1. **Read Error Corrected**
+It produces the following charts for SCSI devices:
-2. **Read Error Uncorrected**
+1. **Read Error Corrected**
-3. **Write Error Corrected**
+2. **Read Error Uncorrected**
-4. **Write Error Uncorrected**
+3. **Write Error Corrected**
-5. **Verify Error Corrected**
+4. **Write Error Uncorrected**
-6. **Verify Error Uncorrected**
+5. **Verify Error Corrected**
-7. **Temperature**
+6. **Verify Error Uncorrected**
+7. **Temperature**
For ATA devices:
-1. **Read Error Rate**
-2. **Seek Error Rate**
+1. **Read Error Rate**
+
+2. **Seek Error Rate**
-3. **Soft Read Error Rate**
+3. **Soft Read Error Rate**
-4. **Write Error Rate**
+4. **Write Error Rate**
-5. **SATA Interface Downshift**
+5. **SATA Interface Downshift**
-6. **UDMA CRC Error Count**
+6. **UDMA CRC Error Count**
-7. **Throughput Performance**
+7. **Throughput Performance**
-8. **Seek Time Performance**
+8. **Seek Time Performance**
-9. **Start/Stop Count**
+9. **Start/Stop Count**
10. **Power-On Hours Count**
@@ -75,25 +76,28 @@ For ATA devices:
26. **Percent Lifetime Used**
-### prerequisite
+## prerequisite
+
`smartd` must be running with the `-A` option to write smartd attribute information to files.
For this you need to set `smartd_opts` (or `SMARTD_ARGS`, check _smartd.service_ content) in `/etc/default/smartmontools`:
-
```
# dump smartd attrs info every 600 seconds
smartd_opts="-A /var/log/smartd/ -i 600"
```
+
You may need to create the smartd directory before smartd will write to it:
-```
+
+```sh
mkdir -p /var/log/smartd
```
-Otherwise, all the smartd `.csv` files may get written to `/var/lib/smartmontools` (default location). See also [https://linux.die.net/man/8/smartd](https://linux.die.net/man/8/smartd) for more info on the `-A --attributelog=PREFIX` command.
+
+Otherwise, all the smartd `.csv` files may get written to `/var/lib/smartmontools` (default location). See also <https://linux.die.net/man/8/smartd> for more info on the `-A --attributelog=PREFIX` command.
`smartd` appends logs at every run. It's strongly recommended to use `logrotate` for smartd files.
-### configuration
+## configuration
```yaml
local:
@@ -104,4 +108,4 @@ If no configuration is given, module will attempt to read log files in `/var/log
---
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fsmartd_log%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fsmartd_log%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/spigotmc/README.md b/collectors/python.d.plugin/spigotmc/README.md
index c3893055..8b74913d 100644
--- a/collectors/python.d.plugin/spigotmc/README.md
+++ b/collectors/python.d.plugin/spigotmc/README.md
@@ -9,7 +9,7 @@ active users.
This is not compatible with Spigot plugins which change the format of
the data returned by the `tps` or `list` console commands.
-### configuration
+## configuration
```yaml
host: localhost
@@ -21,4 +21,4 @@ By default, a connection to port 25575 on the local system is attempted with an
---
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fspigotmc%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fspigotmc%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/spigotmc/spigotmc.chart.py b/collectors/python.d.plugin/spigotmc/spigotmc.chart.py
index 536fbe6a..79d17058 100644
--- a/collectors/python.d.plugin/spigotmc/spigotmc.chart.py
+++ b/collectors/python.d.plugin/spigotmc/spigotmc.chart.py
@@ -52,7 +52,13 @@ _TPS_REGEX = re.compile(
re.X
)
_LIST_REGEX = re.compile(
- r'(\d+)', # Current user count.
+ # Examples:
+ # There are 4 of a max 50 players online: player1, player2, player3, player4
+ # §6There are §c4§6 out of maximum §c50§6 players online.
+ # §6There are §c3§6/§c1§6 out of maximum §c50§6 players online.
+ # §6当前有 §c4§6 个玩家在线,最大在线人数为 §c50§6 个玩家.
+ # §c4§6 人のプレイヤーが接続中です。最大接続可能人数\:§c 50
+ r'[^§](\d+)(?:.*?(?=/).*?[^§](\d+))?', # Current user count.
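+    # group(1) is the visible player count; group(2) matches only the "n/m"
+    # form and is counted as additional (hidden) players where parsed below.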
re.X
)
@@ -85,6 +91,7 @@ class Service(SimpleService):
self.console.connect(self.host, self.port, self.password)
def reconnect(self):
+        self.error('trying to reconnect')
try:
try:
self.console.disconnect()
@@ -99,7 +106,7 @@ class Service(SimpleService):
return True
def is_alive(self):
- if not any(
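+        # reconnect when the handshake is gone or the TCP socket has left the
+        # ESTABLISHED state (first byte of TCP_INFO, state 1 == established)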
+ if any(
[
not self.alive,
self.console.socket.getsockopt(socket.IPPROTO_TCP, socket.TCP_INFO, 0) != 1
@@ -139,7 +146,13 @@ class Service(SimpleService):
raw = self.console.command(COMMAND_ONLINE)
match = _LIST_REGEX.search(raw)
if match:
- data['users'] = int(match.group(1))
+ users = int(match.group(1))
+ hidden_users = match.group(2)
+ if hidden_users:
+ hidden_users = int(hidden_users)
+ else:
+ hidden_users = 0
+ data['users'] = users + hidden_users
else:
if not raw:
                self.error("'{0}' and '{1}' commands returned no value, make sure you set the correct password".format(
diff --git a/collectors/python.d.plugin/spigotmc/spigotmc.conf b/collectors/python.d.plugin/spigotmc/spigotmc.conf
index ccb5e263..f0064ea2 100644
--- a/collectors/python.d.plugin/spigotmc/spigotmc.conf
+++ b/collectors/python.d.plugin/spigotmc/spigotmc.conf
@@ -62,5 +62,5 @@
# In addition to the above, spigotmc supports the following:
#
# host: localhost # The host to connect to. Defaults to the local system.
-# port: 25575 # THe port the remote console is listening on.
+# port: 25575 # The port the remote console is listening on.
# password: '' # The remote console password. Must be set correctly.
diff --git a/collectors/python.d.plugin/springboot/README.md b/collectors/python.d.plugin/springboot/README.md
index b5b776dd..75cfa22e 100644
--- a/collectors/python.d.plugin/springboot/README.md
+++ b/collectors/python.d.plugin/springboot/README.md
@@ -6,11 +6,13 @@ Netdata can be used to monitor running Java [Spring Boot](https://spring.io/) ap
## Configuration
The Spring Boot Actuator exposes these metrics over HTTP and is very easy to use:
-* add `org.springframework.boot:spring-boot-starter-actuator` to your application dependencies
-* set `endpoints.metrics.sensitive=false` in your `application.properties`
+
+- add `org.springframework.boot:spring-boot-starter-actuator` to your application dependencies
+- set `endpoints.metrics.sensitive=false` in your `application.properties`
You can create custom metrics by adding and injecting a `PublicMetrics` into your application.
Here is an example that adds custom metrics:
+
```java
package com.example;
@@ -65,26 +67,30 @@ Please refer [Spring Boot Actuator: Production-ready features](https://docs.spri
## Charts
-1. **Response Codes** in requests/s
- * 1xx
- * 2xx
- * 3xx
- * 4xx
- * 5xx
- * others
+1. **Response Codes** in requests/s
+
+ - 1xx
+ - 2xx
+ - 3xx
+ - 4xx
+ - 5xx
+ - others
+
+2. **Threads**
-2. **Threads**
- * daemon
- * total
+ - daemon
+ - total
-3. **GC Time** in milliseconds and **GC Operations** in operations/s
- * Copy
- * MarkSweep
- * ...
+3. **GC Time** in milliseconds and **GC Operations** in operations/s
-4. **Heap Mmeory Usage** in KB
- * used
- * committed
+ - Copy
+ - MarkSweep
+ - ...
+
+4. **Heap Memory Usage** in KB
+
+ - used
+ - committed
## Usage
@@ -93,6 +99,7 @@ The springboot module is enabled by default. It looks up `http://localhost:8080/
This module defines some common charts, and you can add custom charts by changing the configuration.
The configuration format is as follows:
+
```yaml
<id>:
name: '<name>'
@@ -121,4 +128,4 @@ The dimension name of extras charts should replace `.` to `_`.
Please check [springboot.conf](springboot.conf) for more examples.
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fspringboot%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fspringboot%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/squid/README.md b/collectors/python.d.plugin/squid/README.md
index b278f419..e1e3d074 100644
--- a/collectors/python.d.plugin/squid/README.md
+++ b/collectors/python.d.plugin/squid/README.md
@@ -4,25 +4,29 @@ This module will monitor one or more squid instances depending on configuration.
It produces the following charts:
-1. **Client Bandwidth** in kilobits/s
- * in
- * out
- * hits
+1. **Client Bandwidth** in kilobits/s
-2. **Client Requests** in requests/s
- * requests
- * hits
- * errors
+ - in
+ - out
+ - hits
-3. **Server Bandwidth** in kilobits/s
- * in
- * out
+2. **Client Requests** in requests/s
-4. **Server Requests** in requests/s
- * requests
- * errors
+ - requests
+ - hits
+ - errors
-### configuration
+3. **Server Bandwidth** in kilobits/s
+
+ - in
+ - out
+
+4. **Server Requests** in requests/s
+
+ - requests
+ - errors
+
+## configuration
```yaml
priority : 50000
@@ -37,4 +41,4 @@ Without any configuration module will try to autodetect where squid presents its
---
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fsquid%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fsquid%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/tomcat/README.md b/collectors/python.d.plugin/tomcat/README.md
index 21e3896a..4d492c2d 100644
--- a/collectors/python.d.plugin/tomcat/README.md
+++ b/collectors/python.d.plugin/tomcat/README.md
@@ -4,20 +4,24 @@ Present tomcat containers memory utilization.
Charts:
-1. **Requests** per second
- * accesses
+1. **Requests** per second
-2. **Volume** in KB/s
- * volume
+ - accesses
-3. **Threads**
- * current
- * busy
+2. **Volume** in KB/s
-4. **JVM Free Memory** in MB
- * jvm
+ - volume
-### configuration
+3. **Threads**
+
+ - current
+ - busy
+
+4. **JVM Free Memory** in MB
+
+ - jvm
+
+## configuration
```yaml
localhost:
@@ -32,4 +36,4 @@ So it will probably fail.
---
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Ftomcat%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Ftomcat%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/tor/README.md b/collectors/python.d.plugin/tor/README.md
index 2ce0f25f..40905a95 100644
--- a/collectors/python.d.plugin/tor/README.md
+++ b/collectors/python.d.plugin/tor/README.md
@@ -3,16 +3,18 @@
This module connects to the Tor control port to collect traffic statistics.
**Requirements:**
-* `tor` program
-* `stem` python package
+
+- `tor` program
+- `stem` python package
It produces only one chart:
-1. **Traffic**
- * read
- * write
+1. **Traffic**
+
+ - read
+ - write
-### configuration
+## configuration
Needs only `control_port`
@@ -45,4 +47,4 @@ Without configuration, module attempts to connect to `127.0.0.1:9051`.
---
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Ftor%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Ftor%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/traefik/README.md b/collectors/python.d.plugin/traefik/README.md
index 61e0fdb7..9ced6060 100644
--- a/collectors/python.d.plugin/traefik/README.md
+++ b/collectors/python.d.plugin/traefik/README.md
@@ -4,37 +4,42 @@ Module uses the `health` API to provide statistics.
It produces:
-1. **Responses** by statuses
- * success (1xx, 2xx, 304)
- * error (5xx)
- * redirect (3xx except 304)
- * bad (4xx)
- * other (all other responses)
+1. **Responses** by statuses
-2. **Responses** by codes
- * 2xx (successful)
- * 5xx (internal server errors)
- * 3xx (redirect)
- * 4xx (bad)
- * 1xx (informational)
- * other (non-standart responses)
+ - success (1xx, 2xx, 304)
+ - error (5xx)
+ - redirect (3xx except 304)
+ - bad (4xx)
+ - other (all other responses)
-3. **Detailed Response Codes** requests/s (number of responses for each response code family individually)
+2. **Responses** by codes
-4. **Requests**/s
- * request statistics
+ - 2xx (successful)
+ - 5xx (internal server errors)
+ - 3xx (redirect)
+ - 4xx (bad)
+ - 1xx (informational)
+  - other (non-standard responses)
-5. **Total response time**
- * sum of all response time
+3. **Detailed Response Codes** requests/s (number of responses for each response code family individually)
-6. **Average response time**
+4. **Requests**/s
-7. **Average response time per iteration**
+ - request statistics
-8. **Uptime**
- * Traefik server uptime
+5. **Total response time**
-### configuration
+ - sum of all response time
+
+6. **Average response time**
+
+7. **Average response time per iteration**
+
+8. **Uptime**
+
+ - Traefik server uptime
+
+## configuration
It needs only the `url` of the server's `health` endpoint.
@@ -52,4 +57,4 @@ Without configuration, module attempts to connect to `http://localhost:8080/heal
---
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Ftraefik%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Ftraefik%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/unbound/README.md b/collectors/python.d.plugin/unbound/README.md
index d9cbc0b8..d4ad3da1 100644
--- a/collectors/python.d.plugin/unbound/README.md
+++ b/collectors/python.d.plugin/unbound/README.md
@@ -4,37 +4,41 @@ Monitoring uses the remote control interface to fetch statistics.
Provides the following charts:
-1. **Queries Processed**
- * Ratelimited
- * Cache Misses
- * Cache Hits
- * Expired
- * Prefetched
- * Recursive
-
-2. **Request List**
- * Average Size
- * Max Size
- * Overwritten Requests
- * Overruns
- * Current Size
- * User Requests
-
-3. **Recursion Timings**
- * Average recursion processing time
- * Median recursion processing time
+1. **Queries Processed**
+
+ - Ratelimited
+ - Cache Misses
+ - Cache Hits
+ - Expired
+ - Prefetched
+ - Recursive
+
+2. **Request List**
+
+ - Average Size
+ - Max Size
+ - Overwritten Requests
+ - Overruns
+ - Current Size
+ - User Requests
+
+3. **Recursion Timings**
+
+  - Average recursion processing time
+  - Median recursion processing time
If extended stats are enabled, also provides:
-4. **Cache Sizes**
- * Message Cache
- * RRset Cache
- * Infra Cache
- * DNSSEC Key Cache
- * DNSCrypt Shared Secret Cache
- * DNSCrypt Nonce Cache
+4. **Cache Sizes**
+
+ - Message Cache
+ - RRset Cache
+ - Infra Cache
+ - DNSSEC Key Cache
+ - DNSCrypt Shared Secret Cache
+ - DNSCrypt Nonce Cache
-### Configuration
+## Configuration
Unbound must be manually configured to enable the remote-control protocol.
Check the Unbound documentation for info on how to do this. Additionally,
@@ -78,27 +82,27 @@ that you use a UNIX socket as it provides far better performance.
If you've configured the module and can't get it to work, make sure to
check all of the following:
-* If you're using autodetection, double check that your `unbound.conf`
- file is actually using spaces instead of tabs, and that appropriate
- indentation is present. Most Linux distributions ship a default config
- for Unbound that uses tabs, and the plugin can't read such a config file
- correctly. Also, make sure this file is actually readable by Netdata.
-* Ensure that the control protocol is actually configured correctly.
- You can check this quickly by running `unbound-control stats_noreset`
- as root, which should print out a bunch of info about the internal
- statistics of the server. If this returns an error, you don't have
- the control protocol set up correctly.
-* If using the regular control interface, make sure that the certificate
- and key file you have configured in `unbound.conf` are readable by
- Netdata. In general, it's preferred to use ACL's on the files to
- provide the required permissions.
-* If using a UNIX socket, make sure that the socket is both readable
- _and_ writable by Netdata. Just like with the regular control
- interface, it's preferred to use ACL's to provide these permissions.
-* Make sure that SELinux, Apparmor, or any other mandatory access control
- system isn't interfering with the access requirements mentioned above.
- In some cases, you may have to add a local rule to allow this access.
+- If you're using autodetection, double check that your `unbound.conf`
+ file is actually using spaces instead of tabs, and that appropriate
+ indentation is present. Most Linux distributions ship a default config
+ for Unbound that uses tabs, and the plugin can't read such a config file
+ correctly. Also, make sure this file is actually readable by Netdata.
+- Ensure that the control protocol is actually configured correctly.
+ You can check this quickly by running `unbound-control stats_noreset`
+ as root, which should print out a bunch of info about the internal
+ statistics of the server. If this returns an error, you don't have
+  the control protocol set up correctly (a ready-made check follows this list).
+- If using the regular control interface, make sure that the certificate
+ and key file you have configured in `unbound.conf` are readable by
+  Netdata. In general, it's preferred to use ACLs on the files to
+ provide the required permissions.
+- If using a UNIX socket, make sure that the socket is both readable
+ _and_ writable by Netdata. Just like with the regular control
+  interface, it's preferred to use ACLs to provide these permissions.
+- Make sure that SELinux, Apparmor, or any other mandatory access control
+ system isn't interfering with the access requirements mentioned above.
+ In some cases, you may have to add a local rule to allow this access.
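+
+To verify the control channel end-to-end, run the statistics query as the Netdata user (a sketch; assumes `sudo` is available):
+
+```sh
+# prints the server statistics without resetting them; an error here means the
+# control protocol or the file permissions above are not set up correctly
+sudo -u netdata unbound-control stats_noreset
+```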
---
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Funbound%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Funbound%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/uwsgi/README.md b/collectors/python.d.plugin/uwsgi/README.md
index 9d455cfc..a8111965 100644
--- a/collectors/python.d.plugin/uwsgi/README.md
+++ b/collectors/python.d.plugin/uwsgi/README.md
@@ -2,26 +2,28 @@
This module monitors uwsgi performance metrics.
-https://uwsgi-docs.readthedocs.io/en/latest/StatsServer.html
+<https://uwsgi-docs.readthedocs.io/en/latest/StatsServer.html>
Lines are created dynamically based on how many workers there are.
The following charts are drawn:
-1. **Requests**
- * requests per second
- * transmitted data
- * average request time
+1. **Requests**
-2. **Memory**
- * rss
- * vsz
+ - requests per second
+ - transmitted data
+ - average request time
-3. **Exceptions**
-4. **Harakiris**
-5. **Respawns**
+2. **Memory**
-### configuration
+ - rss
+ - vsz
+
+3. **Exceptions**
+4. **Harakiris**
+5. **Respawns**
+
+## configuration
```yaml
socket:
@@ -36,4 +38,4 @@ localhost:
When no configuration file is found, the module tries to connect to the TCP/IP socket `localhost:1717`.
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fuwsgi%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fuwsgi%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/varnish/README.md b/collectors/python.d.plugin/varnish/README.md
index 44d64efe..4de883d3 100644
--- a/collectors/python.d.plugin/varnish/README.md
+++ b/collectors/python.d.plugin/varnish/README.md
@@ -4,65 +4,77 @@ Module uses the `varnishstat` command to provide varnish cache statistics.
It produces:
-1. **Connections Statistics** in connections/s
- * accepted
- * dropped
+1. **Connections Statistics** in connections/s
-2. **Client Requests** in requests/s
- * received
+ - accepted
+ - dropped
-3. **All History Hit Rate Ratio** in percent
- * hit
- * miss
- * hitpass
+2. **Client Requests** in requests/s
-4. **Current Poll Hit Rate Ratio** in percent
- * hit
- * miss
- * hitpass
+ - received
-5. **Expired Objects** in expired/s
- * objects
+3. **All History Hit Rate Ratio** in percent
-6. **Least Recently Used Nuked Objects** in nuked/s
- * objects
+ - hit
+ - miss
+ - hitpass
+4. **Current Poll Hit Rate Ratio** in percent
-7. **Number Of Threads In All Pools** in threads
- * threads
+ - hit
+ - miss
+ - hitpass
-8. **Threads Statistics** in threads/s
- * created
- * failed
- * limited
+5. **Expired Objects** in expired/s
-9. **Current Queue Length** in requests
- * in queue
+ - objects
+
+6. **Least Recently Used Nuked Objects** in nuked/s
+
+ - objects
+
+7. **Number Of Threads In All Pools** in threads
+
+ - threads
+
+8. **Threads Statistics** in threads/s
+
+ - created
+ - failed
+ - limited
+
+9. **Current Queue Length** in requests
+
+ - in queue
10. **Backend Connections Statistics** in connections/s
- * successful
- * unhealthy
- * reused
- * closed
- * resycled
- * failed
-10. **Requests To The Backend** in requests/s
- * received
+ - successful
+ - unhealthy
+ - reused
+ - closed
+  - recycled
+ - failed
+
+11. **Requests To The Backend** in requests/s
+
+ - received
+
+12. **ESI Statistics** in problems/s
+
+ - errors
+ - warnings
-11. **ESI Statistics** in problems/s
- * errors
- * warnings
+13. **Memory Usage** in MB
-12. **Memory Usage** in MB
- * free
- * allocated
+ - free
+ - allocated
-13. **Uptime** in seconds
- * uptime
+14. **Uptime** in seconds
+
+  - uptime
-### configuration
+## configuration
Only one parameter is supported:
@@ -74,4 +86,4 @@ The name of the varnishd instance to get logs from. If not specified, the host n
---
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fvarnish%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fvarnish%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/w1sensor/README.md b/collectors/python.d.plugin/w1sensor/README.md
index 94717c81..74edcc0a 100644
--- a/collectors/python.d.plugin/w1sensor/README.md
+++ b/collectors/python.d.plugin/w1sensor/README.md
@@ -6,10 +6,10 @@ Currently temperature sensors are supported and automatically detected.
Charts are created dynamically based on the number of detected sensors.
-### configuration
+## configuration
For detailed configuration information please read the [`w1sensor.conf`](w1sensor.conf) file.
---
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fw1sensor%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fw1sensor%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/web_log/README.md b/collectors/python.d.plugin/web_log/README.md
index 03199458..33dfd696 100644
--- a/collectors/python.d.plugin/web_log/README.md
+++ b/collectors/python.d.plugin/web_log/README.md
@@ -6,19 +6,19 @@ Web server log files exist for more than 20 years. All web servers of all kinds,
Yet, after the appearance of Google Analytics and similar services, and the recent rise of APM (Application Performance Monitoring) with sophisticated time-series databases that collect and analyze metrics at the application level, all these web server log files are mostly just filling our disks, rotated every night without any use whatsoever.
-netdata turns this "useless" log file, into a powerful performance and health monitoring tool, capable of detecting, **in real-time**, most common web server problems, such as:
+Netdata turns this "useless" log file into a powerful performance and health monitoring tool, capable of detecting, **in real-time**, most common web server problems, such as:
-- too many redirects (i.e. **oops!** *this should not redirect clients to itself*)
-- too many bad requests (i.e. **oops!** *a few files were not uploaded*)
-- too many internal server errors (i.e. **oops!** *this release crashes too much*)
-- unreasonably too many requests (i.e. **oops!** *we are under attack*)
-- unreasonably few requests (i.e. **oops!** *call the network guys*)
-- unreasonably slow responses (i.e. **oops!** *the database is slow again*)
-- too few successful responses (i.e. **oops!** *help us God!*)
+- too many redirects (i.e. **oops!** *this should not redirect clients to itself*)
+- too many bad requests (i.e. **oops!** *a few files were not uploaded*)
+- too many internal server errors (i.e. **oops!** *this release crashes too much*)
+- unreasonably too many requests (i.e. **oops!** *we are under attack*)
+- unreasonably few requests (i.e. **oops!** *call the network guys*)
+- unreasonably slow responses (i.e. **oops!** *the database is slow again*)
+- too few successful responses (i.e. **oops!** *help us God!*)
## Usage
-If netdata is installed on a system running a web server, it will detect it and it will automatically present a series of charts, with information obtained from the web server API, like these (*these do not come from the web server log file*):
+If Netdata is installed on a system running a web server, it will detect it and automatically present a series of charts, with information obtained from the web server API, like these (*these do not come from the web server log file*):
![image](https://cloud.githubusercontent.com/assets/2662304/22900686/e283f636-f237-11e6-93d2-cbdf63de150c.png)
*[**netdata**](https://my-netdata.io/) charts based on metrics collected by querying the `nginx` API (i.e. `/stub_status`).*
@@ -29,7 +29,6 @@ If netdata is installed on a system running a web server, it will detect it and
[**netdata**](https://my-netdata.io/) has a powerful `web_log` plugin, capable of incrementally parsing any number of web server log files. This plugin is automatically started with [**netdata**](https://my-netdata.io/) and comes, pre-configured, for finding web server log files on popular distributions. Its configuration is at [`/etc/netdata/python.d/web_log.conf`](web_log.conf), like this:
-
```yaml
nginx_log:
name : 'nginx_log'
@@ -56,11 +55,11 @@ Once you have all log files configured and [**netdata**](https://my-netdata.io/)
In this chart we tried to provide a meaningful status for all responses. So:
-- `success` counts all the valid responses (i.e. `1xx` informational, `2xx` successful and `304` not modified).
-- `error` are `5xx` internal server errors. These are very bad, they mean your web site or API is facing difficulties.
-- `redirect` are `3xx` responses, except `304`. All `3xx` are redirects, but `304` means "not modified" - it tells the browsers the content they already have is still valid and can be used as-is. So, we decided to account it as a successful response.
-- `bad` are bad requests that cannot be served.
-- `other` as all the other, non-standard, types of responses.
+- `success` counts all the valid responses (i.e. `1xx` informational, `2xx` successful and `304` not modified).
+- `error` are `5xx` internal server errors. These are very bad, they mean your web site or API is facing difficulties.
+- `redirect` are `3xx` responses, except `304`. All `3xx` are redirects, but `304` means "not modified" - it tells the browsers the content they already have is still valid and can be used as-is. So, we decided to count it as a successful response.
+- `bad` are bad requests that cannot be served.
+- `other` are all the other, non-standard, types of responses.
![image](https://cloud.githubusercontent.com/assets/2662304/22902194/ea0affc6-f23c-11e6-85f1-a4951dd4bb40.png)
@@ -68,11 +67,12 @@ In this chart we tried to provide a meaningful status for all responses. So:
Then, we group all responses by code family, without interpreting their meaning.
**Response by type** requests/s
- * success (1xx, 2xx, 304)
- * error (5xx)
- * redirect (3xx except 304)
- * bad (4xx)
- * other (all other responses)
+
+- success (1xx, 2xx, 304)
+- error (5xx)
+- redirect (3xx except 304)
+- bad (4xx)
+- other (all other responses)
![image](https://cloud.githubusercontent.com/assets/2662304/22901883/dea7d33a-f23b-11e6-960d-00a913b58936.png)
@@ -81,18 +81,18 @@ Then, we group all responses by code family, without interpreting their meaning.
Here we show all the response codes in detail.
**Response by code family** requests/s
- * 1xx (informational)
- * 2xx (successful)
- * 3xx (redirect)
- * 4xx (bad)
- * 5xx (internal server errors)
- * other (non-standart responses)
- * unmatched (the lines in the log file that are not matched)
-
+
+- 1xx (informational)
+- 2xx (successful)
+- 3xx (redirect)
+- 4xx (bad)
+- 5xx (internal server errors)
+- other (non-standard responses)
+- unmatched (the lines in the log file that are not matched)
![image](https://cloud.githubusercontent.com/assets/2662304/22901965/1a5d84ba-f23c-11e6-9d38-3deebcc8b879.png)
->**Important**<br/>If your application is using hundreds of non-standard response codes, your browser may become slow while viewing this chart, so we have added a configuration [option to disable this chart](https://github.com/netdata/netdata/blob/419cd0a237275e5eeef3f92dcded84e735ee6c58/conf.d/python.d/web_log.conf#L63).
+> **Important**<br/>If your application is using hundreds of non-standard response codes, your browser may become slow while viewing this chart, so we have added a configuration [option to disable this chart](https://github.com/netdata/netdata/blob/419cd0a237275e5eeef3f92dcded84e735ee6c58/conf.d/python.d/web_log.conf#L63).
### Detailed Response Codes
@@ -107,9 +107,10 @@ What is important to know for this chart, is that the bandwidth used for each re
As the legend on the chart suggests, you can use FireQoS to setup QoS on the web server ports and IPs to accurately measure the bandwidth the web server is using. Actually, [there may be a few more reasons to install QoS on your servers](../../tc.plugin/#tcplugin)...
**Bandwidth** KB/s
- * received (bandwidth of requests)
- * send (bandwidth of responses)
-
+
+- received (bandwidth of requests)
+- sent (bandwidth of responses)
+
![image](https://cloud.githubusercontent.com/assets/2662304/22902266/245141d6-f23d-11e6-90f9-98729733e0da.png)
> **Important**<br/>Most web servers do not log the request size by default.<br/>So, [unless you have configured your web server to log the size of requests](https://github.com/netdata/netdata/blob/419cd0a237275e5eeef3f92dcded84e735ee6c58/conf.d/python.d/web_log.conf#L76-L89), the `received` dimension will be always zero.
@@ -121,10 +122,11 @@ As the legend on the chart suggests, you can use FireQoS to setup QoS on the web
Keep in mind that most web server timings start at the reception of the full request and end at the dispatch of the last byte of the response. So, they include network latencies of responses, but they do not include network latencies of requests.
**Timings** ms (request processing time)
- * min (bandwidth of requests)
- * max (bandwidth of responses)
- * average (bandwidth of responses)
-
+
+- min (minimum request processing time)
+- max (maximum request processing time)
+- average (average request processing time)
+
![image](https://cloud.githubusercontent.com/assets/2662304/22902283/369e3f92-f23d-11e6-9359-53e5d4ecb18e.png)
> **Important**<br/>Most web servers do not log timing information by default.<br/>So, [unless you have configured your web server to also log timings](https://github.com/netdata/netdata/blob/419cd0a237275e5eeef3f92dcded84e735ee6c58/conf.d/python.d/web_log.conf#L76-L89), this chart will not exist.
@@ -179,25 +181,23 @@ The last charts are about the unique IPs accessing your web server.
![image](https://cloud.githubusercontent.com/assets/2662304/22902407/92dd27e6-f23d-11e6-900d-eede7bc08e64.png)
->**Important**<br/>To provide this information `web_log` plugin keeps in memory all the IPs seen by the web server. Although this does not require so much memory, if you have a web server with several million unique client IPs, we suggest to [disable this chart](https://github.com/netdata/netdata/blob/419cd0a237275e5eeef3f92dcded84e735ee6c58/conf.d/python.d/web_log.conf#L64).
-
+> **Important**<br/>To provide this information `web_log` plugin keeps in memory all the IPs seen by the web server. Although this does not require so much memory, if you have a web server with several million unique client IPs, we suggest to [disable this chart](https://github.com/netdata/netdata/blob/419cd0a237275e5eeef3f92dcded84e735ee6c58/conf.d/python.d/web_log.conf#L64).
## Alarms
The magic of [**netdata**](https://my-netdata.io/) is that all metrics are collected per second, and all metrics can be used or correlated to provide real-time alarms. Out of the box, [**netdata**](https://my-netdata.io/) automatically attaches the [following alarms](../../../health/health.d/web_log.conf) to all `web_log` charts (i.e. to all log files configured, individually):
-alarm|description|minimum<br/>requests|warning|critical
-:-------|-------|:------:|:-----:|:------:
-`1m_redirects`|The ratio of HTTP redirects (3xx except 304) over all the requests, during the last minute.<br/>&nbsp;<br/>*Detects if the site or the web API is suffering from too many or circular redirects.*<br/>&nbsp;<br/>(i.e. **oops!** *this should not redirect clients to itself*)|120/min|&gt; 20%|&gt; 30%
-`1m_bad_requests`|The ratio of HTTP bad requests (4xx) over all the requests, during the last minute.<br/>&nbsp;<br/>*Detects if the site or the web API is receiving too many bad requests, including `404`, not found.*<br/>&nbsp;<br/>(i.e. **oops!** *a few files were not uploaded*)|120/min|&gt; 30%|&gt; 50%
-`1m_internal_errors`|The ratio of HTTP internal server errors (5xx), over all the requests, during the last minute.<br/>&nbsp;<br/>*Detects if the site is facing difficulties to serve requests.*<br/>&nbsp;<br/>(i.e. **oops!** *this release crashes too much*)|120/min|&gt; 2%|&gt; 5%
-`5m_requests_ratio`|The percentage of successful web requests of the last 5 minutes, compared with the previous 5 minutes.<br/>&nbsp;<br/>*Detects if the site or the web API is suddenly getting too many or too few requests.*<br/>&nbsp;<br/>(i.e. too many = **oops!** *we are under attack*)<br/>(i.e. too few = **oops!** *call the network guys*)|120/5min|&gt; double or &lt; half|&gt; 4x or &lt; 1/4x
-`web_slow`|The average time to respond to requests, over the last 1 minute, compared to the average of last 10 minutes.<br/>&nbsp;<br/>*Detects if the site or the web API is suddenly a lot slower.*<br/>&nbsp;<br/>(i.e. **oops!** *the database is slow again*)|120/min|&gt; 2x|&gt; 4x
-`1m_successful`|The ratio of successful HTTP responses (1xx, 2xx, 304) over all the requests, during the last minute.<br/>&nbsp;<br/>*Detects if the site or the web API is performing within limits.*<br/>&nbsp;<br/>(i.e. **oops!** *help us God!*)|120/min|&lt; 85%|&lt; 75%
+| alarm|description|minimum<br/>requests|warning|critical|
+|:----|-----------|:------------------:|:-----:|:------:|
+| `1m_redirects`|The ratio of HTTP redirects (3xx except 304) over all the requests, during the last minute.<br/> <br/>*Detects if the site or the web API is suffering from too many or circular redirects.*<br/> <br/>(i.e. **oops!** *this should not redirect clients to itself*)|120/min|> 20%|> 30%|
+| `1m_bad_requests`|The ratio of HTTP bad requests (4xx) over all the requests, during the last minute.<br/> <br/>*Detects if the site or the web API is receiving too many bad requests, including `404`, not found.*<br/> <br/>(i.e. **oops!** *a few files were not uploaded*)|120/min|> 30%|> 50%|
+| `1m_internal_errors`|The ratio of HTTP internal server errors (5xx), over all the requests, during the last minute.<br/> <br/>*Detects if the site is facing difficulties to serve requests.*<br/> <br/>(i.e. **oops!** *this release crashes too much*)|120/min|> 2%|> 5%|
+| `5m_requests_ratio`|The percentage of successful web requests of the last 5 minutes, compared with the previous 5 minutes.<br/> <br/>*Detects if the site or the web API is suddenly getting too many or too few requests.*<br/> <br/>(i.e. too many = **oops!** *we are under attack*)<br/>(i.e. too few = **oops!** *call the network guys*)|120/5min|> double or \< half|> 4x or \< 1/4x|
+| `web_slow`|The average time to respond to requests, over the last 1 minute, compared to the average of last 10 minutes.<br/> <br/>*Detects if the site or the web API is suddenly a lot slower.*<br/> <br/>(i.e. **oops!** *the database is slow again*)|120/min|> 2x|> 4x|
+| `1m_successful`|The ratio of successful HTTP responses (1xx, 2xx, 304) over all the requests, during the last minute.<br/> <br/>*Detects if the site or the web API is performing within limits.*<br/> <br/>(i.e. **oops!** *help us God!*)|120/min|\< 85%|\< 75%|
The column `minimum requests` states the minimum number of requests required for the alarm to be evaluated. We found that when the site is receiving requests above this rate, these alarms are pretty accurate (i.e. no false-positives).
-[**netdata**](https://my-netdata.io/) alarms are user configurable. Sample config files can be found under directory `health/health.d` of the netdata github repository. So, even [`web_log` alarms can be adapted to your needs](../../../health/health.d/web_log.conf).
-
+[**netdata**](https://my-netdata.io/) alarms are user configurable. Sample config files can be found in the `health/health.d` directory of the [Netdata GitHub repository](https://github.com/netdata/netdata/). So, even [`web_log` alarms can be adapted to your needs](../../../health/health.d/web_log.conf).
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fweb_log%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fweb_log%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/statsd.plugin/Makefile.in b/collectors/statsd.plugin/Makefile.in
new file mode 100644
index 00000000..cd1119c9
--- /dev/null
+++ b/collectors/statsd.plugin/Makefile.in
@@ -0,0 +1,605 @@
+# Makefile.in generated by automake 1.15.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2017 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+VPATH = @srcdir@
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = collectors/statsd.plugin
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(dist_noinst_DATA) \
+ $(dist_statsdconfig_DATA) $(dist_userstatsdconfig_DATA) \
+ $(am__DIST_COMMON)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
+am__vpath_adj = case $$p in \
+ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
+ *) f=$$p;; \
+ esac;
+am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
+am__install_max = 40
+am__nobase_strip_setup = \
+ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
+am__nobase_strip = \
+ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
+am__nobase_list = $(am__nobase_strip_setup); \
+ for p in $$list; do echo "$$p $$p"; done | \
+ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
+ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
+ if (++n[$$2] == $(am__install_max)) \
+ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
+ END { for (dir in files) print dir, files[dir] }'
+am__base_list = \
+ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
+ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
+am__uninstall_files_from_dir = { \
+ test -z "$$files" \
+ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \
+ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \
+ $(am__cd) "$$dir" && rm -f $$files; }; \
+ }
+am__installdirs = "$(DESTDIR)$(statsdconfigdir)" \
+ "$(DESTDIR)$(userstatsdconfigdir)"
+DATA = $(dist_noinst_DATA) $(dist_statsdconfig_DATA) \
+ $(dist_userstatsdconfig_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+am__DIST_COMMON = $(srcdir)/Makefile.in
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CUPSCONFIG = @CUPSCONFIG@
+CXX = @CXX@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CXX_BINARY = @CXX_BINARY@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+JSON_CFLAGS = @JSON_CFLAGS@
+JSON_LIBS = @JSON_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
+LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
+LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
+LIBCURL_LIBS = @LIBCURL_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
+LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
+LIBSSL_LIBS = @LIBSSL_LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
+OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
+OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
+OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
+OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
+OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
+OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
+OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
+OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
+OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
+OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
+OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
+OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
+PROTOBUF_LIBS = @PROTOBUF_LIBS@
+PROTOC = @PROTOC@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
+XENLIGHT_LIBS = @XENLIGHT_LIBS@
+YAJL_CFLAGS = @YAJL_CFLAGS@
+YAJL_LIBS = @YAJL_LIBS@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
+
+statsdconfigdir = $(libconfigdir)/statsd.d
+dist_statsdconfig_DATA = \
+ example.conf \
+ $(NULL)
+
+userstatsdconfigdir = $(configdir)/statsd.d
+dist_userstatsdconfig_DATA = \
+ .keep \
+ $(NULL)
+
+all: all-am
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/statsd.plugin/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu collectors/statsd.plugin/Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+install-dist_statsdconfigDATA: $(dist_statsdconfig_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_statsdconfig_DATA)'; test -n "$(statsdconfigdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(statsdconfigdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(statsdconfigdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(statsdconfigdir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(statsdconfigdir)" || exit $$?; \
+ done
+
+uninstall-dist_statsdconfigDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_statsdconfig_DATA)'; test -n "$(statsdconfigdir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(statsdconfigdir)'; $(am__uninstall_files_from_dir)
+install-dist_userstatsdconfigDATA: $(dist_userstatsdconfig_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_userstatsdconfig_DATA)'; test -n "$(userstatsdconfigdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(userstatsdconfigdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(userstatsdconfigdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(userstatsdconfigdir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(userstatsdconfigdir)" || exit $$?; \
+ done
+
+uninstall-dist_userstatsdconfigDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_userstatsdconfig_DATA)'; test -n "$(userstatsdconfigdir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(userstatsdconfigdir)'; $(am__uninstall_files_from_dir)
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(DATA)
+installdirs:
+ for dir in "$(DESTDIR)$(statsdconfigdir)" "$(DESTDIR)$(userstatsdconfigdir)"; do \
+ test -z "$$dir" || $(MKDIR_P) "$$dir"; \
+ done
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am: install-dist_statsdconfigDATA \
+ install-dist_userstatsdconfigDATA
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am: uninstall-dist_statsdconfigDATA \
+ uninstall-dist_userstatsdconfigDATA
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dist_statsdconfigDATA \
+ install-dist_userstatsdconfigDATA install-dvi install-dvi-am \
+ install-exec install-exec-am install-html install-html-am \
+ install-info install-info-am install-man install-pdf \
+ install-pdf-am install-ps install-ps-am install-strip \
+ installcheck installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags-am uninstall uninstall-am \
+ uninstall-dist_statsdconfigDATA \
+ uninstall-dist_userstatsdconfigDATA
+
+.PRECIOUS: Makefile
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/collectors/statsd.plugin/README.md b/collectors/statsd.plugin/README.md
index 399918dc..dcbae6c5 100644
--- a/collectors/statsd.plugin/README.md
+++ b/collectors/statsd.plugin/README.md
@@ -4,7 +4,7 @@ statsd is a system to collect data from any application. Applications are sendin
There is a [plethora of client libraries](https://github.com/etsy/statsd/wiki#client-implementations) for embedding statsd metrics to any application framework. This makes statsd quite popular for custom application metrics.
-netdata is a fully featured statsd server. It can collect statsd formatted metrics, visualize them on its dashboards, stream them to other netdata servers or archive them to backend time-series databases.
+Netdata is a fully-featured statsd server. It can collect statsd-formatted metrics, visualize them on its dashboards, stream them to other Netdata servers, or archive them to backend time-series databases.
Netdata statsd runs inside Netdata (as an internal plugin, inside the Netdata daemon). It is configured via `netdata.conf` and by default listens on the standard statsd port (TCP and UDP 8125 - yes, the Netdata statsd server supports both TCP and UDP at the same time).
@@ -16,45 +16,45 @@ Netdata statsd is fast. It can collect more than **1.200.000 metrics per second*
Netdata fully supports the statsd protocol. All statsd client libraries can be used with Netdata too.
-- **Gauges**
+- **Gauges**
- The application sends `name:value|g`, where `value` is any **decimal/fractional** number, statsd reports the latest value collected and the number of times it was updated (events).
+ The application sends `name:value|g`, where `value` is any **decimal/fractional** number; statsd reports the latest value collected and the number of times it was updated (events).
- The application may increment or decrement a previous value, by setting the first character of the value to ` + ` or ` - ` (so, the only way to set a gauge to an absolute negative value, is to first set it to zero).
+ The application may increment or decrement a previous value by setting the first character of the value to `+` or `-` (so, the only way to set a gauge to an absolute negative value is to first set it to zero).
- Sampling rate is supported (check below).
+ Sampling rate is supported (check below).
- When a gauge is not collected and the setting is not to show gaps on the charts (the default), the last value will be shown, until a data collection event changes it.
+ When a gauge is not collected and the setting is not to show gaps on the charts (the default), the last value will be shown, until a data collection event changes it.
-- **Counters** and **Meters**
+- **Counters** and **Meters**
- The application sends `name:value|c`, `name:value|C` or `name:value|m`, where `value` is a positive or negative **integer** number of events occurred, statsd reports the **rate** and the number of times it was updated (events).
+ The application sends `name:value|c`, `name:value|C` or `name:value|m`, where `value` is a positive or negative **integer** number of events that occurred; statsd reports the **rate** and the number of times it was updated (events).
- `:value` can be omitted and statsd will assume it is `1`. `|c`, `|C` and `|m` can be omitted an statsd will assume it is `|m`. So, the application may send just `name` and statsd will parse it as `name:1|m`.
+ `:value` can be omitted and statsd will assume it is `1`. `|c`, `|C` and `|m` can be omitted and statsd will assume it is `|m`. So, the application may send just `name` and statsd will parse it as `name:1|m`.
- For counters use `|c` (esty/statsd compatible) or `|C` (brubeck compatible), for meters use `|m`.
+ For counters use `|c` (etsy/statsd compatible) or `|C` (brubeck compatible), for meters use `|m`.
- Sampling rate is supported (check below).
+ Sampling rate is supported (check below).
- When a counter or meter is not collected and the setting is not to show gaps on the charts (the default), zero will be shown, until a data collection event changes it.
+ When a counter or meter is not collected and the setting is not to show gaps on the charts (the default), zero will be shown, until a data collection event changes it.
-- **Timers** and **Histograms**
+- **Timers** and **Histograms**
- The application sends `name:value|ms` or `name:value|h`, where ` value` is any **decimal/fractional** number, statsd reports **min**, **max**, **average**, **sum**, **95th percentile**, **median** and **standard deviation** and the total number of times it was updated (events).
+ The application sends `name:value|ms` or `name:value|h`, where `value` is any **decimal/fractional** number; statsd reports **min**, **max**, **average**, **sum**, **95th percentile**, **median** and **standard deviation**, and the total number of times it was updated (events).
- For timers use `|ms`, or histograms use `|h`. The only difference between the two, is the `units` of the charts (timers report milliseconds).
+ For timers use `|ms`, for histograms use `|h`. The only difference between the two is the `units` of the charts (timers report milliseconds).
- Sampling rate is supported (check below).
+ Sampling rate is supported (check below).
- When a timer or histogram is not collected and the setting is not to show gaps on the charts (the default), zero will be shown, until a data collection event changes it.
+ When a timer or histogram is not collected and the setting is not to show gaps on the charts (the default), zero will be shown, until a data collection event changes it.
-- **Sets**
+- **Sets**
- The application sends `name:value|s`, where `value` is anything (**number or text**, leading and trailing spaces are removed), statsd reports the number of unique values sent and the number of times it was updated (events).
+ The application sends `name:value|s`, where `value` is anything (**number or text**; leading and trailing spaces are removed); statsd reports the number of unique values sent and the number of times it was updated (events).
- Sampling rate is **not** supported for Sets. `value` is always considered text.
+ Sampling rate is **not** supported for Sets. `value` is always considered text.
- When a set is not collected and the setting is not to show gaps on the charts (the default), zero will be shown, until a data collection event changes it.
+ When a set is not collected and the setting is not to show gaps on the charts (the default), zero will be shown, until a data collection event changes it. A short shell sketch exercising each of these metric types follows this list.
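+As a quick illustration of the formats above, here is a minimal shell sketch. The metric names are made up for the example; the `nc` invocation is the one used in the shell-scripts section later in this document, and assumes a local Netdata listening on the default UDP port 8125:
+
+```
+# gauge: report the current value of something
+echo "myapp.cache_size:250.5|g" | nc -u --send-only localhost 8125
+# counter: count events; the value may be omitted (defaults to 1)
+echo "myapp.downloads:1|c" | nc -u --send-only localhost 8125
+# timer: statistics on durations, in milliseconds
+echo "myapp.query_time:12.3|ms" | nc -u --send-only localhost 8125
+# set: count unique occurrences of a value
+echo "myapp.user:john|s" | nc -u --send-only localhost 8125
+```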
#### Sampling Rates
@@ -62,19 +62,19 @@ The application may append `|@sampling_rate`, where `sampling_rate` is a number
#### Overlapping metrics
-netdata statsd maintains different indexes for each of the types supported. This means the same metric `name` may exist under different types concurrently.
+Netdata's statsd server maintains different indexes for each of the types supported. This means the same metric `name` may exist under different types concurrently.
#### Multiple metrics per packet
-netdata accepts multiple metrics per packet if each is terminated with `\n`.
+Netdata accepts multiple metrics per packet if each is terminated with `\n`.
#### TCP packets
-netdata listens for both TCP and UDP packets. For TCP though, is it important to always append `\n` on each metric. netdata uses this to detect if a metric is split into multiple TCP packets. On disconnect, even the remaining (non terminated with `\n`) buffer, is processed.
+Netdata listens for both TCP and UDP packets. For TCP, though, it is important to always append `\n` to each metric. Netdata uses this to detect if a metric is split into multiple TCP packets. On disconnect, even the remaining buffer (not terminated with `\n`) is processed.
#### UDP packets
-When sending multiple packets over UDP, it is important not to exceed the network MTU (usually 1500 bytes minus a few bytes for the headers). netdata will accept UDP packets up to 9000 bytes, but the underlying network will not exceed MTU.
+When sending multiple metrics in a single UDP packet, it is important not to exceed the network MTU (usually 1500 bytes, minus a few bytes for the headers). Netdata will accept UDP packets up to 9000 bytes, but the underlying network will not deliver packets that exceed the MTU.
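+For example, a single UDP packet may carry several `\n`-terminated metrics (a sketch with illustrative metric names, staying well below a 1500-byte MTU):
+
+```
+printf 'myapp.requests:1|c\nmyapp.query_time:8.2|ms\nmyapp.cache_size:250|g\n' | nc -u --send-only localhost 8125
+```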
## configuration
@@ -105,31 +105,32 @@ This is the statsd configuration at `/etc/netdata/netdata.conf`:
```
### statsd main config options
-- `enabled = yes|no`
- controls if statsd will be enabled for this netdata. The default is enabled.
+- `enabled = yes|no`
-- `default port = 8125`
+ controls if statsd will be enabled for this Netdata. The default is enabled.
- controls the port statsd will use. This is the default, since the next line, allows defining ports too.
+- `default port = 8125`
-- `bind to = udp:localhost tcp:localhost`
+ controls the port statsd will use. This is only the default, since the `bind to` setting below allows defining ports too.
- is a space separated list of IPs and ports to listen to. The format is `PROTOCOL:IP:PORT` - if `PORT` is omitted, the `default port` will be used. If `IP` is IPv6, it needs to be enclosed in `[]`. `IP` can also be ` * ` (to listen on all IPs) or even a hostname.
+- `bind to = udp:localhost tcp:localhost`
-- `update every (flushInterval) = 1` seconds, controls the frequency statsd will push the collected metrics to netdata charts.
+ is a space-separated list of IPs and ports to listen to. The format is `PROTOCOL:IP:PORT` - if `PORT` is omitted, the `default port` will be used. If `IP` is IPv6, it needs to be enclosed in `[]`. `IP` can also be `*` (to listen on all IPs) or even a hostname.
-- `decimal detail = 1000` controls the number of fractional digits in gauges and histograms. netdata collects metrics using signed 64 bit integers and their fractional detail is controlled using multipliers and divisors. This setting is used to multiply all collected values to convert them to integers and is also set as the divisors, so that the final data will be a floating point number with this fractional detail (1000 = X.0 - X.999, 10000 = X.0 - X.9999, etc).
+- `update every (flushInterval) = 1` seconds, controls how often statsd will push the collected metrics to Netdata charts.
+
+- `decimal detail = 1000` controls the number of fractional digits in gauges and histograms. Netdata collects metrics using signed 64-bit integers and their fractional detail is controlled using multipliers and divisors. This setting is used to multiply all collected values to convert them to integers and is also used as the divisor, so that the final data will be a floating point number with this fractional detail (1000 = X.0 - X.999, 10000 = X.0 - X.9999, etc). A sketch combining all these options follows below.
The rest of the settings are discussed below.
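+Putting the options above together, the `[statsd]` section of `netdata.conf` could look like the following sketch (these are just the default values quoted above, not a tuning recommendation):
+
+```
+[statsd]
+    enabled = yes
+    default port = 8125
+    bind to = udp:localhost tcp:localhost
+    update every (flushInterval) = 1
+    decimal detail = 1000
+```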
## statsd charts
-netdata can visualize statsd collected metrics in 2 ways:
+Netdata can visualize statsd collected metrics in 2 ways:
-1. Each metric gets its own **private chart**. This is the default and does not require any configuration (although there are a few options to tweak).
+1. Each metric gets its own **private chart**. This is the default and does not require any configuration (although there are a few options to tweak).
-2. **Synthetic charts** can be created, combining multiple metrics, independently of their metric types. For this type of charts, special configuration is required, to define the chart title, type, units, its dimensions, etc.
+2. **Synthetic charts** can be created, combining multiple metrics, independently of their metric types. For this type of chart, special configuration is required to define the chart title, type, units, its dimensions, etc.
### private metric charts
@@ -143,11 +144,11 @@ create private charts for metrics matching = !myapp.*.badmetric myapp.*
The default is to render private charts for all metrics.
-The `memory mode` of the round robin database and the `history` of private metric charts are controlled with `private charts memory mode` and `private charts history`. The defaults for both settings is to use the global netdata settings. So, you need to edit them only when you want statsd to use different settings compared to the global ones.
+The `memory mode` of the round robin database and the `history` of private metric charts are controlled with `private charts memory mode` and `private charts history`. The default for both settings is to use the global Netdata settings. So, you need to edit them only when you want statsd to use different settings compared to the global ones.
-If you have thousands of metrics, each with its own private chart, you may notice that your web browser becomes slow when you view the netdata dashboard (this is a web browser issue we need to address at the netdata UI). So, netdata has a protection to stop creating charts when `max private charts allowed = 200` (soft limit) is reached.
+If you have thousands of metrics, each with its own private chart, you may notice that your web browser becomes slow when you view the Netdata dashboard (this is a web browser issue we need to address in the Netdata UI). So, Netdata has a protection that stops creating charts when `max private charts allowed = 200` (soft limit) is reached.
-The metrics above this soft limit are still processed by netdata and will be available to be sent to backend time-series databases, up to `max private charts hard limit = 1000`. So, between 200 and 1000 charts, netdata will still generate charts, but they will automatically be created with `memory mode = none` (netdata will not maintain a database for them). These metrics will be sent to backend time series databases, if the backend configuration is set to `as collected`.
+The metrics above this soft limit are still processed by Netdata and will be available to be sent to backend time-series databases, up to `max private charts hard limit = 1000`. So, between 200 and 1000 charts, Netdata will still generate charts, but they will automatically be created with `memory mode = none` (Netdata will not maintain a database for them). These metrics will be sent to backend time series databases, if the backend configuration is set to `as collected`.
Metrics above the hard limit are still collected, but they can only be used in synthetic charts (once a metric is added to a chart, it will be sent to backend servers too).
@@ -155,25 +156,25 @@ Example private charts (automatically generated without any configuration):
#### counters
-- Scope: **count the events of something** (e.g. number of file downloads)
-- Format: `name:INTEGER|c` or `name:INTEGER|C` or `name|c`
-- statsd increments the counter by the `INTEGER` number supplied (positive, or negative).
+- Scope: **count the events of something** (e.g. number of file downloads)
+- Format: `name:INTEGER|c` or `name:INTEGER|C` or `name|c`
+- statsd increments the counter by the `INTEGER` number supplied (positive or negative).
![image](https://cloud.githubusercontent.com/assets/2662304/26131553/4a26d19c-3aa3-11e7-94e8-c53b5ed6ebc3.png)
#### gauges
-- Scope: **report the value of something** (e.g. cache memory used by the application server)
-- Format: `name:FLOAT|g`
-- statsd remembers the last value supplied, and can increment or decrement the latest value if `FLOAT` begins with ` + ` or ` - `.
+- Scope: **report the value of something** (e.g. cache memory used by the application server)
+- Format: `name:FLOAT|g`
+- statsd remembers the last value supplied, and can increment or decrement the latest value if `FLOAT` begins with `+` or `-`.
![image](https://cloud.githubusercontent.com/assets/2662304/26131575/5d54e6f0-3aa3-11e7-9099-bc4440cd4592.png)
#### histograms
-- Scope: **statistics on a size of events** (e.g. statistics on the sizes of files downloaded)
-- Format: `name:FLOAT|h`
-- statsd maintains a list of all the values supplied and provides statistics on them.
+- Scope: **statistics on a size of events** (e.g. statistics on the sizes of files downloaded)
+- Format: `name:FLOAT|h`
+- statsd maintains a list of all the values supplied and provides statistics on them.
![image](https://cloud.githubusercontent.com/assets/2662304/26131587/704de72a-3aa3-11e7-9ea9-0d2bb778c150.png)
@@ -184,42 +185,40 @@ The same chart with `sum` unselected, to show the detail of the dimensions suppo
This is identical to `counter`.
-- Scope: **count the events of something** (e.g. number of file downloads)
-- Format: `name:INTEGER|m` or `name|m` or just `name`
-- statsd increments the counter by the `INTEGER` number supplied (positive, or negative).
+- Scope: **count the events of something** (e.g. number of file downloads)
+- Format: `name:INTEGER|m` or `name|m` or just `name`
+- statsd increments the counter by the `INTEGER` number supplied (positive or negative).
![image](https://cloud.githubusercontent.com/assets/2662304/26131605/8fdf5a06-3aa3-11e7-963f-7ecf207d1dbc.png)
#### sets
-- Scope: **count the unique occurrences of something** (e.g. unique filenames downloaded, or unique users that downloaded files)
-- Format: `name:TEXT|s`
-- statsd maintains a unique index of all values supplied, and reports the unique entries in it.
+- Scope: **count the unique occurrences of something** (e.g. unique filenames downloaded, or unique users that downloaded files)
+- Format: `name:TEXT|s`
+- statsd maintains a unique index of all values supplied, and reports the unique entries in it.
![image](https://cloud.githubusercontent.com/assets/2662304/26131612/9eaa7b1a-3aa3-11e7-903b-d881e9a35be2.png)
#### timers
-- Scope: **statistics on the duration of events** (e.g. statistics for the duration of file downloads)
-- Format: `name:FLOAT|ms`
-- statsd maintains a list of all the values supplied and provides statistics on them.
+- Scope: **statistics on the duration of events** (e.g. statistics for the duration of file downloads)
+- Format: `name:FLOAT|ms`
+- statsd maintains a list of all the values supplied and provides statistics on them.
![image](https://cloud.githubusercontent.com/assets/2662304/26131620/acbea6a4-3aa3-11e7-8bdd-4a8996847767.png)
The same chart with the `sum` unselected:
![image](https://cloud.githubusercontent.com/assets/2662304/26131629/bc34f2d2-3aa3-11e7-8a07-f2fc94ba4352.png)
-
-
### synthetic statsd charts
Using synthetic charts, you can create dedicated sections on the dashboard to render the charts. You can control everything: the main menu, the submenus, the charts, the dimensions on each chart, etc.
Synthetic charts are organized in
-- **applications** (i.e. entries at the main menu of the netdata dashboard)
-- **charts for each application** (grouped in families - i.e. submenus at the dashboard menu)
-- **statsd metrics for each chart** (i.e. dimensions of the charts)
+- **applications** (i.e. entries at the main menu of the Netdata dashboard)
+- **charts for each application** (grouped in families - i.e. submenus at the dashboard menu)
+- **statsd metrics for each chart** (i.e. dimensions of the charts)
For each application you need to create a `.conf` file in `/etc/netdata/statsd.d`.
@@ -256,12 +255,12 @@ Using the above configuration `myapp` should get its own section on the dashboar
`[app]` starts a new application definition. The supported settings in this section are:
-- `name` defines the name of the app.
-- `metrics` is a netdata simple pattern (space separated patterns, using `*` for wildcard, possibly starting with `!` for negative match). This pattern should match all the possible statsd metrics that will be participating in the application `myapp`.
-- `private charts = yes|no`, enables or disables private charts for the metrics matched.
-- `gaps when not collected = yes|no`, enables or disables gaps on the charts of the application, when metrics are not collected.
-- `memory mode` sets the memory mode for all charts of the application. The default is the global default for netdata (not the global default for statsd private charts).
-- `history` sets the size of the round robin database for this application. The default is the global default for netdata (not the global default for statsd private charts).
+- `name` defines the name of the app.
+- `metrics` is a Netdata simple pattern (space-separated patterns, using `*` for wildcard, possibly starting with `!` for negative match). This pattern should match all the possible statsd metrics that will be participating in the application `myapp`.
+- `private charts = yes|no`, enables or disables private charts for the metrics matched.
+- `gaps when not collected = yes|no`, enables or disables gaps on the charts of the application, when metrics are not collected.
+- `memory mode` sets the memory mode for all charts of the application. The default is the global default for Netdata (not the global default for statsd private charts).
+- `history` sets the size of the round robin database for this application. The default is the global default for Netdata (not the global default for statsd private charts).
`[dictionary]` defines name-value associations. These are used to rename metrics when they are added to synthetic charts. Metric names are also defined at each `dimension` line. However, using the dictionary, dimension names can be declared globally for each app, and it is the only way to rename dimensions when using patterns. Of course, the dictionary can be empty or missing.
@@ -269,36 +268,37 @@ Then, you can add any number of charts. Each chart should start with `[id]`. Th
You can add any number of metrics to a chart using `dimension` lines. These lines accept six space-separated parameters:
-1. the metric name, as it is collected (it has to be matched by the `metrics = ` pattern of the app)
-2. the dimension name, as it should be shown on the chart
-3. an optional selector (type) of the value to shown (see below)
-4. an optional multiplier
-5. an optional divider
-6. optional flags, space separated and enclosed in quotes. All the external plugins `DIMENSION` flags can be used. Currently the only usable flag is `hidden`, to add the dimension, but not show it on the dashboard. This is usually needed to have the values available for percentage calculation, or use them in alarms.
+1. the metric name, as it is collected (it has to be matched by the `metrics =` pattern of the app)
+2. the dimension name, as it should be shown on the chart
+3. an optional selector (type) of the value to be shown (see below)
+4. an optional multiplier
+5. an optional divider
+6. optional flags, space-separated and enclosed in quotes. All the external plugins' `DIMENSION` flags can be used. Currently the only usable flag is `hidden`, which adds the dimension but does not show it on the dashboard. This is usually needed to have the values available for percentage calculations, or to use them in alarms.
So, the format is this:
+
```
dimension = [pattern] METRIC NAME TYPE MULTIPLIER DIVIDER OPTIONS
```
-`pattern` is a keyword. When set, `METRIC` is expected to be a netdata simple pattern that will be used to match all the statsd metrics to be added to the chart. So, `pattern` automatically matches any number of statsd metrics, all of which will be added as separate chart dimensions.
+`pattern` is a keyword. When set, `METRIC` is expected to be a Netdata simple pattern that will be used to match all the statsd metrics to be added to the chart. So, `pattern` automatically matches any number of statsd metrics, all of which will be added as separate chart dimensions.
`TYPE`, `MULTIPLIER`, `DIVIDER` and `OPTIONS` are optional.
`TYPE` can be:
-- `events` to show the number of events received by statsd for this metric
-- `last` to show the last value, as calculated at the flush interval of the metric (the default)
+- `events` to show the number of events received by statsd for this metric
+- `last` to show the last value, as calculated at the flush interval of the metric (the default)
Then for histograms and timers the following types are also supported:
-- `min`, show the minimum value
-- `max`, show the maximum value
-- `sum`, show the sum of all values
-- `average` (same as `last`)
-- `percentile`, show the 95th percentile (or any other percentile, as configured at statsd global config)
-- `median`, show the median of all values (i.e. sort all values and get the middle value)
-- `stddev`, show the standard deviation of the values
+- `min`, show the minimum value
+- `max`, show the maximum value
+- `sum`, show the sum of all values
+- `average` (same as `last`)
+- `percentile`, show the 95th percentile (or any other percentile, as configured at statsd global config)
+- `median`, show the median of all values (i.e. sort all values and get the middle value)
+- `stddev`, show the standard deviation of the values
#### example synthetic charts
@@ -336,26 +336,26 @@ and this synthetic chart:
The `[dictionary]` section accepts any number of `name = value` pairs.
-netdata uses this dictionary as follows:
+Netdata uses this dictionary as follows:
-1. When a `dimension` has a non-empty `NAME`, that name is looked up at the dictionary.
+1. When a `dimension` has a non-empty `NAME`, that name is looked up in the dictionary.
-2. If the above lookup gives nothing, or the `dimension` has an empty `NAME`, the original statsd metric name is looked up at the dictionary.
+2. If the above lookup gives nothing, or the `dimension` has an empty `NAME`, the original statsd metric name is looked up in the dictionary.
-3. If any of the above succeeds, netdata uses the `value` of the dictionary, to set the name of the dimension. The dimensions will have as ID the original statsd metric name, and as name, the dictionary value.
+3. If any of the above succeeds, Netdata uses the dictionary `value` to set the name of the dimension. The dimension will have the original statsd metric name as its ID, and the dictionary value as its name.
So, you can use the dictionary in 2 ways:
-1. set `dimension = myapp.metric1 ''` and have at the dictionary `myapp.metric1 = metric1 name`
-2. set `dimension = myapp.metric1 'm1'` and have at the dictionary `m1 = metric1 name`
+1. set `dimension = myapp.metric1 ''` and have in the dictionary `myapp.metric1 = metric1 name`
+2. set `dimension = myapp.metric1 'm1'` and have in the dictionary `m1 = metric1 name`
In both cases, the dimension will be added with ID `myapp.metric1` and will be named `metric1 name`. So, in alarms you can use either of the two as `${myapp.metric1}` or `${metric1 name}`.
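+For example, the first way could look like this sketch inside an app `.conf` file (`[mychart]` and `myapp.metric1` are illustrative names, following the `[id]` chart convention and the `dimension` format described above):
+
+```
+[dictionary]
+    myapp.metric1 = metric1 name
+
+[mychart]
+    dimension = myapp.metric1 '' last 1 1
+```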
-> keep in mind that if you add multiple times the same statsd metric to a chart, netdata will append `TYPE` to the dimension ID, so `myapp.metric1` will be added as `myapp.metric1_last` or `myapp.metric1_events`, etc. If you add multiple times the same metric with the same `TYPE` to a chart, netdata will also append an incremental counter to the dimension ID, i.e. `myapp.metric1_last1`, `myapp.metric1_last2`, etc.
+> Keep in mind that if you add the same statsd metric to a chart multiple times, Netdata will append `TYPE` to the dimension ID, so `myapp.metric1` will be added as `myapp.metric1_last` or `myapp.metric1_events`, etc. If you add the same metric with the same `TYPE` to a chart multiple times, Netdata will also append an incremental counter to the dimension ID, i.e. `myapp.metric1_last1`, `myapp.metric1_last2`, etc.
#### dimension patterns
-netdata allows adding multiple dimensions to a chart, by matching the statsd metrics with a netdata simple pattern.
+Netdata allows adding multiple dimensions to a chart by matching the statsd metrics with a Netdata simple pattern.
Assume we have an API that provides statsd metrics for each response code per method it supports, like these:
@@ -382,7 +382,7 @@ To add all response codes of `myapp.api.get` to a chart use this:
dimension = pattern 'myapp.api.get.*' '' last 1 1
```
-The above will add dimension named `200`, `400` and `500` (yes, netdata extracts the wildcarded part of the metric name - so the dimensions will be named with whatever the `*` matched). You can rename the dimensions with this:
+The above will add dimensions named `200`, `400` and `500` (yes, Netdata extracts the wildcarded part of the metric name - so the dimensions will be named with whatever the `*` matched). You can rename the dimensions with this:
```
[dictionary]
@@ -432,20 +432,19 @@ To automatically rename the methods, use this:
Using the above, the dimensions will be added as `GET`, `ADD` and `DELETE`.
-
## interpolation
-~~If you send just one value to statsd, you will notice that the chart is created but no value is shown. The reason is that netdata interpolates all values at second boundaries. For incremental values (`counters` and `meters` in statsd terminology), if you send 10 at 00:00:00.500, 20 at 00:00:01.500 and 30 at 00:00:02.500, netdata will show 15 at 00:00:01 and 25 at 00:00:02.~~
+~~If you send just one value to statsd, you will notice that the chart is created but no value is shown. The reason is that Netdata interpolates all values at second boundaries. For incremental values (`counters` and `meters` in statsd terminology), if you send 10 at 00:00:00.500, 20 at 00:00:01.500 and 30 at 00:00:02.500, Netdata will show 15 at 00:00:01 and 25 at 00:00:02.~~
-~~This interpolation is automatic and global in netdata for all charts, for incremental values. This means that for the chart to start showing values you need to send 2 values across 2 flush intervals.~~
+~~This interpolation is automatic and global in Netdata for all charts, for incremental values. This means that for the chart to start showing values you need to send 2 values across 2 flush intervals.~~
-~~(although this is required for incremental values, netdata allows mixing incremental and absolute values on the same charts, so this little limitation [i.e. 2 values to start visualization], is applied on all netdata dimensions).~~
+~~(although this is required for incremental values, Netdata allows mixing incremental and absolute values on the same charts, so this little limitation [i.e. 2 values to start visualization], is applied on all Netdata dimensions).~~
(statsd metrics do not lose their first data collection due to interpolation anymore - fixed with [PR #2411](https://github.com/netdata/netdata/pull/2411))
## sending statsd metrics from shell scripts
-You can send/update statsd metrics from shell scripts. You can use this feature, to visualize in netdata automated jobs you run on your servers.
+You can send/update statsd metrics from shell scripts. You can use this feature to visualize, in Netdata, automated jobs you run on your servers.
The command you need to run is:
@@ -455,9 +454,9 @@ echo "NAME:VALUE|TYPE" | nc -u --send-only localhost 8125
Where:
-- `NAME` is the metric name
-- `VALUE` is the value for that metric (**gauges** `|g`, **timers** `|ms` and **histograms** `|h` accept decimal/fractional numbers, **counters** `|c` and **meters** `|m` accept integers, **sets** `|s` accept anything)
-- `TYPE` is one of `g`, `ms`, `h`, `c`, `m`, `s` to select the metric type.
+- `NAME` is the metric name
+- `VALUE` is the value for that metric (**gauges** `|g`, **timers** `|ms` and **histograms** `|h` accept decimal/fractional numbers, **counters** `|c` and **meters** `|m` accept integers, **sets** `|s` accept anything)
+- `TYPE` is one of `g`, `ms`, `h`, `c`, `m`, `s` to select the metric type.
So, to set `metric1` as gauge to value `10`, use:
@@ -520,4 +519,4 @@ statsd "metric1:10|g" "metric2:10|c" ...
The function is smart enough to call `nc` just once and pass all the metrics to it. It will also automatically switch to TCP if the metrics to send are above 1000 bytes.
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fstatsd.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fstatsd.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/tc.plugin/Makefile.in b/collectors/tc.plugin/Makefile.in
new file mode 100644
index 00000000..ba107113
--- /dev/null
+++ b/collectors/tc.plugin/Makefile.in
@@ -0,0 +1,612 @@
+# Makefile.in generated by automake 1.15.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2017 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+
+VPATH = @srcdir@
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = collectors/tc.plugin
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(dist_plugins_SCRIPTS) \
+ $(dist_noinst_DATA) $(am__DIST_COMMON)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
+am__vpath_adj = case $$p in \
+ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
+ *) f=$$p;; \
+ esac;
+am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
+am__install_max = 40
+am__nobase_strip_setup = \
+ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
+am__nobase_strip = \
+ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
+am__nobase_list = $(am__nobase_strip_setup); \
+ for p in $$list; do echo "$$p $$p"; done | \
+ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
+ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
+ if (++n[$$2] == $(am__install_max)) \
+ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
+ END { for (dir in files) print dir, files[dir] }'
+am__base_list = \
+ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
+ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
+am__uninstall_files_from_dir = { \
+ test -z "$$files" \
+ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \
+ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \
+ $(am__cd) "$$dir" && rm -f $$files; }; \
+ }
+am__installdirs = "$(DESTDIR)$(pluginsdir)"
+SCRIPTS = $(dist_plugins_SCRIPTS)
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_noinst_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+am__DIST_COMMON = $(srcdir)/Makefile.in $(top_srcdir)/build/subst.inc
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CUPSCONFIG = @CUPSCONFIG@
+CXX = @CXX@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CXX_BINARY = @CXX_BINARY@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+JSON_CFLAGS = @JSON_CFLAGS@
+JSON_LIBS = @JSON_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
+LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
+LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
+LIBCURL_LIBS = @LIBCURL_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
+LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
+LIBSSL_LIBS = @LIBSSL_LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
+OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
+OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
+OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
+OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
+OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
+OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
+OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
+OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
+OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
+OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
+OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
+OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
+PROTOBUF_LIBS = @PROTOBUF_LIBS@
+PROTOC = @PROTOC@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
+XENLIGHT_LIBS = @XENLIGHT_LIBS@
+YAJL_CFLAGS = @YAJL_CFLAGS@
+YAJL_LIBS = @YAJL_LIBS@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+CLEANFILES = \
+ tc-qos-helper.sh \
+ $(NULL)
+
+SUFFIXES = .in
+dist_plugins_SCRIPTS = \
+ tc-qos-helper.sh \
+ $(NULL)
+
+dist_noinst_DATA = \
+ tc-qos-helper.sh.in \
+ README.md \
+ $(NULL)
+
+all: all-am
+
+.SUFFIXES:
+.SUFFIXES: .in
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(top_srcdir)/build/subst.inc $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/tc.plugin/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu collectors/tc.plugin/Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+$(top_srcdir)/build/subst.inc $(am__empty):
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+install-dist_pluginsSCRIPTS: $(dist_plugins_SCRIPTS)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(pluginsdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(pluginsdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \
+ done | \
+ sed -e 'p;s,.*/,,;n' \
+ -e 'h;s|.*|.|' \
+ -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \
+ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \
+ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \
+ if ($$2 == $$4) { files[d] = files[d] " " $$1; \
+ if (++n[d] == $(am__install_max)) { \
+ print "f", d, files[d]; n[d] = 0; files[d] = "" } } \
+ else { print "f", d "/" $$4, $$1 } } \
+ END { for (d in files) print "f", d, files[d] }' | \
+ while read type dir files; do \
+ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \
+ test -z "$$files" || { \
+ echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pluginsdir)$$dir'"; \
+ $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pluginsdir)$$dir" || exit $$?; \
+ } \
+ ; done
+
+uninstall-dist_pluginsSCRIPTS:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || exit 0; \
+ files=`for p in $$list; do echo "$$p"; done | \
+ sed -e 's,.*/,,;$(transform)'`; \
+ dir='$(DESTDIR)$(pluginsdir)'; $(am__uninstall_files_from_dir)
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(SCRIPTS) $(DATA)
+installdirs:
+ for dir in "$(DESTDIR)$(pluginsdir)"; do \
+ test -z "$$dir" || $(MKDIR_P) "$$dir"; \
+ done
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+ -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES)
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am: install-dist_pluginsSCRIPTS
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am: uninstall-dist_pluginsSCRIPTS
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dist_pluginsSCRIPTS install-dvi \
+ install-dvi-am install-exec install-exec-am install-html \
+ install-html-am install-info install-info-am install-man \
+ install-pdf install-pdf-am install-ps install-ps-am \
+ install-strip installcheck installcheck-am installdirs \
+ maintainer-clean maintainer-clean-generic mostlyclean \
+ mostlyclean-generic pdf pdf-am ps ps-am tags-am uninstall \
+ uninstall-am uninstall-dist_pluginsSCRIPTS
+
+.PRECIOUS: Makefile
+
+.in:
+ if sed \
+ -e 's#[@]localstatedir_POST@#$(localstatedir)#g' \
+ -e 's#[@]sbindir_POST@#$(sbindir)#g' \
+ -e 's#[@]pluginsdir_POST@#$(pluginsdir)#g' \
+ -e 's#[@]configdir_POST@#$(configdir)#g' \
+ -e 's#[@]libconfigdir_POST@#$(libconfigdir)#g' \
+ -e 's#[@]cachedir_POST@#$(cachedir)#g' \
+ -e 's#[@]registrydir_POST@#$(registrydir)#g' \
+ -e 's#[@]varlibdir_POST@#$(varlibdir)#g' \
+ $< > $@.tmp; then \
+ mv "$@.tmp" "$@"; \
+ else \
+ rm -f "$@.tmp"; \
+ false; \
+ fi
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/collectors/tc.plugin/README.md b/collectors/tc.plugin/README.md
index 4133b4f8..4095c6a3 100644
--- a/collectors/tc.plugin/README.md
+++ b/collectors/tc.plugin/README.md
@@ -28,58 +28,60 @@ One of the features the Linux kernel has, but it is rarely used, is its ability
QoS is about 2 features:
-1. **Classify traffic**
+1. **Classify traffic**
Classification is the process of organizing traffic in groups, called **classes**. Classification can evaluate every aspect of network packets, like source and destination ports, source and destination IPs, netfilter marks, etc.
When you classify traffic, you just assign a label to it. Of course classes have some properties themselves (like queuing mechanisms), but let's say it is that simple: **a label**. For example: **I call `web server` the traffic from and to my server's tcp/80 and tcp/443, while I call `web surfing` all other tcp/80 and tcp/443 traffic**. You can use any combination you like. There is no limit.
-2. **Apply traffic shaping rules to these classes**
+2. **Apply traffic shaping rules to these classes**
Traffic shaping is used to control how network interface bandwidth should be shared among the classes. Normally, you need to do this when there is not enough bandwidth to satisfy all the demand, or when you want to control the supply of bandwidth to certain services. Classification alone is sufficient for monitoring traffic, but traffic shaping is also quite important, as we will explain in the next section. A minimal `tc` sketch of both steps follows.
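+For illustration only (this sketch is not part of the original README), here is how both steps can look with plain `tc`, using HTB; the device name, rates and ports are made-up examples:
+
+```sh
+# classify: one root qdisc and two classes (the "labels")
+tc qdisc add dev eth0 root handle 1: htb default 20
+tc class add dev eth0 parent 1: classid 1:1 htb rate 100mbit
+
+# shape: "web server" is guaranteed 60mbit, everything else 40mbit;
+# each may borrow up to the full 100mbit when the other is idle
+tc class add dev eth0 parent 1:1 classid 1:10 htb rate 60mbit ceil 100mbit
+tc class add dev eth0 parent 1:1 classid 1:20 htb rate 40mbit ceil 100mbit
+
+# attach web-server traffic to its class, by source port
+tc filter add dev eth0 parent 1: protocol ip u32 match ip sport 80 0xffff flowid 1:10
+tc filter add dev eth0 parent 1: protocol ip u32 match ip sport 443 0xffff flowid 1:10
+```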
## Why you want QoS
-1. **Monitoring the bandwidth used by services**
+1. **Monitoring the bandwidth used by services**
- netdata provides wonderful real-time charts, like this one (wait to see the orange `rsync` part):
+ Netdata provides wonderful real-time charts, like this one (wait to see the orange `rsync` part):
- ![qos3](https://cloud.githubusercontent.com/assets/2662304/14474189/713ede84-0104-11e6-8c9c-8dca5c2abd63.gif)
+ ![qos3](https://cloud.githubusercontent.com/assets/2662304/14474189/713ede84-0104-11e6-8c9c-8dca5c2abd63.gif)
-2. **Ensure sensitive administrative tasks will not starve for bandwidth**
+2. **Ensure sensitive administrative tasks will not starve for bandwidth**
- Have you tried to ssh to a server when the network is congested? If you have, you already know it does not work very well. QoS can guarantee that services like ssh, dns, ntp, etc will always have a small supply of bandwidth. So, no matter what happens, you will be able to ssh to your server and DNS will always work.
+ Have you tried to ssh to a server when the network is congested? If you have, you already know it does not work very well. QoS can guarantee that services like ssh, dns, ntp, etc. will always have a small supply of bandwidth. So, no matter what happens, you will be able to ssh to your server and DNS will always work.
-3. **Ensure administrative tasks will not monopolize all the bandwidth**
+3. **Ensure administrative tasks will not monopolize all the bandwidth**
- Services like backups, file copies, database dumps, etc can easily monopolize all the available bandwidth. It is common for example a nightly backup, or a huge file transfer to negatively influence the end-user experience. QoS can fix that.
+ Services like backups, file copies, database dumps, etc. can easily monopolize all the available bandwidth. It is common, for example, for a nightly backup or a huge file transfer to negatively influence the end-user experience. QoS can fix that.
-4. **Ensure each end-user connection will get a fair cut of the available bandwidth.**
+4. **Ensure each end-user connection will get a fair cut of the available bandwidth.**
- Several QoS queuing disciplines in Linux do this automatically, without any configuration from you. The result is that new sockets are favored over older ones, so that users will get a snappier experience, while others are transferring large amounts of traffic.
+ Several QoS queuing disciplines in Linux do this automatically, without any configuration from you. The result is that new sockets are favored over older ones, so users get a snappier experience even while others are transferring large amounts of traffic.
-5. **Protect the servers from DDoS attacks.**
+5. **Protect the servers from DDoS attacks.**
- When your system is under a DDoS attack, it will get a lot more bandwidth compared to the one it can handle and probably your applications will crash. Setting a limit on the inbound traffic using QoS, will protect your servers (throttle the requests) and depending on the size of the attack may allow your legitimate users to access the server, while the attack is taking place.
+ When your system is under a DDoS attack, it will receive a lot more traffic than it can handle and your applications will probably crash. Setting a limit on the inbound traffic using QoS will protect your servers (throttle the requests) and, depending on the size of the attack, may allow your legitimate users to access the server while the attack is taking place.
- Using QoS together with a [SYNPROXY](../proc.plugin/README.md#linux-anti-ddos) will provide a great degree of protection against most DDoS attacks. Actually when I wrote that article, a few folks tried to DDoS the netdata demo site to see in real-time the SYNPROXY operation. They did not do it right, but anyway a great deal of requests reached the netdata server. What saved netdata was QoS. The netdata demo server has QoS installed, so the requests were throttled and the server did not even reach the point of resource starvation. Read about it [here](../proc.plugin/README.md#linux-anti-ddos).
+ Using QoS together with a [SYNPROXY](../proc.plugin/README.md#linux-anti-ddos) will provide a great degree of protection against most DDoS attacks. Actually when I wrote that article, a few folks tried to DDoS the Netdata demo site to see the SYNPROXY operation in real-time. They did not do it right, but a great number of requests still reached the Netdata server. What saved Netdata was QoS. The Netdata demo server has QoS installed, so the requests were throttled and the server did not even reach the point of resource starvation. Read about it [here](../proc.plugin/README.md#linux-anti-ddos).
On top of all these, QoS is extremely light. You will configure it once, and this is it. It will not bother you again and it will not use any noticeable CPU resources, especially on application and database servers.
- - ensure administrative tasks (like ssh, dns, etc) will always have a small but guaranteed bandwidth. So, no matter what happens, I will be able to ssh to my server and DNS will work.
+```
+- ensure administrative tasks (like ssh, dns, etc) will always have a small but guaranteed bandwidth. So, no matter what happens, I will be able to ssh to my server and DNS will work.
- - ensure other administrative tasks will not monopolize all the available bandwidth. So, my nightly backup will not hurt my users, a developer that is copying files over the net will not get all the available bandwidth, etc.
+- ensure other administrative tasks will not monopolize all the available bandwidth. So, my nightly backup will not hurt my users, a developer that is copying files over the net will not get all the available bandwidth, etc.
- - ensure each end-user connection will get a fair cut of the available bandwidth.
+- ensure each end-user connection will get a fair cut of the available bandwidth.
+```
-Once **traffic classification** is applied, we can use **[netdata](https://github.com/netdata/netdata)** to visualize the bandwidth consumption per class in real-time (no configuration is needed for netdata - it will figure it out).
+Once **traffic classification** is applied, we can use **[netdata](https://github.com/netdata/netdata)** to visualize the bandwidth consumption per class in real-time (no configuration is needed for Netdata - it will figure it out).
QoS is extremely light. You will configure it once, and that is it. It will not bother you again, and it will not use any noticeable CPU resources, especially on application and database servers.
This is QoS on a home Linux router. Check these features:
-1. It is real-time (per second updates)
-2. QoS really works in Linux - check that the `background` traffic is squeezed when `surfing` needs it.
+1. It is real-time (per second updates)
+2. QoS really works in Linux - check that the `background` traffic is squeezed when `surfing` needs it.
![test2](https://cloud.githubusercontent.com/assets/2662304/14093004/68966020-f553-11e5-98fe-ffee2086fafd.gif)
@@ -115,10 +117,10 @@ To do it the hard way, you can go through the [tc configuration steps](#qos-conf
The **[FireHOL](https://firehol.org/)** package already distributes **[FireQOS](https://firehol.org/tutorial/fireqos-new-user/)**. Check the **[FireQOS tutorial](https://firehol.org/tutorial/fireqos-new-user/)** to learn how to write your own QoS configuration.
-With **[FireQOS](https://firehol.org/tutorial/fireqos-new-user/)**, it is **really simple for everyone to use QoS in Linux**. Just install the package `firehol`. It should already be available for your distribution. If not, check the **[FireHOL Installation Guide](https://firehol.org/installing/)**. After that, you will have the `fireqos` command which uses a configuration like the following `/etc/firehol/fireqos.conf`, used at the netdata demo site:
+With **[FireQOS](https://firehol.org/tutorial/fireqos-new-user/)**, it is **really simple for everyone to use QoS in Linux**. Just install the package `firehol`. It should already be available for your distribution. If not, check the **[FireHOL Installation Guide](https://firehol.org/installing/)**. After that, you will have the `fireqos` command which uses a configuration like the following `/etc/firehol/fireqos.conf`, used at the Netdata demo site:
```sh
- # configure the netdata ports
+ # configure the Netdata ports
server_netdata_ports="tcp/19999"
interface eth0 world bidirectional ethernet balanced rate 50Mbit
@@ -155,7 +157,7 @@ With **[FireQOS](https://firehol.org/tutorial/fireqos-new-user/)**, it is **real
match input src 10.2.3.5
```
-Nothing more is needed. You just run `fireqos start` to apply this configuration, restart netdata and you have real-time visualization of the bandwidth consumption of your applications. FireQOS is not a daemon. It will just convert the configuration to `tc` commands. It will run them and it will exit.
+Nothing more is needed. You just run `fireqos start` to apply this configuration, restart Netdata and you have real-time visualization of the bandwidth consumption of your applications. FireQOS is not a daemon. It will just convert the configuration to `tc` commands. It will run them and it will exit.
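+
+For example, assuming the configuration above is already in `/etc/firehol/fireqos.conf`:
+
+```sh
+fireqos start                # translate the configuration to tc commands, apply them, and exit
+tc -s class show dev eth0    # verify the classes tc now reports for the interface
+```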
**IMPORTANT**: If you copy this configuration to apply it to your system, please adapt the speeds - experiment in non-production environments to learn the tool, before applying it on your servers.
@@ -168,6 +170,7 @@ And this is what you are going to get:
First, setup the tc rules in rc.local using commands to assign different DSCP markings to different classids. You can see one such example in [github issue #4563](https://github.com/netdata/netdata/issues/4563#issuecomment-455711973).
Then, map the classids to names by creating `/etc/iproute2/tc_cls`. For example:
+
```
2:1 Standard
2:8 LowPriorityData
@@ -184,14 +187,14 @@ Then, map the classids to names by creating `/etc/iproute2/tc_cls`. For example:
```
Add the following configuration option in `/etc/netdata.conf`:
-```[plugin:tc]
+
+```
+[plugin:tc]
enable show all classes and qdiscs for all interfaces = yes
```
Finally, create `/etc/netdata/tc-qos-helper.conf` with this content:
-```tc_show="class"```
-
-Please note, that by default Netdata will enable monitoring metrics only when they are not zero. If they are constantly zero they are ignored. Metrics that will start having values, after netdata is started, will be detected and charts will be automatically added to the dashboard (a refresh of the dashboard is needed for them to appear though). Set `yes` for a chart instead of `auto` to enable it permanently. You can also set the `enable zero metrics` option to `yes` in the `[global]` section which enables charts with zero metrics for all internal Netdata plugins.
+`tc_show="class"`
+
+Please note that, by default, Netdata enables monitoring metrics only when they are not zero. If they are constantly zero, they are ignored. Metrics that start having values after Netdata is started will be detected, and charts will automatically be added to the dashboard (a refresh of the dashboard is needed for them to appear, though). Set a chart to `yes` instead of `auto` to enable it permanently. You can also set the `enable zero metrics` option to `yes` in the `[global]` section, which enables charts with zero metrics for all internal Netdata plugins.
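+
+For example, a hypothetical excerpt of `/etc/netdata/netdata.conf` (the option name comes from the paragraph above):
+
+```
+[global]
+    enable zero metrics = yes
+```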
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Ftc.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Ftc.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/tc.plugin/tc-qos-helper.sh b/collectors/tc.plugin/tc-qos-helper.sh
new file mode 100644
index 00000000..c9e6727a
--- /dev/null
+++ b/collectors/tc.plugin/tc-qos-helper.sh
@@ -0,0 +1,297 @@
+#!/usr/bin/env bash
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2017 Costa Tsaousis <costa@tsaousis.gr>
+# SPDX-License-Identifier: GPL-3.0-or-later
+#
+# This script is a helper that allows netdata to collect tc data.
+# tc output parsing is implemented in C, inside netdata.
+# This script only assigns names to the dimensions.
+
+export PATH="${PATH}:/sbin:/usr/sbin:/usr/local/sbin"
+export LC_ALL=C
+
+# -----------------------------------------------------------------------------
+# logging functions
+
+PROGRAM_NAME="$(basename "$0")"
+PROGRAM_NAME="${PROGRAM_NAME/.plugin/}"
+
+logdate() {
+ date "+%Y-%m-%d %H:%M:%S"
+}
+
+log() {
+ local status="${1}"
+ shift
+
+ echo >&2 "$(logdate): ${PROGRAM_NAME}: ${status}: ${*}"
+
+}
+
+warning() {
+ log WARNING "${@}"
+}
+
+error() {
+ log ERROR "${@}"
+}
+
+info() {
+ log INFO "${@}"
+}
+
+fatal() {
+ log FATAL "${@}"
+ exit 1
+}
+
+debug=0
+debug() {
+ [ $debug -eq 1 ] && log DEBUG "${@}"
+}
+
+# -----------------------------------------------------------------------------
+# find /var/run/fireqos
+
+# the default
+fireqos_run_dir="/var/run/fireqos"
+
+function realdir() {
+ local r
+ local t
+ r="$1"
+ t="$(readlink "$r")"
+
+ while [ "$t" ]; do
+ r=$(cd "$(dirname "$r")" && cd "$(dirname "$t")" && pwd -P)/$(basename "$t")
+ t=$(readlink "$r")
+ done
+
+ dirname "$r"
+}
+
+if [ ! -d "${fireqos_run_dir}" ]; then
+
+ # the fireqos executable - we will use it to find its config
+ fireqos="$(command -v fireqos 2>/dev/null)"
+
+ if [ -n "${fireqos}" ]; then
+
+ fireqos_exec_dir="$(realdir "${fireqos}")"
+
+ if [ -n "${fireqos_exec_dir}" ] && [ "${fireqos_exec_dir}" != "." ] && [ -f "${fireqos_exec_dir}/install.config" ]; then
+ LOCALSTATEDIR=
+ #shellcheck source=/dev/null
+ source "${fireqos_exec_dir}/install.config"
+
+ if [ -d "${LOCALSTATEDIR}/run/fireqos" ]; then
+ fireqos_run_dir="${LOCALSTATEDIR}/run/fireqos"
+ else
+ warning "FireQoS is installed as '${fireqos}', its installation config at '${fireqos_exec_dir}/install.config' specifies local state data at '${LOCALSTATEDIR}/run/fireqos', but this directory is not found or is not readable (check the permissions of its parents)."
+ fi
+ else
+ warning "Although FireQoS is installed on this system as '${fireqos}', I cannot find/read its installation configuration at '${fireqos_exec_dir}/install.config'."
+ fi
+ else
+ warning "FireQoS is not installed on this system. Use FireQoS to apply traffic QoS and expose the class names to netdata. Check https://github.com/netdata/netdata/tree/master/collectors/tc.plugin#tcplugin"
+ fi
+fi
+
+# -----------------------------------------------------------------------------
+
+[ -z "${NETDATA_PLUGINS_DIR}" ] && NETDATA_PLUGINS_DIR="$(dirname "${0}")"
+[ -z "${NETDATA_USER_CONFIG_DIR}" ] && NETDATA_USER_CONFIG_DIR="/etc/netdata"
+[ -z "${NETDATA_STOCK_CONFIG_DIR}" ] && NETDATA_STOCK_CONFIG_DIR="/usr/lib/netdata/conf.d"
+
+plugins_dir="${NETDATA_PLUGINS_DIR}"
+tc="$(command -v tc 2>/dev/null)"
+
+# -----------------------------------------------------------------------------
+# user configuration
+
+# time in seconds to refresh QoS class/qdisc names
+qos_get_class_names_every=120
+
+# time in seconds to exit - netdata will restart the script
+qos_exit_every=3600
+
+# what to use? classes or qdiscs?
+tc_show="qdisc" # can also be "class"
+
+# -----------------------------------------------------------------------------
+# check if we have a valid number for interval
+
+t=${1}
+update_every=$((t))
+[ $((update_every)) -lt 1 ] && update_every=${NETDATA_UPDATE_EVERY}
+[ $((update_every)) -lt 1 ] && update_every=1
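+# e.g. netdata runs this helper as 'tc-qos-helper.sh 1'; when the first
+# argument is not a positive number, NETDATA_UPDATE_EVERY is used, then 1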
+
+# -----------------------------------------------------------------------------
+# allow the user to override our defaults
+
+for CONFIG in "${NETDATA_STOCK_CONFIG_DIR}/tc-qos-helper.conf" "${NETDATA_USER_CONFIG_DIR}/tc-qos-helper.conf"; do
+ if [ -f "${CONFIG}" ]; then
+ info "Loading config file '${CONFIG}'..."
+ #shellcheck source=/dev/null
+ source "${CONFIG}" || error "Failed to load config file '${CONFIG}'."
+ else
+ warning "Cannot find file '${CONFIG}'."
+ fi
+done
+
+case "${tc_show}" in
+qdisc | class) ;;
+
+*)
+ error "tc_show variable can be either 'qdisc' or 'class' but is set to '${tc_show}'. Assuming it is 'qdisc'."
+ tc_show="qdisc"
+ ;;
+esac
+
+# -----------------------------------------------------------------------------
+# default sleep function
+
+LOOPSLEEPMS_LASTWORK=0
+loopsleepms() {
+ sleep "$1"
+}
+
+# if found and included, this file overwrites loopsleepms()
+# with a high resolution timer function for precise looping.
+#shellcheck source=/dev/null
+. "${plugins_dir}/loopsleepms.sh.inc"
+
+# -----------------------------------------------------------------------------
+# final checks we can run
+
+if [ -z "${tc}" ] || [ ! -x "${tc}" ]; then
+ fatal "cannot find command 'tc' in this system."
+fi
+
+tc_devices=
+fix_names=
+
+# -----------------------------------------------------------------------------
+
+setclassname() {
+ if [ "${tc_show}" = "qdisc" ]; then
+ echo "SETCLASSNAME $4 $2"
+ else
+ echo "SETCLASSNAME $3 $2"
+ fi
+}
+
+show_tc_cls() {
+ [ "${tc_show}" = "qdisc" ] && return 1
+
+ local x="${1}"
+
+ if [ -f /etc/iproute2/tc_cls ]; then
+ local classid name rest
+ while read -r classid name rest; do
+ if [ -z "${classid}" ] ||
+ [ -z "${name}" ] ||
+ [ "${classid}" = "#" ] ||
+ [ "${name}" = "#" ] ||
+ [ "${classid:0:1}" = "#" ] ||
+ [ "${name:0:1}" = "#" ]; then
+ continue
+ fi
+ setclassname "" "${name}" "${classid}"
+ done </etc/iproute2/tc_cls
+ return 0
+ fi
+ return 1
+}
+
+show_fireqos_names() {
+ local x="${1}" name n interface_dev interface_classes_monitor
+
+ if [ -f "${fireqos_run_dir}/ifaces/${x}" ]; then
+ name="$(<"${fireqos_run_dir}/ifaces/${x}")"
+ echo "SETDEVICENAME ${name}"
+
+ #shellcheck source=/dev/null
+ source "${fireqos_run_dir}/${name}.conf"
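+		# each entry in interface_classes_monitor is a pipe-separated tuple;
+		# "${n//|/ }" below replaces the pipes with spaces, so the fields
+		# become the positional arguments of setclassname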
+ for n in ${interface_classes_monitor}; do
+ # shellcheck disable=SC2086
+ setclassname ${n//|/ }
+ done
+ [ -n "${interface_dev}" ] && echo "SETDEVICEGROUP ${interface_dev}"
+
+ return 0
+ fi
+
+ return 1
+}
+
+show_tc() {
+ local x="${1}"
+
+ echo "BEGIN ${x}"
+
+ # netdata can parse the output of tc
+ ${tc} -s ${tc_show} show dev "${x}"
+
+ # check FireQOS names for classes
+ if [ -n "${fix_names}" ]; then
+ show_fireqos_names "${x}" || show_tc_cls "${x}"
+ fi
+
+ echo "END ${x}"
+}
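+# for reference, the stream that netdata parses from show_tc looks roughly
+# like this (illustrative only; the tc lines vary with the configured qdiscs):
+#
+#   BEGIN eth0
+#   ...output of: tc -s qdisc show dev eth0...
+#   SETDEVICENAME <name>      (only when FireQOS names are available)
+#   SETCLASSNAME <id> <name>  (only when fix_names is set)
+#   END eth0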
+
+find_tc_devices() {
+ local count=0 devs dev rest l
+
+ # find all the devices in the system
+ # without forking
+ while IFS=":| " read -r dev rest; do
+ count=$((count + 1))
+ [ ${count} -le 2 ] && continue
+ devs="${devs} ${dev}"
+ done </proc/net/dev
+
+ # from all the devices find the ones
+ # that have QoS defined
+ # unfortunately, one fork per device cannot be avoided
+ tc_devices=
+ for dev in ${devs}; do
+ l="$(${tc} class show dev "${dev}" 2>/dev/null)"
+ [ -n "${l}" ] && tc_devices="${tc_devices} ${dev}"
+ done
+}
+
+# update devices and class names
+# once every 2 minutes
+names_every=$((qos_get_class_names_every / update_every))
+
+# exit this script every hour
+# it will be restarted automatically
+exit_after=$((qos_exit_every / update_every))
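+# e.g. with update_every=1 and the defaults above, names are refreshed every
+# 120 iterations (~2 minutes) and the script exits after 3600 iterations (~1 hour)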
+
+c=0
+gc=0
+while true; do
+ fix_names=
+ c=$((c + 1))
+ gc=$((gc + 1))
+
+ if [ ${c} -le 1 ] || [ ${c} -ge ${names_every} ]; then
+ c=1
+ fix_names="YES"
+ find_tc_devices
+ fi
+
+ for d in ${tc_devices}; do
+ show_tc "${d}"
+ done
+
+ echo "WORKTIME ${LOOPSLEEPMS_LASTWORK}"
+
+ loopsleepms ${update_every}
+
+ [ ${gc} -gt ${exit_after} ] && exit 0
+done
diff --git a/collectors/xenstat.plugin/Makefile.in b/collectors/xenstat.plugin/Makefile.in
new file mode 100644
index 00000000..292e4d2c
--- /dev/null
+++ b/collectors/xenstat.plugin/Makefile.in
@@ -0,0 +1,514 @@
+# Makefile.in generated by automake 1.15.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2017 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+VPATH = @srcdir@
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = collectors/xenstat.plugin
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(dist_noinst_DATA) \
+ $(am__DIST_COMMON)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_noinst_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+am__DIST_COMMON = $(srcdir)/Makefile.in
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CUPSCONFIG = @CUPSCONFIG@
+CXX = @CXX@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CXX_BINARY = @CXX_BINARY@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+JSON_CFLAGS = @JSON_CFLAGS@
+JSON_LIBS = @JSON_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
+LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
+LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
+LIBCURL_LIBS = @LIBCURL_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
+LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
+LIBSSL_LIBS = @LIBSSL_LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
+OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
+OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
+OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
+OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
+OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
+OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
+OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
+OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
+OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
+OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
+OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
+OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
+PROTOBUF_LIBS = @PROTOBUF_LIBS@
+PROTOC = @PROTOC@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
+XENLIGHT_LIBS = @XENLIGHT_LIBS@
+YAJL_CFLAGS = @YAJL_CFLAGS@
+YAJL_LIBS = @YAJL_LIBS@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
+
+all: all-am
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/xenstat.plugin/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu collectors/xenstat.plugin/Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(DATA)
+installdirs:
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags-am uninstall uninstall-am
+
+.PRECIOUS: Makefile
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/collectors/xenstat.plugin/README.md b/collectors/xenstat.plugin/README.md
index f803dbf3..9943ba1a 100644
--- a/collectors/xenstat.plugin/README.md
+++ b/collectors/xenstat.plugin/README.md
@@ -4,9 +4,9 @@
## Prerequisites
-1. install `xen-dom0-libs-devel` and `yajl-devel` using the package manager of your system.
+1. install `xen-dom0-libs-devel` and `yajl-devel` using the package manager of your system.
-2. re-install netdata from source. The installer will detect that the required libraries are now available and will also build xenstat.plugin.
+2. re-install Netdata from source. The installer will detect that the required libraries are now available and will also build xenstat.plugin.
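+
+For example, on an RPM-based Xen host (the package names are the ones listed above; adapt the package manager command to your distribution):
+
+```sh
+sudo yum install xen-dom0-libs-devel yajl-devel
+# then re-install netdata from source, so the installer detects the libraries
+```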
Keep in mind that `libxenstat` requires root access, so the plugin is setuid to root.
@@ -15,17 +15,19 @@ Keep in mind that `libxenstat` requires root access, so the plugin is setuid to
The plugin provides XenServer and XCP-ng host and domains statistics:
Host:
-1. Number of domains.
+
+1. Number of domains.
Domain:
-1. CPU.
-2. Memory.
-3. Networks.
-4. VBDs.
+
+1. CPU.
+2. Memory.
+3. Networks.
+4. VBDs.
## Configuration
-If you need to disable xenstat for netdata, edit /etc/netdata/netdata.conf and set:
+If you need to disable xenstat for Netdata, edit `/etc/netdata/netdata.conf` and set:
```
[plugins]
@@ -42,4 +44,4 @@ sudo /usr/libexec/netdata/plugins.d/xenstat.plugin 1 debug
You will get verbose output on what the plugin does.
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fnfacct.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fnfacct.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/compile b/compile
new file mode 100755
index 00000000..a85b723c
--- /dev/null
+++ b/compile
@@ -0,0 +1,347 @@
+#! /bin/sh
+# Wrapper for compilers which do not understand '-c -o'.
+
+scriptversion=2012-10-14.11; # UTC
+
+# Copyright (C) 1999-2014 Free Software Foundation, Inc.
+# Written by Tom Tromey <tromey@cygnus.com>.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2, or (at your option)
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+# As a special exception to the GNU General Public License, if you
+# distribute this file as part of a program that contains a
+# configuration script generated by Autoconf, you may include it under
+# the same distribution terms that you use for the rest of that program.
+
+# This file is maintained in Automake, please report
+# bugs to <bug-automake@gnu.org> or send patches to
+# <automake-patches@gnu.org>.
+
+nl='
+'
+
+# We need space, tab and new line, in precisely that order. Quoting is
+# there to prevent tools from complaining about whitespace usage.
+IFS=" "" $nl"
+
+file_conv=
+
+# func_file_conv build_file lazy
+# Convert a $build file to $host form and store it in $file
+# Currently only supports Windows hosts. If the determined conversion
+# type is listed in (the comma separated) LAZY, no conversion will
+# take place.
+func_file_conv ()
+{
+ file=$1
+ case $file in
+ / | /[!/]*) # absolute file, and not a UNC file
+ if test -z "$file_conv"; then
+ # lazily determine how to convert abs files
+ case `uname -s` in
+ MINGW*)
+ file_conv=mingw
+ ;;
+ CYGWIN*)
+ file_conv=cygwin
+ ;;
+ *)
+ file_conv=wine
+ ;;
+ esac
+ fi
+ case $file_conv/,$2, in
+ *,$file_conv,*)
+ ;;
+ mingw/*)
+ file=`cmd //C echo "$file " | sed -e 's/"\(.*\) " *$/\1/'`
+ ;;
+ cygwin/*)
+ file=`cygpath -m "$file" || echo "$file"`
+ ;;
+ wine/*)
+ file=`winepath -w "$file" || echo "$file"`
+ ;;
+ esac
+ ;;
+ esac
+}
+
+# func_cl_dashL linkdir
+# Make cl look for libraries in LINKDIR
+func_cl_dashL ()
+{
+ func_file_conv "$1"
+ if test -z "$lib_path"; then
+ lib_path=$file
+ else
+ lib_path="$lib_path;$file"
+ fi
+ linker_opts="$linker_opts -LIBPATH:$file"
+}
+
+# func_cl_dashl library
+# Do a library search-path lookup for cl
+func_cl_dashl ()
+{
+ lib=$1
+ found=no
+ save_IFS=$IFS
+ IFS=';'
+ for dir in $lib_path $LIB
+ do
+ IFS=$save_IFS
+ if $shared && test -f "$dir/$lib.dll.lib"; then
+ found=yes
+ lib=$dir/$lib.dll.lib
+ break
+ fi
+ if test -f "$dir/$lib.lib"; then
+ found=yes
+ lib=$dir/$lib.lib
+ break
+ fi
+ if test -f "$dir/lib$lib.a"; then
+ found=yes
+ lib=$dir/lib$lib.a
+ break
+ fi
+ done
+ IFS=$save_IFS
+
+ if test "$found" != yes; then
+ lib=$lib.lib
+ fi
+}
+
+# func_cl_wrapper cl arg...
+# Adjust compile command to suit cl
+func_cl_wrapper ()
+{
+ # Assume a capable shell
+ lib_path=
+ shared=:
+ linker_opts=
+ for arg
+ do
+ if test -n "$eat"; then
+ eat=
+ else
+ case $1 in
+ -o)
+ # configure might choose to run compile as 'compile cc -o foo foo.c'.
+ eat=1
+ case $2 in
+ *.o | *.[oO][bB][jJ])
+ func_file_conv "$2"
+ set x "$@" -Fo"$file"
+ shift
+ ;;
+ *)
+ func_file_conv "$2"
+ set x "$@" -Fe"$file"
+ shift
+ ;;
+ esac
+ ;;
+ -I)
+ eat=1
+ func_file_conv "$2" mingw
+ set x "$@" -I"$file"
+ shift
+ ;;
+ -I*)
+ func_file_conv "${1#-I}" mingw
+ set x "$@" -I"$file"
+ shift
+ ;;
+ -l)
+ eat=1
+ func_cl_dashl "$2"
+ set x "$@" "$lib"
+ shift
+ ;;
+ -l*)
+ func_cl_dashl "${1#-l}"
+ set x "$@" "$lib"
+ shift
+ ;;
+ -L)
+ eat=1
+ func_cl_dashL "$2"
+ ;;
+ -L*)
+ func_cl_dashL "${1#-L}"
+ ;;
+ -static)
+ shared=false
+ ;;
+ -Wl,*)
+ arg=${1#-Wl,}
+ save_ifs="$IFS"; IFS=','
+ for flag in $arg; do
+ IFS="$save_ifs"
+ linker_opts="$linker_opts $flag"
+ done
+ IFS="$save_ifs"
+ ;;
+ -Xlinker)
+ eat=1
+ linker_opts="$linker_opts $2"
+ ;;
+ -*)
+ set x "$@" "$1"
+ shift
+ ;;
+ *.cc | *.CC | *.cxx | *.CXX | *.[cC]++)
+ func_file_conv "$1"
+ set x "$@" -Tp"$file"
+ shift
+ ;;
+ *.c | *.cpp | *.CPP | *.lib | *.LIB | *.Lib | *.OBJ | *.obj | *.[oO])
+ func_file_conv "$1" mingw
+ set x "$@" "$file"
+ shift
+ ;;
+ *)
+ set x "$@" "$1"
+ shift
+ ;;
+ esac
+ fi
+ shift
+ done
+ if test -n "$linker_opts"; then
+ linker_opts="-link$linker_opts"
+ fi
+ exec "$@" $linker_opts
+ exit 1
+}
+
+eat=
+
+case $1 in
+ '')
+ echo "$0: No command. Try '$0 --help' for more information." 1>&2
+ exit 1;
+ ;;
+ -h | --h*)
+ cat <<\EOF
+Usage: compile [--help] [--version] PROGRAM [ARGS]
+
+Wrapper for compilers which do not understand '-c -o'.
+Remove '-o dest.o' from ARGS, run PROGRAM with the remaining
+arguments, and rename the output as expected.
+
+If you are trying to build a whole package, this is not the
+right script to run; please start by reading the file 'INSTALL'.
+
+Report bugs to <bug-automake@gnu.org>.
+EOF
+ exit $?
+ ;;
+ -v | --v*)
+ echo "compile $scriptversion"
+ exit $?
+ ;;
+ cl | *[/\\]cl | cl.exe | *[/\\]cl.exe )
+ func_cl_wrapper "$@" # Doesn't return...
+ ;;
+esac
+
+ofile=
+cfile=
+
+for arg
+do
+ if test -n "$eat"; then
+ eat=
+ else
+ case $1 in
+ -o)
+ # configure might choose to run compile as 'compile cc -o foo foo.c'.
+ # So we strip '-o arg' only if arg is an object.
+ eat=1
+ case $2 in
+ *.o | *.obj)
+ ofile=$2
+ ;;
+ *)
+ set x "$@" -o "$2"
+ shift
+ ;;
+ esac
+ ;;
+ *.c)
+ cfile=$1
+ set x "$@" "$1"
+ shift
+ ;;
+ *)
+ set x "$@" "$1"
+ shift
+ ;;
+ esac
+ fi
+ shift
+done
+
+if test -z "$ofile" || test -z "$cfile"; then
+ # If no '-o' option was seen then we might have been invoked from a
+ # pattern rule where we don't need one. That is ok -- this is a
+ # normal compilation that the losing compiler can handle. If no
+ # '.c' file was seen then we are probably linking. That is also
+ # ok.
+ exec "$@"
+fi
+
+# Name of file we expect compiler to create.
+cofile=`echo "$cfile" | sed 's|^.*[\\/]||; s|^[a-zA-Z]:||; s/\.c$/.o/'`
+
+# Create the lock directory.
+# Note: use '[/\\:.-]' here to ensure that we don't use the same name
+# that we are using for the .o file. Also, base the name on the expected
+# object file name, since that is what matters with a parallel build.
+lockdir=`echo "$cofile" | sed -e 's|[/\\:.-]|_|g'`.d
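+# mkdir either succeeds or fails atomically, so the retry loop below
+# acts as a mutex among parallel compilations producing the same name.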
+while true; do
+ if mkdir "$lockdir" >/dev/null 2>&1; then
+ break
+ fi
+ sleep 1
+done
+# FIXME: race condition here if user kills between mkdir and trap.
+trap "rmdir '$lockdir'; exit 1" 1 2 15
+
+# Run the compile.
+"$@"
+ret=$?
+
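+# Some compilers write foo.obj rather than foo.o; "${cofile}bj"
+# extends the expected ".o" name to its ".obj" variant to cover that.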
+if test -f "$cofile"; then
+ test "$cofile" = "$ofile" || mv "$cofile" "$ofile"
+elif test -f "${cofile}bj"; then
+ test "${cofile}bj" = "$ofile" || mv "${cofile}bj" "$ofile"
+fi
+
+rmdir "$lockdir"
+exit $ret
+
+# Local Variables:
+# mode: shell-script
+# sh-indentation: 2
+# eval: (add-hook 'write-file-hooks 'time-stamp)
+# time-stamp-start: "scriptversion="
+# time-stamp-format: "%:y-%02m-%02d.%02H"
+# time-stamp-time-zone: "UTC"
+# time-stamp-end: "; # UTC"
+# End:
diff --git a/config.guess b/config.guess
new file mode 100755
index 00000000..f50dcdb6
--- /dev/null
+++ b/config.guess
@@ -0,0 +1,1480 @@
+#! /bin/sh
+# Attempt to guess a canonical system name.
+# Copyright 1992-2018 Free Software Foundation, Inc.
+
+timestamp='2018-02-24'
+
+# This file is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, see <https://www.gnu.org/licenses/>.
+#
+# As a special exception to the GNU General Public License, if you
+# distribute this file as part of a program that contains a
+# configuration script generated by Autoconf, you may include it under
+# the same distribution terms that you use for the rest of that
+# program. This Exception is an additional permission under section 7
+# of the GNU General Public License, version 3 ("GPLv3").
+#
+# Originally written by Per Bothner; maintained since 2000 by Ben Elliston.
+#
+# You can get the latest version of this script from:
+# https://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess
+#
+# Please send patches to <config-patches@gnu.org>.
+
+
+me=`echo "$0" | sed -e 's,.*/,,'`
+
+usage="\
+Usage: $0 [OPTION]
+
+Output the configuration name of the system \`$me' is run on.
+
+Options:
+ -h, --help print this help, then exit
+ -t, --time-stamp print date of last modification, then exit
+ -v, --version print version number, then exit
+
+Report bugs and patches to <config-patches@gnu.org>."
+
+version="\
+GNU config.guess ($timestamp)
+
+Originally written by Per Bothner.
+Copyright 1992-2018 Free Software Foundation, Inc.
+
+This is free software; see the source for copying conditions. There is NO
+warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."
+
+help="
+Try \`$me --help' for more information."
+
+# Parse command line
+while test $# -gt 0 ; do
+ case $1 in
+ --time-stamp | --time* | -t )
+ echo "$timestamp" ; exit ;;
+ --version | -v )
+ echo "$version" ; exit ;;
+ --help | --h* | -h )
+ echo "$usage"; exit ;;
+ -- ) # Stop option processing
+ shift; break ;;
+ - ) # Use stdin as input.
+ break ;;
+ -* )
+ echo "$me: invalid option $1$help" >&2
+ exit 1 ;;
+ * )
+ break ;;
+ esac
+done
+
+if test $# != 0; then
+ echo "$me: too many arguments$help" >&2
+ exit 1
+fi
+
+trap 'exit 1' 1 2 15
+
+# CC_FOR_BUILD -- compiler used by this script. Note that the use of a
+# compiler to aid in system detection is discouraged as it requires
+# temporary files to be created and, as you can see below, it is a
+# headache to deal with in a portable fashion.
+
+# Historically, `CC_FOR_BUILD' used to be named `HOST_CC'. We still
+# use `HOST_CC' if defined, but it is deprecated.
+
+# Portable tmp directory creation inspired by the Autoconf team.
+
+set_cc_for_build='
+trap "exitcode=\$?; (rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null) && exit \$exitcode" 0 ;
+trap "rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null; exit 1" 1 2 13 15 ;
+: ${TMPDIR=/tmp} ;
+ { tmp=`(umask 077 && mktemp -d "$TMPDIR/cgXXXXXX") 2>/dev/null` && test -n "$tmp" && test -d "$tmp" ; } ||
+ { test -n "$RANDOM" && tmp=$TMPDIR/cg$$-$RANDOM && (umask 077 && mkdir $tmp) ; } ||
+ { tmp=$TMPDIR/cg-$$ && (umask 077 && mkdir $tmp) && echo "Warning: creating insecure temp directory" >&2 ; } ||
+ { echo "$me: cannot create a temporary directory in $TMPDIR" >&2 ; exit 1 ; } ;
+dummy=$tmp/dummy ;
+tmpfiles="$dummy.c $dummy.o $dummy.rel $dummy" ;
+case $CC_FOR_BUILD,$HOST_CC,$CC in
+ ,,) echo "int x;" > "$dummy.c" ;
+ for c in cc gcc c89 c99 ; do
+ if ($c -c -o "$dummy.o" "$dummy.c") >/dev/null 2>&1 ; then
+ CC_FOR_BUILD="$c"; break ;
+ fi ;
+ done ;
+ if test x"$CC_FOR_BUILD" = x ; then
+ CC_FOR_BUILD=no_compiler_found ;
+ fi
+ ;;
+ ,,*) CC_FOR_BUILD=$CC ;;
+ ,*,*) CC_FOR_BUILD=$HOST_CC ;;
+esac ; set_cc_for_build= ;'
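+# The string above is eval'd on demand ('eval "$set_cc_for_build"') by
+# the cases below that need a compiler; it clears itself afterwards, so
+# later evals are no-ops and temporary files are created at most once.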
+
+# This is needed to find uname on a Pyramid OSx when run in the BSD universe.
+# (ghazi@noc.rutgers.edu 1994-08-24)
+if (test -f /.attbin/uname) >/dev/null 2>&1 ; then
+ PATH=$PATH:/.attbin ; export PATH
+fi
+
+UNAME_MACHINE=`(uname -m) 2>/dev/null` || UNAME_MACHINE=unknown
+UNAME_RELEASE=`(uname -r) 2>/dev/null` || UNAME_RELEASE=unknown
+UNAME_SYSTEM=`(uname -s) 2>/dev/null` || UNAME_SYSTEM=unknown
+UNAME_VERSION=`(uname -v) 2>/dev/null` || UNAME_VERSION=unknown
+
+case "$UNAME_SYSTEM" in
+Linux|GNU|GNU/*)
+ # If the system lacks a compiler, then just pick glibc.
+ # We could probably try harder.
+ LIBC=gnu
+
+ eval "$set_cc_for_build"
+ cat <<-EOF > "$dummy.c"
+ #include <features.h>
+ #if defined(__UCLIBC__)
+ LIBC=uclibc
+ #elif defined(__dietlibc__)
+ LIBC=dietlibc
+ #else
+ LIBC=gnu
+ #endif
+ EOF
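+ # Only the LIBC= assignment that survives the cpp conditionals
+ # remains after preprocessing; eval its output to set LIBC.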
+ eval "`$CC_FOR_BUILD -E "$dummy.c" 2>/dev/null | grep '^LIBC' | sed 's, ,,g'`"
+
+ # If ldd exists, use it to detect musl libc.
+ if command -v ldd >/dev/null && \
+ ldd --version 2>&1 | grep -q ^musl
+ then
+ LIBC=musl
+ fi
+ ;;
+esac
+
+# Note: order is significant - the case branches are not exclusive.
+
+case "$UNAME_MACHINE:$UNAME_SYSTEM:$UNAME_RELEASE:$UNAME_VERSION" in
+ *:NetBSD:*:*)
+ # NetBSD (nbsd) targets should (where applicable) match one or
+ # more of the tuples: *-*-netbsdelf*, *-*-netbsdaout*,
+ # *-*-netbsdecoff* and *-*-netbsd*. For targets that recently
+ # switched to ELF, *-*-netbsd* would select the old
+ # object file format. This provides both forward
+ # compatibility and a consistent mechanism for selecting the
+ # object file format.
+ #
+ # Note: NetBSD doesn't particularly care about the vendor
+ # portion of the name. We always set it to "unknown".
+ sysctl="sysctl -n hw.machine_arch"
+ UNAME_MACHINE_ARCH=`(uname -p 2>/dev/null || \
+ "/sbin/$sysctl" 2>/dev/null || \
+ "/usr/sbin/$sysctl" 2>/dev/null || \
+ echo unknown)`
+ case "$UNAME_MACHINE_ARCH" in
+ armeb) machine=armeb-unknown ;;
+ arm*) machine=arm-unknown ;;
+ sh3el) machine=shl-unknown ;;
+ sh3eb) machine=sh-unknown ;;
+ sh5el) machine=sh5le-unknown ;;
+ earmv*)
+ arch=`echo "$UNAME_MACHINE_ARCH" | sed -e 's,^e\(armv[0-9]\).*$,\1,'`
+ endian=`echo "$UNAME_MACHINE_ARCH" | sed -ne 's,^.*\(eb\)$,\1,p'`
+ machine="${arch}${endian}"-unknown
+ ;;
+ *) machine="$UNAME_MACHINE_ARCH"-unknown ;;
+ esac
+ # Determine the operating system name, including the object format,
+ # for systems that have recently switched (or will switch) to ELF.
+ case "$UNAME_MACHINE_ARCH" in
+ earm*)
+ os=netbsdelf
+ ;;
+ arm*|i386|m68k|ns32k|sh3*|sparc|vax)
+ eval "$set_cc_for_build"
+ if echo __ELF__ | $CC_FOR_BUILD -E - 2>/dev/null \
+ | grep -q __ELF__
+ then
+ # Pre-ELF utilities may be either ECOFF (netbsdecoff) or
+ # a.out (netbsdaout); return plain netbsd for both. FIX?
+ os=netbsd
+ else
+ os=netbsdelf
+ fi
+ ;;
+ *)
+ os=netbsd
+ ;;
+ esac
+ # Determine ABI tags.
+ case "$UNAME_MACHINE_ARCH" in
+ earm*)
+ expr='s/^earmv[0-9]/-eabi/;s/eb$//'
+ abi=`echo "$UNAME_MACHINE_ARCH" | sed -e "$expr"`
+ ;;
+ esac
+ # The OS release
+ # Debian GNU/NetBSD machines have a different userland, and
+ # thus, need a distinct triplet. However, they do not need
+ # kernel version information, so it can be replaced with a
+ # suitable tag, in the style of linux-gnu.
+ case "$UNAME_VERSION" in
+ Debian*)
+ release='-gnu'
+ ;;
+ *)
+ release=`echo "$UNAME_RELEASE" | sed -e 's/[-_].*//' | cut -d. -f1,2`
+ ;;
+ esac
+ # Since CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM
+ # contains redundant information, the shorter form
+ # CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM is used.
+ echo "$machine-${os}${release}${abi}"
+ exit ;;
+ *:Bitrig:*:*)
+ UNAME_MACHINE_ARCH=`arch | sed 's/Bitrig.//'`
+ echo "$UNAME_MACHINE_ARCH"-unknown-bitrig"$UNAME_RELEASE"
+ exit ;;
+ *:OpenBSD:*:*)
+ UNAME_MACHINE_ARCH=`arch | sed 's/OpenBSD.//'`
+ echo "$UNAME_MACHINE_ARCH"-unknown-openbsd"$UNAME_RELEASE"
+ exit ;;
+ *:LibertyBSD:*:*)
+ UNAME_MACHINE_ARCH=`arch | sed 's/^.*BSD\.//'`
+ echo "$UNAME_MACHINE_ARCH"-unknown-libertybsd"$UNAME_RELEASE"
+ exit ;;
+ *:MidnightBSD:*:*)
+ echo "$UNAME_MACHINE"-unknown-midnightbsd"$UNAME_RELEASE"
+ exit ;;
+ *:ekkoBSD:*:*)
+ echo "$UNAME_MACHINE"-unknown-ekkobsd"$UNAME_RELEASE"
+ exit ;;
+ *:SolidBSD:*:*)
+ echo "$UNAME_MACHINE"-unknown-solidbsd"$UNAME_RELEASE"
+ exit ;;
+ macppc:MirBSD:*:*)
+ echo powerpc-unknown-mirbsd"$UNAME_RELEASE"
+ exit ;;
+ *:MirBSD:*:*)
+ echo "$UNAME_MACHINE"-unknown-mirbsd"$UNAME_RELEASE"
+ exit ;;
+ *:Sortix:*:*)
+ echo "$UNAME_MACHINE"-unknown-sortix
+ exit ;;
+ *:Redox:*:*)
+ echo "$UNAME_MACHINE"-unknown-redox
+ exit ;;
+ mips:OSF1:*.*)
+ echo mips-dec-osf1
+ exit ;;
+ alpha:OSF1:*:*)
+ case $UNAME_RELEASE in
+ *4.0)
+ UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $3}'`
+ ;;
+ *5.*)
+ UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $4}'`
+ ;;
+ esac
+ # According to Compaq, /usr/sbin/psrinfo has been available on
+ # OSF/1 and Tru64 systems produced since 1995. I hope that
+ # covers most systems running today. This code pipes the CPU
+ # types through head -n 1, so we only detect the type of CPU 0.
+ ALPHA_CPU_TYPE=`/usr/sbin/psrinfo -v | sed -n -e 's/^ The alpha \(.*\) processor.*$/\1/p' | head -n 1`
+ case "$ALPHA_CPU_TYPE" in
+ "EV4 (21064)")
+ UNAME_MACHINE=alpha ;;
+ "EV4.5 (21064)")
+ UNAME_MACHINE=alpha ;;
+ "LCA4 (21066/21068)")
+ UNAME_MACHINE=alpha ;;
+ "EV5 (21164)")
+ UNAME_MACHINE=alphaev5 ;;
+ "EV5.6 (21164A)")
+ UNAME_MACHINE=alphaev56 ;;
+ "EV5.6 (21164PC)")
+ UNAME_MACHINE=alphapca56 ;;
+ "EV5.7 (21164PC)")
+ UNAME_MACHINE=alphapca57 ;;
+ "EV6 (21264)")
+ UNAME_MACHINE=alphaev6 ;;
+ "EV6.7 (21264A)")
+ UNAME_MACHINE=alphaev67 ;;
+ "EV6.8CB (21264C)")
+ UNAME_MACHINE=alphaev68 ;;
+ "EV6.8AL (21264B)")
+ UNAME_MACHINE=alphaev68 ;;
+ "EV6.8CX (21264D)")
+ UNAME_MACHINE=alphaev68 ;;
+ "EV6.9A (21264/EV69A)")
+ UNAME_MACHINE=alphaev69 ;;
+ "EV7 (21364)")
+ UNAME_MACHINE=alphaev7 ;;
+ "EV7.9 (21364A)")
+ UNAME_MACHINE=alphaev79 ;;
+ esac
+ # A Pn.n version is a patched version.
+ # A Vn.n version is a released version.
+ # A Tn.n version is a released field test version.
+ # A Xn.n version is an unreleased experimental baselevel.
+ # 1.2 uses "1.2" for uname -r.
+ echo "$UNAME_MACHINE"-dec-osf"`echo "$UNAME_RELEASE" | sed -e 's/^[PVTX]//' | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz`"
+ # Reset EXIT trap before exiting to avoid spurious non-zero exit code.
+ exitcode=$?
+ trap '' 0
+ exit $exitcode ;;
+ Amiga*:UNIX_System_V:4.0:*)
+ echo m68k-unknown-sysv4
+ exit ;;
+ *:[Aa]miga[Oo][Ss]:*:*)
+ echo "$UNAME_MACHINE"-unknown-amigaos
+ exit ;;
+ *:[Mm]orph[Oo][Ss]:*:*)
+ echo "$UNAME_MACHINE"-unknown-morphos
+ exit ;;
+ *:OS/390:*:*)
+ echo i370-ibm-openedition
+ exit ;;
+ *:z/VM:*:*)
+ echo s390-ibm-zvmoe
+ exit ;;
+ *:OS400:*:*)
+ echo powerpc-ibm-os400
+ exit ;;
+ arm:RISC*:1.[012]*:*|arm:riscix:1.[012]*:*)
+ echo arm-acorn-riscix"$UNAME_RELEASE"
+ exit ;;
+ arm*:riscos:*:*|arm*:RISCOS:*:*)
+ echo arm-unknown-riscos
+ exit ;;
+ SR2?01:HI-UX/MPP:*:* | SR8000:HI-UX/MPP:*:*)
+ echo hppa1.1-hitachi-hiuxmpp
+ exit ;;
+ Pyramid*:OSx*:*:* | MIS*:OSx*:*:* | MIS*:SMP_DC-OSx*:*:*)
+ # akee@wpdis03.wpafb.af.mil (Earle F. Ake) contributed MIS and NILE.
+ if test "`(/bin/universe) 2>/dev/null`" = att ; then
+ echo pyramid-pyramid-sysv3
+ else
+ echo pyramid-pyramid-bsd
+ fi
+ exit ;;
+ NILE*:*:*:dcosx)
+ echo pyramid-pyramid-svr4
+ exit ;;
+ DRS?6000:unix:4.0:6*)
+ echo sparc-icl-nx6
+ exit ;;
+ DRS?6000:UNIX_SV:4.2*:7* | DRS?6000:isis:4.2*:7*)
+ case `/usr/bin/uname -p` in
+ sparc) echo sparc-icl-nx7; exit ;;
+ esac ;;
+ s390x:SunOS:*:*)
+ echo "$UNAME_MACHINE"-ibm-solaris2"`echo "$UNAME_RELEASE" | sed -e 's/[^.]*//'`"
+ exit ;;
+ sun4H:SunOS:5.*:*)
+ echo sparc-hal-solaris2"`echo "$UNAME_RELEASE"|sed -e 's/[^.]*//'`"
+ exit ;;
+ sun4*:SunOS:5.*:* | tadpole*:SunOS:5.*:*)
+ echo sparc-sun-solaris2"`echo "$UNAME_RELEASE" | sed -e 's/[^.]*//'`"
+ exit ;;
+ i86pc:AuroraUX:5.*:* | i86xen:AuroraUX:5.*:*)
+ echo i386-pc-auroraux"$UNAME_RELEASE"
+ exit ;;
+ i86pc:SunOS:5.*:* | i86xen:SunOS:5.*:*)
+ eval "$set_cc_for_build"
+ SUN_ARCH=i386
+ # If there is a compiler, see if it is configured for 64-bit objects.
+ # Note that the Sun cc does not turn __LP64__ into 1 like gcc does.
+ # This test works for both compilers.
+ if [ "$CC_FOR_BUILD" != no_compiler_found ]; then
+ if (echo '#ifdef __amd64'; echo IS_64BIT_ARCH; echo '#endif') | \
+ (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | \
+ grep IS_64BIT_ARCH >/dev/null
+ then
+ SUN_ARCH=x86_64
+ fi
+ fi
+ echo "$SUN_ARCH"-pc-solaris2"`echo "$UNAME_RELEASE"|sed -e 's/[^.]*//'`"
+ exit ;;
+ sun4*:SunOS:6*:*)
+ # According to config.sub, this is the proper way to canonicalize
+ # SunOS6. Hard to guess exactly what SunOS6 will be like, but
+ # it's likely to be more like Solaris than SunOS4.
+ echo sparc-sun-solaris3"`echo "$UNAME_RELEASE"|sed -e 's/[^.]*//'`"
+ exit ;;
+ sun4*:SunOS:*:*)
+ case "`/usr/bin/arch -k`" in
+ Series*|S4*)
+ UNAME_RELEASE=`uname -v`
+ ;;
+ esac
+ # Japanese Language versions have a version number like `4.1.3-JL'.
+ echo sparc-sun-sunos"`echo "$UNAME_RELEASE"|sed -e 's/-/_/'`"
+ exit ;;
+ sun3*:SunOS:*:*)
+ echo m68k-sun-sunos"$UNAME_RELEASE"
+ exit ;;
+ sun*:*:4.2BSD:*)
+ UNAME_RELEASE=`(sed 1q /etc/motd | awk '{print substr($5,1,3)}') 2>/dev/null`
+ test "x$UNAME_RELEASE" = x && UNAME_RELEASE=3
+ case "`/bin/arch`" in
+ sun3)
+ echo m68k-sun-sunos"$UNAME_RELEASE"
+ ;;
+ sun4)
+ echo sparc-sun-sunos"$UNAME_RELEASE"
+ ;;
+ esac
+ exit ;;
+ aushp:SunOS:*:*)
+ echo sparc-auspex-sunos"$UNAME_RELEASE"
+ exit ;;
+ # The situation for MiNT is a little confusing. The machine name
+ # can be virtually anything (at least, everything that is not
+ # "atarist" or "atariste" should have a processor > m68000). The
+ # system name ranges from "MiNT" through "FreeMiNT" to the
+ # lowercase forms "mint" and "freemint". Finally, the system name
+ # "TOS" denotes a system which is actually not MiNT. But MiNT is
+ # downward compatible with TOS, so this should be no problem.
+ atarist[e]:*MiNT:*:* | atarist[e]:*mint:*:* | atarist[e]:*TOS:*:*)
+ echo m68k-atari-mint"$UNAME_RELEASE"
+ exit ;;
+ atari*:*MiNT:*:* | atari*:*mint:*:* | atarist[e]:*TOS:*:*)
+ echo m68k-atari-mint"$UNAME_RELEASE"
+ exit ;;
+ *falcon*:*MiNT:*:* | *falcon*:*mint:*:* | *falcon*:*TOS:*:*)
+ echo m68k-atari-mint"$UNAME_RELEASE"
+ exit ;;
+ milan*:*MiNT:*:* | milan*:*mint:*:* | *milan*:*TOS:*:*)
+ echo m68k-milan-mint"$UNAME_RELEASE"
+ exit ;;
+ hades*:*MiNT:*:* | hades*:*mint:*:* | *hades*:*TOS:*:*)
+ echo m68k-hades-mint"$UNAME_RELEASE"
+ exit ;;
+ *:*MiNT:*:* | *:*mint:*:* | *:*TOS:*:*)
+ echo m68k-unknown-mint"$UNAME_RELEASE"
+ exit ;;
+ m68k:machten:*:*)
+ echo m68k-apple-machten"$UNAME_RELEASE"
+ exit ;;
+ powerpc:machten:*:*)
+ echo powerpc-apple-machten"$UNAME_RELEASE"
+ exit ;;
+ RISC*:Mach:*:*)
+ echo mips-dec-mach_bsd4.3
+ exit ;;
+ RISC*:ULTRIX:*:*)
+ echo mips-dec-ultrix"$UNAME_RELEASE"
+ exit ;;
+ VAX*:ULTRIX*:*:*)
+ echo vax-dec-ultrix"$UNAME_RELEASE"
+ exit ;;
+ 2020:CLIX:*:* | 2430:CLIX:*:*)
+ echo clipper-intergraph-clix"$UNAME_RELEASE"
+ exit ;;
+ mips:*:*:UMIPS | mips:*:*:RISCos)
+ eval "$set_cc_for_build"
+ sed 's/^ //' << EOF > "$dummy.c"
+#ifdef __cplusplus
+#include <stdio.h> /* for printf() prototype */
+ int main (int argc, char *argv[]) {
+#else
+ int main (argc, argv) int argc; char *argv[]; {
+#endif
+ #if defined (host_mips) && defined (MIPSEB)
+ #if defined (SYSTYPE_SYSV)
+ printf ("mips-mips-riscos%ssysv\\n", argv[1]); exit (0);
+ #endif
+ #if defined (SYSTYPE_SVR4)
+ printf ("mips-mips-riscos%ssvr4\\n", argv[1]); exit (0);
+ #endif
+ #if defined (SYSTYPE_BSD43) || defined(SYSTYPE_BSD)
+ printf ("mips-mips-riscos%sbsd\\n", argv[1]); exit (0);
+ #endif
+ #endif
+ exit (-1);
+ }
+EOF
+ $CC_FOR_BUILD -o "$dummy" "$dummy.c" &&
+ dummyarg=`echo "$UNAME_RELEASE" | sed -n 's/\([0-9]*\).*/\1/p'` &&
+ SYSTEM_NAME=`"$dummy" "$dummyarg"` &&
+ { echo "$SYSTEM_NAME"; exit; }
+ echo mips-mips-riscos"$UNAME_RELEASE"
+ exit ;;
+ Motorola:PowerMAX_OS:*:*)
+ echo powerpc-motorola-powermax
+ exit ;;
+ Motorola:*:4.3:PL8-*)
+ echo powerpc-harris-powermax
+ exit ;;
+ Night_Hawk:*:*:PowerMAX_OS | Synergy:PowerMAX_OS:*:*)
+ echo powerpc-harris-powermax
+ exit ;;
+ Night_Hawk:Power_UNIX:*:*)
+ echo powerpc-harris-powerunix
+ exit ;;
+ m88k:CX/UX:7*:*)
+ echo m88k-harris-cxux7
+ exit ;;
+ m88k:*:4*:R4*)
+ echo m88k-motorola-sysv4
+ exit ;;
+ m88k:*:3*:R3*)
+ echo m88k-motorola-sysv3
+ exit ;;
+ AViiON:dgux:*:*)
+ # DG/UX returns AViiON for all architectures
+ UNAME_PROCESSOR=`/usr/bin/uname -p`
+ if [ "$UNAME_PROCESSOR" = mc88100 ] || [ "$UNAME_PROCESSOR" = mc88110 ]
+ then
+ if [ "$TARGET_BINARY_INTERFACE"x = m88kdguxelfx ] || \
+ [ "$TARGET_BINARY_INTERFACE"x = x ]
+ then
+ echo m88k-dg-dgux"$UNAME_RELEASE"
+ else
+ echo m88k-dg-dguxbcs"$UNAME_RELEASE"
+ fi
+ else
+ echo i586-dg-dgux"$UNAME_RELEASE"
+ fi
+ exit ;;
+ M88*:DolphinOS:*:*) # DolphinOS (SVR3)
+ echo m88k-dolphin-sysv3
+ exit ;;
+ M88*:*:R3*:*)
+ # Delta 88k system running SVR3
+ echo m88k-motorola-sysv3
+ exit ;;
+ XD88*:*:*:*) # Tektronix XD88 system running UTekV (SVR3)
+ echo m88k-tektronix-sysv3
+ exit ;;
+ Tek43[0-9][0-9]:UTek:*:*) # Tektronix 4300 system running UTek (BSD)
+ echo m68k-tektronix-bsd
+ exit ;;
+ *:IRIX*:*:*)
+ echo mips-sgi-irix"`echo "$UNAME_RELEASE"|sed -e 's/-/_/g'`"
+ exit ;;
+ ????????:AIX?:[12].1:2) # AIX 2.2.1 or AIX 2.1.1 is RT/PC AIX.
+ echo romp-ibm-aix # uname -m gives an 8 hex-code CPU id
+ exit ;; # Note that: echo "'`uname -s`'" gives 'AIX '
+ i*86:AIX:*:*)
+ echo i386-ibm-aix
+ exit ;;
+ ia64:AIX:*:*)
+ if [ -x /usr/bin/oslevel ] ; then
+ IBM_REV=`/usr/bin/oslevel`
+ else
+ IBM_REV="$UNAME_VERSION.$UNAME_RELEASE"
+ fi
+ echo "$UNAME_MACHINE"-ibm-aix"$IBM_REV"
+ exit ;;
+ *:AIX:2:3)
+ if grep bos325 /usr/include/stdio.h >/dev/null 2>&1; then
+ eval "$set_cc_for_build"
+ sed 's/^ //' << EOF > "$dummy.c"
+ #include <sys/systemcfg.h>
+
+ main()
+ {
+ if (!__power_pc())
+ exit(1);
+ puts("powerpc-ibm-aix3.2.5");
+ exit(0);
+ }
+EOF
+ if $CC_FOR_BUILD -o "$dummy" "$dummy.c" && SYSTEM_NAME=`"$dummy"`
+ then
+ echo "$SYSTEM_NAME"
+ else
+ echo rs6000-ibm-aix3.2.5
+ fi
+ elif grep bos324 /usr/include/stdio.h >/dev/null 2>&1; then
+ echo rs6000-ibm-aix3.2.4
+ else
+ echo rs6000-ibm-aix3.2
+ fi
+ exit ;;
+ *:AIX:*:[4567])
+ IBM_CPU_ID=`/usr/sbin/lsdev -C -c processor -S available | sed 1q | awk '{ print $1 }'`
+ if /usr/sbin/lsattr -El "$IBM_CPU_ID" | grep ' POWER' >/dev/null 2>&1; then
+ IBM_ARCH=rs6000
+ else
+ IBM_ARCH=powerpc
+ fi
+ if [ -x /usr/bin/lslpp ] ; then
+ IBM_REV=`/usr/bin/lslpp -Lqc bos.rte.libc |
+ awk -F: '{ print $3 }' | sed s/[0-9]*$/0/`
+ else
+ IBM_REV="$UNAME_VERSION.$UNAME_RELEASE"
+ fi
+ echo "$IBM_ARCH"-ibm-aix"$IBM_REV"
+ exit ;;
+ *:AIX:*:*)
+ echo rs6000-ibm-aix
+ exit ;;
+ ibmrt:4.4BSD:*|romp-ibm:4.4BSD:*)
+ echo romp-ibm-bsd4.4
+ exit ;;
+ ibmrt:*BSD:*|romp-ibm:BSD:*) # covers RT/PC BSD and
+ echo romp-ibm-bsd"$UNAME_RELEASE" # 4.3 with uname added to
+ exit ;; # report: romp-ibm BSD 4.3
+ *:BOSX:*:*)
+ echo rs6000-bull-bosx
+ exit ;;
+ DPX/2?00:B.O.S.:*:*)
+ echo m68k-bull-sysv3
+ exit ;;
+ 9000/[34]??:4.3bsd:1.*:*)
+ echo m68k-hp-bsd
+ exit ;;
+ hp300:4.4BSD:*:* | 9000/[34]??:4.3bsd:2.*:*)
+ echo m68k-hp-bsd4.4
+ exit ;;
+ 9000/[34678]??:HP-UX:*:*)
+ HPUX_REV=`echo "$UNAME_RELEASE"|sed -e 's/[^.]*.[0B]*//'`
+ case "$UNAME_MACHINE" in
+ 9000/31?) HP_ARCH=m68000 ;;
+ 9000/[34]??) HP_ARCH=m68k ;;
+ 9000/[678][0-9][0-9])
+ if [ -x /usr/bin/getconf ]; then
+ sc_cpu_version=`/usr/bin/getconf SC_CPU_VERSION 2>/dev/null`
+ sc_kernel_bits=`/usr/bin/getconf SC_KERNEL_BITS 2>/dev/null`
+ case "$sc_cpu_version" in
+ 523) HP_ARCH=hppa1.0 ;; # CPU_PA_RISC1_0
+ 528) HP_ARCH=hppa1.1 ;; # CPU_PA_RISC1_1
+ 532) # CPU_PA_RISC2_0
+ case "$sc_kernel_bits" in
+ 32) HP_ARCH=hppa2.0n ;;
+ 64) HP_ARCH=hppa2.0w ;;
+ '') HP_ARCH=hppa2.0 ;; # HP-UX 10.20
+ esac ;;
+ esac
+ fi
+ if [ "$HP_ARCH" = "" ]; then
+ eval "$set_cc_for_build"
+ sed 's/^ //' << EOF > "$dummy.c"
+
+ #define _HPUX_SOURCE
+ #include <stdlib.h>
+ #include <unistd.h>
+
+ int main ()
+ {
+ #if defined(_SC_KERNEL_BITS)
+ long bits = sysconf(_SC_KERNEL_BITS);
+ #endif
+ long cpu = sysconf (_SC_CPU_VERSION);
+
+ switch (cpu)
+ {
+ case CPU_PA_RISC1_0: puts ("hppa1.0"); break;
+ case CPU_PA_RISC1_1: puts ("hppa1.1"); break;
+ case CPU_PA_RISC2_0:
+ #if defined(_SC_KERNEL_BITS)
+ switch (bits)
+ {
+ case 64: puts ("hppa2.0w"); break;
+ case 32: puts ("hppa2.0n"); break;
+ default: puts ("hppa2.0"); break;
+ } break;
+ #else /* !defined(_SC_KERNEL_BITS) */
+ puts ("hppa2.0"); break;
+ #endif
+ default: puts ("hppa1.0"); break;
+ }
+ exit (0);
+ }
+EOF
+ (CCOPTS="" $CC_FOR_BUILD -o "$dummy" "$dummy.c" 2>/dev/null) && HP_ARCH=`"$dummy"`
+ test -z "$HP_ARCH" && HP_ARCH=hppa
+ fi ;;
+ esac
+ if [ "$HP_ARCH" = hppa2.0w ]
+ then
+ eval "$set_cc_for_build"
+
+ # hppa2.0w-hp-hpux* has a 64-bit kernel and a compiler generating
+ # 32-bit code. hppa64-hp-hpux* has the same kernel and a compiler
+ # generating 64-bit code. GNU and HP use different nomenclature:
+ #
+ # $ CC_FOR_BUILD=cc ./config.guess
+ # => hppa2.0w-hp-hpux11.23
+ # $ CC_FOR_BUILD="cc +DA2.0w" ./config.guess
+ # => hppa64-hp-hpux11.23
+
+ if echo __LP64__ | (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) |
+ grep -q __LP64__
+ then
+ HP_ARCH=hppa2.0w
+ else
+ HP_ARCH=hppa64
+ fi
+ fi
+ echo "$HP_ARCH"-hp-hpux"$HPUX_REV"
+ exit ;;
+ ia64:HP-UX:*:*)
+ HPUX_REV=`echo "$UNAME_RELEASE"|sed -e 's/[^.]*.[0B]*//'`
+ echo ia64-hp-hpux"$HPUX_REV"
+ exit ;;
+ 3050*:HI-UX:*:*)
+ eval "$set_cc_for_build"
+ sed 's/^ //' << EOF > "$dummy.c"
+ #include <unistd.h>
+ int
+ main ()
+ {
+ long cpu = sysconf (_SC_CPU_VERSION);
+ /* The order matters, because CPU_IS_HP_MC68K erroneously returns
+ true for CPU_PA_RISC1_0. CPU_IS_PA_RISC returns correct
+ results, however. */
+ if (CPU_IS_PA_RISC (cpu))
+ {
+ switch (cpu)
+ {
+ case CPU_PA_RISC1_0: puts ("hppa1.0-hitachi-hiuxwe2"); break;
+ case CPU_PA_RISC1_1: puts ("hppa1.1-hitachi-hiuxwe2"); break;
+ case CPU_PA_RISC2_0: puts ("hppa2.0-hitachi-hiuxwe2"); break;
+ default: puts ("hppa-hitachi-hiuxwe2"); break;
+ }
+ }
+ else if (CPU_IS_HP_MC68K (cpu))
+ puts ("m68k-hitachi-hiuxwe2");
+ else puts ("unknown-hitachi-hiuxwe2");
+ exit (0);
+ }
+EOF
+ $CC_FOR_BUILD -o "$dummy" "$dummy.c" && SYSTEM_NAME=`"$dummy"` &&
+ { echo "$SYSTEM_NAME"; exit; }
+ echo unknown-hitachi-hiuxwe2
+ exit ;;
+ 9000/7??:4.3bsd:*:* | 9000/8?[79]:4.3bsd:*:*)
+ echo hppa1.1-hp-bsd
+ exit ;;
+ 9000/8??:4.3bsd:*:*)
+ echo hppa1.0-hp-bsd
+ exit ;;
+ *9??*:MPE/iX:*:* | *3000*:MPE/iX:*:*)
+ echo hppa1.0-hp-mpeix
+ exit ;;
+ hp7??:OSF1:*:* | hp8?[79]:OSF1:*:*)
+ echo hppa1.1-hp-osf
+ exit ;;
+ hp8??:OSF1:*:*)
+ echo hppa1.0-hp-osf
+ exit ;;
+ i*86:OSF1:*:*)
+ if [ -x /usr/sbin/sysversion ] ; then
+ echo "$UNAME_MACHINE"-unknown-osf1mk
+ else
+ echo "$UNAME_MACHINE"-unknown-osf1
+ fi
+ exit ;;
+ parisc*:Lites*:*:*)
+ echo hppa1.1-hp-lites
+ exit ;;
+ C1*:ConvexOS:*:* | convex:ConvexOS:C1*:*)
+ echo c1-convex-bsd
+ exit ;;
+ C2*:ConvexOS:*:* | convex:ConvexOS:C2*:*)
+ if getsysinfo -f scalar_acc
+ then echo c32-convex-bsd
+ else echo c2-convex-bsd
+ fi
+ exit ;;
+ C34*:ConvexOS:*:* | convex:ConvexOS:C34*:*)
+ echo c34-convex-bsd
+ exit ;;
+ C38*:ConvexOS:*:* | convex:ConvexOS:C38*:*)
+ echo c38-convex-bsd
+ exit ;;
+ C4*:ConvexOS:*:* | convex:ConvexOS:C4*:*)
+ echo c4-convex-bsd
+ exit ;;
+ CRAY*Y-MP:*:*:*)
+ echo ymp-cray-unicos"$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/'
+ exit ;;
+ CRAY*[A-Z]90:*:*:*)
+ echo "$UNAME_MACHINE"-cray-unicos"$UNAME_RELEASE" \
+ | sed -e 's/CRAY.*\([A-Z]90\)/\1/' \
+ -e y/ABCDEFGHIJKLMNOPQRSTUVWXYZ/abcdefghijklmnopqrstuvwxyz/ \
+ -e 's/\.[^.]*$/.X/'
+ exit ;;
+ CRAY*TS:*:*:*)
+ echo t90-cray-unicos"$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/'
+ exit ;;
+ CRAY*T3E:*:*:*)
+ echo alphaev5-cray-unicosmk"$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/'
+ exit ;;
+ CRAY*SV1:*:*:*)
+ echo sv1-cray-unicos"$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/'
+ exit ;;
+ *:UNICOS/mp:*:*)
+ echo craynv-cray-unicosmp"$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/'
+ exit ;;
+ F30[01]:UNIX_System_V:*:* | F700:UNIX_System_V:*:*)
+ FUJITSU_PROC=`uname -m | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz`
+ FUJITSU_SYS=`uname -p | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/\///'`
+ FUJITSU_REL=`echo "$UNAME_RELEASE" | sed -e 's/ /_/'`
+ echo "${FUJITSU_PROC}-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}"
+ exit ;;
+ 5000:UNIX_System_V:4.*:*)
+ FUJITSU_SYS=`uname -p | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/\///'`
+ FUJITSU_REL=`echo "$UNAME_RELEASE" | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/ /_/'`
+ echo "sparc-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}"
+ exit ;;
+ i*86:BSD/386:*:* | i*86:BSD/OS:*:* | *:Ascend\ Embedded/OS:*:*)
+ echo "$UNAME_MACHINE"-pc-bsdi"$UNAME_RELEASE"
+ exit ;;
+ sparc*:BSD/OS:*:*)
+ echo sparc-unknown-bsdi"$UNAME_RELEASE"
+ exit ;;
+ *:BSD/OS:*:*)
+ echo "$UNAME_MACHINE"-unknown-bsdi"$UNAME_RELEASE"
+ exit ;;
+ *:FreeBSD:*:*)
+ UNAME_PROCESSOR=`/usr/bin/uname -p`
+ case "$UNAME_PROCESSOR" in
+ amd64)
+ UNAME_PROCESSOR=x86_64 ;;
+ i386)
+ UNAME_PROCESSOR=i586 ;;
+ esac
+ echo "$UNAME_PROCESSOR"-unknown-freebsd"`echo "$UNAME_RELEASE"|sed -e 's/[-(].*//'`"
+ exit ;;
+ i*:CYGWIN*:*)
+ echo "$UNAME_MACHINE"-pc-cygwin
+ exit ;;
+ *:MINGW64*:*)
+ echo "$UNAME_MACHINE"-pc-mingw64
+ exit ;;
+ *:MINGW*:*)
+ echo "$UNAME_MACHINE"-pc-mingw32
+ exit ;;
+ *:MSYS*:*)
+ echo "$UNAME_MACHINE"-pc-msys
+ exit ;;
+ i*:PW*:*)
+ echo "$UNAME_MACHINE"-pc-pw32
+ exit ;;
+ *:Interix*:*)
+ case "$UNAME_MACHINE" in
+ x86)
+ echo i586-pc-interix"$UNAME_RELEASE"
+ exit ;;
+ authenticamd | genuineintel | EM64T)
+ echo x86_64-unknown-interix"$UNAME_RELEASE"
+ exit ;;
+ IA64)
+ echo ia64-unknown-interix"$UNAME_RELEASE"
+ exit ;;
+ esac ;;
+ i*:UWIN*:*)
+ echo "$UNAME_MACHINE"-pc-uwin
+ exit ;;
+ amd64:CYGWIN*:*:* | x86_64:CYGWIN*:*:*)
+ echo x86_64-unknown-cygwin
+ exit ;;
+ prep*:SunOS:5.*:*)
+ echo powerpcle-unknown-solaris2"`echo "$UNAME_RELEASE"|sed -e 's/[^.]*//'`"
+ exit ;;
+ *:GNU:*:*)
+ # the GNU system
+ echo "`echo "$UNAME_MACHINE"|sed -e 's,[-/].*$,,'`-unknown-$LIBC`echo "$UNAME_RELEASE"|sed -e 's,/.*$,,'`"
+ exit ;;
+ *:GNU/*:*:*)
+ # other systems with GNU libc and userland
+ echo "$UNAME_MACHINE-unknown-`echo "$UNAME_SYSTEM" | sed 's,^[^/]*/,,' | tr "[:upper:]" "[:lower:]"``echo "$UNAME_RELEASE"|sed -e 's/[-(].*//'`-$LIBC"
+ exit ;;
+ i*86:Minix:*:*)
+ echo "$UNAME_MACHINE"-pc-minix
+ exit ;;
+ aarch64:Linux:*:*)
+ echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
+ exit ;;
+ aarch64_be:Linux:*:*)
+ UNAME_MACHINE=aarch64_be
+ echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
+ exit ;;
+ alpha:Linux:*:*)
+ case `sed -n '/^cpu model/s/^.*: \(.*\)/\1/p' < /proc/cpuinfo` in
+ EV5) UNAME_MACHINE=alphaev5 ;;
+ EV56) UNAME_MACHINE=alphaev56 ;;
+ PCA56) UNAME_MACHINE=alphapca56 ;;
+ PCA57) UNAME_MACHINE=alphapca56 ;;
+ EV6) UNAME_MACHINE=alphaev6 ;;
+ EV67) UNAME_MACHINE=alphaev67 ;;
+ EV68*) UNAME_MACHINE=alphaev68 ;;
+ esac
+ objdump --private-headers /bin/sh | grep -q ld.so.1
+ if test "$?" = 0 ; then LIBC=gnulibc1 ; fi
+ echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
+ exit ;;
+ arc:Linux:*:* | arceb:Linux:*:*)
+ echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
+ exit ;;
+ arm*:Linux:*:*)
+ eval "$set_cc_for_build"
+ if echo __ARM_EABI__ | $CC_FOR_BUILD -E - 2>/dev/null \
+ | grep -q __ARM_EABI__
+ then
+ echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
+ else
+ if echo __ARM_PCS_VFP | $CC_FOR_BUILD -E - 2>/dev/null \
+ | grep -q __ARM_PCS_VFP
+ then
+ echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"eabi
+ else
+ echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"eabihf
+ fi
+ fi
+ exit ;;
+ avr32*:Linux:*:*)
+ echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
+ exit ;;
+ cris:Linux:*:*)
+ echo "$UNAME_MACHINE"-axis-linux-"$LIBC"
+ exit ;;
+ crisv32:Linux:*:*)
+ echo "$UNAME_MACHINE"-axis-linux-"$LIBC"
+ exit ;;
+ e2k:Linux:*:*)
+ echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
+ exit ;;
+ frv:Linux:*:*)
+ echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
+ exit ;;
+ hexagon:Linux:*:*)
+ echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
+ exit ;;
+ i*86:Linux:*:*)
+ echo "$UNAME_MACHINE"-pc-linux-"$LIBC"
+ exit ;;
+ ia64:Linux:*:*)
+ echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
+ exit ;;
+ k1om:Linux:*:*)
+ echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
+ exit ;;
+ m32r*:Linux:*:*)
+ echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
+ exit ;;
+ m68*:Linux:*:*)
+ echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
+ exit ;;
+ mips:Linux:*:* | mips64:Linux:*:*)
+ eval "$set_cc_for_build"
+ sed 's/^ //' << EOF > "$dummy.c"
+ #undef CPU
+ #undef ${UNAME_MACHINE}
+ #undef ${UNAME_MACHINE}el
+ #if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL)
+ CPU=${UNAME_MACHINE}el
+ #else
+ #if defined(__MIPSEB__) || defined(__MIPSEB) || defined(_MIPSEB) || defined(MIPSEB)
+ CPU=${UNAME_MACHINE}
+ #else
+ CPU=
+ #endif
+ #endif
+EOF
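+ # Same preprocessor trick as the LIBC probe above: eval the single
+ # CPU= assignment that survives the cpp conditionals.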
+ eval "`$CC_FOR_BUILD -E "$dummy.c" 2>/dev/null | grep '^CPU'`"
+ test "x$CPU" != x && { echo "$CPU-unknown-linux-$LIBC"; exit; }
+ ;;
+ mips64el:Linux:*:*)
+ echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
+ exit ;;
+ openrisc*:Linux:*:*)
+ echo or1k-unknown-linux-"$LIBC"
+ exit ;;
+ or32:Linux:*:* | or1k*:Linux:*:*)
+ echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
+ exit ;;
+ padre:Linux:*:*)
+ echo sparc-unknown-linux-"$LIBC"
+ exit ;;
+ parisc64:Linux:*:* | hppa64:Linux:*:*)
+ echo hppa64-unknown-linux-"$LIBC"
+ exit ;;
+ parisc:Linux:*:* | hppa:Linux:*:*)
+ # Look for CPU level
+ case `grep '^cpu[^a-z]*:' /proc/cpuinfo 2>/dev/null | cut -d' ' -f2` in
+ PA7*) echo hppa1.1-unknown-linux-"$LIBC" ;;
+ PA8*) echo hppa2.0-unknown-linux-"$LIBC" ;;
+ *) echo hppa-unknown-linux-"$LIBC" ;;
+ esac
+ exit ;;
+ ppc64:Linux:*:*)
+ echo powerpc64-unknown-linux-"$LIBC"
+ exit ;;
+ ppc:Linux:*:*)
+ echo powerpc-unknown-linux-"$LIBC"
+ exit ;;
+ ppc64le:Linux:*:*)
+ echo powerpc64le-unknown-linux-"$LIBC"
+ exit ;;
+ ppcle:Linux:*:*)
+ echo powerpcle-unknown-linux-"$LIBC"
+ exit ;;
+ riscv32:Linux:*:* | riscv64:Linux:*:*)
+ echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
+ exit ;;
+ s390:Linux:*:* | s390x:Linux:*:*)
+ echo "$UNAME_MACHINE"-ibm-linux-"$LIBC"
+ exit ;;
+ sh64*:Linux:*:*)
+ echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
+ exit ;;
+ sh*:Linux:*:*)
+ echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
+ exit ;;
+ sparc:Linux:*:* | sparc64:Linux:*:*)
+ echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
+ exit ;;
+ tile*:Linux:*:*)
+ echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
+ exit ;;
+ vax:Linux:*:*)
+ echo "$UNAME_MACHINE"-dec-linux-"$LIBC"
+ exit ;;
+ x86_64:Linux:*:*)
+ if objdump -f /bin/sh | grep -q elf32-x86-64; then
+ echo "$UNAME_MACHINE"-pc-linux-"$LIBC"x32
+ else
+ echo "$UNAME_MACHINE"-pc-linux-"$LIBC"
+ fi
+ exit ;;
+ xtensa*:Linux:*:*)
+ echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
+ exit ;;
+ i*86:DYNIX/ptx:4*:*)
+ # ptx 4.0 does uname -s correctly, with DYNIX/ptx in there.
+ # Earlier versions are messed up and put the nodename in both
+ # sysname and nodename.
+ echo i386-sequent-sysv4
+ exit ;;
+ i*86:UNIX_SV:4.2MP:2.*)
+ # UnixWare is an offshoot of SVR4, but it has its own version
+ # number series starting with 2...
+ # I am not positive that other SVR4 systems won't match this;
+ # I just have to hope. -- rms.
+ # Use sysv4.2uw... so that sysv4* matches it.
+ echo "$UNAME_MACHINE"-pc-sysv4.2uw"$UNAME_VERSION"
+ exit ;;
+ i*86:OS/2:*:*)
+ # If we were able to find `uname', then EMX Unix compatibility
+ # is probably installed.
+ echo "$UNAME_MACHINE"-pc-os2-emx
+ exit ;;
+ i*86:XTS-300:*:STOP)
+ echo "$UNAME_MACHINE"-unknown-stop
+ exit ;;
+ i*86:atheos:*:*)
+ echo "$UNAME_MACHINE"-unknown-atheos
+ exit ;;
+ i*86:syllable:*:*)
+ echo "$UNAME_MACHINE"-pc-syllable
+ exit ;;
+ i*86:LynxOS:2.*:* | i*86:LynxOS:3.[01]*:* | i*86:LynxOS:4.[02]*:*)
+ echo i386-unknown-lynxos"$UNAME_RELEASE"
+ exit ;;
+ i*86:*DOS:*:*)
+ echo "$UNAME_MACHINE"-pc-msdosdjgpp
+ exit ;;
+ i*86:*:4.*:*)
+ UNAME_REL=`echo "$UNAME_RELEASE" | sed 's/\/MP$//'`
+ if grep Novell /usr/include/link.h >/dev/null 2>/dev/null; then
+ echo "$UNAME_MACHINE"-univel-sysv"$UNAME_REL"
+ else
+ echo "$UNAME_MACHINE"-pc-sysv"$UNAME_REL"
+ fi
+ exit ;;
+ i*86:*:5:[678]*)
+ # UnixWare 7.x, OpenUNIX and OpenServer 6.
+ case `/bin/uname -X | grep "^Machine"` in
+ *486*) UNAME_MACHINE=i486 ;;
+ *Pentium) UNAME_MACHINE=i586 ;;
+ *Pent*|*Celeron) UNAME_MACHINE=i686 ;;
+ esac
+ echo "$UNAME_MACHINE-unknown-sysv${UNAME_RELEASE}${UNAME_SYSTEM}{$UNAME_VERSION}"
+ exit ;;
+ i*86:*:3.2:*)
+ if test -f /usr/options/cb.name; then
+ UNAME_REL=`sed -n 's/.*Version //p' </usr/options/cb.name`
+ echo "$UNAME_MACHINE"-pc-isc"$UNAME_REL"
+ elif /bin/uname -X 2>/dev/null >/dev/null ; then
+ UNAME_REL=`(/bin/uname -X|grep Release|sed -e 's/.*= //')`
+ (/bin/uname -X|grep i80486 >/dev/null) && UNAME_MACHINE=i486
+ (/bin/uname -X|grep '^Machine.*Pentium' >/dev/null) \
+ && UNAME_MACHINE=i586
+ (/bin/uname -X|grep '^Machine.*Pent *II' >/dev/null) \
+ && UNAME_MACHINE=i686
+ (/bin/uname -X|grep '^Machine.*Pentium Pro' >/dev/null) \
+ && UNAME_MACHINE=i686
+ echo "$UNAME_MACHINE"-pc-sco"$UNAME_REL"
+ else
+ echo "$UNAME_MACHINE"-pc-sysv32
+ fi
+ exit ;;
+ pc:*:*:*)
+ # Left here for compatibility:
+ # uname -m always prints 'pc' for DJGPP and says nothing about the
+ # processor, so we play it safe by assuming i586.
+ # Note: whatever this is, it MUST be the same as what config.sub
+ # prints for the "djgpp" host, or else GDB configure will decide that
+ # this is a cross-build.
+ echo i586-pc-msdosdjgpp
+ exit ;;
+ Intel:Mach:3*:*)
+ echo i386-pc-mach3
+ exit ;;
+ paragon:*:*:*)
+ echo i860-intel-osf1
+ exit ;;
+ i860:*:4.*:*) # i860-SVR4
+ if grep Stardent /usr/include/sys/uadmin.h >/dev/null 2>&1 ; then
+ echo i860-stardent-sysv"$UNAME_RELEASE" # Stardent Vistra i860-SVR4
+ else # Add other i860-SVR4 vendors below as they are discovered.
+ echo i860-unknown-sysv"$UNAME_RELEASE" # Unknown i860-SVR4
+ fi
+ exit ;;
+ mini*:CTIX:SYS*5:*)
+ # "miniframe"
+ echo m68010-convergent-sysv
+ exit ;;
+ mc68k:UNIX:SYSTEM5:3.51m)
+ echo m68k-convergent-sysv
+ exit ;;
+ M680?0:D-NIX:5.3:*)
+ echo m68k-diab-dnix
+ exit ;;
+ M68*:*:R3V[5678]*:*)
+ test -r /sysV68 && { echo 'm68k-motorola-sysv'; exit; } ;;
+ 3[345]??:*:4.0:3.0 | 3[34]??A:*:4.0:3.0 | 3[34]??,*:*:4.0:3.0 | 3[34]??/*:*:4.0:3.0 | 4400:*:4.0:3.0 | 4850:*:4.0:3.0 | SKA40:*:4.0:3.0 | SDS2:*:4.0:3.0 | SHG2:*:4.0:3.0 | S7501*:*:4.0:3.0)
+ OS_REL=''
+ test -r /etc/.relid \
+ && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid`
+ /bin/uname -p 2>/dev/null | grep 86 >/dev/null \
+ && { echo i486-ncr-sysv4.3"$OS_REL"; exit; }
+ /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \
+ && { echo i586-ncr-sysv4.3"$OS_REL"; exit; } ;;
+ 3[34]??:*:4.0:* | 3[34]??,*:*:4.0:*)
+ /bin/uname -p 2>/dev/null | grep 86 >/dev/null \
+ && { echo i486-ncr-sysv4; exit; } ;;
+ NCR*:*:4.2:* | MPRAS*:*:4.2:*)
+ OS_REL='.3'
+ test -r /etc/.relid \
+ && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid`
+ /bin/uname -p 2>/dev/null | grep 86 >/dev/null \
+ && { echo i486-ncr-sysv4.3"$OS_REL"; exit; }
+ /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \
+ && { echo i586-ncr-sysv4.3"$OS_REL"; exit; }
+ /bin/uname -p 2>/dev/null | /bin/grep pteron >/dev/null \
+ && { echo i586-ncr-sysv4.3"$OS_REL"; exit; } ;;
+ m68*:LynxOS:2.*:* | m68*:LynxOS:3.0*:*)
+ echo m68k-unknown-lynxos"$UNAME_RELEASE"
+ exit ;;
+ mc68030:UNIX_System_V:4.*:*)
+ echo m68k-atari-sysv4
+ exit ;;
+ TSUNAMI:LynxOS:2.*:*)
+ echo sparc-unknown-lynxos"$UNAME_RELEASE"
+ exit ;;
+ rs6000:LynxOS:2.*:*)
+ echo rs6000-unknown-lynxos"$UNAME_RELEASE"
+ exit ;;
+ PowerPC:LynxOS:2.*:* | PowerPC:LynxOS:3.[01]*:* | PowerPC:LynxOS:4.[02]*:*)
+ echo powerpc-unknown-lynxos"$UNAME_RELEASE"
+ exit ;;
+ SM[BE]S:UNIX_SV:*:*)
+ echo mips-dde-sysv"$UNAME_RELEASE"
+ exit ;;
+ RM*:ReliantUNIX-*:*:*)
+ echo mips-sni-sysv4
+ exit ;;
+ RM*:SINIX-*:*:*)
+ echo mips-sni-sysv4
+ exit ;;
+ *:SINIX-*:*:*)
+ if uname -p 2>/dev/null >/dev/null ; then
+ UNAME_MACHINE=`(uname -p) 2>/dev/null`
+ echo "$UNAME_MACHINE"-sni-sysv4
+ else
+ echo ns32k-sni-sysv
+ fi
+ exit ;;
+ PENTIUM:*:4.0*:*) # Unisys `ClearPath HMP IX 4000' SVR4/MP effort
+ # says <Richard.M.Bartel@ccMail.Census.GOV>
+ echo i586-unisys-sysv4
+ exit ;;
+ *:UNIX_System_V:4*:FTX*)
+ # From Gerald Hewes <hewes@openmarket.com>.
+ # How about differentiating between stratus architectures? -djm
+ echo hppa1.1-stratus-sysv4
+ exit ;;
+ *:*:*:FTX*)
+ # From seanf@swdc.stratus.com.
+ echo i860-stratus-sysv4
+ exit ;;
+ i*86:VOS:*:*)
+ # From Paul.Green@stratus.com.
+ echo "$UNAME_MACHINE"-stratus-vos
+ exit ;;
+ *:VOS:*:*)
+ # From Paul.Green@stratus.com.
+ echo hppa1.1-stratus-vos
+ exit ;;
+ mc68*:A/UX:*:*)
+ echo m68k-apple-aux"$UNAME_RELEASE"
+ exit ;;
+ news*:NEWS-OS:6*:*)
+ echo mips-sony-newsos6
+ exit ;;
+ R[34]000:*System_V*:*:* | R4000:UNIX_SYSV:*:* | R*000:UNIX_SV:*:*)
+ if [ -d /usr/nec ]; then
+ echo mips-nec-sysv"$UNAME_RELEASE"
+ else
+ echo mips-unknown-sysv"$UNAME_RELEASE"
+ fi
+ exit ;;
+ BeBox:BeOS:*:*) # BeOS running on hardware made by Be, PPC only.
+ echo powerpc-be-beos
+ exit ;;
+ BeMac:BeOS:*:*) # BeOS running on Mac or Mac clone, PPC only.
+ echo powerpc-apple-beos
+ exit ;;
+ BePC:BeOS:*:*) # BeOS running on Intel PC compatible.
+ echo i586-pc-beos
+ exit ;;
+ BePC:Haiku:*:*) # Haiku running on Intel PC compatible.
+ echo i586-pc-haiku
+ exit ;;
+ x86_64:Haiku:*:*)
+ echo x86_64-unknown-haiku
+ exit ;;
+ SX-4:SUPER-UX:*:*)
+ echo sx4-nec-superux"$UNAME_RELEASE"
+ exit ;;
+ SX-5:SUPER-UX:*:*)
+ echo sx5-nec-superux"$UNAME_RELEASE"
+ exit ;;
+ SX-6:SUPER-UX:*:*)
+ echo sx6-nec-superux"$UNAME_RELEASE"
+ exit ;;
+ SX-7:SUPER-UX:*:*)
+ echo sx7-nec-superux"$UNAME_RELEASE"
+ exit ;;
+ SX-8:SUPER-UX:*:*)
+ echo sx8-nec-superux"$UNAME_RELEASE"
+ exit ;;
+ SX-8R:SUPER-UX:*:*)
+ echo sx8r-nec-superux"$UNAME_RELEASE"
+ exit ;;
+ SX-ACE:SUPER-UX:*:*)
+ echo sxace-nec-superux"$UNAME_RELEASE"
+ exit ;;
+ Power*:Rhapsody:*:*)
+ echo powerpc-apple-rhapsody"$UNAME_RELEASE"
+ exit ;;
+ *:Rhapsody:*:*)
+ echo "$UNAME_MACHINE"-apple-rhapsody"$UNAME_RELEASE"
+ exit ;;
+ *:Darwin:*:*)
+ UNAME_PROCESSOR=`uname -p` || UNAME_PROCESSOR=unknown
+ eval "$set_cc_for_build"
+ if test "$UNAME_PROCESSOR" = unknown ; then
+ UNAME_PROCESSOR=powerpc
+ fi
+ if test "`echo "$UNAME_RELEASE" | sed -e 's/\..*//'`" -le 10 ; then
+ if [ "$CC_FOR_BUILD" != no_compiler_found ]; then
+ if (echo '#ifdef __LP64__'; echo IS_64BIT_ARCH; echo '#endif') | \
+ (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | \
+ grep IS_64BIT_ARCH >/dev/null
+ then
+ case $UNAME_PROCESSOR in
+ i386) UNAME_PROCESSOR=x86_64 ;;
+ powerpc) UNAME_PROCESSOR=powerpc64 ;;
+ esac
+ fi
+ # On 10.4-10.6 one might compile for PowerPC via gcc -arch ppc
+ if (echo '#ifdef __POWERPC__'; echo IS_PPC; echo '#endif') | \
+ (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | \
+ grep IS_PPC >/dev/null
+ then
+ UNAME_PROCESSOR=powerpc
+ fi
+ fi
+ elif test "$UNAME_PROCESSOR" = i386 ; then
+ # Avoid executing cc on OS X 10.9, as it ships with a stub
+ # that puts up a graphical alert prompting to install
+ # developer tools. Any system running Mac OS X 10.7 or
+ # later (Darwin 11 and later) is required to have a 64-bit
+ # processor. This is not true of the ARM version of Darwin
+ # that Apple uses in portable devices.
+ UNAME_PROCESSOR=x86_64
+ fi
+ echo "$UNAME_PROCESSOR"-apple-darwin"$UNAME_RELEASE"
+ exit ;;
+ *:procnto*:*:* | *:QNX:[0123456789]*:*)
+ UNAME_PROCESSOR=`uname -p`
+ if test "$UNAME_PROCESSOR" = x86; then
+ UNAME_PROCESSOR=i386
+ UNAME_MACHINE=pc
+ fi
+ echo "$UNAME_PROCESSOR"-"$UNAME_MACHINE"-nto-qnx"$UNAME_RELEASE"
+ exit ;;
+ *:QNX:*:4*)
+ echo i386-pc-qnx
+ exit ;;
+ NEO-*:NONSTOP_KERNEL:*:*)
+ echo neo-tandem-nsk"$UNAME_RELEASE"
+ exit ;;
+ NSE-*:NONSTOP_KERNEL:*:*)
+ echo nse-tandem-nsk"$UNAME_RELEASE"
+ exit ;;
+ NSR-*:NONSTOP_KERNEL:*:*)
+ echo nsr-tandem-nsk"$UNAME_RELEASE"
+ exit ;;
+ NSV-*:NONSTOP_KERNEL:*:*)
+ echo nsv-tandem-nsk"$UNAME_RELEASE"
+ exit ;;
+ NSX-*:NONSTOP_KERNEL:*:*)
+ echo nsx-tandem-nsk"$UNAME_RELEASE"
+ exit ;;
+ *:NonStop-UX:*:*)
+ echo mips-compaq-nonstopux
+ exit ;;
+ BS2000:POSIX*:*:*)
+ echo bs2000-siemens-sysv
+ exit ;;
+ DS/*:UNIX_System_V:*:*)
+ echo "$UNAME_MACHINE"-"$UNAME_SYSTEM"-"$UNAME_RELEASE"
+ exit ;;
+ *:Plan9:*:*)
+ # "uname -m" is not consistent, so use $cputype instead. 386
+ # is converted to i386 for consistency with other x86
+ # operating systems.
+ if test "$cputype" = 386; then
+ UNAME_MACHINE=i386
+ else
+ UNAME_MACHINE="$cputype"
+ fi
+ echo "$UNAME_MACHINE"-unknown-plan9
+ exit ;;
+ *:TOPS-10:*:*)
+ echo pdp10-unknown-tops10
+ exit ;;
+ *:TENEX:*:*)
+ echo pdp10-unknown-tenex
+ exit ;;
+ KS10:TOPS-20:*:* | KL10:TOPS-20:*:* | TYPE4:TOPS-20:*:*)
+ echo pdp10-dec-tops20
+ exit ;;
+ XKL-1:TOPS-20:*:* | TYPE5:TOPS-20:*:*)
+ echo pdp10-xkl-tops20
+ exit ;;
+ *:TOPS-20:*:*)
+ echo pdp10-unknown-tops20
+ exit ;;
+ *:ITS:*:*)
+ echo pdp10-unknown-its
+ exit ;;
+ SEI:*:*:SEIUX)
+ echo mips-sei-seiux"$UNAME_RELEASE"
+ exit ;;
+ *:DragonFly:*:*)
+ echo "$UNAME_MACHINE"-unknown-dragonfly"`echo "$UNAME_RELEASE"|sed -e 's/[-(].*//'`"
+ exit ;;
+ *:*VMS:*:*)
+ UNAME_MACHINE=`(uname -p) 2>/dev/null`
+ case "$UNAME_MACHINE" in
+ A*) echo alpha-dec-vms ; exit ;;
+ I*) echo ia64-dec-vms ; exit ;;
+ V*) echo vax-dec-vms ; exit ;;
+ esac ;;
+ *:XENIX:*:SysV)
+ echo i386-pc-xenix
+ exit ;;
+ i*86:skyos:*:*)
+ echo "$UNAME_MACHINE"-pc-skyos"`echo "$UNAME_RELEASE" | sed -e 's/ .*$//'`"
+ exit ;;
+ i*86:rdos:*:*)
+ echo "$UNAME_MACHINE"-pc-rdos
+ exit ;;
+ i*86:AROS:*:*)
+ echo "$UNAME_MACHINE"-pc-aros
+ exit ;;
+ x86_64:VMkernel:*:*)
+ echo "$UNAME_MACHINE"-unknown-esx
+ exit ;;
+ amd64:Isilon\ OneFS:*:*)
+ echo x86_64-unknown-onefs
+ exit ;;
+esac
+
+echo "$0: unable to guess system type" >&2
+
+case "$UNAME_MACHINE:$UNAME_SYSTEM" in
+ mips:Linux | mips64:Linux)
+ # If we got here on MIPS GNU/Linux, output extra information.
+ cat >&2 <<EOF
+
+NOTE: MIPS GNU/Linux systems require a C compiler to fully recognize
+the system type. Please install a C compiler and try again.
+EOF
+ ;;
+esac
+
+cat >&2 <<EOF
+
+This script (version $timestamp) has failed to recognize the
+operating system you are using. If your copy of the script is old,
+overwrite *all* copies of config.guess and config.sub with the latest
+versions from:
+
+ https://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess
+and
+ https://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub
+
+If $0 has already been updated, send the following data, plus any
+other information you think might be pertinent, to
+config-patches@gnu.org so that your system can be handled.
+
+config.guess timestamp = $timestamp
+
+uname -m = `(uname -m) 2>/dev/null || echo unknown`
+uname -r = `(uname -r) 2>/dev/null || echo unknown`
+uname -s = `(uname -s) 2>/dev/null || echo unknown`
+uname -v = `(uname -v) 2>/dev/null || echo unknown`
+
+/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null`
+/bin/uname -X = `(/bin/uname -X) 2>/dev/null`
+
+hostinfo = `(hostinfo) 2>/dev/null`
+/bin/universe = `(/bin/universe) 2>/dev/null`
+/usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null`
+/bin/arch = `(/bin/arch) 2>/dev/null`
+/usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null`
+/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null`
+
+UNAME_MACHINE = "$UNAME_MACHINE"
+UNAME_RELEASE = "$UNAME_RELEASE"
+UNAME_SYSTEM = "$UNAME_SYSTEM"
+UNAME_VERSION = "$UNAME_VERSION"
+EOF
+
+exit 1
+
+# Local variables:
+# eval: (add-hook 'write-file-functions 'time-stamp)
+# time-stamp-start: "timestamp='"
+# time-stamp-format: "%:y-%02m-%02d"
+# time-stamp-end: "'"
+# End:
diff --git a/config.h.in b/config.h.in
new file mode 100644
index 00000000..1719ad2b
--- /dev/null
+++ b/config.h.in
@@ -0,0 +1,358 @@
+/* config.h.in. Generated from configure.ac by autoheader. */
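+
+/* Note: every '#undef NAME' below is a template entry; when configure
+   detects the corresponding feature, config.status rewrites the line
+   to '#define NAME <value>' in the generated config.h. */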
+
+/* netdata dbengine usability */
+#undef ENABLE_DBENGINE
+
+/* netdata HTTPS usability */
+#undef ENABLE_HTTPS
+
+/* compile and link with jemalloc */
+#undef ENABLE_JEMALLOC
+
+/* netdata json-c usability */
+#undef ENABLE_JSONC
+
+/* Prometheus remote write API usability */
+#undef ENABLE_PROMETHEUS_REMOTE_WRITE
+
+/* compile and link with tcmalloc */
+#undef ENABLE_TCMALLOC
+
+/* Define to 1 if you have the `accept4' function. */
+#undef HAVE_ACCEPT4
+
+/* Define to 1 if you have the <arpa/nameser.h> header file. */
+#undef HAVE_ARPA_NAMESER_H
+
+/* libcap usability */
+#undef HAVE_CAPABILITY
+
+/* Define to 1 if the system has the type `clockid_t'. */
+#undef HAVE_CLOCKID_T
+
+/* Define to 1 if you have the `clock_gettime' function. */
+#undef HAVE_CLOCK_GETTIME
+
+/* cups usability */
+#undef HAVE_CUPS
+
+/* Define to 1 if glibc mallinfo exists. */
+#undef HAVE_C_MALLINFO
+
+/* Define to 1 if glibc mallopt exists. */
+#undef HAVE_C_MALLOPT
+
+/* Define to 1 if C11-style _Generic works. */
+#undef HAVE_C__GENERIC
+
+/* Define to 1 if __atomic operations work. */
+#undef HAVE_C___ATOMIC
+
+/* Define to 1 if you have the declaration of `strerror_r', and to 0 if you
+ don't. */
+#undef HAVE_DECL_STRERROR_R
+
+/* ipmimonitoring usability */
+#undef HAVE_FREEIPMI
+
+/* Define to 1 if the system has the `format' function attribute */
+#undef HAVE_FUNC_ATTRIBUTE_FORMAT
+
+/* Define to 1 if the system has the `malloc' function attribute */
+#undef HAVE_FUNC_ATTRIBUTE_MALLOC
+
+/* Define to 1 if the system has the `noinline' function attribute */
+#undef HAVE_FUNC_ATTRIBUTE_NOINLINE
+
+/* Define to 1 if the system has the `noreturn' function attribute */
+#undef HAVE_FUNC_ATTRIBUTE_NORETURN
+
+/* Define to 1 if the system has the `returns_nonnull' function attribute */
+#undef HAVE_FUNC_ATTRIBUTE_RETURNS_NONNULL
+
+/* Define to 1 if the system has the `warn_unused_result' function attribute
+ */
+#undef HAVE_FUNC_ATTRIBUTE_WARN_UNUSED_RESULT
+
+/* Define to 1 if you have the `getpriority' function. */
+#undef HAVE_GETPRIORITY
+
+/* Define to 1 if you have the <inttypes.h> header file. */
+#undef HAVE_INTTYPES_H
+
+/* Define to 1 if you have the <jemalloc/jemalloc.h> header file. */
+#undef HAVE_JEMALLOC_JEMALLOC_H
+
+/* libaws-cpp-sdk-kinesis usability */
+#undef HAVE_KINESIS
+
+/* libmnl usability */
+#undef HAVE_LIBMNL
+
+/* libnetfilter_acct usability */
+#undef HAVE_LIBNETFILTER_ACCT
+
+/* libxenlight usability */
+#undef HAVE_LIBXENLIGHT
+
+/* libxenstat usability */
+#undef HAVE_LIBXENSTAT
+
+/* libyajl usability */
+#undef HAVE_LIBYAJL
+
+/* Define to 1 if -flto works. */
+#undef HAVE_LTO
+
+/* Define to 1 if you have the <memory.h> header file. */
+#undef HAVE_MEMORY_H
+
+/* libmongoc usability */
+#undef HAVE_MONGOC
+
+/* Define to 1 if you have the <netdb.h> header file. */
+#undef HAVE_NETDB_H
+
+/* Define to 1 if you have the <netinet/in.h> header file. */
+#undef HAVE_NETINET_IN_H
+
+/* Define to 1 if you have the `nice' function. */
+#undef HAVE_NICE
+
+/* Define if you have POSIX threads libraries and header files. */
+#undef HAVE_PTHREAD
+
+/* Have PTHREAD_PRIO_INHERIT. */
+#undef HAVE_PTHREAD_PRIO_INHERIT
+
+/* Define to 1 if you have the `recvmmsg' function. */
+#undef HAVE_RECVMMSG
+
+/* Define to 1 if you have the <resolv.h> header file. */
+#undef HAVE_RESOLV_H
+
+/* Define to 1 if you have the `sched_getparam' function. */
+#undef HAVE_SCHED_GETPARAM
+
+/* Define to 1 if you have the `sched_getscheduler' function. */
+#undef HAVE_SCHED_GETSCHEDULER
+
+/* Define to 1 if you have the `sched_get_priority_max' function. */
+#undef HAVE_SCHED_GET_PRIORITY_MAX
+
+/* Define to 1 if you have the `sched_get_priority_min' function. */
+#undef HAVE_SCHED_GET_PRIORITY_MIN
+
+/* Define to 1 if you have the `sched_setscheduler' function. */
+#undef HAVE_SCHED_SETSCHEDULER
+
+/* Define to 1 if you have the setns() function. */
+#undef HAVE_SETNS
+
+/* Define to 1 if you have the `setpriority' function. */
+#undef HAVE_SETPRIORITY
+
+/* Define to 1 if you have the <stdint.h> header file. */
+#undef HAVE_STDINT_H
+
+/* Define to 1 if you have the <stdlib.h> header file. */
+#undef HAVE_STDLIB_H
+
+/* Define to 1 if you have the `strerror_r' function. */
+#undef HAVE_STRERROR_R
+
+/* Define to 1 if you have the <strings.h> header file. */
+#undef HAVE_STRINGS_H
+
+/* Define to 1 if you have the <string.h> header file. */
+#undef HAVE_STRING_H
+
+/* Define to 1 if the system has the type `struct timespec'. */
+#undef HAVE_STRUCT_TIMESPEC
+
+/* Define to 1 if you have the <sys/mount.h> header file. */
+#undef HAVE_SYS_MOUNT_H
+
+/* Define to 1 if you have the <sys/prctl.h> header file. */
+#undef HAVE_SYS_PRCTL_H
+
+/* Define to 1 if you have the <sys/statfs.h> header file. */
+#undef HAVE_SYS_STATFS_H
+
+/* Define to 1 if you have the <sys/statvfs.h> header file. */
+#undef HAVE_SYS_STATVFS_H
+
+/* Define to 1 if you have the <sys/stat.h> header file. */
+#undef HAVE_SYS_STAT_H
+
+/* Define to 1 if you have the <sys/types.h> header file. */
+#undef HAVE_SYS_TYPES_H
+
+/* Define to 1 if you have the <sys/vfs.h> header file. */
+#undef HAVE_SYS_VFS_H
+
+/* Define to 1 if you have the <unistd.h> header file. */
+#undef HAVE_UNISTD_H
+
+/* Define to 1 if `major', `minor', and `makedev' are declared in <mkdev.h>.
+ */
+#undef MAJOR_IN_MKDEV
+
+/* Define to 1 if `major', `minor', and `makedev' are declared in
+ <sysmacros.h>. */
+#undef MAJOR_IN_SYSMACROS
+
+/* use this user to drop privileges */
+#undef NETDATA_USER
+
+/* uuid usability */
+#undef NETDATA_WITH_UUID
+
+/* zlib usability */
+#undef NETDATA_WITH_ZLIB
+
+/* Name of package */
+#undef PACKAGE
+
+/* Define to the address where bug reports for this package should be sent. */
+#undef PACKAGE_BUGREPORT
+
+/* Define to the full name of this package. */
+#undef PACKAGE_NAME
+
+/* Define to the full name and version of this package. */
+#undef PACKAGE_STRING
+
+/* Define to the one symbol short name of this package. */
+#undef PACKAGE_TARNAME
+
+/* Define to the home page for this package. */
+#undef PACKAGE_URL
+
+/* Define to the version of this package. */
+#undef PACKAGE_VERSION
+
+/* Define to necessary symbol if this constant uses a non-standard name on
+ your system. */
+#undef PTHREAD_CREATE_JOINABLE
+
+/* The size of `void *', as computed by sizeof. */
+#undef SIZEOF_VOID_P
+
+/* Define to 1 if you have the ANSI C header files. */
+#undef STDC_HEADERS
+
+/* math usability */
+#undef STORAGE_WITH_MATH
+
+/* Define to 1 if strerror_r returns char *. */
+#undef STRERROR_R_CHAR_P
+
+/* Enable extensions on AIX 3, Interix. */
+#ifndef _ALL_SOURCE
+# undef _ALL_SOURCE
+#endif
+/* Enable GNU extensions on systems that have them. */
+#ifndef _GNU_SOURCE
+# undef _GNU_SOURCE
+#endif
+/* Enable threading extensions on Solaris. */
+#ifndef _POSIX_PTHREAD_SEMANTICS
+# undef _POSIX_PTHREAD_SEMANTICS
+#endif
+/* Enable extensions on HP NonStop. */
+#ifndef _TANDEM_SOURCE
+# undef _TANDEM_SOURCE
+#endif
+/* Enable general extensions on Solaris. */
+#ifndef __EXTENSIONS__
+# undef __EXTENSIONS__
+#endif
+
+
+/* Version number of package */
+#undef VERSION
+
+/* Define to 1 if on MINIX. */
+#undef _MINIX
+
+/* Define to 2 if the system does not provide POSIX.1 features except with
+ this defined. */
+#undef _POSIX_1_SOURCE
+
+/* Define to 1 if you need to in order for `stat' and other things to work. */
+#undef _POSIX_SOURCE
+
+/* Define for Solaris 2.5.1 so the uint32_t typedef from <sys/synch.h>,
+ <pthread.h>, or <semaphore.h> is not used. If the typedef were allowed, the
+ #define below would cause a syntax error. */
+#undef _UINT32_T
+
+/* Define for Solaris 2.5.1 so the uint64_t typedef from <sys/synch.h>,
+ <pthread.h>, or <semaphore.h> is not used. If the typedef were allowed, the
+ #define below would cause a syntax error. */
+#undef _UINT64_T
+
+/* Define for Solaris 2.5.1 so the uint8_t typedef from <sys/synch.h>,
+ <pthread.h>, or <semaphore.h> is not used. If the typedef were allowed, the
+ #define below would cause a syntax error. */
+#undef _UINT8_T
+
+/* dummy unused attribute */
+#undef __always_unused
+
+/* dummy unused attribute */
+#undef __maybe_unused
+
+/* Link/compile against jemalloc */
+#undef has_jemalloc
+
+/* Link/compile against tcmalloc */
+#undef has_tcmalloc
+
+/* Define to `__inline__' or `__inline' if that's what the C compiler
+ calls it, or to nothing if 'inline' is not supported under any name. */
+#ifndef __cplusplus
+#undef inline
+#endif
+
+/* Define to the type of a signed integer type of width exactly 16 bits if
+ such a type exists and the standard includes do not define it. */
+#undef int16_t
+
+/* Define to the type of a signed integer type of width exactly 32 bits if
+ such a type exists and the standard includes do not define it. */
+#undef int32_t
+
+/* Define to the type of a signed integer type of width exactly 64 bits if
+ such a type exists and the standard includes do not define it. */
+#undef int64_t
+
+/* Define to the type of a signed integer type of width exactly 8 bits if such
+ a type exists and the standard includes do not define it. */
+#undef int8_t
+
+/* gcc branch optimization */
+#undef likely
+
+/* jemalloc prefix */
+#undef prefix_jemalloc
+
+/* Define to the type of an unsigned integer type of width exactly 16 bits if
+ such a type exists and the standard includes do not define it. */
+#undef uint16_t
+
+/* Define to the type of an unsigned integer type of width exactly 32 bits if
+ such a type exists and the standard includes do not define it. */
+#undef uint32_t
+
+/* Define to the type of an unsigned integer type of width exactly 64 bits if
+ such a type exists and the standard includes do not define it. */
+#undef uint64_t
+
+/* Define to the type of an unsigned integer type of width exactly 8 bits if
+ such a type exists and the standard includes do not define it. */
+#undef uint8_t
+
+/* gcc branch optimization */
+#undef unlikely
diff --git a/config.sub b/config.sub
new file mode 100755
index 00000000..1d8e98bc
--- /dev/null
+++ b/config.sub
@@ -0,0 +1,1801 @@
+#! /bin/sh
+# Configuration validation subroutine script.
+# Copyright 1992-2018 Free Software Foundation, Inc.
+
+timestamp='2018-02-22'
+
+# This file is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, see <https://www.gnu.org/licenses/>.
+#
+# As a special exception to the GNU General Public License, if you
+# distribute this file as part of a program that contains a
+# configuration script generated by Autoconf, you may include it under
+# the same distribution terms that you use for the rest of that
+# program. This Exception is an additional permission under section 7
+# of the GNU General Public License, version 3 ("GPLv3").
+
+
+# Please send patches to <config-patches@gnu.org>.
+#
+# Configuration subroutine to validate and canonicalize a configuration type.
+# Supply the specified configuration type as an argument.
+# If it is invalid, we print an error message on stderr and exit with code 1.
+# Otherwise, we print the canonical config type on stdout and succeed.
+
+# You can get the latest version of this script from:
+# https://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub
+
+# This file is supposed to be the same for all GNU packages
+# and recognize all the CPU types, system types and aliases
+# that are meaningful with *any* GNU software.
+# Each package is responsible for reporting which valid configurations
+# it does not support. The user should be able to distinguish
+# a failure to support a valid configuration from a meaningless
+# configuration.
+
+# The goal of this file is to map all the various variations of a given
+# machine specification into a single specification in the form:
+# CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM
+# or in some cases, the newer four-part form:
+# CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM
+# It is wrong to echo any other type of specification.
+
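+# For illustration (this note is not part of the upstream script), a
+# typical invocation and its canonical output:
+#   $ sh ./config.sub amd64-linux
+#   x86_64-pc-linux-gnu
+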
+me=`echo "$0" | sed -e 's,.*/,,'`
+
+usage="\
+Usage: $0 [OPTION] CPU-MFR-OPSYS or ALIAS
+
+Canonicalize a configuration name.
+
+Options:
+ -h, --help print this help, then exit
+ -t, --time-stamp print date of last modification, then exit
+ -v, --version print version number, then exit
+
+Report bugs and patches to <config-patches@gnu.org>."
+
+version="\
+GNU config.sub ($timestamp)
+
+Copyright 1992-2018 Free Software Foundation, Inc.
+
+This is free software; see the source for copying conditions. There is NO
+warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."
+
+help="
+Try \`$me --help' for more information."
+
+# Parse command line
+while test $# -gt 0 ; do
+ case $1 in
+ --time-stamp | --time* | -t )
+ echo "$timestamp" ; exit ;;
+ --version | -v )
+ echo "$version" ; exit ;;
+ --help | --h* | -h )
+ echo "$usage"; exit ;;
+ -- ) # Stop option processing
+ shift; break ;;
+ - ) # Use stdin as input.
+ break ;;
+ -* )
+ echo "$me: invalid option $1$help"
+ exit 1 ;;
+
+ *local*)
+ # First pass through any local machine types.
+ echo "$1"
+ exit ;;
+
+ * )
+ break ;;
+ esac
+done
+
+case $# in
+ 0) echo "$me: missing argument$help" >&2
+ exit 1;;
+ 1) ;;
+ *) echo "$me: too many arguments$help" >&2
+ exit 1;;
+esac
+
+# Separate what the user gave into CPU-COMPANY and OS or KERNEL-OS (if any).
+# Here we must recognize all the valid KERNEL-OS combinations.
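+# (For example, "i686-pc-linux-gnu" is split below into basic_machine
+# "i686-pc" and a KERNEL-OS part "linux-gnu".)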
+maybe_os=`echo "$1" | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\2/'`
+case $maybe_os in
+ nto-qnx* | linux-gnu* | linux-android* | linux-dietlibc | linux-newlib* | \
+ linux-musl* | linux-uclibc* | uclinux-uclibc* | uclinux-gnu* | kfreebsd*-gnu* | \
+ knetbsd*-gnu* | netbsd*-gnu* | netbsd*-eabi* | \
+ kopensolaris*-gnu* | cloudabi*-eabi* | \
+ storm-chaos* | os2-emx* | rtmk-nova*)
+ os=-$maybe_os
+ basic_machine=`echo "$1" | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'`
+ ;;
+ android-linux)
+ os=-linux-android
+ basic_machine=`echo "$1" | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'`-unknown
+ ;;
+ *)
+ basic_machine=`echo "$1" | sed 's/-[^-]*$//'`
+ if [ "$basic_machine" != "$1" ]
+ then os=`echo "$1" | sed 's/.*-/-/'`
+ else os=; fi
+ ;;
+esac
+
+### Let's recognize common machines as not being operating systems so
+### that things like config.sub decstation-3100 work. We also
+### recognize some manufacturers as not being operating systems, so we
+### can provide default operating systems below.
+case $os in
+ -sun*os*)
+ # Prevent following clause from handling this invalid input.
+ ;;
+ -dec* | -mips* | -sequent* | -encore* | -pc532* | -sgi* | -sony* | \
+ -att* | -7300* | -3300* | -delta* | -motorola* | -sun[234]* | \
+ -unicom* | -ibm* | -next | -hp | -isi* | -apollo | -altos* | \
+ -convergent* | -ncr* | -news | -32* | -3600* | -3100* | -hitachi* |\
+ -c[123]* | -convex* | -sun | -crds | -omron* | -dg | -ultra | -tti* | \
+ -harris | -dolphin | -highlevel | -gould | -cbm | -ns | -masscomp | \
+ -apple | -axis | -knuth | -cray | -microblaze*)
+ os=
+ basic_machine=$1
+ ;;
+ -bluegene*)
+ os=-cnk
+ ;;
+ -sim | -cisco | -oki | -wec | -winbond)
+ os=
+ basic_machine=$1
+ ;;
+ -scout)
+ ;;
+ -wrs)
+ os=-vxworks
+ basic_machine=$1
+ ;;
+ -chorusos*)
+ os=-chorusos
+ basic_machine=$1
+ ;;
+ -chorusrdb)
+ os=-chorusrdb
+ basic_machine=$1
+ ;;
+ -hiux*)
+ os=-hiuxwe2
+ ;;
+ -sco6)
+ os=-sco5v6
+ basic_machine=`echo "$1" | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -sco5)
+ os=-sco3.2v5
+ basic_machine=`echo "$1" | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -sco4)
+ os=-sco3.2v4
+ basic_machine=`echo "$1" | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -sco3.2.[4-9]*)
+ os=`echo $os | sed -e 's/sco3.2./sco3.2v/'`
+ basic_machine=`echo "$1" | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -sco3.2v[4-9]*)
+ # Don't forget version if it is 3.2v4 or newer.
+ basic_machine=`echo "$1" | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -sco5v6*)
+ # Don't forget version if it is 3.2v4 or newer.
+ basic_machine=`echo "$1" | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -sco*)
+ os=-sco3.2v2
+ basic_machine=`echo "$1" | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -udk*)
+ basic_machine=`echo "$1" | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -isc)
+ os=-isc2.2
+ basic_machine=`echo "$1" | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -clix*)
+ basic_machine=clipper-intergraph
+ ;;
+ -isc*)
+ basic_machine=`echo "$1" | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -lynx*178)
+ os=-lynxos178
+ ;;
+ -lynx*5)
+ os=-lynxos5
+ ;;
+ -lynx*)
+ os=-lynxos
+ ;;
+ -ptx*)
+ basic_machine=`echo "$1" | sed -e 's/86-.*/86-sequent/'`
+ ;;
+ -psos*)
+ os=-psos
+ ;;
+ -mint | -mint[0-9]*)
+ basic_machine=m68k-atari
+ os=-mint
+ ;;
+esac
+
+# Decode aliases for certain CPU-COMPANY combinations.
+case $basic_machine in
+ # Recognize the basic CPU types without company name.
+ # Some are omitted here because they have special meanings below.
+ 1750a | 580 \
+ | a29k \
+ | aarch64 | aarch64_be \
+ | alpha | alphaev[4-8] | alphaev56 | alphaev6[78] | alphapca5[67] \
+ | alpha64 | alpha64ev[4-8] | alpha64ev56 | alpha64ev6[78] | alpha64pca5[67] \
+ | am33_2.0 \
+ | arc | arceb \
+ | arm | arm[bl]e | arme[lb] | armv[2-8] | armv[3-8][lb] | armv7[arm] \
+ | avr | avr32 \
+ | ba \
+ | be32 | be64 \
+ | bfin \
+ | c4x | c8051 | clipper \
+ | d10v | d30v | dlx | dsp16xx \
+ | e2k | epiphany \
+ | fido | fr30 | frv | ft32 \
+ | h8300 | h8500 | hppa | hppa1.[01] | hppa2.0 | hppa2.0[nw] | hppa64 \
+ | hexagon \
+ | i370 | i860 | i960 | ia16 | ia64 \
+ | ip2k | iq2000 \
+ | k1om \
+ | le32 | le64 \
+ | lm32 \
+ | m32c | m32r | m32rle | m68000 | m68k | m88k \
+ | maxq | mb | microblaze | microblazeel | mcore | mep | metag \
+ | mips | mipsbe | mipseb | mipsel | mipsle \
+ | mips16 \
+ | mips64 | mips64el \
+ | mips64octeon | mips64octeonel \
+ | mips64orion | mips64orionel \
+ | mips64r5900 | mips64r5900el \
+ | mips64vr | mips64vrel \
+ | mips64vr4100 | mips64vr4100el \
+ | mips64vr4300 | mips64vr4300el \
+ | mips64vr5000 | mips64vr5000el \
+ | mips64vr5900 | mips64vr5900el \
+ | mipsisa32 | mipsisa32el \
+ | mipsisa32r2 | mipsisa32r2el \
+ | mipsisa32r6 | mipsisa32r6el \
+ | mipsisa64 | mipsisa64el \
+ | mipsisa64r2 | mipsisa64r2el \
+ | mipsisa64r6 | mipsisa64r6el \
+ | mipsisa64sb1 | mipsisa64sb1el \
+ | mipsisa64sr71k | mipsisa64sr71kel \
+ | mipsr5900 | mipsr5900el \
+ | mipstx39 | mipstx39el \
+ | mn10200 | mn10300 \
+ | moxie \
+ | mt \
+ | msp430 \
+ | nds32 | nds32le | nds32be \
+ | nios | nios2 | nios2eb | nios2el \
+ | ns16k | ns32k \
+ | open8 | or1k | or1knd | or32 \
+ | pdp10 | pj | pjl \
+ | powerpc | powerpc64 | powerpc64le | powerpcle \
+ | pru \
+ | pyramid \
+ | riscv32 | riscv64 \
+ | rl78 | rx \
+ | score \
+ | sh | sh[1234] | sh[24]a | sh[24]aeb | sh[23]e | sh[234]eb | sheb | shbe | shle | sh[1234]le | sh3ele \
+ | sh64 | sh64le \
+ | sparc | sparc64 | sparc64b | sparc64v | sparc86x | sparclet | sparclite \
+ | sparcv8 | sparcv9 | sparcv9b | sparcv9v \
+ | spu \
+ | tahoe | tic4x | tic54x | tic55x | tic6x | tic80 | tron \
+ | ubicom32 \
+ | v850 | v850e | v850e1 | v850e2 | v850es | v850e2v3 \
+ | visium \
+ | wasm32 \
+ | x86 | xc16x | xstormy16 | xtensa \
+ | z8k | z80)
+ basic_machine=$basic_machine-unknown
+ ;;
+ c54x)
+ basic_machine=tic54x-unknown
+ ;;
+ c55x)
+ basic_machine=tic55x-unknown
+ ;;
+ c6x)
+ basic_machine=tic6x-unknown
+ ;;
+ leon|leon[3-9])
+ basic_machine=sparc-$basic_machine
+ ;;
+ m6811 | m68hc11 | m6812 | m68hc12 | m68hcs12x | nvptx | picochip)
+ basic_machine=$basic_machine-unknown
+ os=-none
+ ;;
+ m88110 | m680[12346]0 | m683?2 | m68360 | m5200 | v70 | w65)
+ ;;
+ ms1)
+ basic_machine=mt-unknown
+ ;;
+
+ strongarm | thumb | xscale)
+ basic_machine=arm-unknown
+ ;;
+ xgate)
+ basic_machine=$basic_machine-unknown
+ os=-none
+ ;;
+ xscaleeb)
+ basic_machine=armeb-unknown
+ ;;
+
+ xscaleel)
+ basic_machine=armel-unknown
+ ;;
+
+ # We use `pc' rather than `unknown'
+ # because (1) that's what they normally are, and
+ # (2) the word "unknown" tends to confuse beginning users.
+ i*86 | x86_64)
+ basic_machine=$basic_machine-pc
+ ;;
+ # Object if more than one company name word.
+ *-*-*)
+ echo Invalid configuration \`"$1"\': machine \`"$basic_machine"\' not recognized 1>&2
+ exit 1
+ ;;
+ # Recognize the basic CPU types with company name.
+ 580-* \
+ | a29k-* \
+ | aarch64-* | aarch64_be-* \
+ | alpha-* | alphaev[4-8]-* | alphaev56-* | alphaev6[78]-* \
+ | alpha64-* | alpha64ev[4-8]-* | alpha64ev56-* | alpha64ev6[78]-* \
+ | alphapca5[67]-* | alpha64pca5[67]-* | arc-* | arceb-* \
+ | arm-* | armbe-* | armle-* | armeb-* | armv*-* \
+ | avr-* | avr32-* \
+ | ba-* \
+ | be32-* | be64-* \
+ | bfin-* | bs2000-* \
+ | c[123]* | c30-* | [cjt]90-* | c4x-* \
+ | c8051-* | clipper-* | craynv-* | cydra-* \
+ | d10v-* | d30v-* | dlx-* \
+ | e2k-* | elxsi-* \
+ | f30[01]-* | f700-* | fido-* | fr30-* | frv-* | fx80-* \
+ | h8300-* | h8500-* \
+ | hppa-* | hppa1.[01]-* | hppa2.0-* | hppa2.0[nw]-* | hppa64-* \
+ | hexagon-* \
+ | i*86-* | i860-* | i960-* | ia16-* | ia64-* \
+ | ip2k-* | iq2000-* \
+ | k1om-* \
+ | le32-* | le64-* \
+ | lm32-* \
+ | m32c-* | m32r-* | m32rle-* \
+ | m68000-* | m680[012346]0-* | m68360-* | m683?2-* | m68k-* \
+ | m88110-* | m88k-* | maxq-* | mcore-* | metag-* \
+ | microblaze-* | microblazeel-* \
+ | mips-* | mipsbe-* | mipseb-* | mipsel-* | mipsle-* \
+ | mips16-* \
+ | mips64-* | mips64el-* \
+ | mips64octeon-* | mips64octeonel-* \
+ | mips64orion-* | mips64orionel-* \
+ | mips64r5900-* | mips64r5900el-* \
+ | mips64vr-* | mips64vrel-* \
+ | mips64vr4100-* | mips64vr4100el-* \
+ | mips64vr4300-* | mips64vr4300el-* \
+ | mips64vr5000-* | mips64vr5000el-* \
+ | mips64vr5900-* | mips64vr5900el-* \
+ | mipsisa32-* | mipsisa32el-* \
+ | mipsisa32r2-* | mipsisa32r2el-* \
+ | mipsisa32r6-* | mipsisa32r6el-* \
+ | mipsisa64-* | mipsisa64el-* \
+ | mipsisa64r2-* | mipsisa64r2el-* \
+ | mipsisa64r6-* | mipsisa64r6el-* \
+ | mipsisa64sb1-* | mipsisa64sb1el-* \
+ | mipsisa64sr71k-* | mipsisa64sr71kel-* \
+ | mipsr5900-* | mipsr5900el-* \
+ | mipstx39-* | mipstx39el-* \
+ | mmix-* \
+ | mt-* \
+ | msp430-* \
+ | nds32-* | nds32le-* | nds32be-* \
+ | nios-* | nios2-* | nios2eb-* | nios2el-* \
+ | none-* | np1-* | ns16k-* | ns32k-* \
+ | open8-* \
+ | or1k*-* \
+ | orion-* \
+ | pdp10-* | pdp11-* | pj-* | pjl-* | pn-* | power-* \
+ | powerpc-* | powerpc64-* | powerpc64le-* | powerpcle-* \
+ | pru-* \
+ | pyramid-* \
+ | riscv32-* | riscv64-* \
+ | rl78-* | romp-* | rs6000-* | rx-* \
+ | sh-* | sh[1234]-* | sh[24]a-* | sh[24]aeb-* | sh[23]e-* | sh[34]eb-* | sheb-* | shbe-* \
+ | shle-* | sh[1234]le-* | sh3ele-* | sh64-* | sh64le-* \
+ | sparc-* | sparc64-* | sparc64b-* | sparc64v-* | sparc86x-* | sparclet-* \
+ | sparclite-* \
+ | sparcv8-* | sparcv9-* | sparcv9b-* | sparcv9v-* | sv1-* | sx*-* \
+ | tahoe-* \
+ | tic30-* | tic4x-* | tic54x-* | tic55x-* | tic6x-* | tic80-* \
+ | tile*-* \
+ | tron-* \
+ | ubicom32-* \
+ | v850-* | v850e-* | v850e1-* | v850es-* | v850e2-* | v850e2v3-* \
+ | vax-* \
+ | visium-* \
+ | wasm32-* \
+ | we32k-* \
+ | x86-* | x86_64-* | xc16x-* | xps100-* \
+ | xstormy16-* | xtensa*-* \
+ | ymp-* \
+ | z8k-* | z80-*)
+ ;;
+ # Recognize the basic CPU types without company name, with glob match.
+ xtensa*)
+ basic_machine=$basic_machine-unknown
+ ;;
+ # Recognize the various machine names and aliases which stand
+ # for a CPU type and a company and sometimes even an OS.
+ 386bsd)
+ basic_machine=i386-pc
+ os=-bsd
+ ;;
+ 3b1 | 7300 | 7300-att | att-7300 | pc7300 | safari | unixpc)
+ basic_machine=m68000-att
+ ;;
+ 3b*)
+ basic_machine=we32k-att
+ ;;
+ a29khif)
+ basic_machine=a29k-amd
+ os=-udi
+ ;;
+ abacus)
+ basic_machine=abacus-unknown
+ ;;
+ adobe68k)
+ basic_machine=m68010-adobe
+ os=-scout
+ ;;
+ alliant | fx80)
+ basic_machine=fx80-alliant
+ ;;
+ altos | altos3068)
+ basic_machine=m68k-altos
+ ;;
+ am29k)
+ basic_machine=a29k-none
+ os=-bsd
+ ;;
+ amd64)
+ basic_machine=x86_64-pc
+ ;;
+ amd64-*)
+ basic_machine=x86_64-`echo "$basic_machine" | sed 's/^[^-]*-//'`
+ ;;
+ amdahl)
+ basic_machine=580-amdahl
+ os=-sysv
+ ;;
+ amiga | amiga-*)
+ basic_machine=m68k-unknown
+ ;;
+ amigaos | amigados)
+ basic_machine=m68k-unknown
+ os=-amigaos
+ ;;
+ amigaunix | amix)
+ basic_machine=m68k-unknown
+ os=-sysv4
+ ;;
+ apollo68)
+ basic_machine=m68k-apollo
+ os=-sysv
+ ;;
+ apollo68bsd)
+ basic_machine=m68k-apollo
+ os=-bsd
+ ;;
+ aros)
+ basic_machine=i386-pc
+ os=-aros
+ ;;
+ asmjs)
+ basic_machine=asmjs-unknown
+ ;;
+ aux)
+ basic_machine=m68k-apple
+ os=-aux
+ ;;
+ balance)
+ basic_machine=ns32k-sequent
+ os=-dynix
+ ;;
+ blackfin)
+ basic_machine=bfin-unknown
+ os=-linux
+ ;;
+ blackfin-*)
+ basic_machine=bfin-`echo "$basic_machine" | sed 's/^[^-]*-//'`
+ os=-linux
+ ;;
+ bluegene*)
+ basic_machine=powerpc-ibm
+ os=-cnk
+ ;;
+ c54x-*)
+ basic_machine=tic54x-`echo "$basic_machine" | sed 's/^[^-]*-//'`
+ ;;
+ c55x-*)
+ basic_machine=tic55x-`echo "$basic_machine" | sed 's/^[^-]*-//'`
+ ;;
+ c6x-*)
+ basic_machine=tic6x-`echo "$basic_machine" | sed 's/^[^-]*-//'`
+ ;;
+ c90)
+ basic_machine=c90-cray
+ os=-unicos
+ ;;
+ cegcc)
+ basic_machine=arm-unknown
+ os=-cegcc
+ ;;
+ convex-c1)
+ basic_machine=c1-convex
+ os=-bsd
+ ;;
+ convex-c2)
+ basic_machine=c2-convex
+ os=-bsd
+ ;;
+ convex-c32)
+ basic_machine=c32-convex
+ os=-bsd
+ ;;
+ convex-c34)
+ basic_machine=c34-convex
+ os=-bsd
+ ;;
+ convex-c38)
+ basic_machine=c38-convex
+ os=-bsd
+ ;;
+ cray | j90)
+ basic_machine=j90-cray
+ os=-unicos
+ ;;
+ craynv)
+ basic_machine=craynv-cray
+ os=-unicosmp
+ ;;
+ cr16 | cr16-*)
+ basic_machine=cr16-unknown
+ os=-elf
+ ;;
+ crds | unos)
+ basic_machine=m68k-crds
+ ;;
+ crisv32 | crisv32-* | etraxfs*)
+ basic_machine=crisv32-axis
+ ;;
+ cris | cris-* | etrax*)
+ basic_machine=cris-axis
+ ;;
+ crx)
+ basic_machine=crx-unknown
+ os=-elf
+ ;;
+ da30 | da30-*)
+ basic_machine=m68k-da30
+ ;;
+ decstation | decstation-3100 | pmax | pmax-* | pmin | dec3100 | decstatn)
+ basic_machine=mips-dec
+ ;;
+ decsystem10* | dec10*)
+ basic_machine=pdp10-dec
+ os=-tops10
+ ;;
+ decsystem20* | dec20*)
+ basic_machine=pdp10-dec
+ os=-tops20
+ ;;
+ delta | 3300 | motorola-3300 | motorola-delta \
+ | 3300-motorola | delta-motorola)
+ basic_machine=m68k-motorola
+ ;;
+ delta88)
+ basic_machine=m88k-motorola
+ os=-sysv3
+ ;;
+ dicos)
+ basic_machine=i686-pc
+ os=-dicos
+ ;;
+ djgpp)
+ basic_machine=i586-pc
+ os=-msdosdjgpp
+ ;;
+ dpx20 | dpx20-*)
+ basic_machine=rs6000-bull
+ os=-bosx
+ ;;
+ dpx2*)
+ basic_machine=m68k-bull
+ os=-sysv3
+ ;;
+ e500v[12])
+ basic_machine=powerpc-unknown
+ os=$os"spe"
+ ;;
+ e500v[12]-*)
+ basic_machine=powerpc-`echo "$basic_machine" | sed 's/^[^-]*-//'`
+ os=$os"spe"
+ ;;
+ ebmon29k)
+ basic_machine=a29k-amd
+ os=-ebmon
+ ;;
+ elxsi)
+ basic_machine=elxsi-elxsi
+ os=-bsd
+ ;;
+ encore | umax | mmax)
+ basic_machine=ns32k-encore
+ ;;
+ es1800 | OSE68k | ose68k | ose | OSE)
+ basic_machine=m68k-ericsson
+ os=-ose
+ ;;
+ fx2800)
+ basic_machine=i860-alliant
+ ;;
+ genix)
+ basic_machine=ns32k-ns
+ ;;
+ gmicro)
+ basic_machine=tron-gmicro
+ os=-sysv
+ ;;
+ go32)
+ basic_machine=i386-pc
+ os=-go32
+ ;;
+ h3050r* | hiux*)
+ basic_machine=hppa1.1-hitachi
+ os=-hiuxwe2
+ ;;
+ h8300hms)
+ basic_machine=h8300-hitachi
+ os=-hms
+ ;;
+ h8300xray)
+ basic_machine=h8300-hitachi
+ os=-xray
+ ;;
+ h8500hms)
+ basic_machine=h8500-hitachi
+ os=-hms
+ ;;
+ harris)
+ basic_machine=m88k-harris
+ os=-sysv3
+ ;;
+ hp300-*)
+ basic_machine=m68k-hp
+ ;;
+ hp300bsd)
+ basic_machine=m68k-hp
+ os=-bsd
+ ;;
+ hp300hpux)
+ basic_machine=m68k-hp
+ os=-hpux
+ ;;
+ hp3k9[0-9][0-9] | hp9[0-9][0-9])
+ basic_machine=hppa1.0-hp
+ ;;
+ hp9k2[0-9][0-9] | hp9k31[0-9])
+ basic_machine=m68000-hp
+ ;;
+ hp9k3[2-9][0-9])
+ basic_machine=m68k-hp
+ ;;
+ hp9k6[0-9][0-9] | hp6[0-9][0-9])
+ basic_machine=hppa1.0-hp
+ ;;
+ hp9k7[0-79][0-9] | hp7[0-79][0-9])
+ basic_machine=hppa1.1-hp
+ ;;
+ hp9k78[0-9] | hp78[0-9])
+ # FIXME: really hppa2.0-hp
+ basic_machine=hppa1.1-hp
+ ;;
+ hp9k8[67]1 | hp8[67]1 | hp9k80[24] | hp80[24] | hp9k8[78]9 | hp8[78]9 | hp9k893 | hp893)
+ # FIXME: really hppa2.0-hp
+ basic_machine=hppa1.1-hp
+ ;;
+ hp9k8[0-9][13679] | hp8[0-9][13679])
+ basic_machine=hppa1.1-hp
+ ;;
+ hp9k8[0-9][0-9] | hp8[0-9][0-9])
+ basic_machine=hppa1.0-hp
+ ;;
+ hppaosf)
+ basic_machine=hppa1.1-hp
+ os=-osf
+ ;;
+ hppro)
+ basic_machine=hppa1.1-hp
+ os=-proelf
+ ;;
+ i370-ibm* | ibm*)
+ basic_machine=i370-ibm
+ ;;
+ i*86v32)
+ basic_machine=`echo "$1" | sed -e 's/86.*/86-pc/'`
+ os=-sysv32
+ ;;
+ i*86v4*)
+ basic_machine=`echo "$1" | sed -e 's/86.*/86-pc/'`
+ os=-sysv4
+ ;;
+ i*86v)
+ basic_machine=`echo "$1" | sed -e 's/86.*/86-pc/'`
+ os=-sysv
+ ;;
+ i*86sol2)
+ basic_machine=`echo "$1" | sed -e 's/86.*/86-pc/'`
+ os=-solaris2
+ ;;
+ i386mach)
+ basic_machine=i386-mach
+ os=-mach
+ ;;
+ vsta)
+ basic_machine=i386-unknown
+ os=-vsta
+ ;;
+ iris | iris4d)
+ basic_machine=mips-sgi
+ case $os in
+ -irix*)
+ ;;
+ *)
+ os=-irix4
+ ;;
+ esac
+ ;;
+ isi68 | isi)
+ basic_machine=m68k-isi
+ os=-sysv
+ ;;
+ leon-*|leon[3-9]-*)
+ basic_machine=sparc-`echo "$basic_machine" | sed 's/-.*//'`
+ ;;
+ m68knommu)
+ basic_machine=m68k-unknown
+ os=-linux
+ ;;
+ m68knommu-*)
+ basic_machine=m68k-`echo "$basic_machine" | sed 's/^[^-]*-//'`
+ os=-linux
+ ;;
+ magnum | m3230)
+ basic_machine=mips-mips
+ os=-sysv
+ ;;
+ merlin)
+ basic_machine=ns32k-utek
+ os=-sysv
+ ;;
+ microblaze*)
+ basic_machine=microblaze-xilinx
+ ;;
+ mingw64)
+ basic_machine=x86_64-pc
+ os=-mingw64
+ ;;
+ mingw32)
+ basic_machine=i686-pc
+ os=-mingw32
+ ;;
+ mingw32ce)
+ basic_machine=arm-unknown
+ os=-mingw32ce
+ ;;
+ miniframe)
+ basic_machine=m68000-convergent
+ ;;
+ *mint | -mint[0-9]* | *MiNT | *MiNT[0-9]*)
+ basic_machine=m68k-atari
+ os=-mint
+ ;;
+ mips3*-*)
+ basic_machine=`echo "$basic_machine" | sed -e 's/mips3/mips64/'`
+ ;;
+ mips3*)
+ basic_machine=`echo "$basic_machine" | sed -e 's/mips3/mips64/'`-unknown
+ ;;
+ monitor)
+ basic_machine=m68k-rom68k
+ os=-coff
+ ;;
+ morphos)
+ basic_machine=powerpc-unknown
+ os=-morphos
+ ;;
+ moxiebox)
+ basic_machine=moxie-unknown
+ os=-moxiebox
+ ;;
+ msdos)
+ basic_machine=i386-pc
+ os=-msdos
+ ;;
+ ms1-*)
+ basic_machine=`echo "$basic_machine" | sed -e 's/ms1-/mt-/'`
+ ;;
+ msys)
+ basic_machine=i686-pc
+ os=-msys
+ ;;
+ mvs)
+ basic_machine=i370-ibm
+ os=-mvs
+ ;;
+ nacl)
+ basic_machine=le32-unknown
+ os=-nacl
+ ;;
+ ncr3000)
+ basic_machine=i486-ncr
+ os=-sysv4
+ ;;
+ netbsd386)
+ basic_machine=i386-unknown
+ os=-netbsd
+ ;;
+ netwinder)
+ basic_machine=armv4l-rebel
+ os=-linux
+ ;;
+ news | news700 | news800 | news900)
+ basic_machine=m68k-sony
+ os=-newsos
+ ;;
+ news1000)
+ basic_machine=m68030-sony
+ os=-newsos
+ ;;
+ news-3600 | risc-news)
+ basic_machine=mips-sony
+ os=-newsos
+ ;;
+ necv70)
+ basic_machine=v70-nec
+ os=-sysv
+ ;;
+ next | m*-next)
+ basic_machine=m68k-next
+ case $os in
+ -nextstep* )
+ ;;
+ -ns2*)
+ os=-nextstep2
+ ;;
+ *)
+ os=-nextstep3
+ ;;
+ esac
+ ;;
+ nh3000)
+ basic_machine=m68k-harris
+ os=-cxux
+ ;;
+ nh[45]000)
+ basic_machine=m88k-harris
+ os=-cxux
+ ;;
+ nindy960)
+ basic_machine=i960-intel
+ os=-nindy
+ ;;
+ mon960)
+ basic_machine=i960-intel
+ os=-mon960
+ ;;
+ nonstopux)
+ basic_machine=mips-compaq
+ os=-nonstopux
+ ;;
+ np1)
+ basic_machine=np1-gould
+ ;;
+ neo-tandem)
+ basic_machine=neo-tandem
+ ;;
+ nse-tandem)
+ basic_machine=nse-tandem
+ ;;
+ nsr-tandem)
+ basic_machine=nsr-tandem
+ ;;
+ nsv-tandem)
+ basic_machine=nsv-tandem
+ ;;
+ nsx-tandem)
+ basic_machine=nsx-tandem
+ ;;
+ op50n-* | op60c-*)
+ basic_machine=hppa1.1-oki
+ os=-proelf
+ ;;
+ openrisc | openrisc-*)
+ basic_machine=or32-unknown
+ ;;
+ os400)
+ basic_machine=powerpc-ibm
+ os=-os400
+ ;;
+ OSE68000 | ose68000)
+ basic_machine=m68000-ericsson
+ os=-ose
+ ;;
+ os68k)
+ basic_machine=m68k-none
+ os=-os68k
+ ;;
+ pa-hitachi)
+ basic_machine=hppa1.1-hitachi
+ os=-hiuxwe2
+ ;;
+ paragon)
+ basic_machine=i860-intel
+ os=-osf
+ ;;
+ parisc)
+ basic_machine=hppa-unknown
+ os=-linux
+ ;;
+ parisc-*)
+ basic_machine=hppa-`echo "$basic_machine" | sed 's/^[^-]*-//'`
+ os=-linux
+ ;;
+ pbd)
+ basic_machine=sparc-tti
+ ;;
+ pbb)
+ basic_machine=m68k-tti
+ ;;
+ pc532 | pc532-*)
+ basic_machine=ns32k-pc532
+ ;;
+ pc98)
+ basic_machine=i386-pc
+ ;;
+ pc98-*)
+ basic_machine=i386-`echo "$basic_machine" | sed 's/^[^-]*-//'`
+ ;;
+ pentium | p5 | k5 | k6 | nexgen | viac3)
+ basic_machine=i586-pc
+ ;;
+ pentiumpro | p6 | 6x86 | athlon | athlon_*)
+ basic_machine=i686-pc
+ ;;
+ pentiumii | pentium2 | pentiumiii | pentium3)
+ basic_machine=i686-pc
+ ;;
+ pentium4)
+ basic_machine=i786-pc
+ ;;
+ pentium-* | p5-* | k5-* | k6-* | nexgen-* | viac3-*)
+ basic_machine=i586-`echo "$basic_machine" | sed 's/^[^-]*-//'`
+ ;;
+ pentiumpro-* | p6-* | 6x86-* | athlon-*)
+ basic_machine=i686-`echo "$basic_machine" | sed 's/^[^-]*-//'`
+ ;;
+ pentiumii-* | pentium2-* | pentiumiii-* | pentium3-*)
+ basic_machine=i686-`echo "$basic_machine" | sed 's/^[^-]*-//'`
+ ;;
+ pentium4-*)
+ basic_machine=i786-`echo "$basic_machine" | sed 's/^[^-]*-//'`
+ ;;
+ pn)
+ basic_machine=pn-gould
+ ;;
+ power) basic_machine=power-ibm
+ ;;
+ ppc | ppcbe) basic_machine=powerpc-unknown
+ ;;
+ ppc-* | ppcbe-*)
+ basic_machine=powerpc-`echo "$basic_machine" | sed 's/^[^-]*-//'`
+ ;;
+ ppcle | powerpclittle)
+ basic_machine=powerpcle-unknown
+ ;;
+ ppcle-* | powerpclittle-*)
+ basic_machine=powerpcle-`echo "$basic_machine" | sed 's/^[^-]*-//'`
+ ;;
+ ppc64) basic_machine=powerpc64-unknown
+ ;;
+ ppc64-*) basic_machine=powerpc64-`echo "$basic_machine" | sed 's/^[^-]*-//'`
+ ;;
+ ppc64le | powerpc64little)
+ basic_machine=powerpc64le-unknown
+ ;;
+ ppc64le-* | powerpc64little-*)
+ basic_machine=powerpc64le-`echo "$basic_machine" | sed 's/^[^-]*-//'`
+ ;;
+ ps2)
+ basic_machine=i386-ibm
+ ;;
+ pw32)
+ basic_machine=i586-unknown
+ os=-pw32
+ ;;
+ rdos | rdos64)
+ basic_machine=x86_64-pc
+ os=-rdos
+ ;;
+ rdos32)
+ basic_machine=i386-pc
+ os=-rdos
+ ;;
+ rom68k)
+ basic_machine=m68k-rom68k
+ os=-coff
+ ;;
+ rm[46]00)
+ basic_machine=mips-siemens
+ ;;
+ rtpc | rtpc-*)
+ basic_machine=romp-ibm
+ ;;
+ s390 | s390-*)
+ basic_machine=s390-ibm
+ ;;
+ s390x | s390x-*)
+ basic_machine=s390x-ibm
+ ;;
+ sa29200)
+ basic_machine=a29k-amd
+ os=-udi
+ ;;
+ sb1)
+ basic_machine=mipsisa64sb1-unknown
+ ;;
+ sb1el)
+ basic_machine=mipsisa64sb1el-unknown
+ ;;
+ sde)
+ basic_machine=mipsisa32-sde
+ os=-elf
+ ;;
+ sei)
+ basic_machine=mips-sei
+ os=-seiux
+ ;;
+ sequent)
+ basic_machine=i386-sequent
+ ;;
+ sh5el)
+ basic_machine=sh5le-unknown
+ ;;
+ simso-wrs)
+ basic_machine=sparclite-wrs
+ os=-vxworks
+ ;;
+ sps7)
+ basic_machine=m68k-bull
+ os=-sysv2
+ ;;
+ spur)
+ basic_machine=spur-unknown
+ ;;
+ st2000)
+ basic_machine=m68k-tandem
+ ;;
+ stratus)
+ basic_machine=i860-stratus
+ os=-sysv4
+ ;;
+ strongarm-* | thumb-*)
+ basic_machine=arm-`echo "$basic_machine" | sed 's/^[^-]*-//'`
+ ;;
+ sun2)
+ basic_machine=m68000-sun
+ ;;
+ sun2os3)
+ basic_machine=m68000-sun
+ os=-sunos3
+ ;;
+ sun2os4)
+ basic_machine=m68000-sun
+ os=-sunos4
+ ;;
+ sun3os3)
+ basic_machine=m68k-sun
+ os=-sunos3
+ ;;
+ sun3os4)
+ basic_machine=m68k-sun
+ os=-sunos4
+ ;;
+ sun4os3)
+ basic_machine=sparc-sun
+ os=-sunos3
+ ;;
+ sun4os4)
+ basic_machine=sparc-sun
+ os=-sunos4
+ ;;
+ sun4sol2)
+ basic_machine=sparc-sun
+ os=-solaris2
+ ;;
+ sun3 | sun3-*)
+ basic_machine=m68k-sun
+ ;;
+ sun4)
+ basic_machine=sparc-sun
+ ;;
+ sun386 | sun386i | roadrunner)
+ basic_machine=i386-sun
+ ;;
+ sv1)
+ basic_machine=sv1-cray
+ os=-unicos
+ ;;
+ symmetry)
+ basic_machine=i386-sequent
+ os=-dynix
+ ;;
+ t3e)
+ basic_machine=alphaev5-cray
+ os=-unicos
+ ;;
+ t90)
+ basic_machine=t90-cray
+ os=-unicos
+ ;;
+ tile*)
+ basic_machine=$basic_machine-unknown
+ os=-linux-gnu
+ ;;
+ tx39)
+ basic_machine=mipstx39-unknown
+ ;;
+ tx39el)
+ basic_machine=mipstx39el-unknown
+ ;;
+ toad1)
+ basic_machine=pdp10-xkl
+ os=-tops20
+ ;;
+ tower | tower-32)
+ basic_machine=m68k-ncr
+ ;;
+ tpf)
+ basic_machine=s390x-ibm
+ os=-tpf
+ ;;
+ udi29k)
+ basic_machine=a29k-amd
+ os=-udi
+ ;;
+ ultra3)
+ basic_machine=a29k-nyu
+ os=-sym1
+ ;;
+ v810 | necv810)
+ basic_machine=v810-nec
+ os=-none
+ ;;
+ vaxv)
+ basic_machine=vax-dec
+ os=-sysv
+ ;;
+ vms)
+ basic_machine=vax-dec
+ os=-vms
+ ;;
+ vpp*|vx|vx-*)
+ basic_machine=f301-fujitsu
+ ;;
+ vxworks960)
+ basic_machine=i960-wrs
+ os=-vxworks
+ ;;
+ vxworks68)
+ basic_machine=m68k-wrs
+ os=-vxworks
+ ;;
+ vxworks29k)
+ basic_machine=a29k-wrs
+ os=-vxworks
+ ;;
+ w65*)
+ basic_machine=w65-wdc
+ os=-none
+ ;;
+ w89k-*)
+ basic_machine=hppa1.1-winbond
+ os=-proelf
+ ;;
+ x64)
+ basic_machine=x86_64-pc
+ ;;
+ xbox)
+ basic_machine=i686-pc
+ os=-mingw32
+ ;;
+ xps | xps100)
+ basic_machine=xps100-honeywell
+ ;;
+ xscale-* | xscalee[bl]-*)
+ basic_machine=`echo "$basic_machine" | sed 's/^xscale/arm/'`
+ ;;
+ ymp)
+ basic_machine=ymp-cray
+ os=-unicos
+ ;;
+ none)
+ basic_machine=none-none
+ os=-none
+ ;;
+
+# Here we handle the default manufacturer of certain CPU types. In some
+# cases it is the only manufacturer; in others, it is the most popular.
+ w89k)
+ basic_machine=hppa1.1-winbond
+ ;;
+ op50n)
+ basic_machine=hppa1.1-oki
+ ;;
+ op60c)
+ basic_machine=hppa1.1-oki
+ ;;
+ romp)
+ basic_machine=romp-ibm
+ ;;
+ mmix)
+ basic_machine=mmix-knuth
+ ;;
+ rs6000)
+ basic_machine=rs6000-ibm
+ ;;
+ vax)
+ basic_machine=vax-dec
+ ;;
+ pdp11)
+ basic_machine=pdp11-dec
+ ;;
+ we32k)
+ basic_machine=we32k-att
+ ;;
+ sh[1234] | sh[24]a | sh[24]aeb | sh[34]eb | sh[1234]le | sh[23]ele)
+ basic_machine=sh-unknown
+ ;;
+ cydra)
+ basic_machine=cydra-cydrome
+ ;;
+ orion)
+ basic_machine=orion-highlevel
+ ;;
+ orion105)
+ basic_machine=clipper-highlevel
+ ;;
+ mac | mpw | mac-mpw)
+ basic_machine=m68k-apple
+ ;;
+ pmac | pmac-mpw)
+ basic_machine=powerpc-apple
+ ;;
+ *-unknown)
+ # Make sure to match an already-canonicalized machine name.
+ ;;
+ *)
+ echo Invalid configuration \`"$1"\': machine \`"$basic_machine"\' not recognized 1>&2
+ exit 1
+ ;;
+esac
+
+# Here we canonicalize certain aliases for manufacturers.
+case $basic_machine in
+ *-digital*)
+ basic_machine=`echo "$basic_machine" | sed 's/digital.*/dec/'`
+ ;;
+ *-commodore*)
+ basic_machine=`echo "$basic_machine" | sed 's/commodore.*/cbm/'`
+ ;;
+ *)
+ ;;
+esac
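+
+# (For example, the aliasing above rewrites "mips-digital" to "mips-dec".)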
+
+# Decode manufacturer-specific aliases for certain operating systems.
+
+if [ x"$os" != x"" ]
+then
+case $os in
+ # First match some system type aliases that might get confused
+ # with valid system types.
+ # -solaris* is a basic system type, with this one exception.
+ -auroraux)
+ os=-auroraux
+ ;;
+ -solaris1 | -solaris1.*)
+ os=`echo $os | sed -e 's|solaris1|sunos4|'`
+ ;;
+ -solaris)
+ os=-solaris2
+ ;;
+ -unixware*)
+ os=-sysv4.2uw
+ ;;
+ -gnu/linux*)
+ os=`echo $os | sed -e 's|gnu/linux|linux-gnu|'`
+ ;;
+ # es1800 is here to avoid being matched by es* (a different OS)
+ -es1800*)
+ os=-ose
+ ;;
+ # Now accept the basic system types.
+# The portable systems come first.
+ # Each alternative MUST end in a * to match a version number.
+ # -sysv* is not here because it comes later, after sysvr4.
+ -gnu* | -bsd* | -mach* | -minix* | -genix* | -ultrix* | -irix* \
+ | -*vms* | -sco* | -esix* | -isc* | -aix* | -cnk* | -sunos | -sunos[34]*\
+ | -hpux* | -unos* | -osf* | -luna* | -dgux* | -auroraux* | -solaris* \
+ | -sym* | -kopensolaris* | -plan9* \
+ | -amigaos* | -amigados* | -msdos* | -newsos* | -unicos* | -aof* \
+ | -aos* | -aros* | -cloudabi* | -sortix* \
+ | -nindy* | -vxsim* | -vxworks* | -ebmon* | -hms* | -mvs* \
+ | -clix* | -riscos* | -uniplus* | -iris* | -rtu* | -xenix* \
+ | -hiux* | -knetbsd* | -mirbsd* | -netbsd* \
+ | -bitrig* | -openbsd* | -solidbsd* | -libertybsd* \
+ | -ekkobsd* | -kfreebsd* | -freebsd* | -riscix* | -lynxos* \
+ | -bosx* | -nextstep* | -cxux* | -aout* | -elf* | -oabi* \
+ | -ptx* | -coff* | -ecoff* | -winnt* | -domain* | -vsta* \
+ | -udi* | -eabi* | -lites* | -ieee* | -go32* | -aux* \
+ | -chorusos* | -chorusrdb* | -cegcc* | -glidix* \
+ | -cygwin* | -msys* | -pe* | -psos* | -moss* | -proelf* | -rtems* \
+ | -midipix* | -mingw32* | -mingw64* | -linux-gnu* | -linux-android* \
+ | -linux-newlib* | -linux-musl* | -linux-uclibc* \
+ | -uxpv* | -beos* | -mpeix* | -udk* | -moxiebox* \
+ | -interix* | -uwin* | -mks* | -rhapsody* | -darwin* \
+ | -openstep* | -oskit* | -conix* | -pw32* | -nonstopux* \
+ | -storm-chaos* | -tops10* | -tenex* | -tops20* | -its* \
+ | -os2* | -vos* | -palmos* | -uclinux* | -nucleus* \
+ | -morphos* | -superux* | -rtmk* | -windiss* \
+ | -powermax* | -dnix* | -nx6 | -nx7 | -sei* | -dragonfly* \
+ | -skyos* | -haiku* | -rdos* | -toppers* | -drops* | -es* \
+ | -onefs* | -tirtos* | -phoenix* | -fuchsia* | -redox* | -bme* \
+ | -midnightbsd*)
+ # Remember, each alternative MUST END IN *, to match a version number.
+ ;;
+ -qnx*)
+ case $basic_machine in
+ x86-* | i*86-*)
+ ;;
+ *)
+ os=-nto$os
+ ;;
+ esac
+ ;;
+ -nto-qnx*)
+ ;;
+ -nto*)
+ os=`echo $os | sed -e 's|nto|nto-qnx|'`
+ ;;
+ -sim | -xray | -os68k* | -v88r* \
+ | -windows* | -osx | -abug | -netware* | -os9* \
+ | -macos* | -mpw* | -magic* | -mmixware* | -mon960* | -lnews*)
+ ;;
+ -mac*)
+ os=`echo "$os" | sed -e 's|mac|macos|'`
+ ;;
+ -linux-dietlibc)
+ os=-linux-dietlibc
+ ;;
+ -linux*)
+ os=`echo $os | sed -e 's|linux|linux-gnu|'`
+ ;;
+ -sunos5*)
+ os=`echo "$os" | sed -e 's|sunos5|solaris2|'`
+ ;;
+ -sunos6*)
+ os=`echo "$os" | sed -e 's|sunos6|solaris3|'`
+ ;;
+ -opened*)
+ os=-openedition
+ ;;
+ -os400*)
+ os=-os400
+ ;;
+ -wince*)
+ os=-wince
+ ;;
+ -utek*)
+ os=-bsd
+ ;;
+ -dynix*)
+ os=-bsd
+ ;;
+ -acis*)
+ os=-aos
+ ;;
+ -atheos*)
+ os=-atheos
+ ;;
+ -syllable*)
+ os=-syllable
+ ;;
+ -386bsd)
+ os=-bsd
+ ;;
+ -ctix* | -uts*)
+ os=-sysv
+ ;;
+ -nova*)
+ os=-rtmk-nova
+ ;;
+ -ns2)
+ os=-nextstep2
+ ;;
+ -nsk*)
+ os=-nsk
+ ;;
+ # Preserve the version number of sinix5.
+ -sinix5.*)
+ os=`echo $os | sed -e 's|sinix|sysv|'`
+ ;;
+ -sinix*)
+ os=-sysv4
+ ;;
+ -tpf*)
+ os=-tpf
+ ;;
+ -triton*)
+ os=-sysv3
+ ;;
+ -oss*)
+ os=-sysv3
+ ;;
+ -svr4*)
+ os=-sysv4
+ ;;
+ -svr3)
+ os=-sysv3
+ ;;
+ -sysvr4)
+ os=-sysv4
+ ;;
+ # This must come after -sysvr4.
+ -sysv*)
+ ;;
+ -ose*)
+ os=-ose
+ ;;
+ -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*)
+ os=-mint
+ ;;
+ -zvmoe)
+ os=-zvmoe
+ ;;
+ -dicos*)
+ os=-dicos
+ ;;
+ -pikeos*)
+	# Until a real need for OS-specific support for
+	# particular features comes up, bare-metal
+	# configurations are quite functional.
+ case $basic_machine in
+ arm*)
+ os=-eabi
+ ;;
+ *)
+ os=-elf
+ ;;
+ esac
+ ;;
+ -nacl*)
+ ;;
+ -ios)
+ ;;
+ -none)
+ ;;
+ *)
+ # Get rid of the `-' at the beginning of $os.
+ os=`echo $os | sed 's/[^-]*-//'`
+ echo Invalid configuration \`"$1"\': system \`"$os"\' not recognized 1>&2
+ exit 1
+ ;;
+esac
+else
+
+# Here we handle the default operating systems that come with various machines.
+# The value should be what the vendor currently ships out the door with their
+# machine or, put another way, the most popular OS provided with the machine.
+
+# Note that if you're going to try to match "-MANUFACTURER" here (say,
+# "-sun"), then you have to tell the case statement up towards the top
+# that MANUFACTURER isn't an operating system. Otherwise, code above
+# will signal an error saying that MANUFACTURER isn't an operating
+# system, and we'll never get to this point.
+
+case $basic_machine in
+ score-*)
+ os=-elf
+ ;;
+ spu-*)
+ os=-elf
+ ;;
+ *-acorn)
+ os=-riscix1.2
+ ;;
+ arm*-rebel)
+ os=-linux
+ ;;
+ arm*-semi)
+ os=-aout
+ ;;
+ c4x-* | tic4x-*)
+ os=-coff
+ ;;
+ c8051-*)
+ os=-elf
+ ;;
+ hexagon-*)
+ os=-elf
+ ;;
+ tic54x-*)
+ os=-coff
+ ;;
+ tic55x-*)
+ os=-coff
+ ;;
+ tic6x-*)
+ os=-coff
+ ;;
+ # This must come before the *-dec entry.
+ pdp10-*)
+ os=-tops20
+ ;;
+ pdp11-*)
+ os=-none
+ ;;
+ *-dec | vax-*)
+ os=-ultrix4.2
+ ;;
+ m68*-apollo)
+ os=-domain
+ ;;
+ i386-sun)
+ os=-sunos4.0.2
+ ;;
+ m68000-sun)
+ os=-sunos3
+ ;;
+ m68*-cisco)
+ os=-aout
+ ;;
+ mep-*)
+ os=-elf
+ ;;
+ mips*-cisco)
+ os=-elf
+ ;;
+ mips*-*)
+ os=-elf
+ ;;
+ or32-*)
+ os=-coff
+ ;;
+ *-tti) # must be before sparc entry or we get the wrong os.
+ os=-sysv3
+ ;;
+ sparc-* | *-sun)
+ os=-sunos4.1.1
+ ;;
+ pru-*)
+ os=-elf
+ ;;
+ *-be)
+ os=-beos
+ ;;
+ *-ibm)
+ os=-aix
+ ;;
+ *-knuth)
+ os=-mmixware
+ ;;
+ *-wec)
+ os=-proelf
+ ;;
+ *-winbond)
+ os=-proelf
+ ;;
+ *-oki)
+ os=-proelf
+ ;;
+ *-hp)
+ os=-hpux
+ ;;
+ *-hitachi)
+ os=-hiux
+ ;;
+ i860-* | *-att | *-ncr | *-altos | *-motorola | *-convergent)
+ os=-sysv
+ ;;
+ *-cbm)
+ os=-amigaos
+ ;;
+ *-dg)
+ os=-dgux
+ ;;
+ *-dolphin)
+ os=-sysv3
+ ;;
+ m68k-ccur)
+ os=-rtu
+ ;;
+ m88k-omron*)
+ os=-luna
+ ;;
+ *-next)
+ os=-nextstep
+ ;;
+ *-sequent)
+ os=-ptx
+ ;;
+ *-crds)
+ os=-unos
+ ;;
+ *-ns)
+ os=-genix
+ ;;
+ i370-*)
+ os=-mvs
+ ;;
+ *-gould)
+ os=-sysv
+ ;;
+ *-highlevel)
+ os=-bsd
+ ;;
+ *-encore)
+ os=-bsd
+ ;;
+ *-sgi)
+ os=-irix
+ ;;
+ *-siemens)
+ os=-sysv4
+ ;;
+ *-masscomp)
+ os=-rtu
+ ;;
+ f30[01]-fujitsu | f700-fujitsu)
+ os=-uxpv
+ ;;
+ *-rom68k)
+ os=-coff
+ ;;
+ *-*bug)
+ os=-coff
+ ;;
+ *-apple)
+ os=-macos
+ ;;
+ *-atari*)
+ os=-mint
+ ;;
+ *)
+ os=-none
+ ;;
+esac
+fi
+
+# Here we handle the case where we know the OS and the CPU type but not the
+# manufacturer. We pick the logical manufacturer.
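+# (For example, a machine canonicalized to "sparc-unknown" with os
+# "-sunos4.1" picks vendor "sun" and becomes "sparc-sun".)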
+vendor=unknown
+case $basic_machine in
+ *-unknown)
+ case $os in
+ -riscix*)
+ vendor=acorn
+ ;;
+ -sunos*)
+ vendor=sun
+ ;;
+ -cnk*|-aix*)
+ vendor=ibm
+ ;;
+ -beos*)
+ vendor=be
+ ;;
+ -hpux*)
+ vendor=hp
+ ;;
+ -mpeix*)
+ vendor=hp
+ ;;
+ -hiux*)
+ vendor=hitachi
+ ;;
+ -unos*)
+ vendor=crds
+ ;;
+ -dgux*)
+ vendor=dg
+ ;;
+ -luna*)
+ vendor=omron
+ ;;
+ -genix*)
+ vendor=ns
+ ;;
+ -mvs* | -opened*)
+ vendor=ibm
+ ;;
+ -os400*)
+ vendor=ibm
+ ;;
+ -ptx*)
+ vendor=sequent
+ ;;
+ -tpf*)
+ vendor=ibm
+ ;;
+ -vxsim* | -vxworks* | -windiss*)
+ vendor=wrs
+ ;;
+ -aux*)
+ vendor=apple
+ ;;
+ -hms*)
+ vendor=hitachi
+ ;;
+ -mpw* | -macos*)
+ vendor=apple
+ ;;
+ -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*)
+ vendor=atari
+ ;;
+ -vos*)
+ vendor=stratus
+ ;;
+ esac
+ basic_machine=`echo "$basic_machine" | sed "s/unknown/$vendor/"`
+ ;;
+esac
+
+echo "$basic_machine$os"
+exit
+
+# Local variables:
+# eval: (add-hook 'write-file-functions 'time-stamp)
+# time-stamp-start: "timestamp='"
+# time-stamp-format: "%:y-%02m-%02d"
+# time-stamp-end: "'"
+# End:
diff --git a/configure b/configure
new file mode 100755
index 00000000..3fd60cd0
--- /dev/null
+++ b/configure
@@ -0,0 +1,12468 @@
+#! /bin/sh
+# Guess values for system-dependent variables and create Makefiles.
+# Generated by GNU Autoconf 2.69 for netdata v1.17.0.
+#
+#
+# Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc.
+#
+#
+# This configure script is free software; the Free Software Foundation
+# gives unlimited permission to copy, distribute and modify it.
+## -------------------- ##
+## M4sh Initialization. ##
+## -------------------- ##
+
+# Be more Bourne compatible
+DUALCASE=1; export DUALCASE # for MKS sh
+if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then :
+ emulate sh
+ NULLCMD=:
+ # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which
+ # is contrary to our usage. Disable this feature.
+ alias -g '${1+"$@"}'='"$@"'
+ setopt NO_GLOB_SUBST
+else
+ case `(set -o) 2>/dev/null` in #(
+ *posix*) :
+ set -o posix ;; #(
+ *) :
+ ;;
+esac
+fi
+
+
+as_nl='
+'
+export as_nl
+# Printing a long string crashes Solaris 7 /usr/bin/printf.
+as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\'
+as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo
+as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo
+# Prefer a ksh shell builtin over an external printf program on Solaris,
+# but without wasting forks for bash or zsh.
+if test -z "$BASH_VERSION$ZSH_VERSION" \
+ && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then
+ as_echo='print -r --'
+ as_echo_n='print -rn --'
+elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then
+ as_echo='printf %s\n'
+ as_echo_n='printf %s'
+else
+ if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then
+ as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"'
+ as_echo_n='/usr/ucb/echo -n'
+ else
+ as_echo_body='eval expr "X$1" : "X\\(.*\\)"'
+ as_echo_n_body='eval
+ arg=$1;
+ case $arg in #(
+ *"$as_nl"*)
+ expr "X$arg" : "X\\(.*\\)$as_nl";
+ arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;;
+ esac;
+ expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl"
+ '
+ export as_echo_n_body
+ as_echo_n='sh -c $as_echo_n_body as_echo'
+ fi
+ export as_echo_body
+ as_echo='sh -c $as_echo_body as_echo'
+fi
+
+# The user is always right.
+if test "${PATH_SEPARATOR+set}" != set; then
+ PATH_SEPARATOR=:
+ (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && {
+ (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 ||
+ PATH_SEPARATOR=';'
+ }
+fi
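+
+# (The probe above selects ";" only when a ";"-separated PATH works and a
+# ":"-separated one does not.)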
+
+
+# IFS
+# We need space, tab and new line, in precisely that order. Quoting is
+# there to prevent editors from complaining about space-tab.
+# (If _AS_PATH_WALK were called with IFS unset, it would disable word
+# splitting by setting IFS to empty value.)
+IFS=" "" $as_nl"
+
+# Find who we are. Look in the path if we contain no directory separator.
+as_myself=
+case $0 in #((
+ *[\\/]* ) as_myself=$0 ;;
+ *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break
+ done
+IFS=$as_save_IFS
+
+ ;;
+esac
+# We did not find ourselves; most probably we were run as `sh COMMAND',
+# in which case we are not to be found in the path.
+if test "x$as_myself" = x; then
+ as_myself=$0
+fi
+if test ! -f "$as_myself"; then
+ $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2
+ exit 1
+fi
+
+# Unset variables that we do not need and which cause bugs (e.g. in
+# pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1"
+# suppresses any "Segmentation fault" message there. '((' could
+# trigger a bug in pdksh 5.2.14.
+for as_var in BASH_ENV ENV MAIL MAILPATH
+do eval test x\${$as_var+set} = xset \
+ && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || :
+done
+PS1='$ '
+PS2='> '
+PS4='+ '
+
+# NLS nuisances.
+LC_ALL=C
+export LC_ALL
+LANGUAGE=C
+export LANGUAGE
+
+# CDPATH.
+(unset CDPATH) >/dev/null 2>&1 && unset CDPATH
+
+# Use a proper internal environment variable to ensure we don't fall
+ # into an infinite loop, continuously re-executing ourselves.
+ if test x"${_as_can_reexec}" != xno && test "x$CONFIG_SHELL" != x; then
+ _as_can_reexec=no; export _as_can_reexec;
+ # We cannot yet assume a decent shell, so we have to provide a
+# neutralization value for shells without unset; and this also
+# works around shells that cannot unset nonexistent variables.
+# Preserve -v and -x to the replacement shell.
+BASH_ENV=/dev/null
+ENV=/dev/null
+(unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV
+case $- in # ((((
+ *v*x* | *x*v* ) as_opts=-vx ;;
+ *v* ) as_opts=-v ;;
+ *x* ) as_opts=-x ;;
+ * ) as_opts= ;;
+esac
+exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"}
+# Admittedly, this is quite paranoid, since all the known shells bail
+# out after a failed `exec'.
+$as_echo "$0: could not re-execute with $CONFIG_SHELL" >&2
+as_fn_exit 255
+ fi
+ # We don't want this to propagate to other subprocesses.
+ { _as_can_reexec=; unset _as_can_reexec;}
+if test "x$CONFIG_SHELL" = x; then
+ as_bourne_compatible="if test -n \"\${ZSH_VERSION+set}\" && (emulate sh) >/dev/null 2>&1; then :
+ emulate sh
+ NULLCMD=:
+ # Pre-4.2 versions of Zsh do word splitting on \${1+\"\$@\"}, which
+ # is contrary to our usage. Disable this feature.
+ alias -g '\${1+\"\$@\"}'='\"\$@\"'
+ setopt NO_GLOB_SUBST
+else
+ case \`(set -o) 2>/dev/null\` in #(
+ *posix*) :
+ set -o posix ;; #(
+ *) :
+ ;;
+esac
+fi
+"
+ as_required="as_fn_return () { (exit \$1); }
+as_fn_success () { as_fn_return 0; }
+as_fn_failure () { as_fn_return 1; }
+as_fn_ret_success () { return 0; }
+as_fn_ret_failure () { return 1; }
+
+exitcode=0
+as_fn_success || { exitcode=1; echo as_fn_success failed.; }
+as_fn_failure && { exitcode=1; echo as_fn_failure succeeded.; }
+as_fn_ret_success || { exitcode=1; echo as_fn_ret_success failed.; }
+as_fn_ret_failure && { exitcode=1; echo as_fn_ret_failure succeeded.; }
+if ( set x; as_fn_ret_success y && test x = \"\$1\" ); then :
+
+else
+ exitcode=1; echo positional parameters were not saved.
+fi
+test x\$exitcode = x0 || exit 1
+test -x / || exit 1"
+ as_suggested=" as_lineno_1=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_1a=\$LINENO
+ as_lineno_2=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_2a=\$LINENO
+ eval 'test \"x\$as_lineno_1'\$as_run'\" != \"x\$as_lineno_2'\$as_run'\" &&
+ test \"x\`expr \$as_lineno_1'\$as_run' + 1\`\" = \"x\$as_lineno_2'\$as_run'\"' || exit 1
+test \$(( 1 + 1 )) = 2 || exit 1"
+ if (eval "$as_required") 2>/dev/null; then :
+ as_have_required=yes
+else
+ as_have_required=no
+fi
+ if test x$as_have_required = xyes && (eval "$as_suggested") 2>/dev/null; then :
+
+else
+ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+as_found=false
+for as_dir in /bin$PATH_SEPARATOR/usr/bin$PATH_SEPARATOR$PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ as_found=:
+ case $as_dir in #(
+ /*)
+ for as_base in sh bash ksh sh5; do
+ # Try only shells that exist, to save several forks.
+ as_shell=$as_dir/$as_base
+ if { test -f "$as_shell" || test -f "$as_shell.exe"; } &&
+ { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$as_shell"; } 2>/dev/null; then :
+ CONFIG_SHELL=$as_shell as_have_required=yes
+ if { $as_echo "$as_bourne_compatible""$as_suggested" | as_run=a "$as_shell"; } 2>/dev/null; then :
+ break 2
+fi
+fi
+ done;;
+ esac
+ as_found=false
+done
+$as_found || { if { test -f "$SHELL" || test -f "$SHELL.exe"; } &&
+ { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$SHELL"; } 2>/dev/null; then :
+ CONFIG_SHELL=$SHELL as_have_required=yes
+fi; }
+IFS=$as_save_IFS
+
+
+ if test "x$CONFIG_SHELL" != x; then :
+ export CONFIG_SHELL
+ # We cannot yet assume a decent shell, so we have to provide a
+# neutralization value for shells without unset; and this also
+# works around shells that cannot unset nonexistent variables.
+# Preserve -v and -x to the replacement shell.
+BASH_ENV=/dev/null
+ENV=/dev/null
+(unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV
+case $- in # ((((
+ *v*x* | *x*v* ) as_opts=-vx ;;
+ *v* ) as_opts=-v ;;
+ *x* ) as_opts=-x ;;
+ * ) as_opts= ;;
+esac
+exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"}
+# Admittedly, this is quite paranoid, since all the known shells bail
+# out after a failed `exec'.
+$as_echo "$0: could not re-execute with $CONFIG_SHELL" >&2
+exit 255
+fi
+
+ if test x$as_have_required = xno; then :
+ $as_echo "$0: This script requires a shell more modern than all"
+ $as_echo "$0: the shells that I found on your system."
+ if test x${ZSH_VERSION+set} = xset ; then
+ $as_echo "$0: In particular, zsh $ZSH_VERSION has bugs and should"
+ $as_echo "$0: be upgraded to zsh 4.3.4 or later."
+ else
+ $as_echo "$0: Please tell bug-autoconf@gnu.org about your system,
+$0: including any error possibly output before this
+$0: message. Then install a modern shell, or manually run
+$0: the script under such a shell if you do have one."
+ fi
+ exit 1
+fi
+fi
+fi
+SHELL=${CONFIG_SHELL-/bin/sh}
+export SHELL
+# Unset more variables known to interfere with behavior of common tools.
+CLICOLOR_FORCE= GREP_OPTIONS=
+unset CLICOLOR_FORCE GREP_OPTIONS
+
+## --------------------- ##
+## M4sh Shell Functions. ##
+## --------------------- ##
+# as_fn_unset VAR
+# ---------------
+# Portably unset VAR.
+as_fn_unset ()
+{
+ { eval $1=; unset $1;}
+}
+as_unset=as_fn_unset
+
+# as_fn_set_status STATUS
+# -----------------------
+# Set $? to STATUS, without forking.
+as_fn_set_status ()
+{
+ return $1
+} # as_fn_set_status
+
+# as_fn_exit STATUS
+# -----------------
+# Exit the shell with STATUS, even in a "trap 0" or "set -e" context.
+as_fn_exit ()
+{
+ set +e
+ as_fn_set_status $1
+ exit $1
+} # as_fn_exit
+
+# as_fn_mkdir_p
+# -------------
+# Create "$as_dir" as a directory, including parents if necessary.
+as_fn_mkdir_p ()
+{
+
+ case $as_dir in #(
+ -*) as_dir=./$as_dir;;
+ esac
+ test -d "$as_dir" || eval $as_mkdir_p || {
+ as_dirs=
+ while :; do
+ case $as_dir in #(
+ *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'(
+ *) as_qdir=$as_dir;;
+ esac
+ as_dirs="'$as_qdir' $as_dirs"
+ as_dir=`$as_dirname -- "$as_dir" ||
+$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+ X"$as_dir" : 'X\(//\)[^/]' \| \
+ X"$as_dir" : 'X\(//\)$' \| \
+ X"$as_dir" : 'X\(/\)' \| . 2>/dev/null ||
+$as_echo X"$as_dir" |
+ sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)[^/].*/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\).*/{
+ s//\1/
+ q
+ }
+ s/.*/./; q'`
+ test -d "$as_dir" && break
+ done
+ test -z "$as_dirs" || eval "mkdir $as_dirs"
+ } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir"
+
+
+} # as_fn_mkdir_p
+
+# as_fn_executable_p FILE
+# -----------------------
+# Test if FILE is an executable regular file.
+as_fn_executable_p ()
+{
+ test -f "$1" && test -x "$1"
+} # as_fn_executable_p
+# as_fn_append VAR VALUE
+# ----------------------
+# Append the text in VALUE to the end of the definition contained in VAR. Take
+# advantage of any shell optimizations that allow amortized linear growth over
+# repeated appends, instead of the typical quadratic growth present in naive
+# implementations.
+if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then :
+ eval 'as_fn_append ()
+ {
+ eval $1+=\$2
+ }'
+else
+ as_fn_append ()
+ {
+ eval $1=\$$1\$2
+ }
+fi # as_fn_append
+
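+# (Illustrative usage, not part of the generated script:
+#   foo=ab; as_fn_append foo cd   # foo is now "abcd".)
+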
+# as_fn_arith ARG...
+# ------------------
+# Perform arithmetic evaluation on the ARGs, and store the result in the
+# global $as_val. Take advantage of shells that can avoid forks. The arguments
+# must be portable across $(()) and expr.
+if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then :
+ eval 'as_fn_arith ()
+ {
+ as_val=$(( $* ))
+ }'
+else
+ as_fn_arith ()
+ {
+ as_val=`expr "$@" || test $? -eq 1`
+ }
+fi # as_fn_arith
+
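+# (Illustrative usage: "as_fn_arith 2 + 3" leaves 5 in $as_val.)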
+
+# as_fn_error STATUS ERROR [LINENO LOG_FD]
+# ----------------------------------------
+# Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are
+# provided, also output the error to LOG_FD, referencing LINENO. Then exit the
+# script with STATUS, using 1 if that was 0.
+as_fn_error ()
+{
+ as_status=$1; test $as_status -eq 0 && as_status=1
+ if test "$4"; then
+ as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+ $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4
+ fi
+ $as_echo "$as_me: error: $2" >&2
+ as_fn_exit $as_status
+} # as_fn_error
+
+if expr a : '\(a\)' >/dev/null 2>&1 &&
+ test "X`expr 00001 : '.*\(...\)'`" = X001; then
+ as_expr=expr
+else
+ as_expr=false
+fi
+
+if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then
+ as_basename=basename
+else
+ as_basename=false
+fi
+
+if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then
+ as_dirname=dirname
+else
+ as_dirname=false
+fi
+
+as_me=`$as_basename -- "$0" ||
+$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \
+ X"$0" : 'X\(//\)$' \| \
+ X"$0" : 'X\(/\)' \| . 2>/dev/null ||
+$as_echo X/"$0" |
+ sed '/^.*\/\([^/][^/]*\)\/*$/{
+ s//\1/
+ q
+ }
+ /^X\/\(\/\/\)$/{
+ s//\1/
+ q
+ }
+ /^X\/\(\/\).*/{
+ s//\1/
+ q
+ }
+ s/.*/./; q'`
+
+# Avoid depending upon Character Ranges.
+as_cr_letters='abcdefghijklmnopqrstuvwxyz'
+as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ'
+as_cr_Letters=$as_cr_letters$as_cr_LETTERS
+as_cr_digits='0123456789'
+as_cr_alnum=$as_cr_Letters$as_cr_digits
+
+
+ as_lineno_1=$LINENO as_lineno_1a=$LINENO
+ as_lineno_2=$LINENO as_lineno_2a=$LINENO
+ eval 'test "x$as_lineno_1'$as_run'" != "x$as_lineno_2'$as_run'" &&
+ test "x`expr $as_lineno_1'$as_run' + 1`" = "x$as_lineno_2'$as_run'"' || {
+ # Blame Lee E. McMahon (1931-1989) for sed's syntax. :-)
+ sed -n '
+ p
+ /[$]LINENO/=
+ ' <$as_myself |
+ sed '
+ s/[$]LINENO.*/&-/
+ t lineno
+ b
+ :lineno
+ N
+ :loop
+ s/[$]LINENO\([^'$as_cr_alnum'_].*\n\)\(.*\)/\2\1\2/
+ t loop
+ s/-\n.*//
+ ' >$as_me.lineno &&
+ chmod +x "$as_me.lineno" ||
+ { $as_echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2; as_fn_exit 1; }
+
+ # If we had to re-execute with $CONFIG_SHELL, we're ensured to have
+ # already done that, so ensure we don't try to do so again and fall
+ # in an infinite loop. This has already happened in practice.
+ _as_can_reexec=no; export _as_can_reexec
+  # Don't try to exec as it changes $[0], causing all sorts of problems
+ # (the dirname of $[0] is not the place where we might find the
+ # original and so on. Autoconf is especially sensitive to this).
+ . "./$as_me.lineno"
+ # Exit status is that of the last command.
+ exit
+}
+
+ECHO_C= ECHO_N= ECHO_T=
+case `echo -n x` in #(((((
+-n*)
+ case `echo 'xy\c'` in
+ *c*) ECHO_T=' ';; # ECHO_T is single tab character.
+ xy) ECHO_C='\c';;
+ *) echo `echo ksh88 bug on AIX 6.1` > /dev/null
+ ECHO_T=' ';;
+ esac;;
+*)
+ ECHO_N='-n';;
+esac
+
+rm -f conf$$ conf$$.exe conf$$.file
+if test -d conf$$.dir; then
+ rm -f conf$$.dir/conf$$.file
+else
+ rm -f conf$$.dir
+ mkdir conf$$.dir 2>/dev/null
+fi
+if (echo >conf$$.file) 2>/dev/null; then
+ if ln -s conf$$.file conf$$ 2>/dev/null; then
+ as_ln_s='ln -s'
+ # ... but there are two gotchas:
+ # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail.
+ # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable.
+ # In both cases, we have to default to `cp -pR'.
+ ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe ||
+ as_ln_s='cp -pR'
+ elif ln conf$$.file conf$$ 2>/dev/null; then
+ as_ln_s=ln
+ else
+ as_ln_s='cp -pR'
+ fi
+else
+ as_ln_s='cp -pR'
+fi
+rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file
+rmdir conf$$.dir 2>/dev/null
+
+if mkdir -p . 2>/dev/null; then
+ as_mkdir_p='mkdir -p "$as_dir"'
+else
+ test -d ./-p && rmdir ./-p
+ as_mkdir_p=false
+fi
+
+as_test_x='test -x'
+as_executable_p=as_fn_executable_p
+
+# Sed expression to map a string onto a valid CPP name.
+as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'"
+
+# Sed expression to map a string onto a valid variable name.
+as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'"
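+
+# (For example, as_tr_cpp maps "sys/stat.h" to "SYS_STAT_H".)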
+
+
+test -n "$DJDIR" || exec 7<&0 </dev/null
+exec 6>&1
+
+# Name of the host.
+# hostname on some systems (SVR3.2, old GNU/Linux) returns a bogus exit status,
+# so uname gets run too.
+ac_hostname=`(hostname || uname -n) 2>/dev/null | sed 1q`
+
+#
+# Initializations.
+#
+ac_default_prefix=/usr/local
+ac_clean_files=
+ac_config_libobj_dir=.
+LIBOBJS=
+cross_compiling=no
+subdirs=
+MFLAGS=
+MAKEFLAGS=
+
+# Identity of this package.
+PACKAGE_NAME='netdata'
+PACKAGE_TARNAME='netdata'
+PACKAGE_VERSION='v1.17.0'
+PACKAGE_STRING='netdata v1.17.0'
+PACKAGE_BUGREPORT=''
+PACKAGE_URL=''
+
+ac_unique_file="daemon/main.c"
+# Factoring default headers for most tests.
+ac_includes_default="\
+#include <stdio.h>
+#ifdef HAVE_SYS_TYPES_H
+# include <sys/types.h>
+#endif
+#ifdef HAVE_SYS_STAT_H
+# include <sys/stat.h>
+#endif
+#ifdef STDC_HEADERS
+# include <stdlib.h>
+# include <stddef.h>
+#else
+# ifdef HAVE_STDLIB_H
+# include <stdlib.h>
+# endif
+#endif
+#ifdef HAVE_STRING_H
+# if !defined STDC_HEADERS && defined HAVE_MEMORY_H
+# include <memory.h>
+# endif
+# include <string.h>
+#endif
+#ifdef HAVE_STRINGS_H
+# include <strings.h>
+#endif
+#ifdef HAVE_INTTYPES_H
+# include <inttypes.h>
+#endif
+#ifdef HAVE_STDINT_H
+# include <stdint.h>
+#endif
+#ifdef HAVE_UNISTD_H
+# include <unistd.h>
+#endif"
+
+ac_header_list=
+ac_func_list=
+ac_subst_vars='am__EXEEXT_FALSE
+am__EXEEXT_TRUE
+LTLIBOBJS
+LIBOBJS
+OPTIONAL_MONGOC_LIBS
+OPTIONAL_MONGOC_CFLAGS
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS
+OPTIONAL_KINESIS_LIBS
+OPTIONAL_KINESIS_CFLAGS
+OPTIONAL_XENSTAT_LIBS
+OPTIONAL_XENSTAT_CFLAGS
+OPTIONAL_CUPS_LIBS
+OPTIONAL_CUPS_CFLAGS
+OPTIONAL_IPMIMONITORING_LIBS
+OPTIONAL_IPMIMONITORING_CFLAGS
+OPTIONAL_LIBCAP_LIBS
+OPTIONAL_LIBCAP_CFLAGS
+OPTIONAL_UUID_LIBS
+OPTIONAL_UUID_CFLAGS
+OPTIONAL_ZLIB_LIBS
+OPTIONAL_ZLIB_CFLAGS
+OPTIONAL_NFACCT_LIBS
+OPTIONAL_NFACCT_CFLAGS
+OPTIONAL_JSONC_LIBS
+OPTIONAL_SSL_LIBS
+OPTIONAL_JUDY_LIBS
+OPTIONAL_LZ4_LIBS
+OPTIONAL_UV_LIBS
+OPTIONAL_MATH_LIBS
+OPTIONAL_MATH_CFLAGS
+webdir
+pluginsdir
+logdir
+libconfigdir
+configdir
+pythondir
+nodedir
+chartsdir
+cachedir
+registrydir
+varlibdir
+build_target
+ENABLE_CXX_LINKER_FALSE
+ENABLE_CXX_LINKER_TRUE
+ENABLE_PLUGIN_CGROUP_NETWORK_FALSE
+ENABLE_PLUGIN_CGROUP_NETWORK_TRUE
+ENABLE_BACKEND_MONGODB_FALSE
+ENABLE_BACKEND_MONGODB_TRUE
+LIBMONGOC_LIBS
+LIBMONGOC_CFLAGS
+ENABLE_BACKEND_PROMETHEUS_REMOTE_WRITE_FALSE
+ENABLE_BACKEND_PROMETHEUS_REMOTE_WRITE_TRUE
+CXX_BINARY
+PROTOC
+PROTOBUF_LIBS
+PROTOBUF_CFLAGS
+ENABLE_BACKEND_KINESIS_FALSE
+ENABLE_BACKEND_KINESIS_TRUE
+LIBCURL_LIBS
+LIBCURL_CFLAGS
+LIBSSL_LIBS
+LIBSSL_CFLAGS
+LIBCRYPTO_LIBS
+LIBCRYPTO_CFLAGS
+ENABLE_PLUGIN_PERF_FALSE
+ENABLE_PLUGIN_PERF_TRUE
+ENABLE_PLUGIN_XENSTAT_FALSE
+ENABLE_PLUGIN_XENSTAT_TRUE
+XENLIGHT_LIBS
+XENLIGHT_CFLAGS
+YAJL_LIBS
+YAJL_CFLAGS
+ENABLE_PLUGIN_NFACCT_FALSE
+ENABLE_PLUGIN_NFACCT_TRUE
+LIBMNL_LIBS
+LIBMNL_CFLAGS
+NFACCT_LIBS
+NFACCT_CFLAGS
+ENABLE_PLUGIN_CUPS_FALSE
+ENABLE_PLUGIN_CUPS_TRUE
+CUPSCONFIG
+ENABLE_PLUGIN_FREEIPMI_FALSE
+ENABLE_PLUGIN_FREEIPMI_TRUE
+IPMIMONITORING_LIBS
+IPMIMONITORING_CFLAGS
+ENABLE_PLUGIN_APPS_FALSE
+ENABLE_PLUGIN_APPS_TRUE
+ENABLE_CAPABILITY_FALSE
+ENABLE_CAPABILITY_TRUE
+LIBCAP_LIBS
+LIBCAP_CFLAGS
+has_tcmalloc
+has_jemalloc
+SSE_CANDIDATE
+ENABLE_JSONC_FALSE
+ENABLE_JSONC_TRUE
+ENABLE_HTTPS_FALSE
+ENABLE_HTTPS_TRUE
+ENABLE_DBENGINE_FALSE
+ENABLE_DBENGINE_TRUE
+JSON_LIBS
+JSON_CFLAGS
+UUID_LIBS
+UUID_CFLAGS
+ZLIB_LIBS
+ZLIB_CFLAGS
+MATH_LIBS
+MATH_CFLAGS
+PTHREAD_CFLAGS
+PTHREAD_LIBS
+PTHREAD_CC
+ax_pthread_config
+LINUX_FALSE
+LINUX_TRUE
+MACOS_FALSE
+MACOS_TRUE
+FREEBSD_FALSE
+FREEBSD_TRUE
+EGREP
+GREP
+CPP
+PKG_CONFIG_LIBDIR
+PKG_CONFIG_PATH
+PKG_CONFIG
+am__fastdepCXX_FALSE
+am__fastdepCXX_TRUE
+CXXDEPMODE
+ac_ct_CXX
+CXXFLAGS
+CXX
+am__fastdepCC_FALSE
+am__fastdepCC_TRUE
+CCDEPMODE
+am__nodep
+AMDEPBACKSLASH
+AMDEP_FALSE
+AMDEP_TRUE
+am__quote
+am__include
+DEPDIR
+OBJEXT
+EXEEXT
+ac_ct_CC
+CPPFLAGS
+LDFLAGS
+CFLAGS
+CC
+host_os
+host_vendor
+host_cpu
+host
+build_os
+build_vendor
+build_cpu
+build
+AM_BACKSLASH
+AM_DEFAULT_VERBOSITY
+AM_DEFAULT_V
+AM_V
+am__untar
+am__tar
+AMTAR
+am__leading_dot
+SET_MAKE
+AWK
+mkdir_p
+MKDIR_P
+INSTALL_STRIP_PROGRAM
+STRIP
+install_sh
+MAKEINFO
+AUTOHEADER
+AUTOMAKE
+AUTOCONF
+ACLOCAL
+VERSION
+PACKAGE
+CYGPATH_W
+am__isrc
+INSTALL_DATA
+INSTALL_SCRIPT
+INSTALL_PROGRAM
+PACKAGE_RPM_VERSION
+MAINT
+MAINTAINER_MODE_FALSE
+MAINTAINER_MODE_TRUE
+target_alias
+host_alias
+build_alias
+LIBS
+ECHO_T
+ECHO_N
+ECHO_C
+DEFS
+mandir
+localedir
+libdir
+psdir
+pdfdir
+dvidir
+htmldir
+infodir
+docdir
+oldincludedir
+includedir
+runstatedir
+localstatedir
+sharedstatedir
+sysconfdir
+datadir
+datarootdir
+libexecdir
+sbindir
+bindir
+program_transform_name
+prefix
+exec_prefix
+PACKAGE_URL
+PACKAGE_BUGREPORT
+PACKAGE_STRING
+PACKAGE_VERSION
+PACKAGE_TARNAME
+PACKAGE_NAME
+PATH_SEPARATOR
+SHELL'
+ac_subst_files=''
+ac_user_opts='
+enable_option_checking
+enable_maintainer_mode
+enable_silent_rules
+enable_dependency_tracking
+enable_plugin_nfacct
+enable_plugin_freeipmi
+enable_plugin_cups
+enable_plugin_xenstat
+enable_backend_kinesis
+enable_backend_prometheus_remote_write
+enable_backend_mongodb
+enable_pedantic
+enable_accept4
+with_webdir
+with_libcap
+with_zlib
+with_math
+with_user
+enable_x86_sse
+enable_lto
+enable_https
+enable_dbengine
+enable_jsonc
+with_jemalloc_prefix
+with_jemalloc
+with_tcmalloc_lib
+with_tcmalloc
+with_cups_config
+'
+ ac_precious_vars='build_alias
+host_alias
+target_alias
+CC
+CFLAGS
+LDFLAGS
+LIBS
+CPPFLAGS
+CXX
+CXXFLAGS
+CCC
+PKG_CONFIG
+PKG_CONFIG_PATH
+PKG_CONFIG_LIBDIR
+CPP
+MATH_CFLAGS
+MATH_LIBS
+ZLIB_CFLAGS
+ZLIB_LIBS
+UUID_CFLAGS
+UUID_LIBS
+JSON_CFLAGS
+JSON_LIBS
+SSE_CANDIDATE
+LIBCAP_CFLAGS
+LIBCAP_LIBS
+IPMIMONITORING_CFLAGS
+IPMIMONITORING_LIBS
+NFACCT_CFLAGS
+NFACCT_LIBS
+LIBMNL_CFLAGS
+LIBMNL_LIBS
+YAJL_CFLAGS
+YAJL_LIBS
+XENLIGHT_CFLAGS
+XENLIGHT_LIBS
+LIBCRYPTO_CFLAGS
+LIBCRYPTO_LIBS
+LIBSSL_CFLAGS
+LIBSSL_LIBS
+LIBCURL_CFLAGS
+LIBCURL_LIBS
+PROTOBUF_CFLAGS
+PROTOBUF_LIBS
+LIBMONGOC_CFLAGS
+LIBMONGOC_LIBS'
+
+
+# Initialize some variables set by options.
+ac_init_help=
+ac_init_version=false
+ac_unrecognized_opts=
+ac_unrecognized_sep=
+# The variables have the same names as the options, with
+# dashes changed to underscores.
+cache_file=/dev/null
+exec_prefix=NONE
+no_create=
+no_recursion=
+prefix=NONE
+program_prefix=NONE
+program_suffix=NONE
+program_transform_name=s,x,x,
+silent=
+site=
+srcdir=
+verbose=
+x_includes=NONE
+x_libraries=NONE
+
+# Installation directory options.
+# These are left unexpanded so users can "make install exec_prefix=/foo"
+# and all the variables that are supposed to be based on exec_prefix
+# by default will actually change.
+# Use braces instead of parens because sh, perl, etc. also accept them.
+# (The list follows the same order as the GNU Coding Standards.)
+bindir='${exec_prefix}/bin'
+sbindir='${exec_prefix}/sbin'
+libexecdir='${exec_prefix}/libexec'
+datarootdir='${prefix}/share'
+datadir='${datarootdir}'
+sysconfdir='${prefix}/etc'
+sharedstatedir='${prefix}/com'
+localstatedir='${prefix}/var'
+runstatedir='${localstatedir}/run'
+includedir='${prefix}/include'
+oldincludedir='/usr/include'
+docdir='${datarootdir}/doc/${PACKAGE_TARNAME}'
+infodir='${datarootdir}/info'
+htmldir='${docdir}'
+dvidir='${docdir}'
+pdfdir='${docdir}'
+psdir='${docdir}'
+libdir='${exec_prefix}/lib'
+localedir='${datarootdir}/locale'
+mandir='${datarootdir}/man'
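+
+# A quick illustration of the deferred expansion above (not part of the
+# generated script): because these values stay unexpanded, overriding
+# exec_prefix at install time cascades through every dependent directory.
+# The /opt/netdata prefix below is only an example value.
+#
+#   ./configure --prefix=/usr
+#   make
+#   make install exec_prefix=/opt/netdata
+#   # bindir  = ${exec_prefix}/bin  -> /opt/netdata/bin
+#   # sbindir = ${exec_prefix}/sbin -> /opt/netdata/sbin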
+
+ac_prev=
+ac_dashdash=
+for ac_option
+do
+ # If the previous option needs an argument, assign it.
+ if test -n "$ac_prev"; then
+ eval $ac_prev=\$ac_option
+ ac_prev=
+ continue
+ fi
+
+ case $ac_option in
+ *=?*) ac_optarg=`expr "X$ac_option" : '[^=]*=\(.*\)'` ;;
+ *=) ac_optarg= ;;
+ *) ac_optarg=yes ;;
+ esac
+
+ # Accept the important Cygnus configure options, so we can diagnose typos.
+
+ case $ac_dashdash$ac_option in
+ --)
+ ac_dashdash=yes ;;
+
+ -bindir | --bindir | --bindi | --bind | --bin | --bi)
+ ac_prev=bindir ;;
+ -bindir=* | --bindir=* | --bindi=* | --bind=* | --bin=* | --bi=*)
+ bindir=$ac_optarg ;;
+
+ -build | --build | --buil | --bui | --bu)
+ ac_prev=build_alias ;;
+ -build=* | --build=* | --buil=* | --bui=* | --bu=*)
+ build_alias=$ac_optarg ;;
+
+ -cache-file | --cache-file | --cache-fil | --cache-fi \
+ | --cache-f | --cache- | --cache | --cach | --cac | --ca | --c)
+ ac_prev=cache_file ;;
+ -cache-file=* | --cache-file=* | --cache-fil=* | --cache-fi=* \
+ | --cache-f=* | --cache-=* | --cache=* | --cach=* | --cac=* | --ca=* | --c=*)
+ cache_file=$ac_optarg ;;
+
+ --config-cache | -C)
+ cache_file=config.cache ;;
+
+ -datadir | --datadir | --datadi | --datad)
+ ac_prev=datadir ;;
+ -datadir=* | --datadir=* | --datadi=* | --datad=*)
+ datadir=$ac_optarg ;;
+
+ -datarootdir | --datarootdir | --datarootdi | --datarootd | --dataroot \
+ | --dataroo | --dataro | --datar)
+ ac_prev=datarootdir ;;
+ -datarootdir=* | --datarootdir=* | --datarootdi=* | --datarootd=* \
+ | --dataroot=* | --dataroo=* | --dataro=* | --datar=*)
+ datarootdir=$ac_optarg ;;
+
+ -disable-* | --disable-*)
+ ac_useropt=`expr "x$ac_option" : 'x-*disable-\(.*\)'`
+ # Reject names that are not valid shell variable names.
+ expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null &&
+ as_fn_error $? "invalid feature name: $ac_useropt"
+ ac_useropt_orig=$ac_useropt
+ ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'`
+ case $ac_user_opts in
+ *"
+"enable_$ac_useropt"
+"*) ;;
+ *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--disable-$ac_useropt_orig"
+ ac_unrecognized_sep=', ';;
+ esac
+ eval enable_$ac_useropt=no ;;
+
+ -docdir | --docdir | --docdi | --doc | --do)
+ ac_prev=docdir ;;
+ -docdir=* | --docdir=* | --docdi=* | --doc=* | --do=*)
+ docdir=$ac_optarg ;;
+
+ -dvidir | --dvidir | --dvidi | --dvid | --dvi | --dv)
+ ac_prev=dvidir ;;
+ -dvidir=* | --dvidir=* | --dvidi=* | --dvid=* | --dvi=* | --dv=*)
+ dvidir=$ac_optarg ;;
+
+ -enable-* | --enable-*)
+ ac_useropt=`expr "x$ac_option" : 'x-*enable-\([^=]*\)'`
+ # Reject names that are not valid shell variable names.
+ expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null &&
+ as_fn_error $? "invalid feature name: $ac_useropt"
+ ac_useropt_orig=$ac_useropt
+ ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'`
+ case $ac_user_opts in
+ *"
+"enable_$ac_useropt"
+"*) ;;
+ *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--enable-$ac_useropt_orig"
+ ac_unrecognized_sep=', ';;
+ esac
+ eval enable_$ac_useropt=\$ac_optarg ;;
+
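+# Sketch of the normalization above (illustrative): the sed turns option
+# names into shell variables, a bare --enable-FEATURE stores "yes", and
+# --disable-FEATURE stores "no" in the same variable, e.g.
+#
+#   --enable-plugin-nfacct       ->  enable_plugin_nfacct=yes
+#   --enable-backend-kinesis=no  ->  enable_backend_kinesis=no
+#   --disable-dbengine           ->  enable_dbengine=no
+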
+ -exec-prefix | --exec_prefix | --exec-prefix | --exec-prefi \
+ | --exec-pref | --exec-pre | --exec-pr | --exec-p | --exec- \
+ | --exec | --exe | --ex)
+ ac_prev=exec_prefix ;;
+ -exec-prefix=* | --exec_prefix=* | --exec-prefix=* | --exec-prefi=* \
+ | --exec-pref=* | --exec-pre=* | --exec-pr=* | --exec-p=* | --exec-=* \
+ | --exec=* | --exe=* | --ex=*)
+ exec_prefix=$ac_optarg ;;
+
+ -gas | --gas | --ga | --g)
+ # Obsolete; use --with-gas.
+ with_gas=yes ;;
+
+ -help | --help | --hel | --he | -h)
+ ac_init_help=long ;;
+ -help=r* | --help=r* | --hel=r* | --he=r* | -hr*)
+ ac_init_help=recursive ;;
+ -help=s* | --help=s* | --hel=s* | --he=s* | -hs*)
+ ac_init_help=short ;;
+
+ -host | --host | --hos | --ho)
+ ac_prev=host_alias ;;
+ -host=* | --host=* | --hos=* | --ho=*)
+ host_alias=$ac_optarg ;;
+
+ -htmldir | --htmldir | --htmldi | --htmld | --html | --htm | --ht)
+ ac_prev=htmldir ;;
+ -htmldir=* | --htmldir=* | --htmldi=* | --htmld=* | --html=* | --htm=* \
+ | --ht=*)
+ htmldir=$ac_optarg ;;
+
+ -includedir | --includedir | --includedi | --included | --include \
+ | --includ | --inclu | --incl | --inc)
+ ac_prev=includedir ;;
+ -includedir=* | --includedir=* | --includedi=* | --included=* | --include=* \
+ | --includ=* | --inclu=* | --incl=* | --inc=*)
+ includedir=$ac_optarg ;;
+
+ -infodir | --infodir | --infodi | --infod | --info | --inf)
+ ac_prev=infodir ;;
+ -infodir=* | --infodir=* | --infodi=* | --infod=* | --info=* | --inf=*)
+ infodir=$ac_optarg ;;
+
+ -libdir | --libdir | --libdi | --libd)
+ ac_prev=libdir ;;
+ -libdir=* | --libdir=* | --libdi=* | --libd=*)
+ libdir=$ac_optarg ;;
+
+ -libexecdir | --libexecdir | --libexecdi | --libexecd | --libexec \
+ | --libexe | --libex | --libe)
+ ac_prev=libexecdir ;;
+ -libexecdir=* | --libexecdir=* | --libexecdi=* | --libexecd=* | --libexec=* \
+ | --libexe=* | --libex=* | --libe=*)
+ libexecdir=$ac_optarg ;;
+
+ -localedir | --localedir | --localedi | --localed | --locale)
+ ac_prev=localedir ;;
+ -localedir=* | --localedir=* | --localedi=* | --localed=* | --locale=*)
+ localedir=$ac_optarg ;;
+
+ -localstatedir | --localstatedir | --localstatedi | --localstated \
+ | --localstate | --localstat | --localsta | --localst | --locals)
+ ac_prev=localstatedir ;;
+ -localstatedir=* | --localstatedir=* | --localstatedi=* | --localstated=* \
+ | --localstate=* | --localstat=* | --localsta=* | --localst=* | --locals=*)
+ localstatedir=$ac_optarg ;;
+
+ -mandir | --mandir | --mandi | --mand | --man | --ma | --m)
+ ac_prev=mandir ;;
+ -mandir=* | --mandir=* | --mandi=* | --mand=* | --man=* | --ma=* | --m=*)
+ mandir=$ac_optarg ;;
+
+ -nfp | --nfp | --nf)
+ # Obsolete; use --without-fp.
+ with_fp=no ;;
+
+ -no-create | --no-create | --no-creat | --no-crea | --no-cre \
+ | --no-cr | --no-c | -n)
+ no_create=yes ;;
+
+ -no-recursion | --no-recursion | --no-recursio | --no-recursi \
+ | --no-recurs | --no-recur | --no-recu | --no-rec | --no-re | --no-r)
+ no_recursion=yes ;;
+
+ -oldincludedir | --oldincludedir | --oldincludedi | --oldincluded \
+ | --oldinclude | --oldinclud | --oldinclu | --oldincl | --oldinc \
+ | --oldin | --oldi | --old | --ol | --o)
+ ac_prev=oldincludedir ;;
+ -oldincludedir=* | --oldincludedir=* | --oldincludedi=* | --oldincluded=* \
+ | --oldinclude=* | --oldinclud=* | --oldinclu=* | --oldincl=* | --oldinc=* \
+ | --oldin=* | --oldi=* | --old=* | --ol=* | --o=*)
+ oldincludedir=$ac_optarg ;;
+
+ -prefix | --prefix | --prefi | --pref | --pre | --pr | --p)
+ ac_prev=prefix ;;
+ -prefix=* | --prefix=* | --prefi=* | --pref=* | --pre=* | --pr=* | --p=*)
+ prefix=$ac_optarg ;;
+
+ -program-prefix | --program-prefix | --program-prefi | --program-pref \
+ | --program-pre | --program-pr | --program-p)
+ ac_prev=program_prefix ;;
+ -program-prefix=* | --program-prefix=* | --program-prefi=* \
+ | --program-pref=* | --program-pre=* | --program-pr=* | --program-p=*)
+ program_prefix=$ac_optarg ;;
+
+ -program-suffix | --program-suffix | --program-suffi | --program-suff \
+ | --program-suf | --program-su | --program-s)
+ ac_prev=program_suffix ;;
+ -program-suffix=* | --program-suffix=* | --program-suffi=* \
+ | --program-suff=* | --program-suf=* | --program-su=* | --program-s=*)
+ program_suffix=$ac_optarg ;;
+
+ -program-transform-name | --program-transform-name \
+ | --program-transform-nam | --program-transform-na \
+ | --program-transform-n | --program-transform- \
+ | --program-transform | --program-transfor \
+ | --program-transfo | --program-transf \
+ | --program-trans | --program-tran \
+ | --progr-tra | --program-tr | --program-t)
+ ac_prev=program_transform_name ;;
+ -program-transform-name=* | --program-transform-name=* \
+ | --program-transform-nam=* | --program-transform-na=* \
+ | --program-transform-n=* | --program-transform-=* \
+ | --program-transform=* | --program-transfor=* \
+ | --program-transfo=* | --program-transf=* \
+ | --program-trans=* | --program-tran=* \
+ | --progr-tra=* | --program-tr=* | --program-t=*)
+ program_transform_name=$ac_optarg ;;
+
+ -pdfdir | --pdfdir | --pdfdi | --pdfd | --pdf | --pd)
+ ac_prev=pdfdir ;;
+ -pdfdir=* | --pdfdir=* | --pdfdi=* | --pdfd=* | --pdf=* | --pd=*)
+ pdfdir=$ac_optarg ;;
+
+ -psdir | --psdir | --psdi | --psd | --ps)
+ ac_prev=psdir ;;
+ -psdir=* | --psdir=* | --psdi=* | --psd=* | --ps=*)
+ psdir=$ac_optarg ;;
+
+ -q | -quiet | --quiet | --quie | --qui | --qu | --q \
+ | -silent | --silent | --silen | --sile | --sil)
+ silent=yes ;;
+
+ -runstatedir | --runstatedir | --runstatedi | --runstated \
+ | --runstate | --runstat | --runsta | --runst | --runs \
+ | --run | --ru | --r)
+ ac_prev=runstatedir ;;
+ -runstatedir=* | --runstatedir=* | --runstatedi=* | --runstated=* \
+ | --runstate=* | --runstat=* | --runsta=* | --runst=* | --runs=* \
+ | --run=* | --ru=* | --r=*)
+ runstatedir=$ac_optarg ;;
+
+ -sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb)
+ ac_prev=sbindir ;;
+ -sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \
+ | --sbi=* | --sb=*)
+ sbindir=$ac_optarg ;;
+
+ -sharedstatedir | --sharedstatedir | --sharedstatedi \
+ | --sharedstated | --sharedstate | --sharedstat | --sharedsta \
+ | --sharedst | --shareds | --shared | --share | --shar \
+ | --sha | --sh)
+ ac_prev=sharedstatedir ;;
+ -sharedstatedir=* | --sharedstatedir=* | --sharedstatedi=* \
+ | --sharedstated=* | --sharedstate=* | --sharedstat=* | --sharedsta=* \
+ | --sharedst=* | --shareds=* | --shared=* | --share=* | --shar=* \
+ | --sha=* | --sh=*)
+ sharedstatedir=$ac_optarg ;;
+
+ -site | --site | --sit)
+ ac_prev=site ;;
+ -site=* | --site=* | --sit=*)
+ site=$ac_optarg ;;
+
+ -srcdir | --srcdir | --srcdi | --srcd | --src | --sr)
+ ac_prev=srcdir ;;
+ -srcdir=* | --srcdir=* | --srcdi=* | --srcd=* | --src=* | --sr=*)
+ srcdir=$ac_optarg ;;
+
+ -sysconfdir | --sysconfdir | --sysconfdi | --sysconfd | --sysconf \
+ | --syscon | --sysco | --sysc | --sys | --sy)
+ ac_prev=sysconfdir ;;
+ -sysconfdir=* | --sysconfdir=* | --sysconfdi=* | --sysconfd=* | --sysconf=* \
+ | --syscon=* | --sysco=* | --sysc=* | --sys=* | --sy=*)
+ sysconfdir=$ac_optarg ;;
+
+ -target | --target | --targe | --targ | --tar | --ta | --t)
+ ac_prev=target_alias ;;
+ -target=* | --target=* | --targe=* | --targ=* | --tar=* | --ta=* | --t=*)
+ target_alias=$ac_optarg ;;
+
+ -v | -verbose | --verbose | --verbos | --verbo | --verb)
+ verbose=yes ;;
+
+ -version | --version | --versio | --versi | --vers | -V)
+ ac_init_version=: ;;
+
+ -with-* | --with-*)
+ ac_useropt=`expr "x$ac_option" : 'x-*with-\([^=]*\)'`
+ # Reject names that are not valid shell variable names.
+ expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null &&
+ as_fn_error $? "invalid package name: $ac_useropt"
+ ac_useropt_orig=$ac_useropt
+ ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'`
+ case $ac_user_opts in
+ *"
+"with_$ac_useropt"
+"*) ;;
+ *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--with-$ac_useropt_orig"
+ ac_unrecognized_sep=', ';;
+ esac
+ eval with_$ac_useropt=\$ac_optarg ;;
+
+ -without-* | --without-*)
+ ac_useropt=`expr "x$ac_option" : 'x-*without-\(.*\)'`
+ # Reject names that are not valid shell variable names.
+ expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null &&
+ as_fn_error $? "invalid package name: $ac_useropt"
+ ac_useropt_orig=$ac_useropt
+ ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'`
+ case $ac_user_opts in
+ *"
+"with_$ac_useropt"
+"*) ;;
+ *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--without-$ac_useropt_orig"
+ ac_unrecognized_sep=', ';;
+ esac
+ eval with_$ac_useropt=no ;;
+
+ --x)
+ # Obsolete; use --with-x.
+ with_x=yes ;;
+
+ -x-includes | --x-includes | --x-include | --x-includ | --x-inclu \
+ | --x-incl | --x-inc | --x-in | --x-i)
+ ac_prev=x_includes ;;
+ -x-includes=* | --x-includes=* | --x-include=* | --x-includ=* | --x-inclu=* \
+ | --x-incl=* | --x-inc=* | --x-in=* | --x-i=*)
+ x_includes=$ac_optarg ;;
+
+ -x-libraries | --x-libraries | --x-librarie | --x-librari \
+ | --x-librar | --x-libra | --x-libr | --x-lib | --x-li | --x-l)
+ ac_prev=x_libraries ;;
+ -x-libraries=* | --x-libraries=* | --x-librarie=* | --x-librari=* \
+ | --x-librar=* | --x-libra=* | --x-libr=* | --x-lib=* | --x-li=* | --x-l=*)
+ x_libraries=$ac_optarg ;;
+
+ -*) as_fn_error $? "unrecognized option: \`$ac_option'
+Try \`$0 --help' for more information"
+ ;;
+
+ *=*)
+ ac_envvar=`expr "x$ac_option" : 'x\([^=]*\)='`
+ # Reject names that are not valid shell variable names.
+ case $ac_envvar in #(
+ '' | [0-9]* | *[!_$as_cr_alnum]* )
+ as_fn_error $? "invalid variable name: \`$ac_envvar'" ;;
+ esac
+ eval $ac_envvar=\$ac_optarg
+ export $ac_envvar ;;
+
+ *)
+ # FIXME: should be removed in autoconf 3.0.
+ $as_echo "$as_me: WARNING: you should use --build, --host, --target" >&2
+ expr "x$ac_option" : ".*[^-._$as_cr_alnum]" >/dev/null &&
+ $as_echo "$as_me: WARNING: invalid host type: $ac_option" >&2
+ : "${build_alias=$ac_option} ${host_alias=$ac_option} ${target_alias=$ac_option}"
+ ;;
+
+ esac
+done
+
+if test -n "$ac_prev"; then
+ ac_option=--`echo $ac_prev | sed 's/_/-/g'`
+ as_fn_error $? "missing argument to $ac_option"
+fi
+
+if test -n "$ac_unrecognized_opts"; then
+ case $enable_option_checking in
+ no) ;;
+ fatal) as_fn_error $? "unrecognized options: $ac_unrecognized_opts" ;;
+ *) $as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2 ;;
+ esac
+fi
+
+# Check all directory arguments for consistency.
+for ac_var in exec_prefix prefix bindir sbindir libexecdir datarootdir \
+ datadir sysconfdir sharedstatedir localstatedir includedir \
+ oldincludedir docdir infodir htmldir dvidir pdfdir psdir \
+ libdir localedir mandir runstatedir
+do
+ eval ac_val=\$$ac_var
+ # Remove trailing slashes.
+ case $ac_val in
+ */ )
+ ac_val=`expr "X$ac_val" : 'X\(.*[^/]\)' \| "X$ac_val" : 'X\(.*\)'`
+ eval $ac_var=\$ac_val;;
+ esac
+ # Be sure to have absolute directory names.
+ case $ac_val in
+ [\\/$]* | ?:[\\/]* ) continue;;
+ NONE | '' ) case $ac_var in *prefix ) continue;; esac;;
+ esac
+ as_fn_error $? "expected an absolute directory name for --$ac_var: $ac_val"
+done
+
+# There might be people who depend on the old broken behavior: `$host'
+# used to hold the argument of --host etc.
+# FIXME: To remove some day.
+build=$build_alias
+host=$host_alias
+target=$target_alias
+
+# FIXME: To remove some day.
+if test "x$host_alias" != x; then
+ if test "x$build_alias" = x; then
+ cross_compiling=maybe
+ elif test "x$build_alias" != "x$host_alias"; then
+ cross_compiling=yes
+ fi
+fi
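+
+# Illustrative examples of the fallback above (hypothetical triplets):
+#
+#   ./configure --host=aarch64-linux-gnu
+#       # cross_compiling=maybe (no --build given)
+#   ./configure --build=x86_64-pc-linux-gnu --host=aarch64-linux-gnu
+#       # cross_compiling=yes (build and host differ)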
+
+ac_tool_prefix=
+test -n "$host_alias" && ac_tool_prefix=$host_alias-
+
+test "$silent" = yes && exec 6>/dev/null
+
+
+ac_pwd=`pwd` && test -n "$ac_pwd" &&
+ac_ls_di=`ls -di .` &&
+ac_pwd_ls_di=`cd "$ac_pwd" && ls -di .` ||
+ as_fn_error $? "working directory cannot be determined"
+test "X$ac_ls_di" = "X$ac_pwd_ls_di" ||
+ as_fn_error $? "pwd does not report name of working directory"
+
+
+# Find the source files, if location was not specified.
+if test -z "$srcdir"; then
+ ac_srcdir_defaulted=yes
+ # Try the directory containing this script, then the parent directory.
+ ac_confdir=`$as_dirname -- "$as_myself" ||
+$as_expr X"$as_myself" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+ X"$as_myself" : 'X\(//\)[^/]' \| \
+ X"$as_myself" : 'X\(//\)$' \| \
+ X"$as_myself" : 'X\(/\)' \| . 2>/dev/null ||
+$as_echo X"$as_myself" |
+ sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)[^/].*/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\).*/{
+ s//\1/
+ q
+ }
+ s/.*/./; q'`
+ srcdir=$ac_confdir
+ if test ! -r "$srcdir/$ac_unique_file"; then
+ srcdir=..
+ fi
+else
+ ac_srcdir_defaulted=no
+fi
+if test ! -r "$srcdir/$ac_unique_file"; then
+ test "$ac_srcdir_defaulted" = yes && srcdir="$ac_confdir or .."
+ as_fn_error $? "cannot find sources ($ac_unique_file) in $srcdir"
+fi
+ac_msg="sources are in $srcdir, but \`cd $srcdir' does not work"
+ac_abs_confdir=`(
+ cd "$srcdir" && test -r "./$ac_unique_file" || as_fn_error $? "$ac_msg"
+ pwd)`
+# When building in place, set srcdir=.
+if test "$ac_abs_confdir" = "$ac_pwd"; then
+ srcdir=.
+fi
+# Remove unnecessary trailing slashes from srcdir.
+# Double slashes in file names in object file debugging info
+# mess up M-x gdb in Emacs.
+case $srcdir in
+*/) srcdir=`expr "X$srcdir" : 'X\(.*[^/]\)' \| "X$srcdir" : 'X\(.*\)'`;;
+esac
+for ac_var in $ac_precious_vars; do
+ eval ac_env_${ac_var}_set=\${${ac_var}+set}
+ eval ac_env_${ac_var}_value=\$${ac_var}
+ eval ac_cv_env_${ac_var}_set=\${${ac_var}+set}
+ eval ac_cv_env_${ac_var}_value=\$${ac_var}
+done
+
+#
+# Report the --help message.
+#
+if test "$ac_init_help" = "long"; then
+ # Omit some internal or obsolete options to make the list less imposing.
+ # This message is too long to be a string in the A/UX 3.1 sh.
+ cat <<_ACEOF
+\`configure' configures netdata v1.17.0 to adapt to many kinds of systems.
+
+Usage: $0 [OPTION]... [VAR=VALUE]...
+
+To assign environment variables (e.g., CC, CFLAGS...), specify them as
+VAR=VALUE. See below for descriptions of some of the useful variables.
+
+Defaults for the options are specified in brackets.
+
+Configuration:
+ -h, --help display this help and exit
+ --help=short display options specific to this package
+ --help=recursive display the short help of all the included packages
+ -V, --version display version information and exit
+ -q, --quiet, --silent do not print \`checking ...' messages
+ --cache-file=FILE cache test results in FILE [disabled]
+ -C, --config-cache alias for \`--cache-file=config.cache'
+ -n, --no-create do not create output files
+ --srcdir=DIR find the sources in DIR [configure dir or \`..']
+
+Installation directories:
+ --prefix=PREFIX install architecture-independent files in PREFIX
+ [$ac_default_prefix]
+ --exec-prefix=EPREFIX install architecture-dependent files in EPREFIX
+ [PREFIX]
+
+By default, \`make install' will install all the files in
+\`$ac_default_prefix/bin', \`$ac_default_prefix/lib' etc. You can specify
+an installation prefix other than \`$ac_default_prefix' using \`--prefix',
+for instance \`--prefix=\$HOME'.
+
+For better control, use the options below.
+
+Fine tuning of the installation directories:
+ --bindir=DIR user executables [EPREFIX/bin]
+ --sbindir=DIR system admin executables [EPREFIX/sbin]
+ --libexecdir=DIR program executables [EPREFIX/libexec]
+ --sysconfdir=DIR read-only single-machine data [PREFIX/etc]
+ --sharedstatedir=DIR modifiable architecture-independent data [PREFIX/com]
+ --localstatedir=DIR modifiable single-machine data [PREFIX/var]
+ --runstatedir=DIR modifiable per-process data [LOCALSTATEDIR/run]
+ --libdir=DIR object code libraries [EPREFIX/lib]
+ --includedir=DIR C header files [PREFIX/include]
+ --oldincludedir=DIR C header files for non-gcc [/usr/include]
+ --datarootdir=DIR read-only arch.-independent data root [PREFIX/share]
+ --datadir=DIR read-only architecture-independent data [DATAROOTDIR]
+ --infodir=DIR info documentation [DATAROOTDIR/info]
+ --localedir=DIR locale-dependent data [DATAROOTDIR/locale]
+ --mandir=DIR man documentation [DATAROOTDIR/man]
+ --docdir=DIR documentation root [DATAROOTDIR/doc/netdata]
+ --htmldir=DIR html documentation [DOCDIR]
+ --dvidir=DIR dvi documentation [DOCDIR]
+ --pdfdir=DIR pdf documentation [DOCDIR]
+ --psdir=DIR ps documentation [DOCDIR]
+_ACEOF
+
+ cat <<\_ACEOF
+
+Program names:
+ --program-prefix=PREFIX prepend PREFIX to installed program names
+ --program-suffix=SUFFIX append SUFFIX to installed program names
+ --program-transform-name=PROGRAM run sed PROGRAM on installed program names
+
+System types:
+ --build=BUILD configure for building on BUILD [guessed]
+ --host=HOST cross-compile to build programs to run on HOST [BUILD]
+_ACEOF
+fi
+
+if test -n "$ac_init_help"; then
+ case $ac_init_help in
+ short | recursive ) echo "Configuration of netdata v1.17.0:";;
+ esac
+ cat <<\_ACEOF
+
+Optional Features:
+ --disable-option-checking ignore unrecognized --enable/--with options
+ --disable-FEATURE do not include FEATURE (same as --enable-FEATURE=no)
+ --enable-FEATURE[=ARG] include FEATURE [ARG=yes]
+ --enable-maintainer-mode
+ enable make rules and dependencies not useful (and
+ sometimes confusing) to the casual installer
+ --enable-silent-rules less verbose build output (undo: "make V=1")
+ --disable-silent-rules verbose build output (undo: "make V=0")
+ --enable-dependency-tracking
+ do not reject slow dependency extractors
+ --disable-dependency-tracking
+ speeds up one-time build
+ --enable-plugin-nfacct enable nfacct plugin [default autodetect]
+ --enable-plugin-freeipmi
+ enable freeipmi plugin [default autodetect]
+ --enable-plugin-cups enable cups plugin [default autodetect]
+ --enable-plugin-xenstat enable xenstat plugin [default autodetect]
+ --enable-backend-kinesis
+ enable kinesis backend [default autodetect]
+ --enable-backend-prometheus-remote-write
+ enable prometheus remote write backend [default
+ autodetect]
+ --enable-backend-mongodb
+ enable mongodb backend [default autodetect]
+ --enable-pedantic enable pedantic compiler warnings [default disabled]
+  --disable-accept4       assume system lacks accept4() [default autodetect]
+  --disable-x86-sse       disable SSE/SSE2 optimizations on x86
+                          [default enabled]
+  --disable-lto           disable link time optimizations
+                          [default autodetect]
+  --enable-https          enable SSL support [default autodetect]
+  --disable-dbengine      disable netdata dbengine [default autodetect]
+  --enable-jsonc          enable JSON-C support [default autodetect]
+
+Optional Packages:
+ --with-PACKAGE[=ARG] use PACKAGE [ARG=yes]
+ --without-PACKAGE do not use PACKAGE (same as --with-PACKAGE=no)
+ --with-webdir location of webdir [PKGDATADIR/web]
+ --with-libcap build with libcap [default autodetect]
+ --without-zlib build without zlib [default enabled]
+ --without-math build without math [default enabled]
+  --with-user             use this user to drop privileges [default nobody]
+  --with-jemalloc-prefix=PREFIX
+                          specify the jemalloc prefix [default=""]
+  --with-jemalloc=DIR     use a specific jemalloc library
+  --with-tcmalloc-lib     specify the tcmalloc library to use
+                          [default=tcmalloc]
+  --with-tcmalloc=DIR     use the tcmalloc library
+  --with-cups-config=path specify the path to the cups-config executable
+
+Some influential environment variables:
+ CC C compiler command
+ CFLAGS C compiler flags
+ LDFLAGS linker flags, e.g. -L<lib dir> if you have libraries in a
+ nonstandard directory <lib dir>
+ LIBS libraries to pass to the linker, e.g. -l<library>
+ CPPFLAGS (Objective) C/C++ preprocessor flags, e.g. -I<include dir> if
+ you have headers in a nonstandard directory <include dir>
+ CXX C++ compiler command
+ CXXFLAGS C++ compiler flags
+ PKG_CONFIG path to pkg-config utility
+ PKG_CONFIG_PATH
+ directories to add to pkg-config's search path
+ PKG_CONFIG_LIBDIR
+ path overriding pkg-config's built-in search path
+ CPP C preprocessor
+ MATH_CFLAGS C compiler flags for math
+ MATH_LIBS linker flags for math
+ ZLIB_CFLAGS C compiler flags for ZLIB, overriding pkg-config
+ ZLIB_LIBS linker flags for ZLIB, overriding pkg-config
+ UUID_CFLAGS C compiler flags for UUID, overriding pkg-config
+ UUID_LIBS linker flags for UUID, overriding pkg-config
+ JSON_CFLAGS C compiler flags for JSON, overriding pkg-config
+ JSON_LIBS linker flags for JSON, overriding pkg-config
+ SSE_CANDIDATE
+ C compiler flags for SSE
+ LIBCAP_CFLAGS
+ C compiler flags for LIBCAP, overriding pkg-config
+ LIBCAP_LIBS linker flags for LIBCAP, overriding pkg-config
+ IPMIMONITORING_CFLAGS
+ C compiler flags for IPMIMONITORING, overriding pkg-config
+ IPMIMONITORING_LIBS
+ linker flags for IPMIMONITORING, overriding pkg-config
+ NFACCT_CFLAGS
+ C compiler flags for NFACCT, overriding pkg-config
+ NFACCT_LIBS linker flags for NFACCT, overriding pkg-config
+ LIBMNL_CFLAGS
+ C compiler flags for LIBMNL, overriding pkg-config
+ LIBMNL_LIBS linker flags for LIBMNL, overriding pkg-config
+ YAJL_CFLAGS C compiler flags for YAJL, overriding pkg-config
+ YAJL_LIBS linker flags for YAJL, overriding pkg-config
+ XENLIGHT_CFLAGS
+ C compiler flags for XENLIGHT, overriding pkg-config
+ XENLIGHT_LIBS
+ linker flags for XENLIGHT, overriding pkg-config
+ LIBCRYPTO_CFLAGS
+ C compiler flags for LIBCRYPTO, overriding pkg-config
+ LIBCRYPTO_LIBS
+ linker flags for LIBCRYPTO, overriding pkg-config
+ LIBSSL_CFLAGS
+ C compiler flags for LIBSSL, overriding pkg-config
+ LIBSSL_LIBS linker flags for LIBSSL, overriding pkg-config
+ LIBCURL_CFLAGS
+ C compiler flags for LIBCURL, overriding pkg-config
+ LIBCURL_LIBS
+ linker flags for LIBCURL, overriding pkg-config
+ PROTOBUF_CFLAGS
+ C compiler flags for PROTOBUF, overriding pkg-config
+ PROTOBUF_LIBS
+ linker flags for PROTOBUF, overriding pkg-config
+ LIBMONGOC_CFLAGS
+ C compiler flags for LIBMONGOC, overriding pkg-config
+ LIBMONGOC_LIBS
+ linker flags for LIBMONGOC, overriding pkg-config
+
+Use these variables to override the choices made by `configure' or to help
+it find libraries and programs with nonstandard names/locations.
+
+Report bugs to the package provider.
+_ACEOF
+ac_status=$?
+fi
+
+if test "$ac_init_help" = "recursive"; then
+ # If there are subdirs, report their specific --help.
+ for ac_dir in : $ac_subdirs_all; do test "x$ac_dir" = x: && continue
+ test -d "$ac_dir" ||
+ { cd "$srcdir" && ac_pwd=`pwd` && srcdir=. && test -d "$ac_dir"; } ||
+ continue
+ ac_builddir=.
+
+case "$ac_dir" in
+.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;;
+*)
+ ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'`
+ # A ".." for each directory in $ac_dir_suffix.
+ ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'`
+ case $ac_top_builddir_sub in
+ "") ac_top_builddir_sub=. ac_top_build_prefix= ;;
+ *) ac_top_build_prefix=$ac_top_builddir_sub/ ;;
+ esac ;;
+esac
+ac_abs_top_builddir=$ac_pwd
+ac_abs_builddir=$ac_pwd$ac_dir_suffix
+# for backward compatibility:
+ac_top_builddir=$ac_top_build_prefix
+
+case $srcdir in
+ .) # We are building in place.
+ ac_srcdir=.
+ ac_top_srcdir=$ac_top_builddir_sub
+ ac_abs_top_srcdir=$ac_pwd ;;
+ [\\/]* | ?:[\\/]* ) # Absolute name.
+ ac_srcdir=$srcdir$ac_dir_suffix;
+ ac_top_srcdir=$srcdir
+ ac_abs_top_srcdir=$srcdir ;;
+ *) # Relative name.
+ ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix
+ ac_top_srcdir=$ac_top_build_prefix$srcdir
+ ac_abs_top_srcdir=$ac_pwd/$srcdir ;;
+esac
+ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix
+
+ cd "$ac_dir" || { ac_status=$?; continue; }
+  # Check for a guest configure script (configure.gnu) first.
+ if test -f "$ac_srcdir/configure.gnu"; then
+ echo &&
+ $SHELL "$ac_srcdir/configure.gnu" --help=recursive
+ elif test -f "$ac_srcdir/configure"; then
+ echo &&
+ $SHELL "$ac_srcdir/configure" --help=recursive
+ else
+ $as_echo "$as_me: WARNING: no configuration information is in $ac_dir" >&2
+ fi || ac_status=$?
+ cd "$ac_pwd" || { ac_status=$?; break; }
+ done
+fi
+
+test -n "$ac_init_help" && exit $ac_status
+if $ac_init_version; then
+ cat <<\_ACEOF
+netdata configure v1.17.0
+generated by GNU Autoconf 2.69
+
+Copyright (C) 2012 Free Software Foundation, Inc.
+This configure script is free software; the Free Software Foundation
+gives unlimited permission to copy, distribute and modify it.
+_ACEOF
+ exit
+fi
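+
+# A hypothetical invocation combining options documented in the help text
+# above (the values are examples, not recommendations):
+#
+#   ./configure --prefix=/opt/netdata \
+#       --enable-plugin-nfacct --disable-dbengine \
+#       --with-user=netdata --with-webdir=/opt/netdata/share/web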
+
+## ------------------------ ##
+## Autoconf initialization. ##
+## ------------------------ ##
+
+# ac_fn_c_try_compile LINENO
+# --------------------------
+# Try to compile conftest.$ac_ext, and return whether this succeeded.
+ac_fn_c_try_compile ()
+{
+ as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+ rm -f conftest.$ac_objext
+ if { { ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+ (eval "$ac_compile") 2>conftest.err
+ ac_status=$?
+ if test -s conftest.err; then
+ grep -v '^ *+' conftest.err >conftest.er1
+ cat conftest.er1 >&5
+ mv -f conftest.er1 conftest.err
+ fi
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then :
+ ac_retval=0
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_retval=1
+fi
+ eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+ as_fn_set_status $ac_retval
+
+} # ac_fn_c_try_compile
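+
+# Usage sketch (illustrative, mirroring the callers further below): a test
+# program is written to conftest.$ac_ext and the helper's exit status
+# drives the cache assignment.
+#
+#   cat confdefs.h - <<_EXAMPLE >conftest.$ac_ext
+#   /* end confdefs.h.  */
+#   int main () { return 0; }
+#   _EXAMPLE
+#   if ac_fn_c_try_compile "$LINENO"; then :
+#     : # compiled; record a "yes" result
+#   else
+#     : # did not compile; record a "no" result
+#   fi
+#   rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext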
+
+# ac_fn_cxx_try_compile LINENO
+# ----------------------------
+# Try to compile conftest.$ac_ext, and return whether this succeeded.
+ac_fn_cxx_try_compile ()
+{
+ as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+ rm -f conftest.$ac_objext
+ if { { ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+ (eval "$ac_compile") 2>conftest.err
+ ac_status=$?
+ if test -s conftest.err; then
+ grep -v '^ *+' conftest.err >conftest.er1
+ cat conftest.er1 >&5
+ mv -f conftest.er1 conftest.err
+ fi
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; } && {
+ test -z "$ac_cxx_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then :
+ ac_retval=0
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_retval=1
+fi
+ eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+ as_fn_set_status $ac_retval
+
+} # ac_fn_cxx_try_compile
+
+# ac_fn_c_try_cpp LINENO
+# ----------------------
+# Try to preprocess conftest.$ac_ext, and return whether this succeeded.
+ac_fn_c_try_cpp ()
+{
+ as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+ if { { ac_try="$ac_cpp conftest.$ac_ext"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+ (eval "$ac_cpp conftest.$ac_ext") 2>conftest.err
+ ac_status=$?
+ if test -s conftest.err; then
+ grep -v '^ *+' conftest.err >conftest.er1
+ cat conftest.er1 >&5
+ mv -f conftest.er1 conftest.err
+ fi
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; } > conftest.i && {
+ test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ }; then :
+ ac_retval=0
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_retval=1
+fi
+ eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+ as_fn_set_status $ac_retval
+
+} # ac_fn_c_try_cpp
+
+# ac_fn_c_check_header_mongrel LINENO HEADER VAR INCLUDES
+# -------------------------------------------------------
+# Tests whether HEADER exists, giving a warning if it cannot be compiled using
+# the include files in INCLUDES and setting the cache variable VAR
+# accordingly.
+ac_fn_c_check_header_mongrel ()
+{
+ as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+ if eval \${$3+:} false; then :
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5
+$as_echo_n "checking for $2... " >&6; }
+if eval \${$3+:} false; then :
+ $as_echo_n "(cached) " >&6
+fi
+eval ac_res=\$$3
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
+$as_echo "$ac_res" >&6; }
+else
+ # Is the header compilable?
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 usability" >&5
+$as_echo_n "checking $2 usability... " >&6; }
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+$4
+#include <$2>
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ ac_header_compiler=yes
+else
+ ac_header_compiler=no
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_compiler" >&5
+$as_echo "$ac_header_compiler" >&6; }
+
+# Is the header present?
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 presence" >&5
+$as_echo_n "checking $2 presence... " >&6; }
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <$2>
+_ACEOF
+if ac_fn_c_try_cpp "$LINENO"; then :
+ ac_header_preproc=yes
+else
+ ac_header_preproc=no
+fi
+rm -f conftest.err conftest.i conftest.$ac_ext
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_preproc" >&5
+$as_echo "$ac_header_preproc" >&6; }
+
+# So? What about this header?
+case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in #((
+ yes:no: )
+ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" >&5
+$as_echo "$as_me: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" >&2;}
+ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5
+$as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;}
+ ;;
+ no:yes:* )
+ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: present but cannot be compiled" >&5
+$as_echo "$as_me: WARNING: $2: present but cannot be compiled" >&2;}
+ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: check for missing prerequisite headers?" >&5
+$as_echo "$as_me: WARNING: $2: check for missing prerequisite headers?" >&2;}
+ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: see the Autoconf documentation" >&5
+$as_echo "$as_me: WARNING: $2: see the Autoconf documentation" >&2;}
+ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: section \"Present But Cannot Be Compiled\"" >&5
+$as_echo "$as_me: WARNING: $2: section \"Present But Cannot Be Compiled\"" >&2;}
+ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5
+$as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;}
+ ;;
+esac
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5
+$as_echo_n "checking for $2... " >&6; }
+if eval \${$3+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ eval "$3=\$ac_header_compiler"
+fi
+eval ac_res=\$$3
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
+$as_echo "$ac_res" >&6; }
+fi
+ eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+
+} # ac_fn_c_check_header_mongrel
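+
+# Illustrative sketch: in configure.ac, a probe such as
+# AC_CHECK_HEADERS([sys/prctl.h]) expands, roughly, into a call of the
+# helper above (sys/prctl.h is only an example header):
+#
+#   ac_fn_c_check_header_mongrel "$LINENO" "sys/prctl.h" \
+#       "ac_cv_header_sys_prctl_h" "$ac_includes_default"
+#
+# The "mongrel" part is the reconciliation above: when the compiler and
+# the preprocessor disagree, the compiler's verdict wins.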
+
+# ac_fn_c_try_run LINENO
+# ----------------------
+# Try to link conftest.$ac_ext and run the resulting program, returning
+# whether this succeeded. Assumes that executables *can* be run.
+ac_fn_c_try_run ()
+{
+ as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+ if { { ac_try="$ac_link"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+ (eval "$ac_link") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; } && { ac_try='./conftest$ac_exeext'
+ { { case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }; }; then :
+ ac_retval=0
+else
+ $as_echo "$as_me: program exited with status $ac_status" >&5
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_retval=$ac_status
+fi
+ rm -rf conftest.dSYM conftest_ipa8_conftest.oo
+ eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+ as_fn_set_status $ac_retval
+
+} # ac_fn_c_try_run
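+
+# Usage sketch (illustrative): because this helper executes the test
+# binary, callers pair it with a cross-compilation guard, as
+# ac_fn_c_compute_int does further below.
+#
+#   if test "$cross_compiling" = yes; then
+#     : # fall back to compile-time probing
+#   else
+#     if ac_fn_c_try_run "$LINENO"; then :
+#       : # program compiled, linked, ran, and exited with status 0
+#     fi
+#   fi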
+
+# ac_fn_c_check_header_compile LINENO HEADER VAR INCLUDES
+# -------------------------------------------------------
+# Tests whether HEADER exists and can be compiled using the include files in
+# INCLUDES, setting the cache variable VAR accordingly.
+ac_fn_c_check_header_compile ()
+{
+ as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5
+$as_echo_n "checking for $2... " >&6; }
+if eval \${$3+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+$4
+#include <$2>
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ eval "$3=yes"
+else
+ eval "$3=no"
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+eval ac_res=\$$3
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
+$as_echo "$ac_res" >&6; }
+ eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+
+} # ac_fn_c_check_header_compile
+
+# ac_fn_c_try_link LINENO
+# -----------------------
+# Try to link conftest.$ac_ext, and return whether this succeeded.
+ac_fn_c_try_link ()
+{
+ as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+ rm -f conftest.$ac_objext conftest$ac_exeext
+ if { { ac_try="$ac_link"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+ (eval "$ac_link") 2>conftest.err
+ ac_status=$?
+ if test -s conftest.err; then
+ grep -v '^ *+' conftest.err >conftest.er1
+ cat conftest.er1 >&5
+ mv -f conftest.er1 conftest.err
+ fi
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest$ac_exeext && {
+ test "$cross_compiling" = yes ||
+ test -x conftest$ac_exeext
+ }; then :
+ ac_retval=0
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_retval=1
+fi
+ # Delete the IPA/IPO (Inter Procedural Analysis/Optimization) information
+ # created by the PGI compiler (conftest_ipa8_conftest.oo), as it would
+ # interfere with the next link command; also delete a directory that is
+ # left behind by Apple's compiler. We do this before executing the actions.
+ rm -rf conftest.dSYM conftest_ipa8_conftest.oo
+ eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+ as_fn_set_status $ac_retval
+
+} # ac_fn_c_try_link
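+
+# Sketch (illustrative): producing conftest$ac_exeext makes this the
+# building block for library probes. Hypothetically: prepend a candidate
+# library to LIBS, link a program referencing one of its symbols, and on
+# success record flags such as the OPTIONAL_*_LIBS outputs substituted at
+# the top of this script.
+#
+#   save_LIBS="$LIBS"; LIBS="-llz4 $LIBS"
+#   # ...write a conftest referencing an LZ4 symbol...
+#   if ac_fn_c_try_link "$LINENO"; then :
+#     OPTIONAL_LZ4_LIBS="-llz4"   # hypothetical assignment
+#   fi
+#   LIBS="$save_LIBS"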
+
+# ac_fn_c_check_type LINENO TYPE VAR INCLUDES
+# -------------------------------------------
+# Tests whether TYPE exists after having included INCLUDES, setting cache
+# variable VAR accordingly.
+ac_fn_c_check_type ()
+{
+ as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5
+$as_echo_n "checking for $2... " >&6; }
+if eval \${$3+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ eval "$3=no"
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+$4
+int
+main ()
+{
+if (sizeof ($2))
+ return 0;
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+$4
+int
+main ()
+{
+if (sizeof (($2)))
+ return 0;
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+
+else
+ eval "$3=yes"
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+eval ac_res=\$$3
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
+$as_echo "$ac_res" >&6; }
+ eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+
+} # ac_fn_c_check_type
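+
+# Sketch of the double test above (illustrative): `sizeof ($2)' compiles
+# when $2 is either a type or an expression, while `sizeof (($2))'
+# compiles only when $2 is an expression, since a parenthesized type such
+# as sizeof ((int)) is a syntax error. A name is therefore reported as a
+# type exactly when the first program compiles and the second does not.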
+
+# ac_fn_c_check_func LINENO FUNC VAR
+# ----------------------------------
+# Tests whether FUNC exists, setting the cache variable VAR accordingly
+ac_fn_c_check_func ()
+{
+ as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5
+$as_echo_n "checking for $2... " >&6; }
+if eval \${$3+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+/* Define $2 to an innocuous variant, in case <limits.h> declares $2.
+ For example, HP-UX 11i <limits.h> declares gettimeofday. */
+#define $2 innocuous_$2
+
+/* System header to define __stub macros and hopefully few prototypes,
+ which can conflict with char $2 (); below.
+ Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
+ <limits.h> exists even on freestanding compilers. */
+
+#ifdef __STDC__
+# include <limits.h>
+#else
+# include <assert.h>
+#endif
+
+#undef $2
+
+/* Override any GCC internal prototype to avoid an error.
+ Use char because int might match the return type of a GCC
+ builtin and then its argument prototype would still apply. */
+#ifdef __cplusplus
+extern "C"
+#endif
+char $2 ();
+/* The GNU C library defines this for functions which it implements
+ to always fail with ENOSYS. Some functions are actually named
+ something starting with __ and the normal name is an alias. */
+#if defined __stub_$2 || defined __stub___$2
+choke me
+#endif
+
+int
+main ()
+{
+return $2 ();
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ eval "$3=yes"
+else
+ eval "$3=no"
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+fi
+eval ac_res=\$$3
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
+$as_echo "$ac_res" >&6; }
+ eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+
+} # ac_fn_c_check_func
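+
+# Usage sketch (illustrative): the dummy `char $2 ();' prototype above
+# bypasses compiler builtins, so the result depends only on whether the
+# linker can resolve the symbol. A call checking accept4 (an example
+# matching the --disable-accept4 option, not a claim about where netdata
+# performs the check) would look like:
+#
+#   ac_fn_c_check_func "$LINENO" "accept4" "ac_cv_func_accept4"
+#   if test "x$ac_cv_func_accept4" = xyes; then :
+#     : # symbol resolved; record the feature as available
+#   fi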
+
+# ac_fn_c_find_intX_t LINENO BITS VAR
+# -----------------------------------
+# Finds a signed integer type with width BITS, setting cache variable VAR
+# accordingly.
+ac_fn_c_find_intX_t ()
+{
+ as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for int$2_t" >&5
+$as_echo_n "checking for int$2_t... " >&6; }
+if eval \${$3+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ eval "$3=no"
+ # Order is important - never check a type that is potentially smaller
+ # than half of the expected target width.
+ for ac_type in int$2_t 'int' 'long int' \
+ 'long long int' 'short int' 'signed char'; do
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+$ac_includes_default
+ enum { N = $2 / 2 - 1 };
+int
+main ()
+{
+static int test_array [1 - 2 * !(0 < ($ac_type) ((((($ac_type) 1 << N) << N) - 1) * 2 + 1))];
+test_array [0] = 0;
+return test_array [0];
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+$ac_includes_default
+ enum { N = $2 / 2 - 1 };
+int
+main ()
+{
+static int test_array [1 - 2 * !(($ac_type) ((((($ac_type) 1 << N) << N) - 1) * 2 + 1)
+ < ($ac_type) ((((($ac_type) 1 << N) << N) - 1) * 2 + 2))];
+test_array [0] = 0;
+return test_array [0];
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+
+else
+ case $ac_type in #(
+ int$2_t) :
+ eval "$3=yes" ;; #(
+ *) :
+ eval "$3=\$ac_type" ;;
+esac
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ if eval test \"x\$"$3"\" = x"no"; then :
+
+else
+ break
+fi
+ done
+fi
+eval ac_res=\$$3
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
+$as_echo "$ac_res" >&6; }
+ eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+
+} # ac_fn_c_find_intX_t
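+
+# Arithmetic behind the probe above (illustrative): with N = BITS/2 - 1,
+#
+#   ((((T) 1 << N) << N) - 1) * 2 + 1  ==  2^(BITS-1) - 1
+#
+# the largest value a BITS-bit signed type holds. The first program keeps
+# T only if that value is positive in T (T is wide enough); the second
+# rejects T when the value plus one still fits (T is wider than BITS).
+# The shift is split into two N-bit steps so it remains defined even for
+# candidate types only half the target width.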
+
+# ac_fn_c_find_uintX_t LINENO BITS VAR
+# ------------------------------------
+# Finds an unsigned integer type with width BITS, setting cache variable VAR
+# accordingly.
+ac_fn_c_find_uintX_t ()
+{
+ as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for uint$2_t" >&5
+$as_echo_n "checking for uint$2_t... " >&6; }
+if eval \${$3+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ eval "$3=no"
+ # Order is important - never check a type that is potentially smaller
+ # than half of the expected target width.
+ for ac_type in uint$2_t 'unsigned int' 'unsigned long int' \
+ 'unsigned long long int' 'unsigned short int' 'unsigned char'; do
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+$ac_includes_default
+int
+main ()
+{
+static int test_array [1 - 2 * !((($ac_type) -1 >> ($2 / 2 - 1)) >> ($2 / 2 - 1) == 3)];
+test_array [0] = 0;
+return test_array [0];
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ case $ac_type in #(
+ uint$2_t) :
+ eval "$3=yes" ;; #(
+ *) :
+ eval "$3=\$ac_type" ;;
+esac
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ if eval test \"x\$"$3"\" = x"no"; then :
+
+else
+ break
+fi
+ done
+fi
+eval ac_res=\$$3
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
+$as_echo "$ac_res" >&6; }
+ eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+
+} # ac_fn_c_find_uintX_t
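+
+# Arithmetic behind the unsigned probe (illustrative): for an unsigned
+# type T of width W, (T) -1 == 2^W - 1; shifting that right twice by
+# BITS/2 - 1 leaves 2^(W - BITS + 2) - 1, which equals 3 exactly when
+# W == BITS. The compile-time test therefore accepts only candidates of
+# the requested width.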
+
+# ac_fn_c_check_decl LINENO SYMBOL VAR INCLUDES
+# ---------------------------------------------
+# Tests whether SYMBOL is declared in INCLUDES, setting cache variable VAR
+# accordingly.
+ac_fn_c_check_decl ()
+{
+ as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+ as_decl_name=`echo $2|sed 's/ *(.*//'`
+ as_decl_use=`echo $2|sed -e 's/(/((/' -e 's/)/) 0&/' -e 's/,/) 0& (/g'`
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $as_decl_name is declared" >&5
+$as_echo_n "checking whether $as_decl_name is declared... " >&6; }
+if eval \${$3+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+$4
+int
+main ()
+{
+#ifndef $as_decl_name
+#ifdef __cplusplus
+ (void) $as_decl_use;
+#else
+ (void) $as_decl_name;
+#endif
+#endif
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ eval "$3=yes"
+else
+ eval "$3=no"
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+eval ac_res=\$$3
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
+$as_echo "$ac_res" >&6; }
+ eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+
+} # ac_fn_c_check_decl
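+
+# Sketch of the sed rewriting above (illustrative): a parameterized symbol
+# becomes a checkable expression with dummy casts, roughly
+#
+#   strerror_r(int, char *, size_t)
+#     ->  strerror_r ((int) 0, ( char *) 0, ( size_t) 0)
+#
+# so `(void) $as_decl_use;' exercises the declaration in C++, where a bare
+# function name would not disambiguate overloads.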
+
+# ac_fn_c_compute_int LINENO EXPR VAR INCLUDES
+# --------------------------------------------
+# Tries to find the compile-time value of EXPR in a program that includes
+# INCLUDES, setting VAR accordingly. Returns whether the value could be
+# computed.
+ac_fn_c_compute_int ()
+{
+ as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+ if test "$cross_compiling" = yes; then
+ # Depending upon the size, compute the lo and hi bounds.
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+$4
+int
+main ()
+{
+static int test_array [1 - 2 * !(($2) >= 0)];
+test_array [0] = 0;
+return test_array [0];
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ ac_lo=0 ac_mid=0
+ while :; do
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+$4
+int
+main ()
+{
+static int test_array [1 - 2 * !(($2) <= $ac_mid)];
+test_array [0] = 0;
+return test_array [0];
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ ac_hi=$ac_mid; break
+else
+ as_fn_arith $ac_mid + 1 && ac_lo=$as_val
+ if test $ac_lo -le $ac_mid; then
+ ac_lo= ac_hi=
+ break
+ fi
+ as_fn_arith 2 '*' $ac_mid + 1 && ac_mid=$as_val
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ done
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+$4
+int
+main ()
+{
+static int test_array [1 - 2 * !(($2) < 0)];
+test_array [0] = 0;
+return test_array [0];
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ ac_hi=-1 ac_mid=-1
+ while :; do
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+$4
+int
+main ()
+{
+static int test_array [1 - 2 * !(($2) >= $ac_mid)];
+test_array [0] = 0;
+return test_array [0];
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ ac_lo=$ac_mid; break
+else
+ as_fn_arith '(' $ac_mid ')' - 1 && ac_hi=$as_val
+ if test $ac_mid -le $ac_hi; then
+ ac_lo= ac_hi=
+ break
+ fi
+ as_fn_arith 2 '*' $ac_mid && ac_mid=$as_val
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ done
+else
+ ac_lo= ac_hi=
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+# Binary search between lo and hi bounds.
+while test "x$ac_lo" != "x$ac_hi"; do
+ as_fn_arith '(' $ac_hi - $ac_lo ')' / 2 + $ac_lo && ac_mid=$as_val
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+$4
+int
+main ()
+{
+static int test_array [1 - 2 * !(($2) <= $ac_mid)];
+test_array [0] = 0;
+return test_array [0];
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ ac_hi=$ac_mid
+else
+ as_fn_arith '(' $ac_mid ')' + 1 && ac_lo=$as_val
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+done
+case $ac_lo in #((
+?*) eval "$3=\$ac_lo"; ac_retval=0 ;;
+'') ac_retval=1 ;;
+esac
+ else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+$4
+static long int longval () { return $2; }
+static unsigned long int ulongval () { return $2; }
+#include <stdio.h>
+#include <stdlib.h>
+int
+main ()
+{
+
+ FILE *f = fopen ("conftest.val", "w");
+ if (! f)
+ return 1;
+ if (($2) < 0)
+ {
+ long int i = longval ();
+ if (i != ($2))
+ return 1;
+ fprintf (f, "%ld", i);
+ }
+ else
+ {
+ unsigned long int i = ulongval ();
+ if (i != ($2))
+ return 1;
+ fprintf (f, "%lu", i);
+ }
+ /* Do not output a trailing newline, as this causes \r\n confusion
+ on some platforms. */
+ return ferror (f) || fclose (f) != 0;
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_run "$LINENO"; then :
+ echo >>conftest.val; read $3 <conftest.val; ac_retval=0
+else
+ ac_retval=1
+fi
+rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \
+ conftest.$ac_objext conftest.beam conftest.$ac_ext
+rm -f conftest.val
+
+ fi
+ eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+ as_fn_set_status $ac_retval
+
+} # ac_fn_c_compute_int
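+
+# Illustrative walk-through: when cross-compiling, the value cannot be
+# printed at run time, so it is bracketed purely with compile-time
+# comparisons: $ac_mid grows as 0, 1, 3, 7, 15, ... until EXPR <= $ac_mid
+# holds, then the binary search narrows the bracket. For EXPR equal to
+# sizeof (long) on a hypothetical 64-bit target this converges on 8. In
+# the native case the generated program simply prints the value into
+# conftest.val.
+#
+#   ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (long))" \
+#       "ac_cv_sizeof_long" "$ac_includes_default"   # hypothetical call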
+
+# ac_fn_cxx_try_link LINENO
+# -------------------------
+# Try to link conftest.$ac_ext, and return whether this succeeded.
+ac_fn_cxx_try_link ()
+{
+ as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+ rm -f conftest.$ac_objext conftest$ac_exeext
+ if { { ac_try="$ac_link"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+ (eval "$ac_link") 2>conftest.err
+ ac_status=$?
+ if test -s conftest.err; then
+ grep -v '^ *+' conftest.err >conftest.er1
+ cat conftest.er1 >&5
+ mv -f conftest.er1 conftest.err
+ fi
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; } && {
+ test -z "$ac_cxx_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest$ac_exeext && {
+ test "$cross_compiling" = yes ||
+ test -x conftest$ac_exeext
+ }; then :
+ ac_retval=0
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_retval=1
+fi
+ # Delete the IPA/IPO (Inter Procedural Analysis/Optimization) information
+ # created by the PGI compiler (conftest_ipa8_conftest.oo), as it would
+ # interfere with the next link command; also delete a directory that is
+ # left behind by Apple's compiler. We do this before executing the actions.
+ rm -rf conftest.dSYM conftest_ipa8_conftest.oo
+ eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+ as_fn_set_status $ac_retval
+
+} # ac_fn_cxx_try_link
+cat >config.log <<_ACEOF
+This file contains any messages produced by compilers while
+running configure, to aid debugging if configure makes a mistake.
+
+It was created by netdata $as_me v1.17.0, which was
+generated by GNU Autoconf 2.69. Invocation command line was
+
+ $ $0 $@
+
+_ACEOF
+exec 5>>config.log
+{
+cat <<_ASUNAME
+## --------- ##
+## Platform. ##
+## --------- ##
+
+hostname = `(hostname || uname -n) 2>/dev/null | sed 1q`
+uname -m = `(uname -m) 2>/dev/null || echo unknown`
+uname -r = `(uname -r) 2>/dev/null || echo unknown`
+uname -s = `(uname -s) 2>/dev/null || echo unknown`
+uname -v = `(uname -v) 2>/dev/null || echo unknown`
+
+/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null || echo unknown`
+/bin/uname -X = `(/bin/uname -X) 2>/dev/null || echo unknown`
+
+/bin/arch = `(/bin/arch) 2>/dev/null || echo unknown`
+/usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null || echo unknown`
+/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null || echo unknown`
+/usr/bin/hostinfo = `(/usr/bin/hostinfo) 2>/dev/null || echo unknown`
+/bin/machine = `(/bin/machine) 2>/dev/null || echo unknown`
+/usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null || echo unknown`
+/bin/universe = `(/bin/universe) 2>/dev/null || echo unknown`
+
+_ASUNAME
+
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ $as_echo "PATH: $as_dir"
+ done
+IFS=$as_save_IFS
+
+} >&5
+
+cat >&5 <<_ACEOF
+
+
+## ----------- ##
+## Core tests. ##
+## ----------- ##
+
+_ACEOF
+
+
+# Keep a trace of the command line.
+# Strip out --no-create and --no-recursion so they do not pile up.
+# Strip out --silent because we don't want to record it for future runs.
+# Also quote any args containing shell meta-characters.
+# Make two passes to allow for proper duplicate-argument suppression.
+ac_configure_args=
+ac_configure_args0=
+ac_configure_args1=
+ac_must_keep_next=false
+for ac_pass in 1 2
+do
+ for ac_arg
+ do
+ case $ac_arg in
+ -no-create | --no-c* | -n | -no-recursion | --no-r*) continue ;;
+ -q | -quiet | --quiet | --quie | --qui | --qu | --q \
+ | -silent | --silent | --silen | --sile | --sil)
+ continue ;;
+ *\'*)
+ ac_arg=`$as_echo "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;;
+ esac
+ case $ac_pass in
+ 1) as_fn_append ac_configure_args0 " '$ac_arg'" ;;
+ 2)
+ as_fn_append ac_configure_args1 " '$ac_arg'"
+ if test $ac_must_keep_next = true; then
+ ac_must_keep_next=false # Got value, back to normal.
+ else
+ case $ac_arg in
+ *=* | --config-cache | -C | -disable-* | --disable-* \
+ | -enable-* | --enable-* | -gas | --g* | -nfp | --nf* \
+ | -q | -quiet | --q* | -silent | --sil* | -v | -verb* \
+ | -with-* | --with-* | -without-* | --without-* | --x)
+ case "$ac_configure_args0 " in
+ "$ac_configure_args1"*" '$ac_arg' "* ) continue ;;
+ esac
+ ;;
+ -* ) ac_must_keep_next=true ;;
+ esac
+ fi
+ as_fn_append ac_configure_args " '$ac_arg'"
+ ;;
+ esac
+ done
+done
+{ ac_configure_args0=; unset ac_configure_args0;}
+{ ac_configure_args1=; unset ac_configure_args1;}
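+
+# [Editor's sketch, not autoconf output] The sed expression above turns
+# each embedded single quote into the '\'' idiom so that the recorded
+# arguments can be re-evaluated safely later; for example
+#   ./configure --with-user="it's me"
+# is recorded in $ac_configure_args as '--with-user=it'\''s me'.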
+
+# When interrupted or exiting, clean up temporary files and complete
+# config.log. We strip comments because the quotes in them would
+# cause problems or look ugly.
+# WARNING: Use '\'' to represent an apostrophe within the trap.
+# WARNING: Do not start the trap code with a newline, due to a FreeBSD 4.0 bug.
+trap 'exit_status=$?
+ # Save into config.log some information that might help in debugging.
+ {
+ echo
+
+ $as_echo "## ---------------- ##
+## Cache variables. ##
+## ---------------- ##"
+ echo
+  # The following way of writing the cache mishandles newlines in values,
+  # so cache variables that contain them are flagged and cleared below.
+(
+ for ac_var in `(set) 2>&1 | sed -n '\''s/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'\''`; do
+ eval ac_val=\$$ac_var
+ case $ac_val in #(
+ *${as_nl}*)
+ case $ac_var in #(
+ *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5
+$as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;;
+ esac
+ case $ac_var in #(
+ _ | IFS | as_nl) ;; #(
+ BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #(
+ *) { eval $ac_var=; unset $ac_var;} ;;
+ esac ;;
+ esac
+ done
+ (set) 2>&1 |
+ case $as_nl`(ac_space='\'' '\''; set) 2>&1` in #(
+ *${as_nl}ac_space=\ *)
+ sed -n \
+ "s/'\''/'\''\\\\'\'''\''/g;
+ s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\''\\2'\''/p"
+ ;; #(
+ *)
+ sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p"
+ ;;
+ esac |
+ sort
+)
+ echo
+
+ $as_echo "## ----------------- ##
+## Output variables. ##
+## ----------------- ##"
+ echo
+ for ac_var in $ac_subst_vars
+ do
+ eval ac_val=\$$ac_var
+ case $ac_val in
+ *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;;
+ esac
+ $as_echo "$ac_var='\''$ac_val'\''"
+ done | sort
+ echo
+
+ if test -n "$ac_subst_files"; then
+ $as_echo "## ------------------- ##
+## File substitutions. ##
+## ------------------- ##"
+ echo
+ for ac_var in $ac_subst_files
+ do
+ eval ac_val=\$$ac_var
+ case $ac_val in
+ *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;;
+ esac
+ $as_echo "$ac_var='\''$ac_val'\''"
+ done | sort
+ echo
+ fi
+
+ if test -s confdefs.h; then
+ $as_echo "## ----------- ##
+## confdefs.h. ##
+## ----------- ##"
+ echo
+ cat confdefs.h
+ echo
+ fi
+ test "$ac_signal" != 0 &&
+ $as_echo "$as_me: caught signal $ac_signal"
+ $as_echo "$as_me: exit $exit_status"
+ } >&5
+ rm -f core *.core core.conftest.* &&
+ rm -f -r conftest* confdefs* conf$$* $ac_clean_files &&
+ exit $exit_status
+' 0
+for ac_signal in 1 2 13 15; do
+ trap 'ac_signal='$ac_signal'; as_fn_exit 1' $ac_signal
+done
+ac_signal=0
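+
+# [Editor's sketch, not autoconf output] Signal 0 above is the shell's
+# EXIT pseudo-signal, so the long cleanup trap fires on every exit path,
+# while HUP(1), INT(2), PIPE(13) and TERM(15) are routed through
+# as_fn_exit first; the general shape is:
+#   trap 'rm -f conftest*' 0    # run cleanup on any exit
+#   trap 'exit 1' 2             # turn Ctrl-C into an orderly exit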
+
+# confdefs.h avoids OS command line length limits that DEFS can exceed.
+rm -f -r conftest* confdefs.h
+
+$as_echo "/* confdefs.h */" > confdefs.h
+
+# Predefined preprocessor variables.
+
+cat >>confdefs.h <<_ACEOF
+#define PACKAGE_NAME "$PACKAGE_NAME"
+_ACEOF
+
+cat >>confdefs.h <<_ACEOF
+#define PACKAGE_TARNAME "$PACKAGE_TARNAME"
+_ACEOF
+
+cat >>confdefs.h <<_ACEOF
+#define PACKAGE_VERSION "$PACKAGE_VERSION"
+_ACEOF
+
+cat >>confdefs.h <<_ACEOF
+#define PACKAGE_STRING "$PACKAGE_STRING"
+_ACEOF
+
+cat >>confdefs.h <<_ACEOF
+#define PACKAGE_BUGREPORT "$PACKAGE_BUGREPORT"
+_ACEOF
+
+cat >>confdefs.h <<_ACEOF
+#define PACKAGE_URL "$PACKAGE_URL"
+_ACEOF
+
+
+# Let the site file select an alternate cache file if it wants to.
+# Prefer an explicitly selected file to automatically selected ones.
+ac_site_file1=NONE
+ac_site_file2=NONE
+if test -n "$CONFIG_SITE"; then
+ # We do not want a PATH search for config.site.
+ case $CONFIG_SITE in #((
+ -*) ac_site_file1=./$CONFIG_SITE;;
+ */*) ac_site_file1=$CONFIG_SITE;;
+ *) ac_site_file1=./$CONFIG_SITE;;
+ esac
+elif test "x$prefix" != xNONE; then
+ ac_site_file1=$prefix/share/config.site
+ ac_site_file2=$prefix/etc/config.site
+else
+ ac_site_file1=$ac_default_prefix/share/config.site
+ ac_site_file2=$ac_default_prefix/etc/config.site
+fi
+for ac_site_file in "$ac_site_file1" "$ac_site_file2"
+do
+ test "x$ac_site_file" = xNONE && continue
+ if test /dev/null != "$ac_site_file" && test -r "$ac_site_file"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: loading site script $ac_site_file" >&5
+$as_echo "$as_me: loading site script $ac_site_file" >&6;}
+ sed 's/^/| /' "$ac_site_file" >&5
+ . "$ac_site_file" \
+ || { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error $? "failed to load site script $ac_site_file
+See \`config.log' for more details" "$LINENO" 5; }
+ fi
+done
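+
+# [Editor's sketch, not autoconf output] A site script lets an admin
+# pre-seed every configure run on a host; e.g. invoking
+#   CONFIG_SITE=/usr/local/etc/config.site ./configure
+# sources that file, which may contain plain shell defaults such as
+#   test "$prefix" = NONE && prefix=/usr/local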
+
+if test -r "$cache_file"; then
+ # Some versions of bash will fail to source /dev/null (special files
+ # actually), so we avoid doing that. DJGPP emulates it as a regular file.
+ if test /dev/null != "$cache_file" && test -f "$cache_file"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: loading cache $cache_file" >&5
+$as_echo "$as_me: loading cache $cache_file" >&6;}
+ case $cache_file in
+ [\\/]* | ?:[\\/]* ) . "$cache_file";;
+ *) . "./$cache_file";;
+ esac
+ fi
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: creating cache $cache_file" >&5
+$as_echo "$as_me: creating cache $cache_file" >&6;}
+ >$cache_file
+fi
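+
+# [Editor's sketch, not autoconf output] The cache file is ordinary
+# shell, one precomputed result per assignment, so a re-run such as
+#   ./configure --cache-file=config.cache
+# can skip probes whose answers were already saved, e.g.:
+#   ac_cv_c_compiler_gnu=yes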
+
+as_fn_append ac_header_list " sys/prctl.h"
+as_fn_append ac_header_list " sys/vfs.h"
+as_fn_append ac_header_list " sys/statfs.h"
+as_fn_append ac_header_list " sys/statvfs.h"
+as_fn_append ac_header_list " sys/mount.h"
+as_fn_append ac_func_list " accept4"
+# Check that the precious variables saved in the cache have kept the same
+# value.
+ac_cache_corrupted=false
+for ac_var in $ac_precious_vars; do
+ eval ac_old_set=\$ac_cv_env_${ac_var}_set
+ eval ac_new_set=\$ac_env_${ac_var}_set
+ eval ac_old_val=\$ac_cv_env_${ac_var}_value
+ eval ac_new_val=\$ac_env_${ac_var}_value
+ case $ac_old_set,$ac_new_set in
+ set,)
+ { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&5
+$as_echo "$as_me: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&2;}
+ ac_cache_corrupted=: ;;
+ ,set)
+ { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was not set in the previous run" >&5
+$as_echo "$as_me: error: \`$ac_var' was not set in the previous run" >&2;}
+ ac_cache_corrupted=: ;;
+ ,);;
+ *)
+ if test "x$ac_old_val" != "x$ac_new_val"; then
+ # differences in whitespace do not lead to failure.
+ ac_old_val_w=`echo x $ac_old_val`
+ ac_new_val_w=`echo x $ac_new_val`
+ if test "$ac_old_val_w" != "$ac_new_val_w"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' has changed since the previous run:" >&5
+$as_echo "$as_me: error: \`$ac_var' has changed since the previous run:" >&2;}
+ ac_cache_corrupted=:
+ else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&5
+$as_echo "$as_me: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&2;}
+ eval $ac_var=\$ac_old_val
+ fi
+ { $as_echo "$as_me:${as_lineno-$LINENO}: former value: \`$ac_old_val'" >&5
+$as_echo "$as_me: former value: \`$ac_old_val'" >&2;}
+ { $as_echo "$as_me:${as_lineno-$LINENO}: current value: \`$ac_new_val'" >&5
+$as_echo "$as_me: current value: \`$ac_new_val'" >&2;}
+ fi;;
+ esac
+ # Pass precious variables to config.status.
+ if test "$ac_new_set" = set; then
+ case $ac_new_val in
+ *\'*) ac_arg=$ac_var=`$as_echo "$ac_new_val" | sed "s/'/'\\\\\\\\''/g"` ;;
+ *) ac_arg=$ac_var=$ac_new_val ;;
+ esac
+ case " $ac_configure_args " in
+ *" '$ac_arg' "*) ;; # Avoid dups. Use of quotes ensures accuracy.
+ *) as_fn_append ac_configure_args " '$ac_arg'" ;;
+ esac
+ fi
+done
+if $ac_cache_corrupted; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+ { $as_echo "$as_me:${as_lineno-$LINENO}: error: changes in the environment can compromise the build" >&5
+$as_echo "$as_me: error: changes in the environment can compromise the build" >&2;}
+ as_fn_error $? "run \`make distclean' and/or \`rm $cache_file' and start over" "$LINENO" 5
+fi
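+
+# [Editor's sketch, not autoconf output] "Precious" variables are the
+# user-facing inputs (CC, CFLAGS, ...) whose values are mirrored in the
+# cache as ac_cv_env_* pairs; the loop above aborts when a saved
+#   ac_cv_env_CC_value=gcc
+# no longer matches a re-run invoked as, say, CC=clang ./configure.
+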
+## -------------------- ##
+## Main body of script. ##
+## -------------------- ##
+
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to enable maintainer-specific portions of Makefiles" >&5
+$as_echo_n "checking whether to enable maintainer-specific portions of Makefiles... " >&6; }
+ # Check whether --enable-maintainer-mode was given.
+if test "${enable_maintainer_mode+set}" = set; then :
+ enableval=$enable_maintainer_mode; USE_MAINTAINER_MODE=$enableval
+else
+ USE_MAINTAINER_MODE=no
+fi
+
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $USE_MAINTAINER_MODE" >&5
+$as_echo "$USE_MAINTAINER_MODE" >&6; }
+ if test $USE_MAINTAINER_MODE = yes; then
+ MAINTAINER_MODE_TRUE=
+ MAINTAINER_MODE_FALSE='#'
+else
+ MAINTAINER_MODE_TRUE='#'
+ MAINTAINER_MODE_FALSE=
+fi
+
+ MAINT=$MAINTAINER_MODE_TRUE
+
+
+if test x"$USE_MAINTAINER_MODE" = xyes; then
+{ $as_echo "$as_me:${as_lineno-$LINENO}: ***************** MAINTAINER MODE *****************" >&5
+$as_echo "$as_me: ***************** MAINTAINER MODE *****************" >&6;}
+fi
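+
+# [Editor's sketch, not autoconf output] An automake conditional is just
+# a pair of substitutions that expand to nothing or to '#', so a
+# Makefile.in line such as (hypothetical target)
+#   @MAINTAINER_MODE_TRUE@rebuild-docs: ...
+# becomes a live rule when maintainer mode is on and a comment when off.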
+
+PACKAGE_RPM_VERSION="v1.17.0"
+
+
+# -----------------------------------------------------------------------------
+# autoconf initialization
+
+ac_aux_dir=
+for ac_dir in . "$srcdir"/.; do
+ if test -f "$ac_dir/install-sh"; then
+ ac_aux_dir=$ac_dir
+ ac_install_sh="$ac_aux_dir/install-sh -c"
+ break
+ elif test -f "$ac_dir/install.sh"; then
+ ac_aux_dir=$ac_dir
+ ac_install_sh="$ac_aux_dir/install.sh -c"
+ break
+ elif test -f "$ac_dir/shtool"; then
+ ac_aux_dir=$ac_dir
+ ac_install_sh="$ac_aux_dir/shtool install -c"
+ break
+ fi
+done
+if test -z "$ac_aux_dir"; then
+ as_fn_error $? "cannot find install-sh, install.sh, or shtool in . \"$srcdir\"/." "$LINENO" 5
+fi
+
+# These three variables are undocumented and unsupported,
+# and are intended to be withdrawn in a future Autoconf release.
+# They can cause serious problems if a builder's source tree is in a directory
+# whose full name contains unusual characters.
+ac_config_guess="$SHELL $ac_aux_dir/config.guess" # Please don't use this var.
+ac_config_sub="$SHELL $ac_aux_dir/config.sub" # Please don't use this var.
+ac_configure="$SHELL $ac_aux_dir/configure" # Please don't use this var.
+
+
+ac_config_headers="$ac_config_headers config.h"
+
+
+
+
+
+
+
+am__api_version='1.15'
+
+# Find a good install program. We prefer a C program (faster),
+# so one script is as good as another. But avoid the broken or
+# incompatible versions:
+# SysV /etc/install, /usr/sbin/install
+# SunOS /usr/etc/install
+# IRIX /sbin/install
+# AIX /bin/install
+# AmigaOS /C/install, which installs bootblocks on floppy discs
+# AIX 4 /usr/bin/installbsd, which doesn't work without a -g flag
+# AFS /usr/afsws/bin/install, which mishandles nonexistent args
+# SVR4 /usr/ucb/install, which tries to use the nonexistent group "staff"
+# OS/2's system install, which has a completely different semantic
+# ./install, which can be erroneously created by make from ./install.sh.
+# Reject install programs that cannot install multiple files.
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for a BSD-compatible install" >&5
+$as_echo_n "checking for a BSD-compatible install... " >&6; }
+if test -z "$INSTALL"; then
+if ${ac_cv_path_install+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ # Account for people who put trailing slashes in PATH elements.
+case $as_dir/ in #((
+ ./ | .// | /[cC]/* | \
+ /etc/* | /usr/sbin/* | /usr/etc/* | /sbin/* | /usr/afsws/bin/* | \
+ ?:[\\/]os2[\\/]install[\\/]* | ?:[\\/]OS2[\\/]INSTALL[\\/]* | \
+ /usr/ucb/* ) ;;
+ *)
+ # OSF1 and SCO ODT 3.0 have their own names for install.
+ # Don't use installbsd from OSF since it installs stuff as root
+ # by default.
+ for ac_prog in ginstall scoinst install; do
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if as_fn_executable_p "$as_dir/$ac_prog$ac_exec_ext"; then
+ if test $ac_prog = install &&
+ grep dspmsg "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then
+ # AIX install. It has an incompatible calling convention.
+ :
+ elif test $ac_prog = install &&
+ grep pwplus "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then
+ # program-specific install script used by HP pwplus--don't use.
+ :
+ else
+ rm -rf conftest.one conftest.two conftest.dir
+ echo one > conftest.one
+ echo two > conftest.two
+ mkdir conftest.dir
+ if "$as_dir/$ac_prog$ac_exec_ext" -c conftest.one conftest.two "`pwd`/conftest.dir" &&
+ test -s conftest.one && test -s conftest.two &&
+ test -s conftest.dir/conftest.one &&
+ test -s conftest.dir/conftest.two
+ then
+ ac_cv_path_install="$as_dir/$ac_prog$ac_exec_ext -c"
+ break 3
+ fi
+ fi
+ fi
+ done
+ done
+ ;;
+esac
+
+ done
+IFS=$as_save_IFS
+
+rm -rf conftest.one conftest.two conftest.dir
+
+fi
+ if test "${ac_cv_path_install+set}" = set; then
+ INSTALL=$ac_cv_path_install
+ else
+ # As a last resort, use the slow shell script. Don't cache a
+ # value for INSTALL within a source directory, because that will
+ # break other packages using the cache if that directory is
+ # removed, or if the value is a relative name.
+ INSTALL=$ac_install_sh
+ fi
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $INSTALL" >&5
+$as_echo "$INSTALL" >&6; }
+
+# Use test -z because SunOS4 sh mishandles braces in ${var-val}.
+# It thinks the first close brace ends the variable substitution.
+test -z "$INSTALL_PROGRAM" && INSTALL_PROGRAM='${INSTALL}'
+
+test -z "$INSTALL_SCRIPT" && INSTALL_SCRIPT='${INSTALL}'
+
+test -z "$INSTALL_DATA" && INSTALL_DATA='${INSTALL} -m 644'
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether build environment is sane" >&5
+$as_echo_n "checking whether build environment is sane... " >&6; }
+# Reject unsafe characters in $srcdir or the absolute working directory
+# name. Accept space and tab only in the latter.
+am_lf='
+'
+case `pwd` in
+ *[\\\"\#\$\&\'\`$am_lf]*)
+ as_fn_error $? "unsafe absolute working directory name" "$LINENO" 5;;
+esac
+case $srcdir in
+  *[\\\"\#\$\&\'\`$am_lf\ \	]*)
+ as_fn_error $? "unsafe srcdir value: '$srcdir'" "$LINENO" 5;;
+esac
+
+# Do 'set' in a subshell so we don't clobber the current shell's
+# arguments. Must try -L first in case configure is actually a
+# symlink; some systems play weird games with the mod time of symlinks
+# (eg FreeBSD returns the mod time of the symlink's containing
+# directory).
+if (
+ am_has_slept=no
+ for am_try in 1 2; do
+ echo "timestamp, slept: $am_has_slept" > conftest.file
+ set X `ls -Lt "$srcdir/configure" conftest.file 2> /dev/null`
+ if test "$*" = "X"; then
+ # -L didn't work.
+ set X `ls -t "$srcdir/configure" conftest.file`
+ fi
+ if test "$*" != "X $srcdir/configure conftest.file" \
+ && test "$*" != "X conftest.file $srcdir/configure"; then
+
+ # If neither matched, then we have a broken ls. This can happen
+ # if, for instance, CONFIG_SHELL is bash and it inherits a
+ # broken ls alias from the environment. This has actually
+ # happened. Such a system could not be considered "sane".
+ as_fn_error $? "ls -t appears to fail. Make sure there is not a broken
+ alias in your environment" "$LINENO" 5
+ fi
+ if test "$2" = conftest.file || test $am_try -eq 2; then
+ break
+ fi
+ # Just in case.
+ sleep 1
+ am_has_slept=yes
+ done
+ test "$2" = conftest.file
+ )
+then
+ # Ok.
+ :
+else
+ as_fn_error $? "newly created file is older than distributed files!
+Check your system clock" "$LINENO" 5
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+# If we didn't sleep, we still need to ensure time stamps of config.status and
+# generated files are strictly newer.
+am_sleep_pid=
+if grep 'slept: no' conftest.file >/dev/null 2>&1; then
+ ( sleep 1 ) &
+ am_sleep_pid=$!
+fi
+
+rm -f conftest.file
+
+test "$program_prefix" != NONE &&
+ program_transform_name="s&^&$program_prefix&;$program_transform_name"
+# Use a double $ so make ignores it.
+test "$program_suffix" != NONE &&
+ program_transform_name="s&\$&$program_suffix&;$program_transform_name"
+# Double any \ or $.
+# By default was `s,x,x', remove it if useless.
+ac_script='s/[\\$]/&&/g;s/;s,x,x,$//'
+program_transform_name=`$as_echo "$program_transform_name" | sed "$ac_script"`
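+
+# [Editor's sketch, not autoconf output] program_transform_name is a sed
+# program applied to every installed program name, so configuring with
+#   ./configure --program-prefix=netdata-
+# yields the script 's&^&netdata-&' and "make install" renames each
+# binary accordingly (foo -> netdata-foo).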
+
+# Expand $ac_aux_dir to an absolute path.
+am_aux_dir=`cd "$ac_aux_dir" && pwd`
+
+if test x"${MISSING+set}" != xset; then
+ case $am_aux_dir in
+  *\ * | *\	*)
+ MISSING="\${SHELL} \"$am_aux_dir/missing\"" ;;
+ *)
+ MISSING="\${SHELL} $am_aux_dir/missing" ;;
+ esac
+fi
+# Use eval to expand $SHELL
+if eval "$MISSING --is-lightweight"; then
+ am_missing_run="$MISSING "
+else
+ am_missing_run=
+ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: 'missing' script is too old or missing" >&5
+$as_echo "$as_me: WARNING: 'missing' script is too old or missing" >&2;}
+fi
+
+if test x"${install_sh+set}" != xset; then
+ case $am_aux_dir in
+  *\ * | *\	*)
+ install_sh="\${SHELL} '$am_aux_dir/install-sh'" ;;
+ *)
+ install_sh="\${SHELL} $am_aux_dir/install-sh"
+ esac
+fi
+
+# Installed binaries are usually stripped using 'strip' when the user
+# runs "make install-strip". However, 'strip' might not be the right
+# tool to use in cross-compilation environments, so Automake
+# will honor the 'STRIP' environment variable to override it.
+if test "$cross_compiling" != no; then
+ if test -n "$ac_tool_prefix"; then
+ # Extract the first word of "${ac_tool_prefix}strip", so it can be a program name with args.
+set dummy ${ac_tool_prefix}strip; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_STRIP+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$STRIP"; then
+ ac_cv_prog_STRIP="$STRIP" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_STRIP="${ac_tool_prefix}strip"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+fi
+fi
+STRIP=$ac_cv_prog_STRIP
+if test -n "$STRIP"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $STRIP" >&5
+$as_echo "$STRIP" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+fi
+if test -z "$ac_cv_prog_STRIP"; then
+ ac_ct_STRIP=$STRIP
+ # Extract the first word of "strip", so it can be a program name with args.
+set dummy strip; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_ac_ct_STRIP+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$ac_ct_STRIP"; then
+ ac_cv_prog_ac_ct_STRIP="$ac_ct_STRIP" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_ac_ct_STRIP="strip"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_STRIP=$ac_cv_prog_ac_ct_STRIP
+if test -n "$ac_ct_STRIP"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_STRIP" >&5
+$as_echo "$ac_ct_STRIP" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+ if test "x$ac_ct_STRIP" = x; then
+ STRIP=":"
+ else
+ case $cross_compiling:$ac_tool_warned in
+yes:)
+{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+ STRIP=$ac_ct_STRIP
+ fi
+else
+ STRIP="$ac_cv_prog_STRIP"
+fi
+
+fi
+INSTALL_STRIP_PROGRAM="\$(install_sh) -c -s"
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for a thread-safe mkdir -p" >&5
+$as_echo_n "checking for a thread-safe mkdir -p... " >&6; }
+if test -z "$MKDIR_P"; then
+ if ${ac_cv_path_mkdir+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH$PATH_SEPARATOR/opt/sfw/bin
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_prog in mkdir gmkdir; do
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ as_fn_executable_p "$as_dir/$ac_prog$ac_exec_ext" || continue
+ case `"$as_dir/$ac_prog$ac_exec_ext" --version 2>&1` in #(
+ 'mkdir (GNU coreutils) '* | \
+ 'mkdir (coreutils) '* | \
+ 'mkdir (fileutils) '4.1*)
+ ac_cv_path_mkdir=$as_dir/$ac_prog$ac_exec_ext
+ break 3;;
+ esac
+ done
+ done
+ done
+IFS=$as_save_IFS
+
+fi
+
+ test -d ./--version && rmdir ./--version
+ if test "${ac_cv_path_mkdir+set}" = set; then
+ MKDIR_P="$ac_cv_path_mkdir -p"
+ else
+ # As a last resort, use the slow shell script. Don't cache a
+ # value for MKDIR_P within a source directory, because that will
+ # break other packages using the cache if that directory is
+ # removed, or if the value is a relative name.
+ MKDIR_P="$ac_install_sh -d"
+ fi
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $MKDIR_P" >&5
+$as_echo "$MKDIR_P" >&6; }
+
+for ac_prog in gawk mawk nawk awk
+do
+ # Extract the first word of "$ac_prog", so it can be a program name with args.
+set dummy $ac_prog; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_AWK+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$AWK"; then
+ ac_cv_prog_AWK="$AWK" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_AWK="$ac_prog"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+fi
+fi
+AWK=$ac_cv_prog_AWK
+if test -n "$AWK"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $AWK" >&5
+$as_echo "$AWK" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+ test -n "$AWK" && break
+done
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ${MAKE-make} sets \$(MAKE)" >&5
+$as_echo_n "checking whether ${MAKE-make} sets \$(MAKE)... " >&6; }
+set x ${MAKE-make}
+ac_make=`$as_echo "$2" | sed 's/+/p/g; s/[^a-zA-Z0-9_]/_/g'`
+if eval \${ac_cv_prog_make_${ac_make}_set+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat >conftest.make <<\_ACEOF
+SHELL = /bin/sh
+all:
+	@echo '@@@%%%=$(MAKE)=@@@%%%'
+_ACEOF
+# GNU make sometimes prints "make[1]: Entering ...", which would confuse us.
+case `${MAKE-make} -f conftest.make 2>/dev/null` in
+ *@@@%%%=?*=@@@%%%*)
+ eval ac_cv_prog_make_${ac_make}_set=yes;;
+ *)
+ eval ac_cv_prog_make_${ac_make}_set=no;;
+esac
+rm -f conftest.make
+fi
+if eval test \$ac_cv_prog_make_${ac_make}_set = yes; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+ SET_MAKE=
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+ SET_MAKE="MAKE=${MAKE-make}"
+fi
+
+rm -rf .tst 2>/dev/null
+mkdir .tst 2>/dev/null
+if test -d .tst; then
+ am__leading_dot=.
+else
+ am__leading_dot=_
+fi
+rmdir .tst 2>/dev/null
+
+# Check whether --enable-silent-rules was given.
+if test "${enable_silent_rules+set}" = set; then :
+ enableval=$enable_silent_rules;
+fi
+
+case $enable_silent_rules in # (((
+ yes) AM_DEFAULT_VERBOSITY=0;;
+ no) AM_DEFAULT_VERBOSITY=1;;
+ *) AM_DEFAULT_VERBOSITY=1;;
+esac
+am_make=${MAKE-make}
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $am_make supports nested variables" >&5
+$as_echo_n "checking whether $am_make supports nested variables... " >&6; }
+if ${am_cv_make_support_nested_variables+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if $as_echo 'TRUE=$(BAR$(V))
+BAR0=false
+BAR1=true
+V=1
+am__doit:
+	@$(TRUE)
+.PHONY: am__doit' | $am_make -f - >/dev/null 2>&1; then
+ am_cv_make_support_nested_variables=yes
+else
+ am_cv_make_support_nested_variables=no
+fi
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_make_support_nested_variables" >&5
+$as_echo "$am_cv_make_support_nested_variables" >&6; }
+if test $am_cv_make_support_nested_variables = yes; then
+ AM_V='$(V)'
+ AM_DEFAULT_V='$(AM_DEFAULT_VERBOSITY)'
+else
+ AM_V=$AM_DEFAULT_VERBOSITY
+ AM_DEFAULT_V=$AM_DEFAULT_VERBOSITY
+fi
+AM_BACKSLASH='\'
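+
+# [Editor's sketch, not autoconf output] With nested-variable support,
+# AM_V routes through AM_DEFAULT_VERBOSITY, letting users pick the
+# verbosity per invocation:
+#   make          # terse "  CC       foo.o" style rule echoing
+#   make V=1      # print the full command lines instead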
+
+if test "`cd $srcdir && pwd`" != "`pwd`"; then
+ # Use -I$(srcdir) only when $(srcdir) != ., so that make's output
+ # is not polluted with repeated "-I."
+ am__isrc=' -I$(srcdir)'
+ # test to see if srcdir already configured
+ if test -f $srcdir/config.status; then
+ as_fn_error $? "source directory already configured; run \"make distclean\" there first" "$LINENO" 5
+ fi
+fi
+
+# test whether we have cygpath
+if test -z "$CYGPATH_W"; then
+ if (cygpath --version) >/dev/null 2>/dev/null; then
+ CYGPATH_W='cygpath -w'
+ else
+ CYGPATH_W=echo
+ fi
+fi
+
+
+# Define the identity of the package.
+ PACKAGE='netdata'
+ VERSION='v1.17.0'
+
+
+cat >>confdefs.h <<_ACEOF
+#define PACKAGE "$PACKAGE"
+_ACEOF
+
+
+cat >>confdefs.h <<_ACEOF
+#define VERSION "$VERSION"
+_ACEOF
+
+# Some tools Automake needs.
+
+ACLOCAL=${ACLOCAL-"${am_missing_run}aclocal-${am__api_version}"}
+
+
+AUTOCONF=${AUTOCONF-"${am_missing_run}autoconf"}
+
+
+AUTOMAKE=${AUTOMAKE-"${am_missing_run}automake-${am__api_version}"}
+
+
+AUTOHEADER=${AUTOHEADER-"${am_missing_run}autoheader"}
+
+
+MAKEINFO=${MAKEINFO-"${am_missing_run}makeinfo"}
+
+# For better backward compatibility. To be removed once Automake 1.9.x
+# dies out for good. For more background, see:
+# <http://lists.gnu.org/archive/html/automake/2012-07/msg00001.html>
+# <http://lists.gnu.org/archive/html/automake/2012-07/msg00014.html>
+mkdir_p='$(MKDIR_P)'
+
+# We need awk for the "check" target (and possibly the TAP driver). The
+# system "awk" is bad on some platforms.
+# Always define AMTAR for backward compatibility. Yes, it's still used
+# in the wild :-( We should find a proper way to deprecate it ...
+AMTAR='$${TAR-tar}'
+
+
+# We'll loop over all known methods to create a tar archive until one works.
+_am_tools='gnutar pax cpio none'
+
+
+
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to create a pax tar archive" >&5
+$as_echo_n "checking how to create a pax tar archive... " >&6; }
+
+ # Go ahead even if we have the value already cached. We do so because we
+ # need to set the values for the 'am__tar' and 'am__untar' variables.
+ _am_tools=${am_cv_prog_tar_pax-$_am_tools}
+
+ for _am_tool in $_am_tools; do
+ case $_am_tool in
+ gnutar)
+ for _am_tar in tar gnutar gtar; do
+ { echo "$as_me:$LINENO: $_am_tar --version" >&5
+ ($_am_tar --version) >&5 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && break
+ done
+ am__tar="$_am_tar --format=posix -chf - "'"$$tardir"'
+ am__tar_="$_am_tar --format=posix -chf - "'"$tardir"'
+ am__untar="$_am_tar -xf -"
+ ;;
+ plaintar)
+      # Must skip GNU tar: if it does not support --format= it doesn't create
+      # a ustar tarball either.
+ (tar --version) >/dev/null 2>&1 && continue
+ am__tar='tar chf - "$$tardir"'
+ am__tar_='tar chf - "$tardir"'
+ am__untar='tar xf -'
+ ;;
+ pax)
+ am__tar='pax -L -x pax -w "$$tardir"'
+ am__tar_='pax -L -x pax -w "$tardir"'
+ am__untar='pax -r'
+ ;;
+ cpio)
+ am__tar='find "$$tardir" -print | cpio -o -H pax -L'
+ am__tar_='find "$tardir" -print | cpio -o -H pax -L'
+ am__untar='cpio -i -H pax -d'
+ ;;
+ none)
+ am__tar=false
+ am__tar_=false
+ am__untar=false
+ ;;
+ esac
+
+ # If the value was cached, stop now. We just wanted to have am__tar
+ # and am__untar set.
+ test -n "${am_cv_prog_tar_pax}" && break
+
+ # tar/untar a dummy directory, and stop if the command works.
+ rm -rf conftest.dir
+ mkdir conftest.dir
+ echo GrepMe > conftest.dir/file
+ { echo "$as_me:$LINENO: tardir=conftest.dir && eval $am__tar_ >conftest.tar" >&5
+ (tardir=conftest.dir && eval $am__tar_ >conftest.tar) >&5 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }
+ rm -rf conftest.dir
+ if test -s conftest.tar; then
+ { echo "$as_me:$LINENO: $am__untar <conftest.tar" >&5
+ ($am__untar <conftest.tar) >&5 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }
+ { echo "$as_me:$LINENO: cat conftest.dir/file" >&5
+ (cat conftest.dir/file) >&5 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }
+ grep GrepMe conftest.dir/file >/dev/null 2>&1 && break
+ fi
+ done
+ rm -rf conftest.dir
+
+ if ${am_cv_prog_tar_pax+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ am_cv_prog_tar_pax=$_am_tool
+fi
+
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_prog_tar_pax" >&5
+$as_echo "$am_cv_prog_tar_pax" >&6; }
+
+
+
+
+
+
+# POSIX will say in a future version that running "rm -f" with no argument
+# is OK; and we want to be able to make that assumption in our Makefile
+# recipes. So use an aggressive probe to check that the usage we want is
+# actually supported "in the wild" to an acceptable degree.
+# See automake bug#10828.
+# To make any issue more visible, cause the running configure to be aborted
+# by default if the 'rm' program in use doesn't match our expectations; the
+# user can still override this though.
+if rm -f && rm -fr && rm -rf; then : OK; else
+ cat >&2 <<'END'
+Oops!
+
+Your 'rm' program seems unable to run without file operands specified
+on the command line, even when the '-f' option is present. This is contrary
+to the behaviour of most rm programs out there, and does not conform to
+the upcoming POSIX standard: <http://austingroupbugs.net/view.php?id=542>
+
+Please tell bug-automake@gnu.org about your system, including the value
+of your $PATH and any error that may have been output before this message.
+This can help us improve future automake versions.
+
+END
+ if test x"$ACCEPT_INFERIOR_RM_PROGRAM" = x"yes"; then
+ echo 'Configuration will proceed anyway, since you have set the' >&2
+ echo 'ACCEPT_INFERIOR_RM_PROGRAM variable to "yes"' >&2
+ echo >&2
+ else
+ cat >&2 <<'END'
+Aborting the configuration process, to ensure you take notice of the issue.
+
+You can download and install GNU coreutils to get an 'rm' implementation
+that behaves properly: <http://www.gnu.org/software/coreutils/>.
+
+If you want to complete the configuration process using your problematic
+'rm' anyway, export the environment variable ACCEPT_INFERIOR_RM_PROGRAM
+to "yes", and re-run configure.
+
+END
+ as_fn_error $? "Your 'rm' program is bad, sorry." "$LINENO" 5
+ fi
+fi
+
+
+ # Check whether --enable-silent-rules was given.
+if test "${enable_silent_rules+set}" = set; then :
+ enableval=$enable_silent_rules;
+fi
+
+case $enable_silent_rules in # (((
+ yes) AM_DEFAULT_VERBOSITY=0;;
+ no) AM_DEFAULT_VERBOSITY=1;;
+ *) AM_DEFAULT_VERBOSITY=0;;
+esac
+am_make=${MAKE-make}
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $am_make supports nested variables" >&5
+$as_echo_n "checking whether $am_make supports nested variables... " >&6; }
+if ${am_cv_make_support_nested_variables+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if $as_echo 'TRUE=$(BAR$(V))
+BAR0=false
+BAR1=true
+V=1
+am__doit:
+	@$(TRUE)
+.PHONY: am__doit' | $am_make -f - >/dev/null 2>&1; then
+ am_cv_make_support_nested_variables=yes
+else
+ am_cv_make_support_nested_variables=no
+fi
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_make_support_nested_variables" >&5
+$as_echo "$am_cv_make_support_nested_variables" >&6; }
+if test $am_cv_make_support_nested_variables = yes; then
+ AM_V='$(V)'
+ AM_DEFAULT_V='$(AM_DEFAULT_VERBOSITY)'
+else
+ AM_V=$AM_DEFAULT_VERBOSITY
+ AM_DEFAULT_V=$AM_DEFAULT_VERBOSITY
+fi
+AM_BACKSLASH='\'
+
+
+# Make sure we can run config.sub.
+$SHELL "$ac_aux_dir/config.sub" sun4 >/dev/null 2>&1 ||
+ as_fn_error $? "cannot run $SHELL $ac_aux_dir/config.sub" "$LINENO" 5
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking build system type" >&5
+$as_echo_n "checking build system type... " >&6; }
+if ${ac_cv_build+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ ac_build_alias=$build_alias
+test "x$ac_build_alias" = x &&
+ ac_build_alias=`$SHELL "$ac_aux_dir/config.guess"`
+test "x$ac_build_alias" = x &&
+ as_fn_error $? "cannot guess build type; you must specify one" "$LINENO" 5
+ac_cv_build=`$SHELL "$ac_aux_dir/config.sub" $ac_build_alias` ||
+ as_fn_error $? "$SHELL $ac_aux_dir/config.sub $ac_build_alias failed" "$LINENO" 5
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_build" >&5
+$as_echo "$ac_cv_build" >&6; }
+case $ac_cv_build in
+*-*-*) ;;
+*) as_fn_error $? "invalid value of canonical build" "$LINENO" 5;;
+esac
+build=$ac_cv_build
+ac_save_IFS=$IFS; IFS='-'
+set x $ac_cv_build
+shift
+build_cpu=$1
+build_vendor=$2
+shift; shift
+# Remember, the first character of IFS is used to create $*,
+# except with old shells:
+build_os=$*
+IFS=$ac_save_IFS
+case $build_os in *\ *) build_os=`echo "$build_os" | sed 's/ /-/g'`;; esac
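+
+# [Editor's sketch, not autoconf output] The IFS='-' split above slices a
+# canonical triple into cpu/vendor/os while keeping dashes inside the OS
+# field; e.g. for x86_64-pc-linux-gnu it yields
+#   build_cpu=x86_64  build_vendor=pc  build_os=linux-gnu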
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking host system type" >&5
+$as_echo_n "checking host system type... " >&6; }
+if ${ac_cv_host+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test "x$host_alias" = x; then
+ ac_cv_host=$ac_cv_build
+else
+ ac_cv_host=`$SHELL "$ac_aux_dir/config.sub" $host_alias` ||
+ as_fn_error $? "$SHELL $ac_aux_dir/config.sub $host_alias failed" "$LINENO" 5
+fi
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_host" >&5
+$as_echo "$ac_cv_host" >&6; }
+case $ac_cv_host in
+*-*-*) ;;
+*) as_fn_error $? "invalid value of canonical host" "$LINENO" 5;;
+esac
+host=$ac_cv_host
+ac_save_IFS=$IFS; IFS='-'
+set x $ac_cv_host
+shift
+host_cpu=$1
+host_vendor=$2
+shift; shift
+# Remember, the first character of IFS is used to create $*,
+# except with old shells:
+host_os=$*
+IFS=$ac_save_IFS
+case $host_os in *\ *) host_os=`echo "$host_os" | sed 's/ /-/g'`;; esac
+
+
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+if test -n "$ac_tool_prefix"; then
+ # Extract the first word of "${ac_tool_prefix}gcc", so it can be a program name with args.
+set dummy ${ac_tool_prefix}gcc; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_CC+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$CC"; then
+ ac_cv_prog_CC="$CC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_CC="${ac_tool_prefix}gcc"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+fi
+fi
+CC=$ac_cv_prog_CC
+if test -n "$CC"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5
+$as_echo "$CC" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+fi
+if test -z "$ac_cv_prog_CC"; then
+ ac_ct_CC=$CC
+ # Extract the first word of "gcc", so it can be a program name with args.
+set dummy gcc; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_ac_ct_CC+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$ac_ct_CC"; then
+ ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_ac_ct_CC="gcc"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_CC=$ac_cv_prog_ac_ct_CC
+if test -n "$ac_ct_CC"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5
+$as_echo "$ac_ct_CC" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+ if test "x$ac_ct_CC" = x; then
+ CC=""
+ else
+ case $cross_compiling:$ac_tool_warned in
+yes:)
+{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+ CC=$ac_ct_CC
+ fi
+else
+ CC="$ac_cv_prog_CC"
+fi
+
+if test -z "$CC"; then
+ if test -n "$ac_tool_prefix"; then
+ # Extract the first word of "${ac_tool_prefix}cc", so it can be a program name with args.
+set dummy ${ac_tool_prefix}cc; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_CC+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$CC"; then
+ ac_cv_prog_CC="$CC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_CC="${ac_tool_prefix}cc"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+fi
+fi
+CC=$ac_cv_prog_CC
+if test -n "$CC"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5
+$as_echo "$CC" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+ fi
+fi
+if test -z "$CC"; then
+ # Extract the first word of "cc", so it can be a program name with args.
+set dummy cc; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_CC+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$CC"; then
+ ac_cv_prog_CC="$CC" # Let the user override the test.
+else
+ ac_prog_rejected=no
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ if test "$as_dir/$ac_word$ac_exec_ext" = "/usr/ucb/cc"; then
+ ac_prog_rejected=yes
+ continue
+ fi
+ ac_cv_prog_CC="cc"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+if test $ac_prog_rejected = yes; then
+ # We found a bogon in the path, so make sure we never use it.
+ set dummy $ac_cv_prog_CC
+ shift
+ if test $# != 0; then
+ # We chose a different compiler from the bogus one.
+ # However, it has the same basename, so the bogon will be chosen
+ # first if we set CC to just the basename; use the full file name.
+ shift
+ ac_cv_prog_CC="$as_dir/$ac_word${1+' '}$@"
+ fi
+fi
+fi
+fi
+CC=$ac_cv_prog_CC
+if test -n "$CC"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5
+$as_echo "$CC" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+fi
+if test -z "$CC"; then
+ if test -n "$ac_tool_prefix"; then
+ for ac_prog in cl.exe
+ do
+ # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args.
+set dummy $ac_tool_prefix$ac_prog; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_CC+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$CC"; then
+ ac_cv_prog_CC="$CC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_CC="$ac_tool_prefix$ac_prog"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+fi
+fi
+CC=$ac_cv_prog_CC
+if test -n "$CC"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5
+$as_echo "$CC" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+ test -n "$CC" && break
+ done
+fi
+if test -z "$CC"; then
+ ac_ct_CC=$CC
+ for ac_prog in cl.exe
+do
+ # Extract the first word of "$ac_prog", so it can be a program name with args.
+set dummy $ac_prog; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_ac_ct_CC+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$ac_ct_CC"; then
+ ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_ac_ct_CC="$ac_prog"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_CC=$ac_cv_prog_ac_ct_CC
+if test -n "$ac_ct_CC"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5
+$as_echo "$ac_ct_CC" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+ test -n "$ac_ct_CC" && break
+done
+
+ if test "x$ac_ct_CC" = x; then
+ CC=""
+ else
+ case $cross_compiling:$ac_tool_warned in
+yes:)
+{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+ CC=$ac_ct_CC
+ fi
+fi
+
+fi
+
+
+test -z "$CC" && { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error $? "no acceptable C compiler found in \$PATH
+See \`config.log' for more details" "$LINENO" 5; }
+
+# Provide some information about the compiler.
+$as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler version" >&5
+set X $ac_compile
+ac_compiler=$2
+for ac_option in --version -v -V -qversion; do
+ { { ac_try="$ac_compiler $ac_option >&5"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+ (eval "$ac_compiler $ac_option >&5") 2>conftest.err
+ ac_status=$?
+ if test -s conftest.err; then
+ sed '10a\
+... rest of stderr output deleted ...
+ 10q' conftest.err >conftest.er1
+ cat conftest.er1 >&5
+ fi
+ rm -f conftest.er1 conftest.err
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }
+done
+
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+ac_clean_files_save=$ac_clean_files
+ac_clean_files="$ac_clean_files a.out a.out.dSYM a.exe b.out"
+# Try to create an executable without -o first, disregarding a.out.
+# This helps us diagnose broken compilers, and gives a first guess at
+# the executable extension (exeext).
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the C compiler works" >&5
+$as_echo_n "checking whether the C compiler works... " >&6; }
+ac_link_default=`$as_echo "$ac_link" | sed 's/ -o *conftest[^ ]*//'`
+
+# The possible output files:
+ac_files="a.out conftest.exe conftest a.exe a_out.exe b.out conftest.*"
+
+ac_rmfiles=
+for ac_file in $ac_files
+do
+ case $ac_file in
+ *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;;
+ * ) ac_rmfiles="$ac_rmfiles $ac_file";;
+ esac
+done
+rm -f $ac_rmfiles
+
+if { { ac_try="$ac_link_default"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+ (eval "$ac_link_default") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }; then :
+ # Autoconf-2.13 could set the ac_cv_exeext variable to `no'.
+# So ignore a value of `no', otherwise this would lead to `EXEEXT = no'
+# in a Makefile. We should not override ac_cv_exeext if it was cached,
+# so that the user can short-circuit this test for compilers unknown to
+# Autoconf.
+for ac_file in $ac_files ''
+do
+ test -f "$ac_file" || continue
+ case $ac_file in
+ *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj )
+ ;;
+ [ab].out )
+ # We found the default executable, but exeext='' is most
+ # certainly right.
+ break;;
+ *.* )
+ if test "${ac_cv_exeext+set}" = set && test "$ac_cv_exeext" != no;
+ then :; else
+ ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'`
+ fi
+ # We set ac_cv_exeext here because the later test for it is not
+ # safe: cross compilers may not add the suffix if given an `-o'
+ # argument, so we may need to know it at that point already.
+ # Even if this section looks crufty: it has the advantage of
+ # actually working.
+ break;;
+ * )
+ break;;
+ esac
+done
+test "$ac_cv_exeext" = no && ac_cv_exeext=
+
+else
+ ac_file=''
+fi
+if test -z "$ac_file"; then :
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+$as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error 77 "C compiler cannot create executables
+See \`config.log' for more details" "$LINENO" 5; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler default output file name" >&5
+$as_echo_n "checking for C compiler default output file name... " >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_file" >&5
+$as_echo "$ac_file" >&6; }
+ac_exeext=$ac_cv_exeext
+
+rm -f -r a.out a.out.dSYM a.exe conftest$ac_cv_exeext b.out
+ac_clean_files=$ac_clean_files_save
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of executables" >&5
+$as_echo_n "checking for suffix of executables... " >&6; }
+if { { ac_try="$ac_link"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+ (eval "$ac_link") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }; then :
+ # If both `conftest.exe' and `conftest' are `present' (well, observable)
+# catch `conftest.exe'. For instance with Cygwin, `ls conftest' will
+# work properly (i.e., refer to `conftest.exe'), while it won't with
+# `rm'.
+for ac_file in conftest.exe conftest conftest.*; do
+ test -f "$ac_file" || continue
+ case $ac_file in
+ *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;;
+ *.* ) ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'`
+ break;;
+ * ) break;;
+ esac
+done
+else
+ { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error $? "cannot compute suffix of executables: cannot compile and link
+See \`config.log' for more details" "$LINENO" 5; }
+fi
+rm -f conftest conftest$ac_cv_exeext
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_exeext" >&5
+$as_echo "$ac_cv_exeext" >&6; }
+
+rm -f conftest.$ac_ext
+EXEEXT=$ac_cv_exeext
+ac_exeext=$EXEEXT
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <stdio.h>
+int
+main ()
+{
+FILE *f = fopen ("conftest.out", "w");
+ return ferror (f) || fclose (f) != 0;
+
+ ;
+ return 0;
+}
+_ACEOF
+ac_clean_files="$ac_clean_files conftest.out"
+# Check that the compiler produces executables we can run. If not, either
+# the compiler is broken, or we are cross-compiling.
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are cross compiling" >&5
+$as_echo_n "checking whether we are cross compiling... " >&6; }
+if test "$cross_compiling" != yes; then
+ { { ac_try="$ac_link"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+ (eval "$ac_link") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }
+ if { ac_try='./conftest$ac_cv_exeext'
+ { { case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }; }; then
+ cross_compiling=no
+ else
+ if test "$cross_compiling" = maybe; then
+ cross_compiling=yes
+ else
+ { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error $? "cannot run C compiled programs.
+If you meant to cross compile, use \`--host'.
+See \`config.log' for more details" "$LINENO" 5; }
+ fi
+ fi
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $cross_compiling" >&5
+$as_echo "$cross_compiling" >&6; }
+
+rm -f conftest.$ac_ext conftest$ac_cv_exeext conftest.out
+ac_clean_files=$ac_clean_files_save
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of object files" >&5
+$as_echo_n "checking for suffix of object files... " >&6; }
+if ${ac_cv_objext+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.o conftest.obj
+if { { ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+ (eval "$ac_compile") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }; then :
+ for ac_file in conftest.o conftest.obj conftest.*; do
+ test -f "$ac_file" || continue;
+ case $ac_file in
+ *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM ) ;;
+ *) ac_cv_objext=`expr "$ac_file" : '.*\.\(.*\)'`
+ break;;
+ esac
+done
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error $? "cannot compute suffix of object files: cannot compile
+See \`config.log' for more details" "$LINENO" 5; }
+fi
+rm -f conftest.$ac_cv_objext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_objext" >&5
+$as_echo "$ac_cv_objext" >&6; }
+OBJEXT=$ac_cv_objext
+ac_objext=$OBJEXT
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C compiler" >&5
+$as_echo_n "checking whether we are using the GNU C compiler... " >&6; }
+if ${ac_cv_c_compiler_gnu+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+#ifndef __GNUC__
+ choke me
+#endif
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ ac_compiler_gnu=yes
+else
+ ac_compiler_gnu=no
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ac_cv_c_compiler_gnu=$ac_compiler_gnu
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_compiler_gnu" >&5
+$as_echo "$ac_cv_c_compiler_gnu" >&6; }
+if test $ac_compiler_gnu = yes; then
+ GCC=yes
+else
+ GCC=
+fi
+ac_test_CFLAGS=${CFLAGS+set}
+ac_save_CFLAGS=$CFLAGS
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC accepts -g" >&5
+$as_echo_n "checking whether $CC accepts -g... " >&6; }
+if ${ac_cv_prog_cc_g+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ ac_save_c_werror_flag=$ac_c_werror_flag
+ ac_c_werror_flag=yes
+ ac_cv_prog_cc_g=no
+ CFLAGS="-g"
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ ac_cv_prog_cc_g=yes
+else
+ CFLAGS=""
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+
+else
+ ac_c_werror_flag=$ac_save_c_werror_flag
+ CFLAGS="-g"
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ ac_cv_prog_cc_g=yes
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ ac_c_werror_flag=$ac_save_c_werror_flag
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_g" >&5
+$as_echo "$ac_cv_prog_cc_g" >&6; }
+if test "$ac_test_CFLAGS" = set; then
+ CFLAGS=$ac_save_CFLAGS
+elif test $ac_cv_prog_cc_g = yes; then
+ if test "$GCC" = yes; then
+ CFLAGS="-g -O2"
+ else
+ CFLAGS="-g"
+ fi
+else
+ if test "$GCC" = yes; then
+ CFLAGS="-O2"
+ else
+ CFLAGS=
+ fi
+fi
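+
+# [Editor's sketch, not autoconf output] Net effect of the -g probe when
+# the user exported no CFLAGS of their own:
+#   GCC=yes and -g accepted  ->  CFLAGS="-g -O2"
+#   GCC=""  and -g accepted  ->  CFLAGS="-g"
+#   GCC=yes, -g rejected     ->  CFLAGS="-O2"
+# A user-supplied CFLAGS is always restored untouched.
+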
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $CC option to accept ISO C89" >&5
+$as_echo_n "checking for $CC option to accept ISO C89... " >&6; }
+if ${ac_cv_prog_cc_c89+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ ac_cv_prog_cc_c89=no
+ac_save_CC=$CC
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <stdarg.h>
+#include <stdio.h>
+struct stat;
+/* Most of the following tests are stolen from RCS 5.7's src/conf.sh. */
+struct buf { int x; };
+FILE * (*rcsopen) (struct buf *, struct stat *, int);
+static char *e (p, i)
+ char **p;
+ int i;
+{
+ return p[i];
+}
+static char *f (char * (*g) (char **, int), char **p, ...)
+{
+ char *s;
+ va_list v;
+ va_start (v,p);
+ s = g (p, va_arg (v,int));
+ va_end (v);
+ return s;
+}
+
+/* OSF 4.0 Compaq cc is some sort of almost-ANSI by default. It has
+ function prototypes and stuff, but not '\xHH' hex character constants.
+   Unfortunately these don't provoke an error; instead they are silently
+   treated as 'x'.  The following induces an error, until -std is added to get
+ proper ANSI mode. Curiously '\x00'!='x' always comes out true, for an
+ array size at least. It's necessary to write '\x00'==0 to get something
+ that's true only with -std. */
+int osf4_cc_array ['\x00' == 0 ? 1 : -1];
+
+/* IBM C 6 for AIX is almost-ANSI by default, but it replaces macro parameters
+ inside strings and character constants. */
+#define FOO(x) 'x'
+int xlc6_cc_array[FOO(a) == 'x' ? 1 : -1];
+
+int test (int i, double x);
+struct s1 {int (*f) (int a);};
+struct s2 {int (*f) (double a);};
+int pairnames (int, char **, FILE *(*)(struct buf *, struct stat *, int), int, int);
+int argc;
+char **argv;
+int
+main ()
+{
+return f (e, argv, 0) != argv[0] || f (e, argv, 1) != argv[1];
+ ;
+ return 0;
+}
+_ACEOF
+for ac_arg in '' -qlanglvl=extc89 -qlanglvl=ansi -std \
+ -Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__"
+do
+ CC="$ac_save_CC $ac_arg"
+ if ac_fn_c_try_compile "$LINENO"; then :
+ ac_cv_prog_cc_c89=$ac_arg
+fi
+rm -f core conftest.err conftest.$ac_objext
+ test "x$ac_cv_prog_cc_c89" != "xno" && break
+done
+rm -f conftest.$ac_ext
+CC=$ac_save_CC
+
+fi
+# AC_CACHE_VAL
+case "x$ac_cv_prog_cc_c89" in
+ x)
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: none needed" >&5
+$as_echo "none needed" >&6; } ;;
+ xno)
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5
+$as_echo "unsupported" >&6; } ;;
+ *)
+ CC="$CC $ac_cv_prog_cc_c89"
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c89" >&5
+$as_echo "$ac_cv_prog_cc_c89" >&6; } ;;
+esac
+if test "x$ac_cv_prog_cc_c89" != xno; then :
+
+fi
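+# The loop above tries each candidate flag until the ANSI C89 probe compiles;
+# modern compilers need none, while some legacy ones (e.g. HP-UX cc) get an
+# option such as "-Aa -D_HPUX_SOURCE" appended to $CC. The cached answer is
+# recorded in config.log as ac_cv_prog_cc_c89.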
+
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC understands -c and -o together" >&5
+$as_echo_n "checking whether $CC understands -c and -o together... " >&6; }
+if ${am_cv_prog_cc_c_o+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+ # Make sure it works both with $CC and with simple cc.
+ # Following AC_PROG_CC_C_O, we do the test twice because some
+ # compilers refuse to overwrite an existing .o file with -o,
+ # though they will create one.
+ am_cv_prog_cc_c_o=yes
+ for am_i in 1 2; do
+ if { echo "$as_me:$LINENO: $CC -c conftest.$ac_ext -o conftest2.$ac_objext" >&5
+ ($CC -c conftest.$ac_ext -o conftest2.$ac_objext) >&5 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } \
+ && test -f conftest2.$ac_objext; then
+ : OK
+ else
+ am_cv_prog_cc_c_o=no
+ break
+ fi
+ done
+ rm -f core conftest*
+ unset am_i
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_prog_cc_c_o" >&5
+$as_echo "$am_cv_prog_cc_c_o" >&6; }
+if test "$am_cv_prog_cc_c_o" != yes; then
+ # Losing compiler, so override with the script.
+ # FIXME: It is wrong to rewrite CC.
+ # But if we don't then we get into trouble of one sort or another.
+ # A longer-term fix would be to have automake use am__CC in this case,
+ # and then we could set am__CC="\$(top_srcdir)/compile \$(CC)"
+ CC="$am_aux_dir/compile $CC"
+fi
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+DEPDIR="${am__leading_dot}deps"
+
+ac_config_commands="$ac_config_commands depfiles"
+
+
+am_make=${MAKE-make}
+cat > confinc << 'END'
+am__doit:
+ @echo this is the am__doit target
+.PHONY: am__doit
+END
+# If we don't find an include directive, just comment out the code.
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for style of include used by $am_make" >&5
+$as_echo_n "checking for style of include used by $am_make... " >&6; }
+am__include="#"
+am__quote=
+_am_result=none
+# First try GNU make style include.
+echo "include confinc" > confmf
+# Ignore all kinds of additional output from 'make'.
+case `$am_make -s -f confmf 2> /dev/null` in #(
+*the\ am__doit\ target*)
+ am__include=include
+ am__quote=
+ _am_result=GNU
+ ;;
+esac
+# Now try BSD make style include.
+if test "$am__include" = "#"; then
+ echo '.include "confinc"' > confmf
+ case `$am_make -s -f confmf 2> /dev/null` in #(
+ *the\ am__doit\ target*)
+ am__include=.include
+ am__quote="\""
+ _am_result=BSD
+ ;;
+ esac
+fi
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $_am_result" >&5
+$as_echo "$_am_result" >&6; }
+rm -f confinc confmf
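+# For reference, the two include dialects probed above (a sketch of the
+# syntax only, not lines emitted into any Makefile here):
+#
+#   include confinc        # GNU make style
+#   .include "confinc"     # BSD make style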
+
+# Check whether --enable-dependency-tracking was given.
+if test "${enable_dependency_tracking+set}" = set; then :
+ enableval=$enable_dependency_tracking;
+fi
+
+if test "x$enable_dependency_tracking" != xno; then
+ am_depcomp="$ac_aux_dir/depcomp"
+ AMDEPBACKSLASH='\'
+ am__nodep='_no'
+fi
+ if test "x$enable_dependency_tracking" != xno; then
+ AMDEP_TRUE=
+ AMDEP_FALSE='#'
+else
+ AMDEP_TRUE='#'
+ AMDEP_FALSE=
+fi
+
+
+
+depcc="$CC" am_compiler_list=
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking dependency style of $depcc" >&5
+$as_echo_n "checking dependency style of $depcc... " >&6; }
+if ${am_cv_CC_dependencies_compiler_type+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then
+ # We make a subdir and do the tests there. Otherwise we can end up
+ # making bogus files that we don't know about and never remove. For
+ # instance it was reported that on HP-UX the gcc test will end up
+ # making a dummy file named 'D' -- because '-MD' means "put the output
+ # in D".
+ rm -rf conftest.dir
+ mkdir conftest.dir
+ # Copy depcomp to subdir because otherwise we won't find it if we're
+ # using a relative directory.
+ cp "$am_depcomp" conftest.dir
+ cd conftest.dir
+ # We will build objects and dependencies in a subdirectory because
+ # it helps to detect inapplicable dependency modes. For instance
+ # both Tru64's cc and ICC support -MD to output dependencies as a
+ # side effect of compilation, but ICC will put the dependencies in
+ # the current directory while Tru64 will put them in the object
+ # directory.
+ mkdir sub
+
+ am_cv_CC_dependencies_compiler_type=none
+ if test "$am_compiler_list" = ""; then
+ am_compiler_list=`sed -n 's/^#*\([a-zA-Z0-9]*\))$/\1/p' < ./depcomp`
+ fi
+ am__universal=false
+ case " $depcc " in #(
+ *\ -arch\ *\ -arch\ *) am__universal=true ;;
+ esac
+
+ for depmode in $am_compiler_list; do
+ # Setup a source with many dependencies, because some compilers
+ # like to wrap large dependency lists on column 80 (with \), and
+ # we should not choose a depcomp mode which is confused by this.
+ #
+ # We need to recreate these files for each test, as the compiler may
+ # overwrite some of them when testing with obscure command lines.
+ # This happens at least with the AIX C compiler.
+ : > sub/conftest.c
+ for i in 1 2 3 4 5 6; do
+ echo '#include "conftst'$i'.h"' >> sub/conftest.c
+ # Using ": > sub/conftst$i.h" creates only sub/conftst1.h with
+ # Solaris 10 /bin/sh.
+ echo '/* dummy */' > sub/conftst$i.h
+ done
+ echo "${am__include} ${am__quote}sub/conftest.Po${am__quote}" > confmf
+
+ # We check with '-c' and '-o' for the sake of the "dashmstdout"
+ # mode. It turns out that the SunPro C++ compiler does not properly
+ # handle '-M -o', and we need to detect this. Also, some Intel
+ # versions had trouble with output in subdirs.
+ am__obj=sub/conftest.${OBJEXT-o}
+ am__minus_obj="-o $am__obj"
+ case $depmode in
+ gcc)
+ # This depmode causes a compiler race in universal mode.
+ test "$am__universal" = false || continue
+ ;;
+ nosideeffect)
+ # After this tag, mechanisms are not by side-effect, so they'll
+ # only be used when explicitly requested.
+ if test "x$enable_dependency_tracking" = xyes; then
+ continue
+ else
+ break
+ fi
+ ;;
+ msvc7 | msvc7msys | msvisualcpp | msvcmsys)
+ # This compiler won't grok '-c -o', but also, the minuso test has
+ # not run yet. These depmodes are late enough in the game, and
+ # so weak that their functioning should not be impacted.
+ am__obj=conftest.${OBJEXT-o}
+ am__minus_obj=
+ ;;
+ none) break ;;
+ esac
+ if depmode=$depmode \
+ source=sub/conftest.c object=$am__obj \
+ depfile=sub/conftest.Po tmpdepfile=sub/conftest.TPo \
+ $SHELL ./depcomp $depcc -c $am__minus_obj sub/conftest.c \
+ >/dev/null 2>conftest.err &&
+ grep sub/conftst1.h sub/conftest.Po > /dev/null 2>&1 &&
+ grep sub/conftst6.h sub/conftest.Po > /dev/null 2>&1 &&
+ grep $am__obj sub/conftest.Po > /dev/null 2>&1 &&
+ ${MAKE-make} -s -f confmf > /dev/null 2>&1; then
+ # icc doesn't choke on unknown options, it will just issue warnings
+ # or remarks (even with -Werror). So we grep stderr for any message
+ # that says an option was ignored or not supported.
+ # When given -MP, icc 7.0 and 7.1 complain thusly:
+ # icc: Command line warning: ignoring option '-M'; no argument required
+ # The diagnosis changed in icc 8.0:
+ # icc: Command line remark: option '-MP' not supported
+ if (grep 'ignoring option' conftest.err ||
+ grep 'not supported' conftest.err) >/dev/null 2>&1; then :; else
+ am_cv_CC_dependencies_compiler_type=$depmode
+ break
+ fi
+ fi
+ done
+
+ cd ..
+ rm -rf conftest.dir
+else
+ am_cv_CC_dependencies_compiler_type=none
+fi
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_CC_dependencies_compiler_type" >&5
+$as_echo "$am_cv_CC_dependencies_compiler_type" >&6; }
+CCDEPMODE=depmode=$am_cv_CC_dependencies_compiler_type
+
+ if
+ test "x$enable_dependency_tracking" != xno \
+ && test "$am_cv_CC_dependencies_compiler_type" = gcc3; then
+ am__fastdepCC_TRUE=
+ am__fastdepCC_FALSE='#'
+else
+ am__fastdepCC_TRUE='#'
+ am__fastdepCC_FALSE=
+fi
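+# Should depmode detection misfire, autoconf cache variables can usually be
+# pre-seeded on the configure command line (a hedged sketch; "gcc3" is the
+# fast-dependency mode name understood by depcomp):
+#
+#   ./configure am_cv_CC_dependencies_compiler_type=gcc3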
+
+
+
+ac_ext=cpp
+ac_cpp='$CXXCPP $CPPFLAGS'
+ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_cxx_compiler_gnu
+if test -z "$CXX"; then
+ if test -n "$CCC"; then
+ CXX=$CCC
+ else
+ if test -n "$ac_tool_prefix"; then
+ for ac_prog in g++ c++ gpp aCC CC cxx cc++ cl.exe FCC KCC RCC xlC_r xlC
+ do
+ # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args.
+set dummy $ac_tool_prefix$ac_prog; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_CXX+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$CXX"; then
+ ac_cv_prog_CXX="$CXX" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_CXX="$ac_tool_prefix$ac_prog"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+fi
+fi
+CXX=$ac_cv_prog_CXX
+if test -n "$CXX"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CXX" >&5
+$as_echo "$CXX" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+ test -n "$CXX" && break
+ done
+fi
+if test -z "$CXX"; then
+ ac_ct_CXX=$CXX
+ for ac_prog in g++ c++ gpp aCC CC cxx cc++ cl.exe FCC KCC RCC xlC_r xlC
+do
+ # Extract the first word of "$ac_prog", so it can be a program name with args.
+set dummy $ac_prog; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_ac_ct_CXX+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$ac_ct_CXX"; then
+ ac_cv_prog_ac_ct_CXX="$ac_ct_CXX" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_ac_ct_CXX="$ac_prog"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_CXX=$ac_cv_prog_ac_ct_CXX
+if test -n "$ac_ct_CXX"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CXX" >&5
+$as_echo "$ac_ct_CXX" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+ test -n "$ac_ct_CXX" && break
+done
+
+ if test "x$ac_ct_CXX" = x; then
+ CXX="g++"
+ else
+ case $cross_compiling:$ac_tool_warned in
+yes:)
+{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+ CXX=$ac_ct_CXX
+ fi
+fi
+
+ fi
+fi
+# Provide some information about the compiler.
+$as_echo "$as_me:${as_lineno-$LINENO}: checking for C++ compiler version" >&5
+set X $ac_compile
+ac_compiler=$2
+for ac_option in --version -v -V -qversion; do
+ { { ac_try="$ac_compiler $ac_option >&5"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+ (eval "$ac_compiler $ac_option >&5") 2>conftest.err
+ ac_status=$?
+ if test -s conftest.err; then
+ sed '10a\
+... rest of stderr output deleted ...
+ 10q' conftest.err >conftest.er1
+ cat conftest.er1 >&5
+ fi
+ rm -f conftest.er1 conftest.err
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }
+done
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C++ compiler" >&5
+$as_echo_n "checking whether we are using the GNU C++ compiler... " >&6; }
+if ${ac_cv_cxx_compiler_gnu+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+#ifndef __GNUC__
+ choke me
+#endif
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_cxx_try_compile "$LINENO"; then :
+ ac_compiler_gnu=yes
+else
+ ac_compiler_gnu=no
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ac_cv_cxx_compiler_gnu=$ac_compiler_gnu
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_cxx_compiler_gnu" >&5
+$as_echo "$ac_cv_cxx_compiler_gnu" >&6; }
+if test $ac_compiler_gnu = yes; then
+ GXX=yes
+else
+ GXX=
+fi
+ac_test_CXXFLAGS=${CXXFLAGS+set}
+ac_save_CXXFLAGS=$CXXFLAGS
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CXX accepts -g" >&5
+$as_echo_n "checking whether $CXX accepts -g... " >&6; }
+if ${ac_cv_prog_cxx_g+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ ac_save_cxx_werror_flag=$ac_cxx_werror_flag
+ ac_cxx_werror_flag=yes
+ ac_cv_prog_cxx_g=no
+ CXXFLAGS="-g"
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_cxx_try_compile "$LINENO"; then :
+ ac_cv_prog_cxx_g=yes
+else
+ CXXFLAGS=""
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_cxx_try_compile "$LINENO"; then :
+
+else
+ ac_cxx_werror_flag=$ac_save_cxx_werror_flag
+ CXXFLAGS="-g"
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_cxx_try_compile "$LINENO"; then :
+ ac_cv_prog_cxx_g=yes
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ ac_cxx_werror_flag=$ac_save_cxx_werror_flag
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cxx_g" >&5
+$as_echo "$ac_cv_prog_cxx_g" >&6; }
+if test "$ac_test_CXXFLAGS" = set; then
+ CXXFLAGS=$ac_save_CXXFLAGS
+elif test $ac_cv_prog_cxx_g = yes; then
+ if test "$GXX" = yes; then
+ CXXFLAGS="-g -O2"
+ else
+ CXXFLAGS="-g"
+ fi
+else
+ if test "$GXX" = yes; then
+ CXXFLAGS="-O2"
+ else
+ CXXFLAGS=
+ fi
+fi
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+depcc="$CXX" am_compiler_list=
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking dependency style of $depcc" >&5
+$as_echo_n "checking dependency style of $depcc... " >&6; }
+if ${am_cv_CXX_dependencies_compiler_type+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then
+ # We make a subdir and do the tests there. Otherwise we can end up
+ # making bogus files that we don't know about and never remove. For
+ # instance it was reported that on HP-UX the gcc test will end up
+ # making a dummy file named 'D' -- because '-MD' means "put the output
+ # in D".
+ rm -rf conftest.dir
+ mkdir conftest.dir
+ # Copy depcomp to subdir because otherwise we won't find it if we're
+ # using a relative directory.
+ cp "$am_depcomp" conftest.dir
+ cd conftest.dir
+ # We will build objects and dependencies in a subdirectory because
+ # it helps to detect inapplicable dependency modes. For instance
+ # both Tru64's cc and ICC support -MD to output dependencies as a
+ # side effect of compilation, but ICC will put the dependencies in
+ # the current directory while Tru64 will put them in the object
+ # directory.
+ mkdir sub
+
+ am_cv_CXX_dependencies_compiler_type=none
+ if test "$am_compiler_list" = ""; then
+ am_compiler_list=`sed -n 's/^#*\([a-zA-Z0-9]*\))$/\1/p' < ./depcomp`
+ fi
+ am__universal=false
+ case " $depcc " in #(
+ *\ -arch\ *\ -arch\ *) am__universal=true ;;
+ esac
+
+ for depmode in $am_compiler_list; do
+ # Setup a source with many dependencies, because some compilers
+ # like to wrap large dependency lists on column 80 (with \), and
+ # we should not choose a depcomp mode which is confused by this.
+ #
+ # We need to recreate these files for each test, as the compiler may
+ # overwrite some of them when testing with obscure command lines.
+ # This happens at least with the AIX C compiler.
+ : > sub/conftest.c
+ for i in 1 2 3 4 5 6; do
+ echo '#include "conftst'$i'.h"' >> sub/conftest.c
+ # Using ": > sub/conftst$i.h" creates only sub/conftst1.h with
+ # Solaris 10 /bin/sh.
+ echo '/* dummy */' > sub/conftst$i.h
+ done
+ echo "${am__include} ${am__quote}sub/conftest.Po${am__quote}" > confmf
+
+ # We check with '-c' and '-o' for the sake of the "dashmstdout"
+ # mode. It turns out that the SunPro C++ compiler does not properly
+ # handle '-M -o', and we need to detect this. Also, some Intel
+ # versions had trouble with output in subdirs.
+ am__obj=sub/conftest.${OBJEXT-o}
+ am__minus_obj="-o $am__obj"
+ case $depmode in
+ gcc)
+ # This depmode causes a compiler race in universal mode.
+ test "$am__universal" = false || continue
+ ;;
+ nosideeffect)
+ # After this tag, mechanisms are not by side-effect, so they'll
+ # only be used when explicitly requested.
+ if test "x$enable_dependency_tracking" = xyes; then
+ continue
+ else
+ break
+ fi
+ ;;
+ msvc7 | msvc7msys | msvisualcpp | msvcmsys)
+ # This compiler won't grok '-c -o', but also, the minuso test has
+ # not run yet. These depmodes are late enough in the game, and
+ # so weak that their functioning should not be impacted.
+ am__obj=conftest.${OBJEXT-o}
+ am__minus_obj=
+ ;;
+ none) break ;;
+ esac
+ if depmode=$depmode \
+ source=sub/conftest.c object=$am__obj \
+ depfile=sub/conftest.Po tmpdepfile=sub/conftest.TPo \
+ $SHELL ./depcomp $depcc -c $am__minus_obj sub/conftest.c \
+ >/dev/null 2>conftest.err &&
+ grep sub/conftst1.h sub/conftest.Po > /dev/null 2>&1 &&
+ grep sub/conftst6.h sub/conftest.Po > /dev/null 2>&1 &&
+ grep $am__obj sub/conftest.Po > /dev/null 2>&1 &&
+ ${MAKE-make} -s -f confmf > /dev/null 2>&1; then
+ # icc doesn't choke on unknown options, it will just issue warnings
+ # or remarks (even with -Werror). So we grep stderr for any message
+ # that says an option was ignored or not supported.
+ # When given -MP, icc 7.0 and 7.1 complain thusly:
+ # icc: Command line warning: ignoring option '-M'; no argument required
+ # The diagnosis changed in icc 8.0:
+ # icc: Command line remark: option '-MP' not supported
+ if (grep 'ignoring option' conftest.err ||
+ grep 'not supported' conftest.err) >/dev/null 2>&1; then :; else
+ am_cv_CXX_dependencies_compiler_type=$depmode
+ break
+ fi
+ fi
+ done
+
+ cd ..
+ rm -rf conftest.dir
+else
+ am_cv_CXX_dependencies_compiler_type=none
+fi
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_CXX_dependencies_compiler_type" >&5
+$as_echo "$am_cv_CXX_dependencies_compiler_type" >&6; }
+CXXDEPMODE=depmode=$am_cv_CXX_dependencies_compiler_type
+
+ if
+ test "x$enable_dependency_tracking" != xno \
+ && test "$am_cv_CXX_dependencies_compiler_type" = gcc3; then
+ am__fastdepCXX_TRUE=
+ am__fastdepCXX_FALSE='#'
+else
+ am__fastdepCXX_TRUE='#'
+ am__fastdepCXX_FALSE=
+fi
+
+
+
+
+
+
+
+
+
+
+if test "x$ac_cv_env_PKG_CONFIG_set" != "xset"; then
+ if test -n "$ac_tool_prefix"; then
+ # Extract the first word of "${ac_tool_prefix}pkg-config", so it can be a program name with args.
+set dummy ${ac_tool_prefix}pkg-config; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_path_PKG_CONFIG+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ case $PKG_CONFIG in
+ [\\/]* | ?:[\\/]*)
+ ac_cv_path_PKG_CONFIG="$PKG_CONFIG" # Let the user override the test with a path.
+ ;;
+ *)
+ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_path_PKG_CONFIG="$as_dir/$ac_word$ac_exec_ext"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+ ;;
+esac
+fi
+PKG_CONFIG=$ac_cv_path_PKG_CONFIG
+if test -n "$PKG_CONFIG"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $PKG_CONFIG" >&5
+$as_echo "$PKG_CONFIG" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+fi
+if test -z "$ac_cv_path_PKG_CONFIG"; then
+ ac_pt_PKG_CONFIG=$PKG_CONFIG
+ # Extract the first word of "pkg-config", so it can be a program name with args.
+set dummy pkg-config; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_path_ac_pt_PKG_CONFIG+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ case $ac_pt_PKG_CONFIG in
+ [\\/]* | ?:[\\/]*)
+ ac_cv_path_ac_pt_PKG_CONFIG="$ac_pt_PKG_CONFIG" # Let the user override the test with a path.
+ ;;
+ *)
+ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_path_ac_pt_PKG_CONFIG="$as_dir/$ac_word$ac_exec_ext"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+ ;;
+esac
+fi
+ac_pt_PKG_CONFIG=$ac_cv_path_ac_pt_PKG_CONFIG
+if test -n "$ac_pt_PKG_CONFIG"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_pt_PKG_CONFIG" >&5
+$as_echo "$ac_pt_PKG_CONFIG" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+ if test "x$ac_pt_PKG_CONFIG" = x; then
+ PKG_CONFIG=""
+ else
+ case $cross_compiling:$ac_tool_warned in
+yes:)
+{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+ PKG_CONFIG=$ac_pt_PKG_CONFIG
+ fi
+else
+ PKG_CONFIG="$ac_cv_path_PKG_CONFIG"
+fi
+
+fi
+if test -n "$PKG_CONFIG"; then
+ _pkg_min_version=0.9.0
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking pkg-config is at least version $_pkg_min_version" >&5
+$as_echo_n "checking pkg-config is at least version $_pkg_min_version... " >&6; }
+ if $PKG_CONFIG --atleast-pkgconfig-version $_pkg_min_version; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+ else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+ PKG_CONFIG=""
+ fi
+fi
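+# With PKG_CONFIG validated, later checks can query modules in the usual way;
+# a minimal sketch (the module name is illustrative only):
+#
+#   $PKG_CONFIG --exists libuv && $PKG_CONFIG --cflags --libs libuv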
+
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to run the C preprocessor" >&5
+$as_echo_n "checking how to run the C preprocessor... " >&6; }
+# On Suns, sometimes $CPP names a directory.
+if test -n "$CPP" && test -d "$CPP"; then
+ CPP=
+fi
+if test -z "$CPP"; then
+ if ${ac_cv_prog_CPP+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ # Double quotes because CPP needs to be expanded
+ for CPP in "$CC -E" "$CC -E -traditional-cpp" "/lib/cpp"
+ do
+ ac_preproc_ok=false
+for ac_c_preproc_warn_flag in '' yes
+do
+ # Use a header file that comes with gcc, so configuring glibc
+ # with a fresh cross-compiler works.
+ # Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
+ # <limits.h> exists even on freestanding compilers.
+ # On the NeXT, cc -E runs the code through the compiler's parser,
+ # not just through cpp. "Syntax error" is here to catch this case.
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#ifdef __STDC__
+# include <limits.h>
+#else
+# include <assert.h>
+#endif
+ Syntax error
+_ACEOF
+if ac_fn_c_try_cpp "$LINENO"; then :
+
+else
+ # Broken: fails on valid input.
+continue
+fi
+rm -f conftest.err conftest.i conftest.$ac_ext
+
+ # OK, works on sane cases. Now check whether nonexistent headers
+ # can be detected and how.
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <ac_nonexistent.h>
+_ACEOF
+if ac_fn_c_try_cpp "$LINENO"; then :
+ # Broken: success on invalid input.
+continue
+else
+ # Passes both tests.
+ac_preproc_ok=:
+break
+fi
+rm -f conftest.err conftest.i conftest.$ac_ext
+
+done
+# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped.
+rm -f conftest.i conftest.err conftest.$ac_ext
+if $ac_preproc_ok; then :
+ break
+fi
+
+ done
+ ac_cv_prog_CPP=$CPP
+
+fi
+ CPP=$ac_cv_prog_CPP
+else
+ ac_cv_prog_CPP=$CPP
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $CPP" >&5
+$as_echo "$CPP" >&6; }
+ac_preproc_ok=false
+for ac_c_preproc_warn_flag in '' yes
+do
+ # Use a header file that comes with gcc, so configuring glibc
+ # with a fresh cross-compiler works.
+ # Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
+ # <limits.h> exists even on freestanding compilers.
+ # On the NeXT, cc -E runs the code through the compiler's parser,
+ # not just through cpp. "Syntax error" is here to catch this case.
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#ifdef __STDC__
+# include <limits.h>
+#else
+# include <assert.h>
+#endif
+ Syntax error
+_ACEOF
+if ac_fn_c_try_cpp "$LINENO"; then :
+
+else
+ # Broken: fails on valid input.
+continue
+fi
+rm -f conftest.err conftest.i conftest.$ac_ext
+
+ # OK, works on sane cases. Now check whether nonexistent headers
+ # can be detected and how.
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <ac_nonexistent.h>
+_ACEOF
+if ac_fn_c_try_cpp "$LINENO"; then :
+ # Broken: success on invalid input.
+continue
+else
+ # Passes both tests.
+ac_preproc_ok=:
+break
+fi
+rm -f conftest.err conftest.i conftest.$ac_ext
+
+done
+# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped.
+rm -f conftest.i conftest.err conftest.$ac_ext
+if $ac_preproc_ok; then :
+
+else
+ { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error $? "C preprocessor \"$CPP\" fails sanity check
+See \`config.log' for more details" "$LINENO" 5; }
+fi
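+# The chosen preprocessor can be exercised by hand to reproduce the sanity
+# check above (a sketch):
+#
+#   printf '#include <limits.h>\n' | $CPP - >/dev/null && echo "CPP ok"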
+
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for grep that handles long lines and -e" >&5
+$as_echo_n "checking for grep that handles long lines and -e... " >&6; }
+if ${ac_cv_path_GREP+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -z "$GREP"; then
+ ac_path_GREP_found=false
+ # Loop through the user's path and test for each of PROGNAME-LIST
+ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_prog in grep ggrep; do
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ ac_path_GREP="$as_dir/$ac_prog$ac_exec_ext"
+ as_fn_executable_p "$ac_path_GREP" || continue
+# Check for GNU $ac_path_GREP and select it if it is found.
+case `"$ac_path_GREP" --version 2>&1` in
+*GNU*)
+ ac_cv_path_GREP="$ac_path_GREP" ac_path_GREP_found=:;;
+*)
+ ac_count=0
+ $as_echo_n 0123456789 >"conftest.in"
+ while :
+ do
+ cat "conftest.in" "conftest.in" >"conftest.tmp"
+ mv "conftest.tmp" "conftest.in"
+ cp "conftest.in" "conftest.nl"
+ $as_echo 'GREP' >> "conftest.nl"
+ "$ac_path_GREP" -e 'GREP$' -e '-(cannot match)-' < "conftest.nl" >"conftest.out" 2>/dev/null || break
+ diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break
+ as_fn_arith $ac_count + 1 && ac_count=$as_val
+ if test $ac_count -gt ${ac_path_GREP_max-0}; then
+ # Best one so far, save it but keep looking for a better one
+ ac_cv_path_GREP="$ac_path_GREP"
+ ac_path_GREP_max=$ac_count
+ fi
+ # 10*(2^10) chars as input seems more than enough
+ test $ac_count -gt 10 && break
+ done
+ rm -f conftest.in conftest.tmp conftest.nl conftest.out;;
+esac
+
+ $ac_path_GREP_found && break 3
+ done
+ done
+ done
+IFS=$as_save_IFS
+ if test -z "$ac_cv_path_GREP"; then
+ as_fn_error $? "no acceptable grep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5
+ fi
+else
+ ac_cv_path_GREP=$GREP
+fi
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_GREP" >&5
+$as_echo "$ac_cv_path_GREP" >&6; }
+ GREP="$ac_cv_path_GREP"
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for egrep" >&5
+$as_echo_n "checking for egrep... " >&6; }
+if ${ac_cv_path_EGREP+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if echo a | $GREP -E '(a|b)' >/dev/null 2>&1
+ then ac_cv_path_EGREP="$GREP -E"
+ else
+ if test -z "$EGREP"; then
+ ac_path_EGREP_found=false
+ # Loop through the user's path and test for each of PROGNAME-LIST
+ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_prog in egrep; do
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ ac_path_EGREP="$as_dir/$ac_prog$ac_exec_ext"
+ as_fn_executable_p "$ac_path_EGREP" || continue
+# Check for GNU $ac_path_EGREP and select it if it is found.
+case `"$ac_path_EGREP" --version 2>&1` in
+*GNU*)
+ ac_cv_path_EGREP="$ac_path_EGREP" ac_path_EGREP_found=:;;
+*)
+ ac_count=0
+ $as_echo_n 0123456789 >"conftest.in"
+ while :
+ do
+ cat "conftest.in" "conftest.in" >"conftest.tmp"
+ mv "conftest.tmp" "conftest.in"
+ cp "conftest.in" "conftest.nl"
+ $as_echo 'EGREP' >> "conftest.nl"
+ "$ac_path_EGREP" 'EGREP$' < "conftest.nl" >"conftest.out" 2>/dev/null || break
+ diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break
+ as_fn_arith $ac_count + 1 && ac_count=$as_val
+ if test $ac_count -gt ${ac_path_EGREP_max-0}; then
+ # Best one so far, save it but keep looking for a better one
+ ac_cv_path_EGREP="$ac_path_EGREP"
+ ac_path_EGREP_max=$ac_count
+ fi
+ # 10*(2^10) chars as input seems more than enough
+ test $ac_count -gt 10 && break
+ done
+ rm -f conftest.in conftest.tmp conftest.nl conftest.out;;
+esac
+
+ $ac_path_EGREP_found && break 3
+ done
+ done
+ done
+IFS=$as_save_IFS
+ if test -z "$ac_cv_path_EGREP"; then
+ as_fn_error $? "no acceptable egrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5
+ fi
+else
+ ac_cv_path_EGREP=$EGREP
+fi
+
+ fi
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_EGREP" >&5
+$as_echo "$ac_cv_path_EGREP" >&6; }
+ EGREP="$ac_cv_path_EGREP"
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for ANSI C header files" >&5
+$as_echo_n "checking for ANSI C header files... " >&6; }
+if ${ac_cv_header_stdc+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <stdlib.h>
+#include <stdarg.h>
+#include <string.h>
+#include <float.h>
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ ac_cv_header_stdc=yes
+else
+ ac_cv_header_stdc=no
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+
+if test $ac_cv_header_stdc = yes; then
+ # SunOS 4.x string.h does not declare mem*, contrary to ANSI.
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <string.h>
+
+_ACEOF
+if (eval "$ac_cpp conftest.$ac_ext") 2>&5 |
+ $EGREP "memchr" >/dev/null 2>&1; then :
+
+else
+ ac_cv_header_stdc=no
+fi
+rm -f conftest*
+
+fi
+
+if test $ac_cv_header_stdc = yes; then
+ # ISC 2.0.2 stdlib.h does not declare free, contrary to ANSI.
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <stdlib.h>
+
+_ACEOF
+if (eval "$ac_cpp conftest.$ac_ext") 2>&5 |
+ $EGREP "free" >/dev/null 2>&1; then :
+
+else
+ ac_cv_header_stdc=no
+fi
+rm -f conftest*
+
+fi
+
+if test $ac_cv_header_stdc = yes; then
+ # /bin/cc in Irix-4.0.5 gets non-ANSI ctype macros unless using -ansi.
+ if test "$cross_compiling" = yes; then :
+ :
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <ctype.h>
+#include <stdlib.h>
+#if ((' ' & 0x0FF) == 0x020)
+# define ISLOWER(c) ('a' <= (c) && (c) <= 'z')
+# define TOUPPER(c) (ISLOWER(c) ? 'A' + ((c) - 'a') : (c))
+#else
+# define ISLOWER(c) \
+ (('a' <= (c) && (c) <= 'i') \
+ || ('j' <= (c) && (c) <= 'r') \
+ || ('s' <= (c) && (c) <= 'z'))
+# define TOUPPER(c) (ISLOWER(c) ? ((c) | 0x40) : (c))
+#endif
+
+#define XOR(e, f) (((e) && !(f)) || (!(e) && (f)))
+int
+main ()
+{
+ int i;
+ for (i = 0; i < 256; i++)
+ if (XOR (islower (i), ISLOWER (i))
+ || toupper (i) != TOUPPER (i))
+ return 2;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_run "$LINENO"; then :
+
+else
+ ac_cv_header_stdc=no
+fi
+rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \
+ conftest.$ac_objext conftest.beam conftest.$ac_ext
+fi
+
+fi
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_stdc" >&5
+$as_echo "$ac_cv_header_stdc" >&6; }
+if test $ac_cv_header_stdc = yes; then
+
+$as_echo "#define STDC_HEADERS 1" >>confdefs.h
+
+fi
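+# STDC_HEADERS ends up in the generated config header, so C sources can guard
+# standard-header includes on it; the four compile/grep/run probes above only
+# matter on pre-ANSI systems and pass trivially on anything modern.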
+
+# On IRIX 5.3, sys/types and inttypes.h are conflicting.
+for ac_header in sys/types.h sys/stat.h stdlib.h string.h memory.h strings.h \
+ inttypes.h stdint.h unistd.h
+do :
+ as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh`
+ac_fn_c_check_header_compile "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default
+"
+if eval test \"x\$"$as_ac_Header"\" = x"yes"; then :
+ cat >>confdefs.h <<_ACEOF
+#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1
+_ACEOF
+
+fi
+
+done
+
+
+
+ ac_fn_c_check_header_mongrel "$LINENO" "minix/config.h" "ac_cv_header_minix_config_h" "$ac_includes_default"
+if test "x$ac_cv_header_minix_config_h" = xyes; then :
+ MINIX=yes
+else
+ MINIX=
+fi
+
+
+ if test "$MINIX" = yes; then
+
+$as_echo "#define _POSIX_SOURCE 1" >>confdefs.h
+
+
+$as_echo "#define _POSIX_1_SOURCE 2" >>confdefs.h
+
+
+$as_echo "#define _MINIX 1" >>confdefs.h
+
+ fi
+
+
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether it is safe to define __EXTENSIONS__" >&5
+$as_echo_n "checking whether it is safe to define __EXTENSIONS__... " >&6; }
+if ${ac_cv_safe_to_define___extensions__+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+# define __EXTENSIONS__ 1
+ $ac_includes_default
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ ac_cv_safe_to_define___extensions__=yes
+else
+ ac_cv_safe_to_define___extensions__=no
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_safe_to_define___extensions__" >&5
+$as_echo "$ac_cv_safe_to_define___extensions__" >&6; }
+ test $ac_cv_safe_to_define___extensions__ = yes &&
+ $as_echo "#define __EXTENSIONS__ 1" >>confdefs.h
+
+ $as_echo "#define _ALL_SOURCE 1" >>confdefs.h
+
+ $as_echo "#define _GNU_SOURCE 1" >>confdefs.h
+
+ $as_echo "#define _POSIX_PTHREAD_SEMANTICS 1" >>confdefs.h
+
+ $as_echo "#define _TANDEM_SOURCE 1" >>confdefs.h
+
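+# The block above unconditionally requests the widest feature-test surface
+# (_GNU_SOURCE, _ALL_SOURCE, ...) and adds __EXTENSIONS__ only where a compile
+# probe showed it is safe -- the standard AC_USE_SYSTEM_EXTENSIONS behaviour.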
+
+
+
+# -----------------------------------------------------------------------------
+# configurable options
+
+# Check whether --enable-plugin-nfacct was given.
+if test "${enable_plugin_nfacct+set}" = set; then :
+ enableval=$enable_plugin_nfacct;
+else
+ enable_plugin_nfacct="detect"
+
+fi
+
+# Check whether --enable-plugin-freeipmi was given.
+if test "${enable_plugin_freeipmi+set}" = set; then :
+ enableval=$enable_plugin_freeipmi;
+else
+ enable_plugin_freeipmi="detect"
+
+fi
+
+# Check whether --enable-plugin-cups was given.
+if test "${enable_plugin_cups+set}" = set; then :
+ enableval=$enable_plugin_cups;
+else
+ enable_plugin_cups="detect"
+
+fi
+
+# Check whether --enable-plugin-xenstat was given.
+if test "${enable_plugin_xenstat+set}" = set; then :
+ enableval=$enable_plugin_xenstat;
+else
+ enable_plugin_xenstat="detect"
+
+fi
+
+# Check whether --enable-backend-kinesis was given.
+if test "${enable_backend_kinesis+set}" = set; then :
+ enableval=$enable_backend_kinesis;
+else
+ enable_backend_kinesis="detect"
+
+fi
+
+# Check whether --enable-backend-prometheus-remote-write was given.
+if test "${enable_backend_prometheus_remote_write+set}" = set; then :
+ enableval=$enable_backend_prometheus_remote_write;
+else
+ enable_backend_prometheus_remote_write="detect"
+
+fi
+
+# Check whether --enable-backend-mongodb was given.
+if test "${enable_backend_mongodb+set}" = set; then :
+ enableval=$enable_backend_mongodb;
+else
+ enable_backend_mongodb="detect"
+
+fi
+
+# Check whether --enable-pedantic was given.
+if test "${enable_pedantic+set}" = set; then :
+ enableval=$enable_pedantic;
+else
+ enable_pedantic="no"
+
+fi
+
+# Check whether --enable-accept4 was given.
+if test "${enable_accept4+set}" = set; then :
+ enableval=$enable_accept4;
+else
+ enable_accept4="detect"
+
+fi
+
+
+# Check whether --with-webdir was given.
+if test "${with_webdir+set}" = set; then :
+ withval=$with_webdir; webdir="${withval}"
+else
+ webdir="\$(pkgdatadir)/web"
+
+fi
+
+
+# Check whether --with-libcap was given.
+if test "${with_libcap+set}" = set; then :
+ withval=$with_libcap;
+else
+ with_libcap="detect"
+
+fi
+
+
+# Check whether --with-zlib was given.
+if test "${with_zlib+set}" = set; then :
+ withval=$with_zlib;
+else
+ with_zlib="yes"
+
+fi
+
+
+# Check whether --with-math was given.
+if test "${with_math+set}" = set; then :
+ withval=$with_math;
+else
+ with_math="yes"
+
+fi
+
+
+# Check whether --with-user was given.
+if test "${with_user+set}" = set; then :
+ withval=$with_user;
+else
+ with_user="nobody"
+
+fi
+
+# Check whether --enable-x86-sse was given.
+if test "${enable_x86_sse+set}" = set; then :
+ enableval=$enable_x86_sse;
+else
+ enable_x86_sse="yes"
+
+fi
+
+# Check whether --enable-lto was given.
+if test "${enable_lto+set}" = set; then :
+ enableval=$enable_lto;
+else
+ enable_lto="detect"
+
+fi
+
+# Check whether --enable-https was given.
+if test "${enable_https+set}" = set; then :
+ enableval=$enable_https;
+else
+ enable_https="detect"
+
+fi
+
+# Check whether --enable-dbengine was given.
+if test "${enable_dbengine+set}" = set; then :
+ enableval=$enable_dbengine;
+else
+ enable_dbengine="detect"
+
+fi
+
+# Check whether --enable-jsonc was given.
+if test "${enable_jsonc+set}" = set; then :
+ enableval=$enable_jsonc;
+else
+ enable_jsonc="detect"
+
+fi
+
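+# Every option registered above is visible in ./configure --help, including
+# the auto-generated negative forms; a hedged invocation sketch combining a
+# few of them:
+#
+#   ./configure --enable-dbengine --disable-backend-mongodb --with-user=netdata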
+
+# -----------------------------------------------------------------------------
+# netdata required checks
+
+# fails on centos6
+#AX_CHECK_ENABLE_DEBUG()
+
+
+
+
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for __attribute__((returns_nonnull))" >&5
+$as_echo_n "checking for __attribute__((returns_nonnull))... " >&6; }
+if ${ax_cv_have_func_attribute_returns_nonnull+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+
+ void *foo( void ) __attribute__((returns_nonnull));
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ if test -s conftest.err; then :
+ ax_cv_have_func_attribute_returns_nonnull=no
+else
+ ax_cv_have_func_attribute_returns_nonnull=yes
+fi
+else
+ ax_cv_have_func_attribute_returns_nonnull=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ax_cv_have_func_attribute_returns_nonnull" >&5
+$as_echo "$ax_cv_have_func_attribute_returns_nonnull" >&6; }
+
+ if test yes = $ax_cv_have_func_attribute_returns_nonnull; then :
+
+cat >>confdefs.h <<_ACEOF
+#define HAVE_FUNC_ATTRIBUTE_RETURNS_NONNULL 1
+_ACEOF
+
+fi
+
+
+
+
+
+
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for __attribute__((malloc))" >&5
+$as_echo_n "checking for __attribute__((malloc))... " >&6; }
+if ${ax_cv_have_func_attribute_malloc+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+
+ void *foo( void ) __attribute__((malloc));
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ if test -s conftest.err; then :
+ ax_cv_have_func_attribute_malloc=no
+else
+ ax_cv_have_func_attribute_malloc=yes
+fi
+else
+ ax_cv_have_func_attribute_malloc=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ax_cv_have_func_attribute_malloc" >&5
+$as_echo "$ax_cv_have_func_attribute_malloc" >&6; }
+
+ if test yes = $ax_cv_have_func_attribute_malloc; then :
+
+cat >>confdefs.h <<_ACEOF
+#define HAVE_FUNC_ATTRIBUTE_MALLOC 1
+_ACEOF
+
+fi
+
+
+
+
+
+
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for __attribute__((noreturn))" >&5
+$as_echo_n "checking for __attribute__((noreturn))... " >&6; }
+if ${ax_cv_have_func_attribute_noreturn+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+
+ void foo( void ) __attribute__((noreturn));
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ if test -s conftest.err; then :
+ ax_cv_have_func_attribute_noreturn=no
+else
+ ax_cv_have_func_attribute_noreturn=yes
+fi
+else
+ ax_cv_have_func_attribute_noreturn=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ax_cv_have_func_attribute_noreturn" >&5
+$as_echo "$ax_cv_have_func_attribute_noreturn" >&6; }
+
+ if test yes = $ax_cv_have_func_attribute_noreturn; then :
+
+cat >>confdefs.h <<_ACEOF
+#define HAVE_FUNC_ATTRIBUTE_NORETURN 1
+_ACEOF
+
+fi
+
+
+
+
+
+
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for __attribute__((noinline))" >&5
+$as_echo_n "checking for __attribute__((noinline))... " >&6; }
+if ${ax_cv_have_func_attribute_noinline+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+
+ __attribute__((noinline)) int foo( void ) { return 0; }
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ if test -s conftest.err; then :
+ ax_cv_have_func_attribute_noinline=no
+else
+ ax_cv_have_func_attribute_noinline=yes
+fi
+else
+ ax_cv_have_func_attribute_noinline=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ax_cv_have_func_attribute_noinline" >&5
+$as_echo "$ax_cv_have_func_attribute_noinline" >&6; }
+
+ if test yes = $ax_cv_have_func_attribute_noinline; then :
+
+cat >>confdefs.h <<_ACEOF
+#define HAVE_FUNC_ATTRIBUTE_NOINLINE 1
+_ACEOF
+
+fi
+
+
+
+
+
+
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for __attribute__((format))" >&5
+$as_echo_n "checking for __attribute__((format))... " >&6; }
+if ${ax_cv_have_func_attribute_format+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+
+ int foo(const char *p, ...) __attribute__((format(printf, 1, 2)));
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ if test -s conftest.err; then :
+ ax_cv_have_func_attribute_format=no
+else
+ ax_cv_have_func_attribute_format=yes
+fi
+else
+ ax_cv_have_func_attribute_format=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ax_cv_have_func_attribute_format" >&5
+$as_echo "$ax_cv_have_func_attribute_format" >&6; }
+
+ if test yes = $ax_cv_have_func_attribute_format; then :
+
+cat >>confdefs.h <<_ACEOF
+#define HAVE_FUNC_ATTRIBUTE_FORMAT 1
+_ACEOF
+
+fi
+
+
+
+
+
+
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for __attribute__((warn_unused_result))" >&5
+$as_echo_n "checking for __attribute__((warn_unused_result))... " >&6; }
+if ${ax_cv_have_func_attribute_warn_unused_result+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+
+ int foo( void ) __attribute__((warn_unused_result));
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ if test -s conftest.err; then :
+ ax_cv_have_func_attribute_warn_unused_result=no
+else
+ ax_cv_have_func_attribute_warn_unused_result=yes
+fi
+else
+ ax_cv_have_func_attribute_warn_unused_result=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ax_cv_have_func_attribute_warn_unused_result" >&5
+$as_echo "$ax_cv_have_func_attribute_warn_unused_result" >&6; }
+
+ if test yes = $ax_cv_have_func_attribute_warn_unused_result; then :
+
+cat >>confdefs.h <<_ACEOF
+#define HAVE_FUNC_ATTRIBUTE_WARN_UNUSED_RESULT 1
+_ACEOF
+
+fi
+
+
+
+
+ac_fn_c_check_type "$LINENO" "struct timespec" "ac_cv_type_struct_timespec" "#include <time.h>
+"
+if test "x$ac_cv_type_struct_timespec" = xyes; then :
+
+cat >>confdefs.h <<_ACEOF
+#define HAVE_STRUCT_TIMESPEC 1
+_ACEOF
+
+
+fi
+ac_fn_c_check_type "$LINENO" "clockid_t" "ac_cv_type_clockid_t" "#include <time.h>
+"
+if test "x$ac_cv_type_clockid_t" = xyes; then :
+
+cat >>confdefs.h <<_ACEOF
+#define HAVE_CLOCKID_T 1
+_ACEOF
+
+
+fi
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing clock_gettime" >&5
+$as_echo_n "checking for library containing clock_gettime... " >&6; }
+if ${ac_cv_search_clock_gettime+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ ac_func_search_save_LIBS=$LIBS
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+/* Override any GCC internal prototype to avoid an error.
+ Use char because int might match the return type of a GCC
+ builtin and then its argument prototype would still apply. */
+#ifdef __cplusplus
+extern "C"
+#endif
+char clock_gettime ();
+int
+main ()
+{
+return clock_gettime ();
+ ;
+ return 0;
+}
+_ACEOF
+for ac_lib in '' rt posix4; do
+ if test -z "$ac_lib"; then
+ ac_res="none required"
+ else
+ ac_res=-l$ac_lib
+ LIBS="-l$ac_lib $ac_func_search_save_LIBS"
+ fi
+ if ac_fn_c_try_link "$LINENO"; then :
+ ac_cv_search_clock_gettime=$ac_res
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext
+ if ${ac_cv_search_clock_gettime+:} false; then :
+ break
+fi
+done
+if ${ac_cv_search_clock_gettime+:} false; then :
+
+else
+ ac_cv_search_clock_gettime=no
+fi
+rm conftest.$ac_ext
+LIBS=$ac_func_search_save_LIBS
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_clock_gettime" >&5
+$as_echo "$ac_cv_search_clock_gettime" >&6; }
+ac_res=$ac_cv_search_clock_gettime
+if test "$ac_res" != no; then :
+ test "$ac_res" = "none required" || LIBS="$ac_res $LIBS"
+
+fi
+
+for ac_func in clock_gettime
+do :
+ ac_fn_c_check_func "$LINENO" "clock_gettime" "ac_cv_func_clock_gettime"
+if test "x$ac_cv_func_clock_gettime" = xyes; then :
+ cat >>confdefs.h <<_ACEOF
+#define HAVE_CLOCK_GETTIME 1
+_ACEOF
+
+fi
+done
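+# The library search above covers the historical homes of clock_gettime:
+# none needed on modern libcs, librt on older glibc, libposix4 on old Solaris.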
+
+for ac_func in sched_setscheduler sched_getscheduler sched_getparam sched_get_priority_min sched_get_priority_max getpriority setpriority nice
+do :
+ as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh`
+ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var"
+if eval test \"x\$"$as_ac_var"\" = x"yes"; then :
+ cat >>confdefs.h <<_ACEOF
+#define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1
+_ACEOF
+
+fi
+done
+
+for ac_func in recvmmsg
+do :
+ ac_fn_c_check_func "$LINENO" "recvmmsg" "ac_cv_func_recvmmsg"
+if test "x$ac_cv_func_recvmmsg" = xyes; then :
+ cat >>confdefs.h <<_ACEOF
+#define HAVE_RECVMMSG 1
+_ACEOF
+
+fi
+done
+
+
+ac_fn_c_find_intX_t "$LINENO" "8" "ac_cv_c_int8_t"
+case $ac_cv_c_int8_t in #(
+ no|yes) ;; #(
+ *)
+
+cat >>confdefs.h <<_ACEOF
+#define int8_t $ac_cv_c_int8_t
+_ACEOF
+;;
+esac
+
+ac_fn_c_find_intX_t "$LINENO" "16" "ac_cv_c_int16_t"
+case $ac_cv_c_int16_t in #(
+ no|yes) ;; #(
+ *)
+
+cat >>confdefs.h <<_ACEOF
+#define int16_t $ac_cv_c_int16_t
+_ACEOF
+;;
+esac
+
+ac_fn_c_find_intX_t "$LINENO" "32" "ac_cv_c_int32_t"
+case $ac_cv_c_int32_t in #(
+ no|yes) ;; #(
+ *)
+
+cat >>confdefs.h <<_ACEOF
+#define int32_t $ac_cv_c_int32_t
+_ACEOF
+;;
+esac
+
+ac_fn_c_find_intX_t "$LINENO" "64" "ac_cv_c_int64_t"
+case $ac_cv_c_int64_t in #(
+ no|yes) ;; #(
+ *)
+
+cat >>confdefs.h <<_ACEOF
+#define int64_t $ac_cv_c_int64_t
+_ACEOF
+;;
+esac
+
+ac_fn_c_find_uintX_t "$LINENO" "8" "ac_cv_c_uint8_t"
+case $ac_cv_c_uint8_t in #(
+ no|yes) ;; #(
+ *)
+
+$as_echo "#define _UINT8_T 1" >>confdefs.h
+
+
+cat >>confdefs.h <<_ACEOF
+#define uint8_t $ac_cv_c_uint8_t
+_ACEOF
+;;
+ esac
+
+ac_fn_c_find_uintX_t "$LINENO" "16" "ac_cv_c_uint16_t"
+case $ac_cv_c_uint16_t in #(
+ no|yes) ;; #(
+ *)
+
+
+cat >>confdefs.h <<_ACEOF
+#define uint16_t $ac_cv_c_uint16_t
+_ACEOF
+;;
+ esac
+
+ac_fn_c_find_uintX_t "$LINENO" "32" "ac_cv_c_uint32_t"
+case $ac_cv_c_uint32_t in #(
+ no|yes) ;; #(
+ *)
+
+$as_echo "#define _UINT32_T 1" >>confdefs.h
+
+
+cat >>confdefs.h <<_ACEOF
+#define uint32_t $ac_cv_c_uint32_t
+_ACEOF
+;;
+ esac
+
+ac_fn_c_find_uintX_t "$LINENO" "64" "ac_cv_c_uint64_t"
+case $ac_cv_c_uint64_t in #(
+ no|yes) ;; #(
+ *)
+
+$as_echo "#define _UINT64_T 1" >>confdefs.h
+
+
+cat >>confdefs.h <<_ACEOF
+#define uint64_t $ac_cv_c_uint64_t
+_ACEOF
+;;
+ esac
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for inline" >&5
+$as_echo_n "checking for inline... " >&6; }
+if ${ac_cv_c_inline+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ ac_cv_c_inline=no
+for ac_kw in inline __inline__ __inline; do
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#ifndef __cplusplus
+typedef int foo_t;
+static $ac_kw foo_t static_foo () {return 0; }
+$ac_kw foo_t foo () {return 0; }
+#endif
+
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ ac_cv_c_inline=$ac_kw
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ test "$ac_cv_c_inline" != no && break
+done
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_inline" >&5
+$as_echo "$ac_cv_c_inline" >&6; }
+
+case $ac_cv_c_inline in
+ inline | yes) ;;
+ *)
+ case $ac_cv_c_inline in
+ no) ac_val=;;
+ *) ac_val=$ac_cv_c_inline;;
+ esac
+ cat >>confdefs.h <<_ACEOF
+#ifndef __cplusplus
+#define inline $ac_val
+#endif
+_ACEOF
+ ;;
+esac
+
+ac_fn_c_check_decl "$LINENO" "strerror_r" "ac_cv_have_decl_strerror_r" "$ac_includes_default"
+if test "x$ac_cv_have_decl_strerror_r" = xyes; then :
+ ac_have_decl=1
+else
+ ac_have_decl=0
+fi
+
+cat >>confdefs.h <<_ACEOF
+#define HAVE_DECL_STRERROR_R $ac_have_decl
+_ACEOF
+
+for ac_func in strerror_r
+do :
+ ac_fn_c_check_func "$LINENO" "strerror_r" "ac_cv_func_strerror_r"
+if test "x$ac_cv_func_strerror_r" = xyes; then :
+ cat >>confdefs.h <<_ACEOF
+#define HAVE_STRERROR_R 1
+_ACEOF
+
+fi
+done
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether strerror_r returns char *" >&5
+$as_echo_n "checking whether strerror_r returns char *... " >&6; }
+if ${ac_cv_func_strerror_r_char_p+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+
+ ac_cv_func_strerror_r_char_p=no
+ if test $ac_cv_have_decl_strerror_r = yes; then
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+$ac_includes_default
+int
+main ()
+{
+
+ char buf[100];
+ char x = *strerror_r (0, buf, sizeof buf);
+ char *p = strerror_r (0, buf, sizeof buf);
+ return !p || x;
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ ac_cv_func_strerror_r_char_p=yes
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ else
+ # strerror_r is not declared. Choose between
+ # systems that have relatively inaccessible declarations for the
+ # function. BeOS and DEC UNIX 4.0 fall in this category, but the
+ # former has a strerror_r that returns char*, while the latter
+ # has a strerror_r that returns `int'.
+ # This test should segfault on the DEC system.
+ if test "$cross_compiling" = yes; then :
+ :
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+$ac_includes_default
+ extern char *strerror_r ();
+int
+main ()
+{
+char buf[100];
+ char x = *strerror_r (0, buf, sizeof buf);
+ return ! isalpha (x);
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_run "$LINENO"; then :
+ ac_cv_func_strerror_r_char_p=yes
+fi
+rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \
+ conftest.$ac_objext conftest.beam conftest.$ac_ext
+fi
+
+ fi
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_func_strerror_r_char_p" >&5
+$as_echo "$ac_cv_func_strerror_r_char_p" >&6; }
+if test $ac_cv_func_strerror_r_char_p = yes; then
+
+$as_echo "#define STRERROR_R_CHAR_P 1" >>confdefs.h
+
+fi
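+# This distinguishes the GNU strerror_r (returns char *) from the POSIX one
+# (returns int); STRERROR_R_CHAR_P lets callers pick the right calling
+# convention at compile time.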
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for _Generic" >&5
+$as_echo_n "checking for _Generic... " >&6; }
+if ${ac_cv_c__Generic+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+int
+ main (int argc, char **argv)
+ {
+ int a = _Generic (argc, int: argc = 1);
+ int *b = &_Generic (argc, default: argc);
+ char ***c = _Generic (argv, int: argc, default: argv ? &argv : 0);
+ _Generic (1 ? 0 : b, int: a, default: b) = &argc;
+ _Generic (a = 1, default: a) = 3;
+ return a + !b + !c;
+ }
+
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ ac_cv_c__Generic=yes
+else
+ ac_cv_c__Generic=no
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c__Generic" >&5
+$as_echo "$ac_cv_c__Generic" >&6; }
+if test $ac_cv_c__Generic = yes; then
+
+$as_echo "#define HAVE_C__GENERIC 1" >>confdefs.h
+
+fi
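+# _Generic is a C11 construct; the probe deliberately exercises several corner
+# cases (default associations, lvalue results) rather than a bare usage, so
+# partial implementations are rejected too.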
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for __atomic" >&5
+$as_echo_n "checking for __atomic... " >&6; }
+if ${ac_cv_c___atomic+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+int
+ main (int argc, char **argv)
+ {
+ volatile unsigned long ul1 = 1, ul2 = 0, ul3 = 2;
+ __atomic_load_n(&ul1, __ATOMIC_SEQ_CST);
+ __atomic_compare_exchange(&ul1, &ul2, &ul3, 1, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
+ __atomic_fetch_add(&ul1, 1, __ATOMIC_SEQ_CST);
+ __atomic_fetch_sub(&ul3, 1, __ATOMIC_SEQ_CST);
+ __atomic_or_fetch(&ul1, ul2, __ATOMIC_SEQ_CST);
+ __atomic_and_fetch(&ul1, ul2, __ATOMIC_SEQ_CST);
+ volatile unsigned long long ull1 = 1, ull2 = 0, ull3 = 2;
+ __atomic_load_n(&ull1, __ATOMIC_SEQ_CST);
+ __atomic_compare_exchange(&ull1, &ull2, &ull3, 1, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
+ __atomic_fetch_add(&ull1, 1, __ATOMIC_SEQ_CST);
+ __atomic_fetch_sub(&ull3, 1, __ATOMIC_SEQ_CST);
+ __atomic_or_fetch(&ull1, ull2, __ATOMIC_SEQ_CST);
+ __atomic_and_fetch(&ull1, ull2, __ATOMIC_SEQ_CST);
+ return 0;
+ }
+
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ ac_cv_c___atomic=yes
+else
+ ac_cv_c___atomic=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c___atomic" >&5
+$as_echo "$ac_cv_c___atomic" >&6; }
+if test $ac_cv_c___atomic = yes; then
+
+$as_echo "#define HAVE_C___ATOMIC 1" >>confdefs.h
+
+fi
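+# The __atomic_* builtins probed here are the GCC/Clang atomics; the probe
+# links (not just compiles), so a platform that would need libatomic for the
+# 64-bit operations is reported as "no".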
+
+# AC_C_STMT_EXPR
+# The cast to long int works around a bug in the HP C Compiler
+# version HP92453-01 B.11.11.23709.GP, which incorrectly rejects
+# declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'.
+# This bug is HP SR number 8606223364.
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking size of void *" >&5
+$as_echo_n "checking size of void *... " >&6; }
+if ${ac_cv_sizeof_void_p+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (void *))" "ac_cv_sizeof_void_p" "$ac_includes_default"; then :
+
+else
+ if test "$ac_cv_type_void_p" = yes; then
+ { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error 77 "cannot compute sizeof (void *)
+See \`config.log' for more details" "$LINENO" 5; }
+ else
+ ac_cv_sizeof_void_p=0
+ fi
+fi
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sizeof_void_p" >&5
+$as_echo "$ac_cv_sizeof_void_p" >&6; }
+
+
+
+cat >>confdefs.h <<_ACEOF
+#define SIZEOF_VOID_P $ac_cv_sizeof_void_p
+_ACEOF
+
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether sys/types.h defines makedev" >&5
+$as_echo_n "checking whether sys/types.h defines makedev... " >&6; }
+if ${ac_cv_header_sys_types_h_makedev+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <sys/types.h>
+int
+main ()
+{
+return makedev(0, 0);
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ ac_cv_header_sys_types_h_makedev=yes
+else
+ ac_cv_header_sys_types_h_makedev=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_sys_types_h_makedev" >&5
+$as_echo "$ac_cv_header_sys_types_h_makedev" >&6; }
+
+if test $ac_cv_header_sys_types_h_makedev = no; then
+ac_fn_c_check_header_mongrel "$LINENO" "sys/mkdev.h" "ac_cv_header_sys_mkdev_h" "$ac_includes_default"
+if test "x$ac_cv_header_sys_mkdev_h" = xyes; then :
+
+$as_echo "#define MAJOR_IN_MKDEV 1" >>confdefs.h
+
+fi
+
+
+
+ if test $ac_cv_header_sys_mkdev_h = no; then
+ ac_fn_c_check_header_mongrel "$LINENO" "sys/sysmacros.h" "ac_cv_header_sys_sysmacros_h" "$ac_includes_default"
+if test "x$ac_cv_header_sys_sysmacros_h" = xyes; then :
+
+$as_echo "#define MAJOR_IN_SYSMACROS 1" >>confdefs.h
+
+fi
+
+
+ fi
+fi
+
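+# The fallback chain above mirrors the usual portability dance for
+# major()/minor()/makedev(). A hedged sketch of the include logic C sources
+# would pair with these defines:
+#
+#   #ifdef MAJOR_IN_MKDEV
+#   #include <sys/mkdev.h>
+#   #elif defined(MAJOR_IN_SYSMACROS)
+#   #include <sys/sysmacros.h>
+#   #endif  /* otherwise sys/types.h already supplies makedev() */
+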
+for ac_header in sys/types.h netinet/in.h arpa/nameser.h netdb.h resolv.h
+do :
+ as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh`
+ac_fn_c_check_header_compile "$LINENO" "$ac_header" "$as_ac_Header" "#ifdef HAVE_SYS_TYPES_H
+# include <sys/types.h>
+#endif
+#ifdef HAVE_NETINET_IN_H
+# include <netinet/in.h> /* inet_ functions / structs */
+#endif
+#ifdef HAVE_ARPA_NAMESER_H
+# include <arpa/nameser.h> /* DNS HEADER struct */
+#endif
+#ifdef HAVE_NETDB_H
+# include <netdb.h>
+#endif
+"
+if eval test \"x\$"$as_ac_Header"\" = x"yes"; then :
+ cat >>confdefs.h <<_ACEOF
+#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1
+_ACEOF
+
+fi
+
+done
+
+
+
+
+
+
+ for ac_header in $ac_header_list
+do :
+ as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh`
+ac_fn_c_check_header_compile "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default
+"
+if eval test \"x\$"$as_ac_Header"\" = x"yes"; then :
+ cat >>confdefs.h <<_ACEOF
+#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1
+_ACEOF
+
+fi
+
+done
+
+
+if test "${enable_accept4}" != "no"; then
+
+
+
+ for ac_func in $ac_func_list
+do :
+ as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh`
+ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var"
+if eval test \"x\$"$as_ac_var"\" = x"yes"; then :
+ cat >>confdefs.h <<_ACEOF
+#define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1
+_ACEOF
+
+fi
+done
+
+
+
+
+fi
+
+# -----------------------------------------------------------------------------
+# operating system detection
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking operating system" >&5
+$as_echo_n "checking operating system... " >&6; }
+case "$host_os" in
+freebsd*)
+ build_target=freebsd
+ build_target_id=2
+ CFLAGS="${CFLAGS} -I/usr/local/include -L/usr/local/lib"
+ ;;
+darwin*)
+ build_target=macos
+ build_target_id=3
+ LDFLAGS="${LDFLAGS} -framework CoreFoundation -framework IOKit"
+ ;;
+*)
+ build_target=linux
+ build_target_id=1
+ ;;
+esac
+
+ if test "${build_target}" = "freebsd"; then
+ FREEBSD_TRUE=
+ FREEBSD_FALSE='#'
+else
+ FREEBSD_TRUE='#'
+ FREEBSD_FALSE=
+fi
+
+ if test "${build_target}" = "macos"; then
+ MACOS_TRUE=
+ MACOS_FALSE='#'
+else
+ MACOS_TRUE='#'
+ MACOS_FALSE=
+fi
+
+ if test "${build_target}" = "linux"; then
+ LINUX_TRUE=
+ LINUX_FALSE='#'
+else
+ LINUX_TRUE='#'
+ LINUX_FALSE=
+fi
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: ${build_target} with id ${build_target_id}" >&5
+$as_echo "${build_target} with id ${build_target_id}" >&6; }
+
+
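+# FREEBSD/MACOS/LINUX above are automake conditionals. A sketch of how a
+# Makefile.am would typically consume them (target name is illustrative):
+#
+#   if LINUX
+#   noinst_LIBRARIES += libplatform_linux.a
+#   endif
+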
+# -----------------------------------------------------------------------------
+# pthreads
+
+
+
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+ax_pthread_ok=no
+
+# We used to check for pthread.h first, but this fails if pthread.h
+# requires special compiler flags (e.g. on Tru64 or Sequent).
+# It gets checked for in the link test anyway.
+
+# First of all, check if the user has set any of the PTHREAD_LIBS,
+# etcetera environment variables, and if threads linking works using
+# them:
+if test x"$PTHREAD_LIBS$PTHREAD_CFLAGS" != x; then
+ save_CFLAGS="$CFLAGS"
+ CFLAGS="$CFLAGS $PTHREAD_CFLAGS"
+ save_LIBS="$LIBS"
+ LIBS="$PTHREAD_LIBS $LIBS"
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for pthread_join in LIBS=$PTHREAD_LIBS with CFLAGS=$PTHREAD_CFLAGS" >&5
+$as_echo_n "checking for pthread_join in LIBS=$PTHREAD_LIBS with CFLAGS=$PTHREAD_CFLAGS... " >&6; }
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+/* Override any GCC internal prototype to avoid an error.
+ Use char because int might match the return type of a GCC
+ builtin and then its argument prototype would still apply. */
+#ifdef __cplusplus
+extern "C"
+#endif
+char pthread_join ();
+int
+main ()
+{
+return pthread_join ();
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ ax_pthread_ok=yes
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ax_pthread_ok" >&5
+$as_echo "$ax_pthread_ok" >&6; }
+ if test x"$ax_pthread_ok" = xno; then
+ PTHREAD_LIBS=""
+ PTHREAD_CFLAGS=""
+ fi
+ LIBS="$save_LIBS"
+ CFLAGS="$save_CFLAGS"
+fi
+
+# We must check for the threads library under a number of different
+# names; the ordering is very important because some systems
+# (e.g. DEC) have both -lpthread and -lpthreads, where one of the
+# libraries is broken (non-POSIX).
+
+# Create a list of thread flags to try. Items starting with a "-" are
+# C compiler flags, and other items are library names, except for "none"
+# which indicates that we try without any flags at all, and "pthread-config"
+# which is a program returning the flags for the Pth emulation library.
+
+ax_pthread_flags="pthreads none -Kthread -kthread lthread -pthread -pthreads -mthreads pthread --thread-safe -mt pthread-config"
+
+# The ordering *is* (sometimes) important. Some notes on the
+# individual items follow:
+
+# pthreads: AIX (must check this before -lpthread)
+# none: in case threads are in libc; should be tried before -Kthread and
+# other compiler flags to prevent continual compiler warnings
+# -Kthread: Sequent (threads in libc, but -Kthread needed for pthread.h)
+# -kthread: FreeBSD kernel threads (preferred to -pthread since SMP-able)
+# lthread: LinuxThreads port on FreeBSD (also preferred to -pthread)
+# -pthread: Linux/gcc (kernel threads), BSD/gcc (userland threads)
+# -pthreads: Solaris/gcc
+# -mthreads: Mingw32/gcc, Lynx/gcc
+# -mt: Sun Workshop C (may only link SunOS threads [-lthread], but it
+# doesn't hurt to check since this sometimes defines pthreads too;
+# also defines -D_REENTRANT)
+# ... -mt is also the pthreads flag for HP/aCC
+# pthread: Linux, etcetera
+# --thread-safe: KAI C++
+# pthread-config: use pthread-config program (for GNU Pth library)
+
+case ${host_os} in
+ solaris*)
+
+ # On Solaris (at least, for some versions), libc contains stubbed
+ # (non-functional) versions of the pthreads routines, so link-based
+ # tests will erroneously succeed. (We need to link with -pthreads/-mt/
+ # -lpthread.) (The stubs are missing pthread_cleanup_push, or rather
+ # a function called by this macro, so we could check for that, but
+ # who knows whether they'll stub that too in a future libc.) So,
+ # we'll just look for -pthreads and -lpthread first:
+
+ ax_pthread_flags="-pthreads pthread -mt -pthread $ax_pthread_flags"
+ ;;
+
+ darwin*)
+ ax_pthread_flags="-pthread $ax_pthread_flags"
+ ;;
+esac
+
+# Clang doesn't consider unrecognized options an error unless we specify
+# -Werror. We throw in some extra Clang-specific options to ensure that
+# this doesn't happen for GCC, which also accepts -Werror.
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if compiler needs -Werror to reject unknown flags" >&5
+$as_echo_n "checking if compiler needs -Werror to reject unknown flags... " >&6; }
+save_CFLAGS="$CFLAGS"
+ax_pthread_extra_flags="-Werror"
+CFLAGS="$CFLAGS $ax_pthread_extra_flags -Wunknown-warning-option -Wsizeof-array-argument"
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+int foo(void);
+int
+main ()
+{
+foo()
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+else
+ ax_pthread_extra_flags=
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+CFLAGS="$save_CFLAGS"
+
+if test x"$ax_pthread_ok" = xno; then
+for flag in $ax_pthread_flags; do
+
+ case $flag in
+ none)
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether pthreads work without any flags" >&5
+$as_echo_n "checking whether pthreads work without any flags... " >&6; }
+ ;;
+
+ -*)
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether pthreads work with $flag" >&5
+$as_echo_n "checking whether pthreads work with $flag... " >&6; }
+ PTHREAD_CFLAGS="$flag"
+ ;;
+
+ pthread-config)
+ # Extract the first word of "pthread-config", so it can be a program name with args.
+set dummy pthread-config; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_ax_pthread_config+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$ax_pthread_config"; then
+ ac_cv_prog_ax_pthread_config="$ax_pthread_config" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_ax_pthread_config="yes"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+ test -z "$ac_cv_prog_ax_pthread_config" && ac_cv_prog_ax_pthread_config="no"
+fi
+fi
+ax_pthread_config=$ac_cv_prog_ax_pthread_config
+if test -n "$ax_pthread_config"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ax_pthread_config" >&5
+$as_echo "$ax_pthread_config" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+ if test x"$ax_pthread_config" = xno; then continue; fi
+ PTHREAD_CFLAGS="`pthread-config --cflags`"
+ PTHREAD_LIBS="`pthread-config --ldflags` `pthread-config --libs`"
+ ;;
+
+ *)
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for the pthreads library -l$flag" >&5
+$as_echo_n "checking for the pthreads library -l$flag... " >&6; }
+ PTHREAD_LIBS="-l$flag"
+ ;;
+ esac
+
+ save_LIBS="$LIBS"
+ save_CFLAGS="$CFLAGS"
+ LIBS="$PTHREAD_LIBS $LIBS"
+ CFLAGS="$CFLAGS $PTHREAD_CFLAGS $ax_pthread_extra_flags"
+
+ # Check for various functions. We must include pthread.h,
+ # since some functions may be macros. (On the Sequent, we
+ # need a special flag -Kthread to make this header compile.)
+ # We check for pthread_join because it is in -lpthread on IRIX
+ # while pthread_create is in libc. We check for pthread_attr_init
+ # due to DEC craziness with -lpthreads. We check for
+ # pthread_cleanup_push because it is one of the few pthread
+ # functions on Solaris that doesn't have a non-functional libc stub.
+ # We try pthread_create on general principles.
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <pthread.h>
+ static void routine(void *a) { a = 0; }
+ static void *start_routine(void *a) { return a; }
+int
+main ()
+{
+pthread_t th; pthread_attr_t attr;
+ pthread_create(&th, 0, start_routine, 0);
+ pthread_join(th, 0);
+ pthread_attr_init(&attr);
+ pthread_cleanup_push(routine, 0);
+ pthread_cleanup_pop(0) /* ; */
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ ax_pthread_ok=yes
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+
+ LIBS="$save_LIBS"
+ CFLAGS="$save_CFLAGS"
+
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ax_pthread_ok" >&5
+$as_echo "$ax_pthread_ok" >&6; }
+ if test "x$ax_pthread_ok" = xyes; then
+ break;
+ fi
+
+ PTHREAD_LIBS=""
+ PTHREAD_CFLAGS=""
+done
+fi
+
+# Various other checks:
+if test "x$ax_pthread_ok" = xyes; then
+ save_LIBS="$LIBS"
+ LIBS="$PTHREAD_LIBS $LIBS"
+ save_CFLAGS="$CFLAGS"
+ CFLAGS="$CFLAGS $PTHREAD_CFLAGS"
+
+ # Detect AIX lossage: JOINABLE attribute is called UNDETACHED.
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for joinable pthread attribute" >&5
+$as_echo_n "checking for joinable pthread attribute... " >&6; }
+ attr_name=unknown
+ for attr in PTHREAD_CREATE_JOINABLE PTHREAD_CREATE_UNDETACHED; do
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <pthread.h>
+int
+main ()
+{
+int attr = $attr; return attr /* ; */
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ attr_name=$attr; break
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+ done
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $attr_name" >&5
+$as_echo "$attr_name" >&6; }
+ if test "$attr_name" != PTHREAD_CREATE_JOINABLE; then
+
+cat >>confdefs.h <<_ACEOF
+#define PTHREAD_CREATE_JOINABLE $attr_name
+_ACEOF
+
+ fi
+
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking if more special flags are required for pthreads" >&5
+$as_echo_n "checking if more special flags are required for pthreads... " >&6; }
+ flag=no
+ case ${host_os} in
+ aix* | freebsd* | darwin*) flag="-D_THREAD_SAFE";;
+ osf* | hpux*) flag="-D_REENTRANT";;
+ solaris*)
+ if test "$GCC" = "yes"; then
+ flag="-D_REENTRANT"
+ else
+ # TODO: What about Clang on Solaris?
+ flag="-mt -D_REENTRANT"
+ fi
+ ;;
+ esac
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $flag" >&5
+$as_echo "$flag" >&6; }
+ if test "x$flag" != xno; then
+ PTHREAD_CFLAGS="$flag $PTHREAD_CFLAGS"
+ fi
+
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for PTHREAD_PRIO_INHERIT" >&5
+$as_echo_n "checking for PTHREAD_PRIO_INHERIT... " >&6; }
+if ${ax_cv_PTHREAD_PRIO_INHERIT+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <pthread.h>
+int
+main ()
+{
+int i = PTHREAD_PRIO_INHERIT;
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ ax_cv_PTHREAD_PRIO_INHERIT=yes
+else
+ ax_cv_PTHREAD_PRIO_INHERIT=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ax_cv_PTHREAD_PRIO_INHERIT" >&5
+$as_echo "$ax_cv_PTHREAD_PRIO_INHERIT" >&6; }
+ if test "x$ax_cv_PTHREAD_PRIO_INHERIT" = "xyes"; then :
+
+$as_echo "#define HAVE_PTHREAD_PRIO_INHERIT 1" >>confdefs.h
+
+fi
+
+ LIBS="$save_LIBS"
+ CFLAGS="$save_CFLAGS"
+
+ # More AIX lossage: compile with *_r variant
+ if test "x$GCC" != xyes; then
+ case $host_os in
+ aix*)
+ case "x/$CC" in #(
+ x*/c89|x*/c89_128|x*/c99|x*/c99_128|x*/cc|x*/cc128|x*/xlc|x*/xlc_v6|x*/xlc128|x*/xlc128_v6) :
+ #handle absolute path differently from PATH based program lookup
+ case "x$CC" in #(
+ x/*) :
+ if as_fn_executable_p ${CC}_r; then :
+ PTHREAD_CC="${CC}_r"
+fi ;; #(
+ *) :
+ for ac_prog in ${CC}_r
+do
+ # Extract the first word of "$ac_prog", so it can be a program name with args.
+set dummy $ac_prog; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_PTHREAD_CC+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$PTHREAD_CC"; then
+ ac_cv_prog_PTHREAD_CC="$PTHREAD_CC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_PTHREAD_CC="$ac_prog"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+fi
+fi
+PTHREAD_CC=$ac_cv_prog_PTHREAD_CC
+if test -n "$PTHREAD_CC"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $PTHREAD_CC" >&5
+$as_echo "$PTHREAD_CC" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+ test -n "$PTHREAD_CC" && break
+done
+test -n "$PTHREAD_CC" || PTHREAD_CC="$CC"
+ ;;
+esac ;; #(
+ *) :
+ ;;
+esac
+ ;;
+ esac
+ fi
+fi
+
+test -n "$PTHREAD_CC" || PTHREAD_CC="$CC"
+
+
+
+
+
+# Finally, execute ACTION-IF-FOUND/ACTION-IF-NOT-FOUND:
+if test x"$ax_pthread_ok" = xyes; then
+
+$as_echo "#define HAVE_PTHREAD 1" >>confdefs.h
+
+ :
+else
+ ax_pthread_ok=no
+ as_fn_error $? "Cannot initialize pthread environment" "$LINENO" 5
+fi
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+
+LIBS="${PTHREAD_LIBS} ${LIBS}"
+CFLAGS="${CFLAGS} ${PTHREAD_CFLAGS}"
+CC="${PTHREAD_CC}"
+
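+# As the probe at the top of this section shows, user-supplied PTHREAD_LIBS
+# and PTHREAD_CFLAGS take precedence over flag guessing. Example invocation
+# (flags illustrative):
+#
+#   PTHREAD_CFLAGS="-pthread" PTHREAD_LIBS="-lpthread" ./configure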
+
+# -----------------------------------------------------------------------------
+# libm
+
+
+
+if test -z "${MATH_LIBS}"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for sin in -lm" >&5
+$as_echo_n "checking for sin in -lm... " >&6; }
+if ${ac_cv_lib_m_sin+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ ac_check_lib_save_LIBS=$LIBS
+LIBS="-lm $LIBS"
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+/* Override any GCC internal prototype to avoid an error.
+ Use char because int might match the return type of a GCC
+ builtin and then its argument prototype would still apply. */
+#ifdef __cplusplus
+extern "C"
+#endif
+char sin ();
+int
+main ()
+{
+return sin ();
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ ac_cv_lib_m_sin=yes
+else
+ ac_cv_lib_m_sin=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+LIBS=$ac_check_lib_save_LIBS
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_m_sin" >&5
+$as_echo "$ac_cv_lib_m_sin" >&6; }
+if test "x$ac_cv_lib_m_sin" = xyes; then :
+ MATH_LIBS="-lm"
+
+fi
+
+fi
+test "${with_math}" = "yes" -a -z "${MATH_LIBS}" && as_fn_error $? "math required but not found" "$LINENO" 5
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if libm should be used" >&5
+$as_echo_n "checking if libm should be used... " >&6; }
+if test "${with_math}" != "no" -a ! -z "${MATH_LIBS}"; then
+ with_math="yes"
+
+$as_echo "#define STORAGE_WITH_MATH 1" >>confdefs.h
+
+ OPTIONAL_MATH_CFLAGS="${MATH_CFLAGS}"
+ OPTIONAL_MATH_LIBS="${MATH_LIBS}"
+else
+ with_math="no"
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: ${with_math}" >&5
+$as_echo "${with_math}" >&6; }
+
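+# STORAGE_WITH_MATH is defined only when libm is found and not explicitly
+# refused. Judging from the ${with_math} test above, the matching autoconf
+# switch would be --without-math (flag name inferred from the variable, so
+# treat it as an assumption).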
+
+# -----------------------------------------------------------------------------
+# libuv multi-platform support library with a focus on asynchronous I/O
+# TODO: check version, uv_fs_scandir_next only available in version >= 1.0
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for uv_fs_scandir_next in -luv" >&5
+$as_echo_n "checking for uv_fs_scandir_next in -luv... " >&6; }
+if ${ac_cv_lib_uv_uv_fs_scandir_next+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ ac_check_lib_save_LIBS=$LIBS
+LIBS="-luv $LIBS"
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+/* Override any GCC internal prototype to avoid an error.
+ Use char because int might match the return type of a GCC
+ builtin and then its argument prototype would still apply. */
+#ifdef __cplusplus
+extern "C"
+#endif
+char uv_fs_scandir_next ();
+int
+main ()
+{
+return uv_fs_scandir_next ();
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ ac_cv_lib_uv_uv_fs_scandir_next=yes
+else
+ ac_cv_lib_uv_uv_fs_scandir_next=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+LIBS=$ac_check_lib_save_LIBS
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_uv_uv_fs_scandir_next" >&5
+$as_echo "$ac_cv_lib_uv_uv_fs_scandir_next" >&6; }
+if test "x$ac_cv_lib_uv_uv_fs_scandir_next" = xyes; then :
+ UV_LIBS="-luv"
+
+fi
+
+
+OPTIONAL_UV_CFLAGS="${UV_CFLAGS}"
+OPTIONAL_UV_LIBS="${UV_LIBS}"
+
+
+# -----------------------------------------------------------------------------
+# lz4 Extremely Fast Compression algorithm
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for LZ4_compress_default in -llz4" >&5
+$as_echo_n "checking for LZ4_compress_default in -llz4... " >&6; }
+if ${ac_cv_lib_lz4_LZ4_compress_default+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ ac_check_lib_save_LIBS=$LIBS
+LIBS="-llz4 $LIBS"
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+/* Override any GCC internal prototype to avoid an error.
+ Use char because int might match the return type of a GCC
+ builtin and then its argument prototype would still apply. */
+#ifdef __cplusplus
+extern "C"
+#endif
+char LZ4_compress_default ();
+int
+main ()
+{
+return LZ4_compress_default ();
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ ac_cv_lib_lz4_LZ4_compress_default=yes
+else
+ ac_cv_lib_lz4_LZ4_compress_default=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+LIBS=$ac_check_lib_save_LIBS
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_lz4_LZ4_compress_default" >&5
+$as_echo "$ac_cv_lib_lz4_LZ4_compress_default" >&6; }
+if test "x$ac_cv_lib_lz4_LZ4_compress_default" = xyes; then :
+ LZ4_LIBS="-llz4"
+
+fi
+
+
+OPTIONAL_LZ4_CFLAGS="${LZ4_CFLAGS}"
+OPTIONAL_LZ4_LIBS="${LZ4_LIBS}"
+
+
+# -----------------------------------------------------------------------------
+# Judy General purpose dynamic array
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for JudyLIns in -lJudy" >&5
+$as_echo_n "checking for JudyLIns in -lJudy... " >&6; }
+if ${ac_cv_lib_Judy_JudyLIns+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ ac_check_lib_save_LIBS=$LIBS
+LIBS="-lJudy $LIBS"
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+/* Override any GCC internal prototype to avoid an error.
+ Use char because int might match the return type of a GCC
+ builtin and then its argument prototype would still apply. */
+#ifdef __cplusplus
+extern "C"
+#endif
+char JudyLIns ();
+int
+main ()
+{
+return JudyLIns ();
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ ac_cv_lib_Judy_JudyLIns=yes
+else
+ ac_cv_lib_Judy_JudyLIns=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+LIBS=$ac_check_lib_save_LIBS
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_Judy_JudyLIns" >&5
+$as_echo "$ac_cv_lib_Judy_JudyLIns" >&6; }
+if test "x$ac_cv_lib_Judy_JudyLIns" = xyes; then :
+ JUDY_LIBS="-lJudy"
+
+fi
+
+
+OPTIONAL_JUDY_CFLAGS="${JUDY_CFLAGS}"
+OPTIONAL_JUDY_LIBS="${JUDY_LIBS}"
+
+
+# -----------------------------------------------------------------------------
+# zlib
+
+
+pkg_failed=no
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for ZLIB" >&5
+$as_echo_n "checking for ZLIB... " >&6; }
+
+if test -n "$ZLIB_CFLAGS"; then
+ pkg_cv_ZLIB_CFLAGS="$ZLIB_CFLAGS"
+ elif test -n "$PKG_CONFIG"; then
+ if test -n "$PKG_CONFIG" && \
+ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"zlib\""; } >&5
+ ($PKG_CONFIG --exists --print-errors "zlib") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }; then
+ pkg_cv_ZLIB_CFLAGS=`$PKG_CONFIG --cflags "zlib" 2>/dev/null`
+ test "x$?" != "x0" && pkg_failed=yes
+else
+ pkg_failed=yes
+fi
+ else
+ pkg_failed=untried
+fi
+if test -n "$ZLIB_LIBS"; then
+ pkg_cv_ZLIB_LIBS="$ZLIB_LIBS"
+ elif test -n "$PKG_CONFIG"; then
+ if test -n "$PKG_CONFIG" && \
+ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"zlib\""; } >&5
+ ($PKG_CONFIG --exists --print-errors "zlib") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }; then
+ pkg_cv_ZLIB_LIBS=`$PKG_CONFIG --libs "zlib" 2>/dev/null`
+ test "x$?" != "x0" && pkg_failed=yes
+else
+ pkg_failed=yes
+fi
+ else
+ pkg_failed=untried
+fi
+
+
+
+if test $pkg_failed = yes; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+
+if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then
+ _pkg_short_errors_supported=yes
+else
+ _pkg_short_errors_supported=no
+fi
+ if test $_pkg_short_errors_supported = yes; then
+ ZLIB_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "zlib" 2>&1`
+ else
+ ZLIB_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "zlib" 2>&1`
+ fi
+ # Put the nasty error message in config.log where it belongs
+ echo "$ZLIB_PKG_ERRORS" >&5
+
+ have_zlib=no
+
+elif test $pkg_failed = untried; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+ have_zlib=no
+
+else
+ ZLIB_CFLAGS=$pkg_cv_ZLIB_CFLAGS
+ ZLIB_LIBS=$pkg_cv_ZLIB_LIBS
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+ have_zlib=yes
+fi
+test "${with_zlib}" = "yes" -a "${have_zlib}" != "yes" && as_fn_error $? "zlib required but not found. Try installing 'zlib1g-dev' or 'zlib-devel'." "$LINENO" 5
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if zlib should be used" >&5
+$as_echo_n "checking if zlib should be used... " >&6; }
+if test "${with_zlib}" != "no" -a "${have_zlib}" = "yes"; then
+ with_zlib="yes"
+
+$as_echo "#define NETDATA_WITH_ZLIB 1" >>confdefs.h
+
+ OPTIONAL_ZLIB_CFLAGS="${ZLIB_CFLAGS}"
+ OPTIONAL_ZLIB_LIBS="${ZLIB_LIBS}"
+else
+ with_zlib="no"
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: ${with_zlib}" >&5
+$as_echo "${with_zlib}" >&6; }
+
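+# The pkg-config probe above honors pre-set ZLIB_CFLAGS/ZLIB_LIBS, so a
+# non-default zlib can be selected without touching PKG_CONFIG_PATH.
+# Example (paths illustrative):
+#
+#   ZLIB_CFLAGS="-I/opt/zlib/include" ZLIB_LIBS="-L/opt/zlib/lib -lz" ./configure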
+
+# -----------------------------------------------------------------------------
+# libuuid
+
+
+pkg_failed=no
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for UUID" >&5
+$as_echo_n "checking for UUID... " >&6; }
+
+if test -n "$UUID_CFLAGS"; then
+ pkg_cv_UUID_CFLAGS="$UUID_CFLAGS"
+ elif test -n "$PKG_CONFIG"; then
+ if test -n "$PKG_CONFIG" && \
+ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"uuid\""; } >&5
+ ($PKG_CONFIG --exists --print-errors "uuid") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }; then
+ pkg_cv_UUID_CFLAGS=`$PKG_CONFIG --cflags "uuid" 2>/dev/null`
+ test "x$?" != "x0" && pkg_failed=yes
+else
+ pkg_failed=yes
+fi
+ else
+ pkg_failed=untried
+fi
+if test -n "$UUID_LIBS"; then
+ pkg_cv_UUID_LIBS="$UUID_LIBS"
+ elif test -n "$PKG_CONFIG"; then
+ if test -n "$PKG_CONFIG" && \
+ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"uuid\""; } >&5
+ ($PKG_CONFIG --exists --print-errors "uuid") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }; then
+ pkg_cv_UUID_LIBS=`$PKG_CONFIG --libs "uuid" 2>/dev/null`
+ test "x$?" != "x0" && pkg_failed=yes
+else
+ pkg_failed=yes
+fi
+ else
+ pkg_failed=untried
+fi
+
+
+
+if test $pkg_failed = yes; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+
+if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then
+ _pkg_short_errors_supported=yes
+else
+ _pkg_short_errors_supported=no
+fi
+ if test $_pkg_short_errors_supported = yes; then
+ UUID_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "uuid" 2>&1`
+ else
+ UUID_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "uuid" 2>&1`
+ fi
+ # Put the nasty error message in config.log where it belongs
+ echo "$UUID_PKG_ERRORS" >&5
+
+ as_fn_error $? "libuuid required but not found. Try installing 'uuid-dev' or 'libuuid-devel'." "$LINENO" 5
+
+elif test $pkg_failed = untried; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+ as_fn_error $? "libuuid required but not found. Try installing 'uuid-dev' or 'libuuid-devel'." "$LINENO" 5
+
+else
+ UUID_CFLAGS=$pkg_cv_UUID_CFLAGS
+ UUID_LIBS=$pkg_cv_UUID_LIBS
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+ have_uuid=yes
+fi
+
+$as_echo "#define NETDATA_WITH_UUID 1" >>confdefs.h
+
+OPTIONAL_UUID_CFLAGS="${UUID_CFLAGS}"
+OPTIONAL_UUID_LIBS="${UUID_LIBS}"
+
+
+# -----------------------------------------------------------------------------
+# OpenSSL Cryptography and SSL/TLS Toolkit
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for SHA256_Init in -lcrypto" >&5
+$as_echo_n "checking for SHA256_Init in -lcrypto... " >&6; }
+if ${ac_cv_lib_crypto_SHA256_Init+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ ac_check_lib_save_LIBS=$LIBS
+LIBS="-lcrypto $LIBS"
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+/* Override any GCC internal prototype to avoid an error.
+ Use char because int might match the return type of a GCC
+ builtin and then its argument prototype would still apply. */
+#ifdef __cplusplus
+extern "C"
+#endif
+char SHA256_Init ();
+int
+main ()
+{
+return SHA256_Init ();
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ ac_cv_lib_crypto_SHA256_Init=yes
+else
+ ac_cv_lib_crypto_SHA256_Init=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+LIBS=$ac_check_lib_save_LIBS
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_crypto_SHA256_Init" >&5
+$as_echo "$ac_cv_lib_crypto_SHA256_Init" >&6; }
+if test "x$ac_cv_lib_crypto_SHA256_Init" = xyes; then :
+ SSL_LIBS="-lcrypto -lssl"
+
+fi
+
+
+OPTIONAL_SSL_CFLAGS="${SSL_CFLAGS}"
+OPTIONAL_SSL_LIBS="${SSL_LIBS}"
+
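+# Note: the link test above only proves that SHA256_Init resolves from
+# -lcrypto, yet SSL_LIBS is set to "-lcrypto -lssl"; libssl is assumed to be
+# installed alongside libcrypto. SSL_LIBS feeds the HTTPS and dbengine gates
+# further below.
+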
+# -----------------------------------------------------------------------------
+# JSON-C library
+
+
+pkg_failed=no
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for JSON" >&5
+$as_echo_n "checking for JSON... " >&6; }
+
+if test -n "$JSON_CFLAGS"; then
+ pkg_cv_JSON_CFLAGS="$JSON_CFLAGS"
+ elif test -n "$PKG_CONFIG"; then
+ if test -n "$PKG_CONFIG" && \
+ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"json-c\""; } >&5
+ ($PKG_CONFIG --exists --print-errors "json-c") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }; then
+ pkg_cv_JSON_CFLAGS=`$PKG_CONFIG --cflags "json-c" 2>/dev/null`
+ test "x$?" != "x0" && pkg_failed=yes
+else
+ pkg_failed=yes
+fi
+ else
+ pkg_failed=untried
+fi
+if test -n "$JSON_LIBS"; then
+ pkg_cv_JSON_LIBS="$JSON_LIBS"
+ elif test -n "$PKG_CONFIG"; then
+ if test -n "$PKG_CONFIG" && \
+ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"json-c\""; } >&5
+ ($PKG_CONFIG --exists --print-errors "json-c") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }; then
+ pkg_cv_JSON_LIBS=`$PKG_CONFIG --libs "json-c" 2>/dev/null`
+ test "x$?" != "x0" && pkg_failed=yes
+else
+ pkg_failed=yes
+fi
+ else
+ pkg_failed=untried
+fi
+
+
+
+if test $pkg_failed = yes; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+
+if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then
+ _pkg_short_errors_supported=yes
+else
+ _pkg_short_errors_supported=no
+fi
+ if test $_pkg_short_errors_supported = yes; then
+ JSON_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "json-c" 2>&1`
+ else
+ JSON_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "json-c" 2>&1`
+ fi
+ # Put the nasty error message in config.log where it belongs
+ echo "$JSON_PKG_ERRORS" >&5
+
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for json_object_get_type in -ljson" >&5
+$as_echo_n "checking for json_object_get_type in -ljson... " >&6; }
+if ${ac_cv_lib_json_json_object_get_type+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ ac_check_lib_save_LIBS=$LIBS
+LIBS="-ljson $LIBS"
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+/* Override any GCC internal prototype to avoid an error.
+ Use char because int might match the return type of a GCC
+ builtin and then its argument prototype would still apply. */
+#ifdef __cplusplus
+extern "C"
+#endif
+char json_object_get_type ();
+int
+main ()
+{
+return json_object_get_type ();
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ ac_cv_lib_json_json_object_get_type=yes
+else
+ ac_cv_lib_json_json_object_get_type=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+LIBS=$ac_check_lib_save_LIBS
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_json_json_object_get_type" >&5
+$as_echo "$ac_cv_lib_json_json_object_get_type" >&6; }
+if test "x$ac_cv_lib_json_json_object_get_type" = xyes; then :
+ JSONC_LIBS="-ljson"
+fi
+
+
+elif test $pkg_failed = untried; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for json_object_get_type in -ljson" >&5
+$as_echo_n "checking for json_object_get_type in -ljson... " >&6; }
+if ${ac_cv_lib_json_json_object_get_type+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ ac_check_lib_save_LIBS=$LIBS
+LIBS="-ljson $LIBS"
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+/* Override any GCC internal prototype to avoid an error.
+ Use char because int might match the return type of a GCC
+ builtin and then its argument prototype would still apply. */
+#ifdef __cplusplus
+extern "C"
+#endif
+char json_object_get_type ();
+int
+main ()
+{
+return json_object_get_type ();
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ ac_cv_lib_json_json_object_get_type=yes
+else
+ ac_cv_lib_json_json_object_get_type=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+LIBS=$ac_check_lib_save_LIBS
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_json_json_object_get_type" >&5
+$as_echo "$ac_cv_lib_json_json_object_get_type" >&6; }
+if test "x$ac_cv_lib_json_json_object_get_type" = xyes; then :
+ JSONC_LIBS="-ljson"
+fi
+
+
+else
+ JSON_CFLAGS=$pkg_cv_JSON_CFLAGS
+ JSON_LIBS=$pkg_cv_JSON_LIBS
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for json_object_get_type in -ljson-c" >&5
+$as_echo_n "checking for json_object_get_type in -ljson-c... " >&6; }
+if ${ac_cv_lib_json_c_json_object_get_type+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ ac_check_lib_save_LIBS=$LIBS
+LIBS="-ljson-c $LIBS"
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+/* Override any GCC internal prototype to avoid an error.
+ Use char because int might match the return type of a GCC
+ builtin and then its argument prototype would still apply. */
+#ifdef __cplusplus
+extern "C"
+#endif
+char json_object_get_type ();
+int
+main ()
+{
+return json_object_get_type ();
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ ac_cv_lib_json_c_json_object_get_type=yes
+else
+ ac_cv_lib_json_c_json_object_get_type=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+LIBS=$ac_check_lib_save_LIBS
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_json_c_json_object_get_type" >&5
+$as_echo "$ac_cv_lib_json_c_json_object_get_type" >&6; }
+if test "x$ac_cv_lib_json_c_json_object_get_type" = xyes; then :
+ JSONC_LIBS="-ljson-c"
+fi
+
+fi
+
+OPTIONAL_JSONC_LIBS="${JSONC_LIBS}"
+
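+# Detection order above: pkg-config "json-c" first; if that fails or could
+# not be tried, fall back to a bare link test against -ljson. Whichever
+# branch succeeds populates JSONC_LIBS with -ljson-c or -ljson.
+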
+# -----------------------------------------------------------------------------
+# DB engine and HTTPS
+test "${enable_dbengine}" = "yes" -a -z "${UV_LIBS}" && \
+ as_fn_error $? "libuv required but not found. Try installing 'libuv1-dev' or 'libuv-devel'." "$LINENO" 5
+
+test "${enable_dbengine}" = "yes" -a -z "${LZ4_LIBS}" && \
+ as_fn_error $? "liblz4 required but not found. Try installing 'liblz4-dev' or 'lz4-devel'." "$LINENO" 5
+
+test "${enable_dbengine}" = "yes" -a -z "${JUDY_LIBS}" && \
+ as_fn_error $? "libJudy required but not found. Try installing 'libjudy-dev' or 'Judy-devel'." "$LINENO" 5
+
+test "${enable_https}" = "yes" -a -z "${SSL_LIBS}" && \
+ as_fn_error $? "OpenSSL required for HTTPS but not found. Try installing 'libssl-dev' or 'openssl-devel'." "$LINENO" 5
+
+test "${enable_dbengine}" = "yes" -a -z "${SSL_LIBS}" && \
+ as_fn_error $? "OpenSSL required for DBENGINE but not found. Try installing 'libssl-dev' or 'openssl-devel'." "$LINENO" 5
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if netdata dbengine should be used" >&5
+$as_echo_n "checking if netdata dbengine should be used... " >&6; }
+if test "${enable_dbengine}" != "no" -a "${UV_LIBS}" -a "${LZ4_LIBS}" -a "${JUDY_LIBS}" -a "${SSL_LIBS}"; then
+ enable_dbengine="yes"
+
+$as_echo "#define ENABLE_DBENGINE 1" >>confdefs.h
+
+else
+ enable_dbengine="no"
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: ${enable_dbengine}" >&5
+$as_echo "${enable_dbengine}" >&6; }
+ if test "${enable_dbengine}" = "yes"; then
+ ENABLE_DBENGINE_TRUE=
+ ENABLE_DBENGINE_FALSE='#'
+else
+ ENABLE_DBENGINE_TRUE='#'
+ ENABLE_DBENGINE_FALSE=
+fi
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if netdata https should be used" >&5
+$as_echo_n "checking if netdata https should be used... " >&6; }
+if test "${enable_https}" != "no" -a "${SSL_LIBS}"; then
+ enable_https="yes"
+
+$as_echo "#define ENABLE_HTTPS 1" >>confdefs.h
+
+else
+ enable_https="no"
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: ${enable_https}" >&5
+$as_echo "${enable_https}" >&6; }
+ if test "${enable_https}" = "yes"; then
+ ENABLE_HTTPS_TRUE=
+ ENABLE_HTTPS_FALSE='#'
+else
+ ENABLE_HTTPS_TRUE='#'
+ ENABLE_HTTPS_FALSE=
+fi
+
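+# Taken together, the dbengine gate requires libuv, liblz4, libJudy and
+# OpenSSL. On Debian-style systems that could look like the following
+# (package names copied from the error messages above, command illustrative):
+#
+#   apt-get install libuv1-dev liblz4-dev libjudy-dev libssl-dev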
+
+# -----------------------------------------------------------------------------
+# JSON-C
+test "${enable_jsonc}" = "yes" -a -z "${JSONC_LIBS}" && \
+ as_fn_error $? "JSON-C required but not found. Try installing 'libjson-c-dev' or 'json-c'." "$LINENO" 5
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if json-c should be used" >&5
+$as_echo_n "checking if json-c should be used... " >&6; }
+if test "${enable_jsonc}" != "no" -a "${JSONC_LIBS}"; then
+ enable_jsonc="yes"
+
+$as_echo "#define ENABLE_JSONC 1" >>confdefs.h
+
+else
+ enable_jsonc="no"
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: ${enable_jsonc}" >&5
+$as_echo "${enable_jsonc}" >&6; }
+ if test "${enable_jsonc}" = "yes"; then
+ ENABLE_JSONC_TRUE=
+ ENABLE_JSONC_FALSE='#'
+else
+ ENABLE_JSONC_TRUE='#'
+ ENABLE_JSONC_FALSE=
+fi
+
+
+# -----------------------------------------------------------------------------
+# compiler options
+
+
+case $host_cpu in #(
+ i?86) :
+ SSE_CANDIDATE="yes"
+ ;; #(
+ *) :
+ ;;
+esac
+
+if test "${SSE_CANDIDATE}" = "yes" -a "${enable_x86_sse}" = "yes"; then
+ opt="-msse2 -mfpmath=sse"
+ as_CACHEVAR=`$as_echo "ax_cv_check_cflags__${opt}" | $as_tr_sh`
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether C compiler accepts ${opt}" >&5
+$as_echo_n "checking whether C compiler accepts ${opt}... " >&6; }
+if eval \${$as_CACHEVAR+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+
+ ax_check_save_flags=$CFLAGS
+ CFLAGS="$CFLAGS ${opt}"
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ eval "$as_CACHEVAR=yes"
+else
+ eval "$as_CACHEVAR=no"
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ CFLAGS=$ax_check_save_flags
+fi
+eval ac_res=\$$as_CACHEVAR
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
+$as_echo "$ac_res" >&6; }
+if test x"`eval 'as_val=${'$as_CACHEVAR'};$as_echo "$as_val"'`" = xyes; then :
+ CFLAGS="${CFLAGS} ${opt}"
+else
+ :
+fi
+
+fi
+
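+# The -msse2 -mfpmath=sse pair is appended only when the compiler accepts it
+# on an i?86 build with SSE enabled; x86_64 never matches SSE_CANDIDATE
+# because SSE2 is already part of its baseline ABI.
+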
+if test "${GCC}" = "yes"; then
+
+cat >>confdefs.h <<_ACEOF
+#define likely(x) __builtin_expect(!!(x), 1)
+_ACEOF
+
+
+cat >>confdefs.h <<_ACEOF
+#define unlikely(x) __builtin_expect(!!(x), 0)
+_ACEOF
+
+else
+
+cat >>confdefs.h <<_ACEOF
+#define likely(x) (x)
+_ACEOF
+
+
+cat >>confdefs.h <<_ACEOF
+#define unlikely(x) (x)
+_ACEOF
+
+fi
+
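+# likely()/unlikely() map to __builtin_expect() under GCC-compatible
+# compilers and degrade to plain (x) elsewhere. Typical C call site
+# (condition illustrative):
+#
+#   if (unlikely(fd == -1))
+#       return -1;   /* branch hinted as cold */
+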
+if test "${GCC}" = "yes"; then
+
+$as_echo "#define __always_unused __attribute__((unused))" >>confdefs.h
+
+
+$as_echo "#define __maybe_unused __attribute__((unused))" >>confdefs.h
+
+else
+
+$as_echo "#define __always_unused /**/" >>confdefs.h
+
+
+$as_echo "#define __maybe_unused /**/" >>confdefs.h
+
+fi
+
+if test "${enable_pedantic}" = "yes"; then
+ enable_strict="yes"
+ CFLAGS="${CFLAGS} -pedantic -Wall -Wextra -Wno-long-long"
+fi
+
+
+# -----------------------------------------------------------------------------
+# memory allocation library
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for memory allocator" >&5
+$as_echo_n "checking for memory allocator... " >&6; }
+
+
+# Check whether --with-jemalloc-prefix was given.
+if test "${with_jemalloc_prefix+set}" = set; then :
+ withval=$with_jemalloc_prefix;
+ jemalloc_prefix="$withval"
+
+else
+
+ if test "`uname -s`" = "Darwin"; then
+ jemalloc_prefix="je_"
+ else
+ jemalloc_prefix=""
+ fi
+
+
+fi
+
+
+cat >>confdefs.h <<_ACEOF
+#define prefix_jemalloc ${jemalloc_prefix}
+_ACEOF
+
+
+enable_jemalloc=no
+
+# Check whether --with-jemalloc was given.
+if test "${with_jemalloc+set}" = set; then :
+ withval=$with_jemalloc;
+ if test "$withval" != "no"; then
+ if test "x${enable_tcmalloc}" = "xyes"; then
+ as_fn_error $? "Cannot compile with both jemalloc and tcmalloc" "$LINENO" 5
+ fi
+ enable_jemalloc=yes
+ jemalloc_base_dir="$withval"
+ case "$withval" in
+ yes)
+ jemalloc_base_dir="/usr"
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for jemalloc includes in standard directories" >&5
+$as_echo_n "checking for jemalloc includes in standard directories... " >&6; }
+ ;;
+ *":"*)
+ jemalloc_include="`echo $withval |sed -e 's/:.*$//'`"
+ jemalloc_ldflags="`echo $withval |sed -e 's/^.*://'`"
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for jemalloc includes in $jemalloc_include, libs in $jemalloc_ldflags" >&5
+$as_echo_n "checking for jemalloc includes in $jemalloc_include, libs in $jemalloc_ldflags... " >&6; }
+ ;;
+ *)
+ jemalloc_include="$withval/include"
+ jemalloc_ldflags="$withval/lib"
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for jemalloc includes in $withval" >&5
+$as_echo_n "checking for jemalloc includes in $withval... " >&6; }
+ ;;
+ esac
+ fi
+
+fi
+
+
+has_jemalloc=0
+if test "$enable_jemalloc" != "no"; then
+ jemalloc_have_headers=0
+ jemalloc_have_libs=0
+ if test "$jemalloc_base_dir" != "/usr"; then
+ CFLAGS="${CFLAGS} -I${jemalloc_include}"
+ LDFLAGS="${LDFLAGS} -L${jemalloc_ldflags}"
+ LIBTOOL_LINK_FLAGS="${LIBTOOL_LINK_FLAGS} -R${jemalloc_ldflags}"
+ fi
+ func="${jemalloc_prefix}malloc_stats_print"
+ as_ac_Lib=`$as_echo "ac_cv_lib_jemalloc_${func}" | $as_tr_sh`
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for ${func} in -ljemalloc" >&5
+$as_echo_n "checking for ${func} in -ljemalloc... " >&6; }
+if eval \${$as_ac_Lib+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ ac_check_lib_save_LIBS=$LIBS
+LIBS="-ljemalloc $LIBS"
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+/* Override any GCC internal prototype to avoid an error.
+ Use char because int might match the return type of a GCC
+ builtin and then its argument prototype would still apply. */
+#ifdef __cplusplus
+extern "C"
+#endif
+char ${func} ();
+int
+main ()
+{
+return ${func} ();
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ eval "$as_ac_Lib=yes"
+else
+ eval "$as_ac_Lib=no"
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+LIBS=$ac_check_lib_save_LIBS
+fi
+eval ac_res=\$$as_ac_Lib
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
+$as_echo "$ac_res" >&6; }
+if eval test \"x\$"$as_ac_Lib"\" = x"yes"; then :
+ jemalloc_have_libs=1
+fi
+
+ if test "$jemalloc_have_libs" != "0"; then
+ for ac_header in jemalloc/jemalloc.h
+do :
+ ac_fn_c_check_header_mongrel "$LINENO" "jemalloc/jemalloc.h" "ac_cv_header_jemalloc_jemalloc_h" "$ac_includes_default"
+if test "x$ac_cv_header_jemalloc_jemalloc_h" = xyes; then :
+ cat >>confdefs.h <<_ACEOF
+#define HAVE_JEMALLOC_JEMALLOC_H 1
+_ACEOF
+ jemalloc_have_headers=1
+fi
+
+done
+
+ fi
+ if test "$jemalloc_have_headers" != "0"; then
+ has_jemalloc=1
+ LIBS="${LIBS} -ljemalloc"
+
+$as_echo "#define has_jemalloc 1" >>confdefs.h
+
+ else
+ as_fn_error $? "Couldn't find a jemalloc installation" "$LINENO" 5
+ fi
+fi
+
+
+if test "$has_jemalloc" = "1"; then
+
+$as_echo "#define ENABLE_JEMALLOC 1" >>confdefs.h
+
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: jemalloc" >&5
+$as_echo "jemalloc" >&6; }
+else
+
+
+# Check whether --with-tcmalloc-lib was given.
+if test "${with_tcmalloc_lib+set}" = set; then :
+ withval=$with_tcmalloc_lib;
+ with_tcmalloc_lib="$withval"
+
+else
+
+ with_tcmalloc_lib="tcmalloc"
+
+
+fi
+
+
+has_tcmalloc=0
+
+# Check whether --with-tcmalloc was given.
+if test "${with_tcmalloc+set}" = set; then :
+ withval=$with_tcmalloc;
+ if test "$withval" != "no"; then
+ if test "x${enable_jemalloc}" = "xyes"; then
+ as_fn_error $? "Cannot compile with both tcmalloc and jemalloc" "$LINENO" 5
+ fi
+ tcmalloc_have_lib=0
+ if test "x$withval" != "xyes" && test "x$withval" != "x"; then
+ tcmalloc_ldflags="$withval/lib"
+ LDFLAGS="${LDFLAGS} -L${tcmalloc_ldflags}"
+ LIBTOOL_LINK_FLAGS="${LIBTOOL_LINK_FLAGS} -rpath ${tcmalloc_ldflags}"
+ fi
+ as_ac_Lib=`$as_echo "ac_cv_lib_${with_tcmalloc_lib}''_tc_cfree" | $as_tr_sh`
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for tc_cfree in -l${with_tcmalloc_lib}" >&5
+$as_echo_n "checking for tc_cfree in -l${with_tcmalloc_lib}... " >&6; }
+if eval \${$as_ac_Lib+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ ac_check_lib_save_LIBS=$LIBS
+LIBS="-l${with_tcmalloc_lib} $LIBS"
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+/* Override any GCC internal prototype to avoid an error.
+ Use char because int might match the return type of a GCC
+ builtin and then its argument prototype would still apply. */
+#ifdef __cplusplus
+extern "C"
+#endif
+char tc_cfree ();
+int
+main ()
+{
+return tc_cfree ();
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ eval "$as_ac_Lib=yes"
+else
+ eval "$as_ac_Lib=no"
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+LIBS=$ac_check_lib_save_LIBS
+fi
+eval ac_res=\$$as_ac_Lib
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
+$as_echo "$ac_res" >&6; }
+if eval test \"x\$"$as_ac_Lib"\" = x"yes"; then :
+ tcmalloc_have_lib=1
+fi
+
+ if test "$tcmalloc_have_lib" != "0"; then
+ LIBS="${LIBS} -l${with_tcmalloc_lib}"
+ has_tcmalloc=1
+
+$as_echo "#define has_tcmalloc 1" >>confdefs.h
+
+ else
+ as_fn_error $? "Couldn't find a tcmalloc installation" "$LINENO" 5
+ fi
+ fi
+
+fi
+
+
+
+ if test "$has_tcmalloc" = "1"; then
+
+$as_echo "#define ENABLE_TCMALLOC 1" >>confdefs.h
+
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: tcmalloc" >&5
+$as_echo "tcmalloc" >&6; }
+ else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: system" >&5
+$as_echo "system" >&6; }
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for mallopt" >&5
+$as_echo_n "checking for mallopt... " >&6; }
+if ${ac_cv_c_mallopt+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <malloc.h>
+ int main(int argc, char **argv) {
+ mallopt(M_ARENA_MAX, 1);
+ }
+
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ ac_cv_c_mallopt=yes
+else
+ ac_cv_c_mallopt=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_mallopt" >&5
+$as_echo "$ac_cv_c_mallopt" >&6; }
+if test $ac_cv_c_mallopt = yes; then
+
+$as_echo "#define HAVE_C_MALLOPT 1" >>confdefs.h
+
+fi
+
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for mallinfo" >&5
+$as_echo_n "checking for mallinfo... " >&6; }
+if ${ac_cv_c_mallinfo+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <malloc.h>
+int
+main ()
+{
+
+ struct mallinfo mi = mallinfo();
+ /* make sure that the fields exist */
+ mi.uordblks = 0;
+ mi.hblkhd = 0;
+ mi.arena = 0;
+
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ ac_cv_c_mallinfo=yes
+else
+ ac_cv_c_mallinfo=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_mallinfo" >&5
+$as_echo "$ac_cv_c_mallinfo" >&6; }
+if test $ac_cv_c_mallinfo = yes; then
+
+$as_echo "#define HAVE_C_MALLINFO 1" >>confdefs.h
+
+fi
+
+ fi
+fi
+
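+# When neither jemalloc nor tcmalloc is selected, the glibc allocator is
+# probed instead. A sketch of how the resulting defines are typically
+# consumed in C (calls mirror the probes above):
+#
+#   #ifdef HAVE_C_MALLOPT
+#       mallopt(M_ARENA_MAX, 1);           /* cap glibc malloc arenas */
+#   #endif
+#   #ifdef HAVE_C_MALLINFO
+#       struct mallinfo mi = mallinfo();   /* mi.arena, mi.uordblks, ... */
+#   #endif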
+
+# -----------------------------------------------------------------------------
+# libcap
+
+
+pkg_failed=no
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for LIBCAP" >&5
+$as_echo_n "checking for LIBCAP... " >&6; }
+
+if test -n "$LIBCAP_CFLAGS"; then
+ pkg_cv_LIBCAP_CFLAGS="$LIBCAP_CFLAGS"
+ elif test -n "$PKG_CONFIG"; then
+ if test -n "$PKG_CONFIG" && \
+ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"libcap\""; } >&5
+ ($PKG_CONFIG --exists --print-errors "libcap") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }; then
+ pkg_cv_LIBCAP_CFLAGS=`$PKG_CONFIG --cflags "libcap" 2>/dev/null`
+ test "x$?" != "x0" && pkg_failed=yes
+else
+ pkg_failed=yes
+fi
+ else
+ pkg_failed=untried
+fi
+if test -n "$LIBCAP_LIBS"; then
+ pkg_cv_LIBCAP_LIBS="$LIBCAP_LIBS"
+ elif test -n "$PKG_CONFIG"; then
+ if test -n "$PKG_CONFIG" && \
+ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"libcap\""; } >&5
+ ($PKG_CONFIG --exists --print-errors "libcap") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }; then
+ pkg_cv_LIBCAP_LIBS=`$PKG_CONFIG --libs "libcap" 2>/dev/null`
+ test "x$?" != "x0" && pkg_failed=yes
+else
+ pkg_failed=yes
+fi
+ else
+ pkg_failed=untried
+fi
+
+
+
+if test $pkg_failed = yes; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+
+if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then
+ _pkg_short_errors_supported=yes
+else
+ _pkg_short_errors_supported=no
+fi
+ if test $_pkg_short_errors_supported = yes; then
+ LIBCAP_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "libcap" 2>&1`
+ else
+ LIBCAP_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "libcap" 2>&1`
+ fi
+ # Put the nasty error message in config.log where it belongs
+ echo "$LIBCAP_PKG_ERRORS" >&5
+
+ have_libcap=no
+
+elif test $pkg_failed = untried; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+ have_libcap=no
+
+else
+ LIBCAP_CFLAGS=$pkg_cv_LIBCAP_CFLAGS
+ LIBCAP_LIBS=$pkg_cv_LIBCAP_LIBS
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for cap_get_proc, cap_set_proc in -lcap" >&5
+$as_echo_n "checking for cap_get_proc, cap_set_proc in -lcap... " >&6; }
+if ${ac_cv_lib_cap_cap_get_proc__cap_set_proc+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ ac_check_lib_save_LIBS=$LIBS
+LIBS="-lcap $LIBS"
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+/* Override any GCC internal prototype to avoid an error.
+ Use char because int might match the return type of a GCC
+ builtin and then its argument prototype would still apply. */
+#ifdef __cplusplus
+extern "C"
+#endif
+char cap_get_proc, cap_set_proc ();
+int
+main ()
+{
+return cap_get_proc, cap_set_proc ();
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ ac_cv_lib_cap_cap_get_proc__cap_set_proc=yes
+else
+ ac_cv_lib_cap_cap_get_proc__cap_set_proc=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+LIBS=$ac_check_lib_save_LIBS
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_cap_cap_get_proc__cap_set_proc" >&5
+$as_echo "$ac_cv_lib_cap_cap_get_proc__cap_set_proc" >&6; }
+if test "x$ac_cv_lib_cap_cap_get_proc__cap_set_proc" = xyes; then :
+ ac_fn_c_check_header_mongrel "$LINENO" "sys/capability.h" "ac_cv_header_sys_capability_h" "$ac_includes_default"
+if test "x$ac_cv_header_sys_capability_h" = xyes; then :
+ have_libcap=yes
+else
+ have_libcap=no
+
+fi
+
+
+else
+ have_libcap=no
+
+fi
+
+fi
+test "${with_libcap}" = "yes" -a "${have_libcap}" != "yes" && as_fn_error $? "libcap required but not found." "$LINENO" 5
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if libcap should be used" >&5
+$as_echo_n "checking if libcap should be used... " >&6; }
+if test "${with_libcap}" != "no" -a "${have_libcap}" = "yes"; then
+ with_libcap="yes"
+
+$as_echo "#define HAVE_CAPABILITY 1" >>confdefs.h
+
+ OPTIONAL_LIBCAP_CFLAGS="${LIBCAP_CFLAGS}"
+ OPTIONAL_LIBCAP_LIBS="${LIBCAP_LIBS}"
+else
+ with_libcap="no"
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: ${with_libcap}" >&5
+$as_echo "${with_libcap}" >&6; }
+ if test "${with_libcap}" = "yes"; then
+ ENABLE_CAPABILITY_TRUE=
+ ENABLE_CAPABILITY_FALSE='#'
+else
+ ENABLE_CAPABILITY_TRUE='#'
+ ENABLE_CAPABILITY_FALSE=
+fi
+
+
+
+# -----------------------------------------------------------------------------
+# apps.plugin
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if apps.plugin should be enabled" >&5
+$as_echo_n "checking if apps.plugin should be enabled... " >&6; }
+if test "${build_target}" != "macos"; then
+ enable_plugin_apps="yes"
+else
+ enable_plugin_apps="no"
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: ${enable_plugin_apps}" >&5
+$as_echo "${enable_plugin_apps}" >&6; }
+ if test "${enable_plugin_apps}" = "yes"; then
+ ENABLE_PLUGIN_APPS_TRUE=
+ ENABLE_PLUGIN_APPS_FALSE='#'
+else
+ ENABLE_PLUGIN_APPS_TRUE='#'
+ ENABLE_PLUGIN_APPS_FALSE=
+fi
+
+
+
+# -----------------------------------------------------------------------------
+# freeipmi.plugin - libipmimonitoring
+
+
+pkg_failed=no
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for IPMIMONITORING" >&5
+$as_echo_n "checking for IPMIMONITORING... " >&6; }
+
+if test -n "$IPMIMONITORING_CFLAGS"; then
+ pkg_cv_IPMIMONITORING_CFLAGS="$IPMIMONITORING_CFLAGS"
+ elif test -n "$PKG_CONFIG"; then
+ if test -n "$PKG_CONFIG" && \
+ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"libipmimonitoring\""; } >&5
+ ($PKG_CONFIG --exists --print-errors "libipmimonitoring") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }; then
+ pkg_cv_IPMIMONITORING_CFLAGS=`$PKG_CONFIG --cflags "libipmimonitoring" 2>/dev/null`
+ test "x$?" != "x0" && pkg_failed=yes
+else
+ pkg_failed=yes
+fi
+ else
+ pkg_failed=untried
+fi
+if test -n "$IPMIMONITORING_LIBS"; then
+ pkg_cv_IPMIMONITORING_LIBS="$IPMIMONITORING_LIBS"
+ elif test -n "$PKG_CONFIG"; then
+ if test -n "$PKG_CONFIG" && \
+ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"libipmimonitoring\""; } >&5
+ ($PKG_CONFIG --exists --print-errors "libipmimonitoring") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }; then
+ pkg_cv_IPMIMONITORING_LIBS=`$PKG_CONFIG --libs "libipmimonitoring" 2>/dev/null`
+ test "x$?" != "x0" && pkg_failed=yes
+else
+ pkg_failed=yes
+fi
+ else
+ pkg_failed=untried
+fi
+
+
+
+if test $pkg_failed = yes; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+
+if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then
+ _pkg_short_errors_supported=yes
+else
+ _pkg_short_errors_supported=no
+fi
+ if test $_pkg_short_errors_supported = yes; then
+ IPMIMONITORING_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "libipmimonitoring" 2>&1`
+ else
+ IPMIMONITORING_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "libipmimonitoring" 2>&1`
+ fi
+ # Put the nasty error message in config.log where it belongs
+ echo "$IPMIMONITORING_PKG_ERRORS" >&5
+
+ have_ipmimonitoring=no
+
+elif test $pkg_failed = untried; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+ have_ipmimonitoring=no
+
+else
+ IPMIMONITORING_CFLAGS=$pkg_cv_IPMIMONITORING_CFLAGS
+ IPMIMONITORING_LIBS=$pkg_cv_IPMIMONITORING_LIBS
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for
+ ipmi_monitoring_sensor_readings_by_record_id,
+ ipmi_monitoring_sensor_readings_by_sensor_type,
+ ipmi_monitoring_sensor_read_sensor_number,
+ ipmi_monitoring_sensor_read_sensor_name,
+ ipmi_monitoring_sensor_read_sensor_state,
+ ipmi_monitoring_sensor_read_sensor_units,
+ ipmi_monitoring_sensor_iterator_next,
+ ipmi_monitoring_ctx_sensor_config_file,
+ ipmi_monitoring_ctx_sdr_cache_directory,
+ ipmi_monitoring_ctx_errormsg,
+ ipmi_monitoring_ctx_create
+ in -lipmimonitoring" >&5
+$as_echo_n "checking for
+ ipmi_monitoring_sensor_readings_by_record_id,
+ ipmi_monitoring_sensor_readings_by_sensor_type,
+ ipmi_monitoring_sensor_read_sensor_number,
+ ipmi_monitoring_sensor_read_sensor_name,
+ ipmi_monitoring_sensor_read_sensor_state,
+ ipmi_monitoring_sensor_read_sensor_units,
+ ipmi_monitoring_sensor_iterator_next,
+ ipmi_monitoring_ctx_sensor_config_file,
+ ipmi_monitoring_ctx_sdr_cache_directory,
+ ipmi_monitoring_ctx_errormsg,
+ ipmi_monitoring_ctx_create
+ in -lipmimonitoring... " >&6; }
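+# Note: the whole symbol list above was passed to AC_CHECK_LIB as a single
+# "function" argument, so the conftest program below declares every name
+# but, via the comma operator, in effect only requires the last symbol
+# (ipmi_monitoring_ctx_create) to resolve from -lipmimonitoring.
+#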
+if ${ac_cv_lib_ipmimonitoring__________ipmi_monitoring_sensor_readings_by_record_id__________ipmi_monitoring_sensor_readings_by_sensor_type__________ipmi_monitoring_sensor_read_sensor_number__________ipmi_monitoring_sensor_read_sensor_name__________ipmi_monitoring_sensor_read_sensor_state__________ipmi_monitoring_sensor_read_sensor_units__________ipmi_monitoring_sensor_iterator_next__________ipmi_monitoring_ctx_sensor_config_file__________ipmi_monitoring_ctx_sdr_cache_directory__________ipmi_monitoring_ctx_errormsg__________ipmi_monitoring_ctx_create_____+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ ac_check_lib_save_LIBS=$LIBS
+LIBS="-lipmimonitoring $LIBS"
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+/* Override any GCC internal prototype to avoid an error.
+ Use char because int might match the return type of a GCC
+ builtin and then its argument prototype would still apply. */
+#ifdef __cplusplus
+extern "C"
+#endif
+char
+ ipmi_monitoring_sensor_readings_by_record_id,
+ ipmi_monitoring_sensor_readings_by_sensor_type,
+ ipmi_monitoring_sensor_read_sensor_number,
+ ipmi_monitoring_sensor_read_sensor_name,
+ ipmi_monitoring_sensor_read_sensor_state,
+ ipmi_monitoring_sensor_read_sensor_units,
+ ipmi_monitoring_sensor_iterator_next,
+ ipmi_monitoring_ctx_sensor_config_file,
+ ipmi_monitoring_ctx_sdr_cache_directory,
+ ipmi_monitoring_ctx_errormsg,
+ ipmi_monitoring_ctx_create
+ ();
+int
+main ()
+{
+return
+ ipmi_monitoring_sensor_readings_by_record_id,
+ ipmi_monitoring_sensor_readings_by_sensor_type,
+ ipmi_monitoring_sensor_read_sensor_number,
+ ipmi_monitoring_sensor_read_sensor_name,
+ ipmi_monitoring_sensor_read_sensor_state,
+ ipmi_monitoring_sensor_read_sensor_units,
+ ipmi_monitoring_sensor_iterator_next,
+ ipmi_monitoring_ctx_sensor_config_file,
+ ipmi_monitoring_ctx_sdr_cache_directory,
+ ipmi_monitoring_ctx_errormsg,
+ ipmi_monitoring_ctx_create
+ ();
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ ac_cv_lib_ipmimonitoring__________ipmi_monitoring_sensor_readings_by_record_id__________ipmi_monitoring_sensor_readings_by_sensor_type__________ipmi_monitoring_sensor_read_sensor_number__________ipmi_monitoring_sensor_read_sensor_name__________ipmi_monitoring_sensor_read_sensor_state__________ipmi_monitoring_sensor_read_sensor_units__________ipmi_monitoring_sensor_iterator_next__________ipmi_monitoring_ctx_sensor_config_file__________ipmi_monitoring_ctx_sdr_cache_directory__________ipmi_monitoring_ctx_errormsg__________ipmi_monitoring_ctx_create_____=yes
+else
+ ac_cv_lib_ipmimonitoring__________ipmi_monitoring_sensor_readings_by_record_id__________ipmi_monitoring_sensor_readings_by_sensor_type__________ipmi_monitoring_sensor_read_sensor_number__________ipmi_monitoring_sensor_read_sensor_name__________ipmi_monitoring_sensor_read_sensor_state__________ipmi_monitoring_sensor_read_sensor_units__________ipmi_monitoring_sensor_iterator_next__________ipmi_monitoring_ctx_sensor_config_file__________ipmi_monitoring_ctx_sdr_cache_directory__________ipmi_monitoring_ctx_errormsg__________ipmi_monitoring_ctx_create_____=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+LIBS=$ac_check_lib_save_LIBS
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_ipmimonitoring__________ipmi_monitoring_sensor_readings_by_record_id__________ipmi_monitoring_sensor_readings_by_sensor_type__________ipmi_monitoring_sensor_read_sensor_number__________ipmi_monitoring_sensor_read_sensor_name__________ipmi_monitoring_sensor_read_sensor_state__________ipmi_monitoring_sensor_read_sensor_units__________ipmi_monitoring_sensor_iterator_next__________ipmi_monitoring_ctx_sensor_config_file__________ipmi_monitoring_ctx_sdr_cache_directory__________ipmi_monitoring_ctx_errormsg__________ipmi_monitoring_ctx_create_____" >&5
+$as_echo "$ac_cv_lib_ipmimonitoring__________ipmi_monitoring_sensor_readings_by_record_id__________ipmi_monitoring_sensor_readings_by_sensor_type__________ipmi_monitoring_sensor_read_sensor_number__________ipmi_monitoring_sensor_read_sensor_name__________ipmi_monitoring_sensor_read_sensor_state__________ipmi_monitoring_sensor_read_sensor_units__________ipmi_monitoring_sensor_iterator_next__________ipmi_monitoring_ctx_sensor_config_file__________ipmi_monitoring_ctx_sdr_cache_directory__________ipmi_monitoring_ctx_errormsg__________ipmi_monitoring_ctx_create_____" >&6; }
+if test "x$ac_cv_lib_ipmimonitoring__________ipmi_monitoring_sensor_readings_by_record_id__________ipmi_monitoring_sensor_readings_by_sensor_type__________ipmi_monitoring_sensor_read_sensor_number__________ipmi_monitoring_sensor_read_sensor_name__________ipmi_monitoring_sensor_read_sensor_state__________ipmi_monitoring_sensor_read_sensor_units__________ipmi_monitoring_sensor_iterator_next__________ipmi_monitoring_ctx_sensor_config_file__________ipmi_monitoring_ctx_sdr_cache_directory__________ipmi_monitoring_ctx_errormsg__________ipmi_monitoring_ctx_create_____" = xyes; then :
+ ac_fn_c_check_header_mongrel "$LINENO" "ipmi_monitoring.h" "ac_cv_header_ipmi_monitoring_h" "$ac_includes_default"
+if test "x$ac_cv_header_ipmi_monitoring_h" = xyes; then :
+ ac_fn_c_check_header_mongrel "$LINENO" "ipmi_monitoring_bitmasks.h" "ac_cv_header_ipmi_monitoring_bitmasks_h" "$ac_includes_default"
+if test "x$ac_cv_header_ipmi_monitoring_bitmasks_h" = xyes; then :
+ have_ipmimonitoring=yes
+else
+ have_ipmimonitoring=no
+
+fi
+
+
+else
+ have_ipmimonitoring=no
+
+fi
+
+
+else
+ have_ipmimonitoring=no
+
+fi
+
+fi
+test "${enable_plugin_freeipmi}" = "yes" -a "${have_ipmimonitoring}" != "yes" && \
+ as_fn_error $? "ipmimonitoring required but not found. Try installing 'libipmimonitoring-dev' or 'libipmimonitoring-devel'" "$LINENO" 5
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if freeipmi.plugin should be enabled" >&5
+$as_echo_n "checking if freeipmi.plugin should be enabled... " >&6; }
+if test "${enable_plugin_freeipmi}" != "no" -a "${have_ipmimonitoring}" = "yes"; then
+ enable_plugin_freeipmi="yes"
+
+$as_echo "#define HAVE_FREEIPMI 1" >>confdefs.h
+
+ OPTIONAL_IPMIMONITORING_CFLAGS="${IPMIMONITORING_CFLAGS}"
+ OPTIONAL_IPMIMONITORING_LIBS="${IPMIMONITORING_LIBS}"
+else
+ enable_plugin_freeipmi="no"
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: ${enable_plugin_freeipmi}" >&5
+$as_echo "${enable_plugin_freeipmi}" >&6; }
+ if test "${enable_plugin_freeipmi}" = "yes"; then
+ ENABLE_PLUGIN_FREEIPMI_TRUE=
+ ENABLE_PLUGIN_FREEIPMI_FALSE='#'
+else
+ ENABLE_PLUGIN_FREEIPMI_TRUE='#'
+ ENABLE_PLUGIN_FREEIPMI_FALSE=
+fi
+
+
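+# A manual reproduction of the freeipmi probe above (illustrative; module
+# name as used by the generated test):
+#
+#   pkg-config --exists --print-errors "libipmimonitoring" && \
+#       pkg-config --cflags --libs "libipmimonitoring"
+#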
+
+# -----------------------------------------------------------------------------
+# cups.plugin - libcups
+
+# Only check for httpConnect2, the most recently added CUPS function used
+# here; if it links, the older CUPS calls are assumed to be available too.
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for httpConnect2 in -lcups" >&5
+$as_echo_n "checking for httpConnect2 in -lcups... " >&6; }
+if ${ac_cv_lib_cups_httpConnect2+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ ac_check_lib_save_LIBS=$LIBS
+LIBS="-lcups $LIBS"
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+/* Override any GCC internal prototype to avoid an error.
+ Use char because int might match the return type of a GCC
+ builtin and then its argument prototype would still apply. */
+#ifdef __cplusplus
+extern "C"
+#endif
+char httpConnect2 ();
+int
+main ()
+{
+return httpConnect2 ();
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ ac_cv_lib_cups_httpConnect2=yes
+else
+ ac_cv_lib_cups_httpConnect2=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+LIBS=$ac_check_lib_save_LIBS
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_cups_httpConnect2" >&5
+$as_echo "$ac_cv_lib_cups_httpConnect2" >&6; }
+if test "x$ac_cv_lib_cups_httpConnect2" = xyes; then :
+ ac_fn_c_check_header_mongrel "$LINENO" "cups/cups.h" "ac_cv_header_cups_cups_h" "$ac_includes_default"
+if test "x$ac_cv_header_cups_cups_h" = xyes; then :
+ have_cups=yes
+else
+ have_cups=no
+
+fi
+
+
+else
+ have_cups=no
+
+fi
+
+
+test "${enable_plugin_cups}" = "yes" -a "${have_cups}" != "yes" && \
+ as_fn_error $? "cups required but not found. Try installing 'cups'" "$LINENO" 5
+
+
+# Check whether --with-cups-config was given.
+if test "${with_cups_config+set}" = set; then :
+ withval=$with_cups_config; with_cups_config="$withval"
+else
+ with_cups_config=system
+
+fi
+
+
+if test "x$with_cups_config" != "xsystem"; then :
+
+ CUPSCONFIG=$with_cups_config
+
+else
+
+ if test -n "$ac_tool_prefix"; then
+ # Extract the first word of "${ac_tool_prefix}cups-config", so it can be a program name with args.
+set dummy ${ac_tool_prefix}cups-config; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_path_CUPSCONFIG+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ case $CUPSCONFIG in
+ [\\/]* | ?:[\\/]*)
+ ac_cv_path_CUPSCONFIG="$CUPSCONFIG" # Let the user override the test with a path.
+ ;;
+ *)
+ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_path_CUPSCONFIG="$as_dir/$ac_word$ac_exec_ext"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+ ;;
+esac
+fi
+CUPSCONFIG=$ac_cv_path_CUPSCONFIG
+if test -n "$CUPSCONFIG"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CUPSCONFIG" >&5
+$as_echo "$CUPSCONFIG" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+fi
+if test -z "$ac_cv_path_CUPSCONFIG"; then
+ ac_pt_CUPSCONFIG=$CUPSCONFIG
+ # Extract the first word of "cups-config", so it can be a program name with args.
+set dummy cups-config; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_path_ac_pt_CUPSCONFIG+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ case $ac_pt_CUPSCONFIG in
+ [\\/]* | ?:[\\/]*)
+ ac_cv_path_ac_pt_CUPSCONFIG="$ac_pt_CUPSCONFIG" # Let the user override the test with a path.
+ ;;
+ *)
+ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_path_ac_pt_CUPSCONFIG="$as_dir/$ac_word$ac_exec_ext"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+ ;;
+esac
+fi
+ac_pt_CUPSCONFIG=$ac_cv_path_ac_pt_CUPSCONFIG
+if test -n "$ac_pt_CUPSCONFIG"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_pt_CUPSCONFIG" >&5
+$as_echo "$ac_pt_CUPSCONFIG" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+ if test "x$ac_pt_CUPSCONFIG" = x; then
+ CUPSCONFIG=""
+ else
+ case $cross_compiling:$ac_tool_warned in
+yes:)
+{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+ CUPSCONFIG=$ac_pt_CUPSCONFIG
+ fi
+else
+ CUPSCONFIG="$ac_cv_path_CUPSCONFIG"
+fi
+
+ if test -z "$CUPSCONFIG"; then :
+
+ have_cups=no
+
+fi
+
+fi
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if cups.plugin should be enabled" >&5
+$as_echo_n "checking if cups.plugin should be enabled... " >&6; }
+if test "${enable_plugin_cups}" != "no" -a "${have_cups}" = "yes"; then
+ enable_plugin_cups="yes"
+
+$as_echo "#define HAVE_CUPS 1" >>confdefs.h
+
+
+ CUPS_CFLAGS="${CUPS_CFLAGS} `$CUPSCONFIG --cflags`"
+ CUPS_LIBS="${CUPS_LIBS} `$CUPSCONFIG --libs`"
+
+ OPTIONAL_CUPS_CFLAGS="${CUPS_CFLAGS}"
+ OPTIONAL_CUPS_LIBS="${CUPS_LIBS}"
+else
+ enable_plugin_cups="no"
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: ${enable_plugin_cups}" >&5
+$as_echo "${enable_plugin_cups}" >&6; }
+ if test "${enable_plugin_cups}" = "yes"; then
+ ENABLE_PLUGIN_CUPS_TRUE=
+ ENABLE_PLUGIN_CUPS_FALSE='#'
+else
+ ENABLE_PLUGIN_CUPS_TRUE='#'
+ ENABLE_PLUGIN_CUPS_FALSE=
+fi
+
+
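+# The flags collected above can be inspected by hand with the same helper
+# (illustrative):
+#
+#   cups-config --cflags
+#   cups-config --libs
+#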
+
+# -----------------------------------------------------------------------------
+# nfacct.plugin - libmnl, libnetfilter_acct
+
+ac_fn_c_check_header_mongrel "$LINENO" "linux/netfilter/nfnetlink_conntrack.h" "ac_cv_header_linux_netfilter_nfnetlink_conntrack_h" "$ac_includes_default"
+if test "x$ac_cv_header_linux_netfilter_nfnetlink_conntrack_h" = xyes; then :
+ ac_fn_c_check_decl "$LINENO" "CTA_STATS_MAX" "ac_cv_have_decl_CTA_STATS_MAX" "#include <linux/netfilter/nfnetlink_conntrack.h>
+
+"
+if test "x$ac_cv_have_decl_CTA_STATS_MAX" = xyes; then :
+ have_nfnetlink_conntrack=yes
+else
+ have_nfnetlink_conntrack=no
+fi
+
+else
+ have_nfnetlink_conntrack=no
+
+fi
+
+
+
+
+pkg_failed=no
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for NFACCT" >&5
+$as_echo_n "checking for NFACCT... " >&6; }
+
+if test -n "$NFACCT_CFLAGS"; then
+ pkg_cv_NFACCT_CFLAGS="$NFACCT_CFLAGS"
+ elif test -n "$PKG_CONFIG"; then
+ if test -n "$PKG_CONFIG" && \
+ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"libnetfilter_acct\""; } >&5
+ ($PKG_CONFIG --exists --print-errors "libnetfilter_acct") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }; then
+ pkg_cv_NFACCT_CFLAGS=`$PKG_CONFIG --cflags "libnetfilter_acct" 2>/dev/null`
+ test "x$?" != "x0" && pkg_failed=yes
+else
+ pkg_failed=yes
+fi
+ else
+ pkg_failed=untried
+fi
+if test -n "$NFACCT_LIBS"; then
+ pkg_cv_NFACCT_LIBS="$NFACCT_LIBS"
+ elif test -n "$PKG_CONFIG"; then
+ if test -n "$PKG_CONFIG" && \
+ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"libnetfilter_acct\""; } >&5
+ ($PKG_CONFIG --exists --print-errors "libnetfilter_acct") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }; then
+ pkg_cv_NFACCT_LIBS=`$PKG_CONFIG --libs "libnetfilter_acct" 2>/dev/null`
+ test "x$?" != "x0" && pkg_failed=yes
+else
+ pkg_failed=yes
+fi
+ else
+ pkg_failed=untried
+fi
+
+
+
+if test $pkg_failed = yes; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+
+if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then
+ _pkg_short_errors_supported=yes
+else
+ _pkg_short_errors_supported=no
+fi
+ if test $_pkg_short_errors_supported = yes; then
+ NFACCT_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "libnetfilter_acct" 2>&1`
+ else
+ NFACCT_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "libnetfilter_acct" 2>&1`
+ fi
+ # Put the nasty error message in config.log where it belongs
+ echo "$NFACCT_PKG_ERRORS" >&5
+
+ have_libnetfilter_acct=no
+
+elif test $pkg_failed = untried; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+ have_libnetfilter_acct=no
+
+else
+ NFACCT_CFLAGS=$pkg_cv_NFACCT_CFLAGS
+ NFACCT_LIBS=$pkg_cv_NFACCT_LIBS
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for nfacct_alloc in -lnetfilter_acct" >&5
+$as_echo_n "checking for nfacct_alloc in -lnetfilter_acct... " >&6; }
+if ${ac_cv_lib_netfilter_acct_nfacct_alloc+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ ac_check_lib_save_LIBS=$LIBS
+LIBS="-lnetfilter_acct $LIBS"
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+/* Override any GCC internal prototype to avoid an error.
+ Use char because int might match the return type of a GCC
+ builtin and then its argument prototype would still apply. */
+#ifdef __cplusplus
+extern "C"
+#endif
+char nfacct_alloc ();
+int
+main ()
+{
+return nfacct_alloc ();
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ ac_cv_lib_netfilter_acct_nfacct_alloc=yes
+else
+ ac_cv_lib_netfilter_acct_nfacct_alloc=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+LIBS=$ac_check_lib_save_LIBS
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_netfilter_acct_nfacct_alloc" >&5
+$as_echo "$ac_cv_lib_netfilter_acct_nfacct_alloc" >&6; }
+if test "x$ac_cv_lib_netfilter_acct_nfacct_alloc" = xyes; then :
+ have_libnetfilter_acct=yes
+else
+ have_libnetfilter_acct=no
+
+fi
+
+fi
+
+
+pkg_failed=no
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for LIBMNL" >&5
+$as_echo_n "checking for LIBMNL... " >&6; }
+
+if test -n "$LIBMNL_CFLAGS"; then
+ pkg_cv_LIBMNL_CFLAGS="$LIBMNL_CFLAGS"
+ elif test -n "$PKG_CONFIG"; then
+ if test -n "$PKG_CONFIG" && \
+ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"libmnl\""; } >&5
+ ($PKG_CONFIG --exists --print-errors "libmnl") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }; then
+ pkg_cv_LIBMNL_CFLAGS=`$PKG_CONFIG --cflags "libmnl" 2>/dev/null`
+ test "x$?" != "x0" && pkg_failed=yes
+else
+ pkg_failed=yes
+fi
+ else
+ pkg_failed=untried
+fi
+if test -n "$LIBMNL_LIBS"; then
+ pkg_cv_LIBMNL_LIBS="$LIBMNL_LIBS"
+ elif test -n "$PKG_CONFIG"; then
+ if test -n "$PKG_CONFIG" && \
+ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"libmnl\""; } >&5
+ ($PKG_CONFIG --exists --print-errors "libmnl") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }; then
+ pkg_cv_LIBMNL_LIBS=`$PKG_CONFIG --libs "libmnl" 2>/dev/null`
+ test "x$?" != "x0" && pkg_failed=yes
+else
+ pkg_failed=yes
+fi
+ else
+ pkg_failed=untried
+fi
+
+
+
+if test $pkg_failed = yes; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+
+if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then
+ _pkg_short_errors_supported=yes
+else
+ _pkg_short_errors_supported=no
+fi
+ if test $_pkg_short_errors_supported = yes; then
+ LIBMNL_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "libmnl" 2>&1`
+ else
+ LIBMNL_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "libmnl" 2>&1`
+ fi
+ # Put the nasty error message in config.log where it belongs
+ echo "$LIBMNL_PKG_ERRORS" >&5
+
+ have_libmnl=no
+
+elif test $pkg_failed = untried; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+ have_libmnl=no
+
+else
+ LIBMNL_CFLAGS=$pkg_cv_LIBMNL_CFLAGS
+ LIBMNL_LIBS=$pkg_cv_LIBMNL_LIBS
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for mnl_socket_open in -lmnl" >&5
+$as_echo_n "checking for mnl_socket_open in -lmnl... " >&6; }
+if ${ac_cv_lib_mnl_mnl_socket_open+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ ac_check_lib_save_LIBS=$LIBS
+LIBS="-lmnl $LIBS"
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+/* Override any GCC internal prototype to avoid an error.
+ Use char because int might match the return type of a GCC
+ builtin and then its argument prototype would still apply. */
+#ifdef __cplusplus
+extern "C"
+#endif
+char mnl_socket_open ();
+int
+main ()
+{
+return mnl_socket_open ();
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ ac_cv_lib_mnl_mnl_socket_open=yes
+else
+ ac_cv_lib_mnl_mnl_socket_open=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+LIBS=$ac_check_lib_save_LIBS
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_mnl_mnl_socket_open" >&5
+$as_echo "$ac_cv_lib_mnl_mnl_socket_open" >&6; }
+if test "x$ac_cv_lib_mnl_mnl_socket_open" = xyes; then :
+ have_libmnl=yes
+else
+ have_libmnl=no
+
+fi
+
+fi
+
+test "${enable_plugin_nfacct}" = "yes" -a "${have_nfnetlink_conntrack}" != "yes" && \
+ as_fn_error $? "nfnetlink_conntrack.h required but not found or too old" "$LINENO" 5
+
+test "${enable_plugin_nfacct}" = "yes" -a "${have_libnetfilter_acct}" != "yes" && \
+ as_fn_error $? "netfilter_acct required but not found" "$LINENO" 5
+
+test "${enable_plugin_nfacct}" = "yes" -a "${have_libmnl}" != "yes" && \
+ as_fn_error $? "libmnl required but not found. Try installing 'libmnl-dev' or 'libmnl-devel'" "$LINENO" 5
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if nfacct.plugin should be enabled" >&5
+$as_echo_n "checking if nfacct.plugin should be enabled... " >&6; }
+if test "${enable_plugin_nfacct}" != "no" -a "${have_libnetfilter_acct}" = "yes" \
+ -a "${have_libmnl}" = "yes" \
+ -a "${have_nfnetlink_conntrack}" = "yes"; then
+ enable_plugin_nfacct="yes"
+
+$as_echo "#define HAVE_LIBMNL 1" >>confdefs.h
+
+
+$as_echo "#define HAVE_LIBNETFILTER_ACCT 1" >>confdefs.h
+
+ OPTIONAL_NFACCT_CFLAGS="${NFACCT_CFLAGS} ${LIBMNL_CFLAGS}"
+ OPTIONAL_NFACCT_LIBS="${NFACCT_LIBS} ${LIBMNL_LIBS}"
+else
+ enable_plugin_nfacct="no"
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: ${enable_plugin_nfacct}" >&5
+$as_echo "${enable_plugin_nfacct}" >&6; }
+ if test "${enable_plugin_nfacct}" = "yes"; then
+ ENABLE_PLUGIN_NFACCT_TRUE=
+ ENABLE_PLUGIN_NFACCT_FALSE='#'
+else
+ ENABLE_PLUGIN_NFACCT_TRUE='#'
+ ENABLE_PLUGIN_NFACCT_FALSE=
+fi
+
+
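+# Manual equivalents of the two pkg-config probes above (illustrative):
+#
+#   pkg-config --cflags --libs "libnetfilter_acct"
+#   pkg-config --cflags --libs "libmnl"
+#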
+
+# -----------------------------------------------------------------------------
+# xenstat.plugin - libxenstat, libxenlight, libyajl
+
+
+pkg_failed=no
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for YAJL" >&5
+$as_echo_n "checking for YAJL... " >&6; }
+
+if test -n "$YAJL_CFLAGS"; then
+ pkg_cv_YAJL_CFLAGS="$YAJL_CFLAGS"
+ elif test -n "$PKG_CONFIG"; then
+ if test -n "$PKG_CONFIG" && \
+ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"yajl\""; } >&5
+ ($PKG_CONFIG --exists --print-errors "yajl") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }; then
+ pkg_cv_YAJL_CFLAGS=`$PKG_CONFIG --cflags "yajl" 2>/dev/null`
+ test "x$?" != "x0" && pkg_failed=yes
+else
+ pkg_failed=yes
+fi
+ else
+ pkg_failed=untried
+fi
+if test -n "$YAJL_LIBS"; then
+ pkg_cv_YAJL_LIBS="$YAJL_LIBS"
+ elif test -n "$PKG_CONFIG"; then
+ if test -n "$PKG_CONFIG" && \
+ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"yajl\""; } >&5
+ ($PKG_CONFIG --exists --print-errors "yajl") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }; then
+ pkg_cv_YAJL_LIBS=`$PKG_CONFIG --libs "yajl" 2>/dev/null`
+ test "x$?" != "x0" && pkg_failed=yes
+else
+ pkg_failed=yes
+fi
+ else
+ pkg_failed=untried
+fi
+
+
+
+if test $pkg_failed = yes; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+
+if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then
+ _pkg_short_errors_supported=yes
+else
+ _pkg_short_errors_supported=no
+fi
+ if test $_pkg_short_errors_supported = yes; then
+ YAJL_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "yajl" 2>&1`
+ else
+ YAJL_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "yajl" 2>&1`
+ fi
+ # Put the nasty error message in config.log where it belongs
+ echo "$YAJL_PKG_ERRORS" >&5
+
+ have_libyajl=no
+
+elif test $pkg_failed = untried; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+ have_libyajl=no
+
+else
+ YAJL_CFLAGS=$pkg_cv_YAJL_CFLAGS
+ YAJL_LIBS=$pkg_cv_YAJL_LIBS
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for yajl_tree_get in -lyajl" >&5
+$as_echo_n "checking for yajl_tree_get in -lyajl... " >&6; }
+if ${ac_cv_lib_yajl_yajl_tree_get+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ ac_check_lib_save_LIBS=$LIBS
+LIBS="-lyajl $LIBS"
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+/* Override any GCC internal prototype to avoid an error.
+ Use char because int might match the return type of a GCC
+ builtin and then its argument prototype would still apply. */
+#ifdef __cplusplus
+extern "C"
+#endif
+char yajl_tree_get ();
+int
+main ()
+{
+return yajl_tree_get ();
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ ac_cv_lib_yajl_yajl_tree_get=yes
+else
+ ac_cv_lib_yajl_yajl_tree_get=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+LIBS=$ac_check_lib_save_LIBS
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_yajl_yajl_tree_get" >&5
+$as_echo "$ac_cv_lib_yajl_yajl_tree_get" >&6; }
+if test "x$ac_cv_lib_yajl_yajl_tree_get" = xyes; then :
+ have_libyajl=yes
+else
+ have_libyajl=no
+
+fi
+
+fi
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for xenstat_init in -lxenstat" >&5
+$as_echo_n "checking for xenstat_init in -lxenstat... " >&6; }
+if ${ac_cv_lib_xenstat_xenstat_init+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ ac_check_lib_save_LIBS=$LIBS
+LIBS="-lxenstat -lyajl
+ $LIBS"
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+/* Override any GCC internal prototype to avoid an error.
+ Use char because int might match the return type of a GCC
+ builtin and then its argument prototype would still apply. */
+#ifdef __cplusplus
+extern "C"
+#endif
+char xenstat_init ();
+int
+main ()
+{
+return xenstat_init ();
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ ac_cv_lib_xenstat_xenstat_init=yes
+else
+ ac_cv_lib_xenstat_xenstat_init=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+LIBS=$ac_check_lib_save_LIBS
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_xenstat_xenstat_init" >&5
+$as_echo "$ac_cv_lib_xenstat_xenstat_init" >&6; }
+if test "x$ac_cv_lib_xenstat_xenstat_init" = xyes; then :
+ ac_fn_c_check_header_mongrel "$LINENO" "xenstat.h" "ac_cv_header_xenstat_h" "$ac_includes_default"
+if test "x$ac_cv_header_xenstat_h" = xyes; then :
+ have_libxenstat=yes
+else
+ have_libxenstat=no
+
+fi
+
+
+else
+ have_libxenstat=no
+fi
+
+
+
+pkg_failed=no
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for XENLIGHT" >&5
+$as_echo_n "checking for XENLIGHT... " >&6; }
+
+if test -n "$XENLIGHT_CFLAGS"; then
+ pkg_cv_XENLIGHT_CFLAGS="$XENLIGHT_CFLAGS"
+ elif test -n "$PKG_CONFIG"; then
+ if test -n "$PKG_CONFIG" && \
+ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"xenlight\""; } >&5
+ ($PKG_CONFIG --exists --print-errors "xenlight") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }; then
+ pkg_cv_XENLIGHT_CFLAGS=`$PKG_CONFIG --cflags "xenlight" 2>/dev/null`
+ test "x$?" != "x0" && pkg_failed=yes
+else
+ pkg_failed=yes
+fi
+ else
+ pkg_failed=untried
+fi
+if test -n "$XENLIGHT_LIBS"; then
+ pkg_cv_XENLIGHT_LIBS="$XENLIGHT_LIBS"
+ elif test -n "$PKG_CONFIG"; then
+ if test -n "$PKG_CONFIG" && \
+ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"xenlight\""; } >&5
+ ($PKG_CONFIG --exists --print-errors "xenlight") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }; then
+ pkg_cv_XENLIGHT_LIBS=`$PKG_CONFIG --libs "xenlight" 2>/dev/null`
+ test "x$?" != "x0" && pkg_failed=yes
+else
+ pkg_failed=yes
+fi
+ else
+ pkg_failed=untried
+fi
+
+
+
+if test $pkg_failed = yes; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+
+if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then
+ _pkg_short_errors_supported=yes
+else
+ _pkg_short_errors_supported=no
+fi
+ if test $_pkg_short_errors_supported = yes; then
+ XENLIGHT_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "xenlight" 2>&1`
+ else
+ XENLIGHT_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "xenlight" 2>&1`
+ fi
+ # Put the nasty error message in config.log where it belongs
+ echo "$XENLIGHT_PKG_ERRORS" >&5
+
+ have_libxenlight=no
+
+elif test $pkg_failed = untried; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+ have_libxenlight=no
+
+else
+ XENLIGHT_CFLAGS=$pkg_cv_XENLIGHT_CFLAGS
+ XENLIGHT_LIBS=$pkg_cv_XENLIGHT_LIBS
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for libxl_domain_info in -lxenlight" >&5
+$as_echo_n "checking for libxl_domain_info in -lxenlight... " >&6; }
+if ${ac_cv_lib_xenlight_libxl_domain_info+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ ac_check_lib_save_LIBS=$LIBS
+LIBS="-lxenlight $LIBS"
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+/* Override any GCC internal prototype to avoid an error.
+ Use char because int might match the return type of a GCC
+ builtin and then its argument prototype would still apply. */
+#ifdef __cplusplus
+extern "C"
+#endif
+char libxl_domain_info ();
+int
+main ()
+{
+return libxl_domain_info ();
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ ac_cv_lib_xenlight_libxl_domain_info=yes
+else
+ ac_cv_lib_xenlight_libxl_domain_info=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+LIBS=$ac_check_lib_save_LIBS
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_xenlight_libxl_domain_info" >&5
+$as_echo "$ac_cv_lib_xenlight_libxl_domain_info" >&6; }
+if test "x$ac_cv_lib_xenlight_libxl_domain_info" = xyes; then :
+ ac_fn_c_check_header_mongrel "$LINENO" "libxl.h" "ac_cv_header_libxl_h" "$ac_includes_default"
+if test "x$ac_cv_header_libxl_h" = xyes; then :
+ have_libxenlight=yes
+else
+ have_libxenlight=no
+
+fi
+
+
+else
+ have_libxenlight=no
+
+fi
+
+fi
+
+test "${enable_plugin_xenstat}" = "yes" -a "${have_libxenstat}" != "yes" && \
+	as_fn_error $? "libxenstat required but not found. Try installing 'xen-dom0-libs-devel'" "$LINENO" 5
+
+test "${enable_plugin_xenstat}" = "yes" -a "${have_libxenlight}" != "yes" && \
+	as_fn_error $? "libxenlight required but not found. Try installing 'xen-dom0-libs-devel'" "$LINENO" 5
+
+test "${enable_plugin_xenstat}" = "yes" -a "${have_libyajl}" != "yes" && \
+ as_fn_error $? "libyajl required but not found. Try installing 'yajl-devel'" "$LINENO" 5
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if xenstat.plugin should be enabled" >&5
+$as_echo_n "checking if xenstat.plugin should be enabled... " >&6; }
+if test "${enable_plugin_xenstat}" != "no" -a "${have_libxenstat}" = "yes" -a "${have_libxenlight}" = "yes" -a "${have_libyajl}" = "yes"; then
+ enable_plugin_xenstat="yes"
+
+$as_echo "#define HAVE_LIBXENSTAT 1" >>confdefs.h
+
+
+$as_echo "#define HAVE_LIBXENLIGHT 1" >>confdefs.h
+
+
+$as_echo "#define HAVE_LIBYAJL 1" >>confdefs.h
+
+ OPTIONAL_XENSTAT_CFLAGS="${XENLIGHT_CFLAGS} ${YAJL_CFLAGS}"
+ OPTIONAL_XENSTAT_LIBS="-lxenstat ${XENLIGHT_LIBS} ${YAJL_LIBS}"
+else
+ enable_plugin_xenstat="no"
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: ${enable_plugin_xenstat}" >&5
+$as_echo "${enable_plugin_xenstat}" >&6; }
+ if test "${enable_plugin_xenstat}" = "yes"; then
+ ENABLE_PLUGIN_XENSTAT_TRUE=
+ ENABLE_PLUGIN_XENSTAT_FALSE='#'
+else
+ ENABLE_PLUGIN_XENSTAT_TRUE='#'
+ ENABLE_PLUGIN_XENSTAT_FALSE=
+fi
+
+
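+# Unlike xenlight and yajl, libxenstat is probed with a raw link test above,
+# presumably because it ships no pkg-config metadata on common Xen installs.
+# Manual spot-checks for the pkg-config half (illustrative):
+#
+#   pkg-config --cflags --libs "xenlight"
+#   pkg-config --cflags --libs "yajl"
+#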
+
+# -----------------------------------------------------------------------------
+# perf.plugin
+
+ac_fn_c_check_header_mongrel "$LINENO" "linux/perf_event.h" "ac_cv_header_linux_perf_event_h" "$ac_includes_default"
+if test "x$ac_cv_header_linux_perf_event_h" = xyes; then :
+ ac_fn_c_check_decl "$LINENO" "PERF_COUNT_HW_REF_CPU_CYCLES" "ac_cv_have_decl_PERF_COUNT_HW_REF_CPU_CYCLES" "#include <linux/perf_event.h>
+
+"
+if test "x$ac_cv_have_decl_PERF_COUNT_HW_REF_CPU_CYCLES" = xyes; then :
+ have_perf_event=yes
+else
+ have_perf_event=no
+fi
+
+else
+ have_perf_event=no
+
+fi
+
+
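+# The declaration check above compiles, in effect, a probe like this C
+# sketch (paraphrased from the generated conftest, not copied verbatim):
+#
+#   #include <linux/perf_event.h>
+#   int main(void) {
+#   #ifndef PERF_COUNT_HW_REF_CPU_CYCLES
+#       (void) PERF_COUNT_HW_REF_CPU_CYCLES; /* fails to compile if undeclared */
+#   #endif
+#       return 0;
+#   }
+#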
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if perf.plugin should be enabled" >&5
+$as_echo_n "checking if perf.plugin should be enabled... " >&6; }
+if test "${build_target}" == "linux" -a "${have_perf_event}" = "yes"; then
+ enable_plugin_perf="yes"
+else
+ enable_plugin_perf="no"
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: ${enable_plugin_perf}" >&5
+$as_echo "${enable_plugin_perf}" >&6; }
+ if test "${enable_plugin_perf}" = "yes"; then
+ ENABLE_PLUGIN_PERF_TRUE=
+ ENABLE_PLUGIN_PERF_FALSE='#'
+else
+ ENABLE_PLUGIN_PERF_TRUE='#'
+ ENABLE_PLUGIN_PERF_FALSE=
+fi
+
+
+
+# -----------------------------------------------------------------------------
+# AWS Kinesis backend - libaws-cpp-sdk-kinesis, libaws-cpp-sdk-core, libssl, libcrypto, libcurl
+
+
+pkg_failed=no
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for LIBCRYPTO" >&5
+$as_echo_n "checking for LIBCRYPTO... " >&6; }
+
+if test -n "$LIBCRYPTO_CFLAGS"; then
+ pkg_cv_LIBCRYPTO_CFLAGS="$LIBCRYPTO_CFLAGS"
+ elif test -n "$PKG_CONFIG"; then
+ if test -n "$PKG_CONFIG" && \
+ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"libcrypto\""; } >&5
+ ($PKG_CONFIG --exists --print-errors "libcrypto") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }; then
+ pkg_cv_LIBCRYPTO_CFLAGS=`$PKG_CONFIG --cflags "libcrypto" 2>/dev/null`
+ test "x$?" != "x0" && pkg_failed=yes
+else
+ pkg_failed=yes
+fi
+ else
+ pkg_failed=untried
+fi
+if test -n "$LIBCRYPTO_LIBS"; then
+ pkg_cv_LIBCRYPTO_LIBS="$LIBCRYPTO_LIBS"
+ elif test -n "$PKG_CONFIG"; then
+ if test -n "$PKG_CONFIG" && \
+ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"libcrypto\""; } >&5
+ ($PKG_CONFIG --exists --print-errors "libcrypto") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }; then
+ pkg_cv_LIBCRYPTO_LIBS=`$PKG_CONFIG --libs "libcrypto" 2>/dev/null`
+ test "x$?" != "x0" && pkg_failed=yes
+else
+ pkg_failed=yes
+fi
+ else
+ pkg_failed=untried
+fi
+
+
+
+if test $pkg_failed = yes; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+
+if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then
+ _pkg_short_errors_supported=yes
+else
+ _pkg_short_errors_supported=no
+fi
+ if test $_pkg_short_errors_supported = yes; then
+ LIBCRYPTO_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "libcrypto" 2>&1`
+ else
+ LIBCRYPTO_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "libcrypto" 2>&1`
+ fi
+ # Put the nasty error message in config.log where it belongs
+ echo "$LIBCRYPTO_PKG_ERRORS" >&5
+
+ have_libcrypto=no
+
+elif test $pkg_failed = untried; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+ have_libcrypto=no
+
+else
+ LIBCRYPTO_CFLAGS=$pkg_cv_LIBCRYPTO_CFLAGS
+ LIBCRYPTO_LIBS=$pkg_cv_LIBCRYPTO_LIBS
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for CRYPTO_new_ex_data in -lcrypto" >&5
+$as_echo_n "checking for CRYPTO_new_ex_data in -lcrypto... " >&6; }
+if ${ac_cv_lib_crypto_CRYPTO_new_ex_data+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ ac_check_lib_save_LIBS=$LIBS
+LIBS="-lcrypto $LIBS"
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+/* Override any GCC internal prototype to avoid an error.
+ Use char because int might match the return type of a GCC
+ builtin and then its argument prototype would still apply. */
+#ifdef __cplusplus
+extern "C"
+#endif
+char CRYPTO_new_ex_data ();
+int
+main ()
+{
+return CRYPTO_new_ex_data ();
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ ac_cv_lib_crypto_CRYPTO_new_ex_data=yes
+else
+ ac_cv_lib_crypto_CRYPTO_new_ex_data=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+LIBS=$ac_check_lib_save_LIBS
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_crypto_CRYPTO_new_ex_data" >&5
+$as_echo "$ac_cv_lib_crypto_CRYPTO_new_ex_data" >&6; }
+if test "x$ac_cv_lib_crypto_CRYPTO_new_ex_data" = xyes; then :
+ have_libcrypto=yes
+else
+ have_libcrypto=no
+
+fi
+
+fi
+
+
+pkg_failed=no
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for LIBSSL" >&5
+$as_echo_n "checking for LIBSSL... " >&6; }
+
+if test -n "$LIBSSL_CFLAGS"; then
+ pkg_cv_LIBSSL_CFLAGS="$LIBSSL_CFLAGS"
+ elif test -n "$PKG_CONFIG"; then
+ if test -n "$PKG_CONFIG" && \
+ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"libssl\""; } >&5
+ ($PKG_CONFIG --exists --print-errors "libssl") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }; then
+ pkg_cv_LIBSSL_CFLAGS=`$PKG_CONFIG --cflags "libssl" 2>/dev/null`
+ test "x$?" != "x0" && pkg_failed=yes
+else
+ pkg_failed=yes
+fi
+ else
+ pkg_failed=untried
+fi
+if test -n "$LIBSSL_LIBS"; then
+ pkg_cv_LIBSSL_LIBS="$LIBSSL_LIBS"
+ elif test -n "$PKG_CONFIG"; then
+ if test -n "$PKG_CONFIG" && \
+ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"libssl\""; } >&5
+ ($PKG_CONFIG --exists --print-errors "libssl") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }; then
+ pkg_cv_LIBSSL_LIBS=`$PKG_CONFIG --libs "libssl" 2>/dev/null`
+ test "x$?" != "x0" && pkg_failed=yes
+else
+ pkg_failed=yes
+fi
+ else
+ pkg_failed=untried
+fi
+
+
+
+if test $pkg_failed = yes; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+
+if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then
+ _pkg_short_errors_supported=yes
+else
+ _pkg_short_errors_supported=no
+fi
+ if test $_pkg_short_errors_supported = yes; then
+ LIBSSL_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "libssl" 2>&1`
+ else
+ LIBSSL_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "libssl" 2>&1`
+ fi
+ # Put the nasty error message in config.log where it belongs
+ echo "$LIBSSL_PKG_ERRORS" >&5
+
+ have_libssl=no
+
+elif test $pkg_failed = untried; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+ have_libssl=no
+
+else
+ LIBSSL_CFLAGS=$pkg_cv_LIBSSL_CFLAGS
+ LIBSSL_LIBS=$pkg_cv_LIBSSL_LIBS
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for SSL_connect in -lssl" >&5
+$as_echo_n "checking for SSL_connect in -lssl... " >&6; }
+if ${ac_cv_lib_ssl_SSL_connect+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ ac_check_lib_save_LIBS=$LIBS
+LIBS="-lssl $LIBS"
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+/* Override any GCC internal prototype to avoid an error.
+ Use char because int might match the return type of a GCC
+ builtin and then its argument prototype would still apply. */
+#ifdef __cplusplus
+extern "C"
+#endif
+char SSL_connect ();
+int
+main ()
+{
+return SSL_connect ();
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ ac_cv_lib_ssl_SSL_connect=yes
+else
+ ac_cv_lib_ssl_SSL_connect=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+LIBS=$ac_check_lib_save_LIBS
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_ssl_SSL_connect" >&5
+$as_echo "$ac_cv_lib_ssl_SSL_connect" >&6; }
+if test "x$ac_cv_lib_ssl_SSL_connect" = xyes; then :
+ have_libssl=yes
+else
+ have_libssl=no
+
+fi
+
+fi
+
+
+pkg_failed=no
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for LIBCURL" >&5
+$as_echo_n "checking for LIBCURL... " >&6; }
+
+if test -n "$LIBCURL_CFLAGS"; then
+ pkg_cv_LIBCURL_CFLAGS="$LIBCURL_CFLAGS"
+ elif test -n "$PKG_CONFIG"; then
+ if test -n "$PKG_CONFIG" && \
+ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"libcurl\""; } >&5
+ ($PKG_CONFIG --exists --print-errors "libcurl") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }; then
+ pkg_cv_LIBCURL_CFLAGS=`$PKG_CONFIG --cflags "libcurl" 2>/dev/null`
+ test "x$?" != "x0" && pkg_failed=yes
+else
+ pkg_failed=yes
+fi
+ else
+ pkg_failed=untried
+fi
+if test -n "$LIBCURL_LIBS"; then
+ pkg_cv_LIBCURL_LIBS="$LIBCURL_LIBS"
+ elif test -n "$PKG_CONFIG"; then
+ if test -n "$PKG_CONFIG" && \
+ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"libcurl\""; } >&5
+ ($PKG_CONFIG --exists --print-errors "libcurl") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }; then
+ pkg_cv_LIBCURL_LIBS=`$PKG_CONFIG --libs "libcurl" 2>/dev/null`
+ test "x$?" != "x0" && pkg_failed=yes
+else
+ pkg_failed=yes
+fi
+ else
+ pkg_failed=untried
+fi
+
+
+
+if test $pkg_failed = yes; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+
+if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then
+ _pkg_short_errors_supported=yes
+else
+ _pkg_short_errors_supported=no
+fi
+ if test $_pkg_short_errors_supported = yes; then
+ LIBCURL_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "libcurl" 2>&1`
+ else
+ LIBCURL_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "libcurl" 2>&1`
+ fi
+ # Put the nasty error message in config.log where it belongs
+ echo "$LIBCURL_PKG_ERRORS" >&5
+
+ have_libcurl=no
+
+elif test $pkg_failed = untried; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+ have_libcurl=no
+
+else
+ LIBCURL_CFLAGS=$pkg_cv_LIBCURL_CFLAGS
+ LIBCURL_LIBS=$pkg_cv_LIBCURL_LIBS
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for curl_easy_init in -lcurl" >&5
+$as_echo_n "checking for curl_easy_init in -lcurl... " >&6; }
+if ${ac_cv_lib_curl_curl_easy_init+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ ac_check_lib_save_LIBS=$LIBS
+LIBS="-lcurl $LIBS"
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+/* Override any GCC internal prototype to avoid an error.
+ Use char because int might match the return type of a GCC
+ builtin and then its argument prototype would still apply. */
+#ifdef __cplusplus
+extern "C"
+#endif
+char curl_easy_init ();
+int
+main ()
+{
+return curl_easy_init ();
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ ac_cv_lib_curl_curl_easy_init=yes
+else
+ ac_cv_lib_curl_curl_easy_init=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+LIBS=$ac_check_lib_save_LIBS
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_curl_curl_easy_init" >&5
+$as_echo "$ac_cv_lib_curl_curl_easy_init" >&6; }
+if test "x$ac_cv_lib_curl_curl_easy_init" = xyes; then :
+ have_libcurl=yes
+else
+ have_libcurl=no
+
+fi
+
+fi
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for cJSON_free in -laws-cpp-sdk-core" >&5
+$as_echo_n "checking for cJSON_free in -laws-cpp-sdk-core... " >&6; }
+if ${ac_cv_lib_aws_cpp_sdk_core_cJSON_free+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ ac_check_lib_save_LIBS=$LIBS
+LIBS="-laws-cpp-sdk-core ${LIBCRYPTO_LIBS} ${LIBSSL_LIBS} ${LIBCURL_LIBS}
+ $LIBS"
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+/* Override any GCC internal prototype to avoid an error.
+ Use char because int might match the return type of a GCC
+ builtin and then its argument prototype would still apply. */
+#ifdef __cplusplus
+extern "C"
+#endif
+char cJSON_free ();
+int
+main ()
+{
+return cJSON_free ();
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ ac_cv_lib_aws_cpp_sdk_core_cJSON_free=yes
+else
+ ac_cv_lib_aws_cpp_sdk_core_cJSON_free=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+LIBS=$ac_check_lib_save_LIBS
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_aws_cpp_sdk_core_cJSON_free" >&5
+$as_echo "$ac_cv_lib_aws_cpp_sdk_core_cJSON_free" >&6; }
+if test "x$ac_cv_lib_aws_cpp_sdk_core_cJSON_free" = xyes; then :
+ have_libaws_cpp_sdk_core=yes
+else
+ have_libaws_cpp_sdk_core=no
+fi
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for Aws::Kinesis::Model::PutRecordRequest in -laws-cpp-sdk-kinesis" >&5
+$as_echo_n "checking for Aws::Kinesis::Model::PutRecordRequest in -laws-cpp-sdk-kinesis... " >&6; }
+
+if test "${have_libaws_cpp_sdk_core}" = "yes" -a "${have_libcrypto}" = "yes" -a "${have_libssl}" = "yes" -a "${have_libcurl}" = "yes"; then
+
+ ac_ext=cpp
+ac_cpp='$CXXCPP $CPPFLAGS'
+ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_cxx_compiler_gnu
+
+ save_LIBS="${LIBS}"
+ LIBS="-laws-cpp-sdk-kinesis -laws-cpp-sdk-core ${LIBCRYPTO_LIBS} ${LIBSSL_LIBS} ${LIBCURL_LIBS}"
+ save_CXXFLAGS="${CXXFLAGS}"
+ CXXFLAGS="${CXXFLAGS} -std=c++11"
+
+
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+ #include <aws/core/Aws.h>
+ #include <aws/core/client/ClientConfiguration.h>
+ #include <aws/core/auth/AWSCredentials.h>
+ #include <aws/core/utils/Outcome.h>
+ #include <aws/kinesis/KinesisClient.h>
+ #include <aws/kinesis/model/PutRecordRequest.h>
+
+int
+main ()
+{
+Aws::Kinesis::Model::PutRecordRequest request;
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_cxx_try_link "$LINENO"; then :
+ have_libaws_cpp_sdk_kinesis=yes
+else
+ have_libaws_cpp_sdk_kinesis=no
+
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+
+ LIBS="${save_LIBS}"
+ CXXFLAGS="${save_CXXFLAGS}"
+ ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+else
+ have_libaws_cpp_sdk_kinesis=no
+fi
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: ${have_libaws_cpp_sdk_kinesis}" >&5
+$as_echo "${have_libaws_cpp_sdk_kinesis}" >&6; }
+
+test "${enable_backend_kinesis}" = "yes" -a "${have_libaws_cpp_sdk_kinesis}" != "yes" && \
+	as_fn_error $? "libaws-cpp-sdk-kinesis required but not found. Try installing the AWS C++ SDK" "$LINENO" 5
+
+test "${enable_backend_kinesis}" = "yes" -a "${have_libaws_cpp_sdk_core}" != "yes" && \
+	as_fn_error $? "libaws-cpp-sdk-core required but not found. Try installing the AWS C++ SDK" "$LINENO" 5
+
+test "${enable_backend_kinesis}" = "yes" -a "${have_libcurl}" != "yes" && \
+ as_fn_error $? "libcurl required but not found" "$LINENO" 5
+
+test "${enable_backend_kinesis}" = "yes" -a "${have_libssl}" != "yes" && \
+ as_fn_error $? "libssl required but not found" "$LINENO" 5
+
+test "${enable_backend_kinesis}" = "yes" -a "${have_libcrypto}" != "yes" && \
+ as_fn_error $? "libcrypto required but not found" "$LINENO" 5
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if kinesis backend should be enabled" >&5
+$as_echo_n "checking if kinesis backend should be enabled... " >&6; }
+if test "${enable_backend_kinesis}" != "no" -a "${have_libaws_cpp_sdk_kinesis}" = "yes" -a "${have_libaws_cpp_sdk_core}" = "yes" \
+ -a "${have_libcurl}" = "yes" -a "${have_libssl}" = "yes" -a "${have_libcrypto}" = "yes"; then
+ enable_backend_kinesis="yes"
+
+$as_echo "#define HAVE_KINESIS 1" >>confdefs.h
+
+ OPTIONAL_KINESIS_CFLAGS="${LIBCRYPTO_CFLAGS} ${LIBSSL_CFLAGS} ${LIBCURL_CFLAGS}"
+ CXX11FLAG="-std=c++11"
+ OPTIONAL_KINESIS_LIBS="-laws-cpp-sdk-kinesis -laws-cpp-sdk-core ${LIBCRYPTO_LIBS} ${LIBSSL_LIBS} ${LIBCURL_LIBS}"
+else
+ enable_backend_kinesis="no"
+fi
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: ${enable_backend_kinesis}" >&5
+$as_echo "${enable_backend_kinesis}" >&6; }
+ if test "${enable_backend_kinesis}" = "yes"; then
+ ENABLE_BACKEND_KINESIS_TRUE=
+ ENABLE_BACKEND_KINESIS_FALSE='#'
+else
+ ENABLE_BACKEND_KINESIS_TRUE='#'
+ ENABLE_BACKEND_KINESIS_FALSE=
+fi
+
+
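+# Notes on the Kinesis detection above: cJSON_free serves as a probe symbol
+# because the AWS C++ SDK core library of this era bundles cJSON, and the
+# PutRecordRequest test is a real C++11 compile/link probe since the SDK
+# provides no pkg-config files (both are observations about SDK packaging,
+# not guarantees). Manual checks for the plain C dependencies (illustrative):
+#
+#   pkg-config --cflags --libs libcrypto libssl libcurl
+#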
+
+# -----------------------------------------------------------------------------
+# Prometheus remote write backend - libprotobuf, libsnappy, protoc
+
+
+pkg_failed=no
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for PROTOBUF" >&5
+$as_echo_n "checking for PROTOBUF... " >&6; }
+
+if test -n "$PROTOBUF_CFLAGS"; then
+ pkg_cv_PROTOBUF_CFLAGS="$PROTOBUF_CFLAGS"
+ elif test -n "$PKG_CONFIG"; then
+ if test -n "$PKG_CONFIG" && \
+ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"protobuf >= 3\""; } >&5
+ ($PKG_CONFIG --exists --print-errors "protobuf >= 3") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }; then
+ pkg_cv_PROTOBUF_CFLAGS=`$PKG_CONFIG --cflags "protobuf >= 3" 2>/dev/null`
+ test "x$?" != "x0" && pkg_failed=yes
+else
+ pkg_failed=yes
+fi
+ else
+ pkg_failed=untried
+fi
+if test -n "$PROTOBUF_LIBS"; then
+ pkg_cv_PROTOBUF_LIBS="$PROTOBUF_LIBS"
+ elif test -n "$PKG_CONFIG"; then
+ if test -n "$PKG_CONFIG" && \
+ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"protobuf >= 3\""; } >&5
+ ($PKG_CONFIG --exists --print-errors "protobuf >= 3") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }; then
+ pkg_cv_PROTOBUF_LIBS=`$PKG_CONFIG --libs "protobuf >= 3" 2>/dev/null`
+ test "x$?" != "x0" && pkg_failed=yes
+else
+ pkg_failed=yes
+fi
+ else
+ pkg_failed=untried
+fi
+
+
+
+if test $pkg_failed = yes; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+
+if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then
+ _pkg_short_errors_supported=yes
+else
+ _pkg_short_errors_supported=no
+fi
+ if test $_pkg_short_errors_supported = yes; then
+ PROTOBUF_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "protobuf >= 3" 2>&1`
+ else
+ PROTOBUF_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "protobuf >= 3" 2>&1`
+ fi
+ # Put the nasty error message in config.log where it belongs
+ echo "$PROTOBUF_PKG_ERRORS" >&5
+
+ have_libprotobuf=no
+
+elif test $pkg_failed = untried; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+ have_libprotobuf=no
+
+else
+ PROTOBUF_CFLAGS=$pkg_cv_PROTOBUF_CFLAGS
+ PROTOBUF_LIBS=$pkg_cv_PROTOBUF_LIBS
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+ have_libprotobuf=yes
+fi
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for snappy::RawCompress in -lsnappy" >&5
+$as_echo_n "checking for snappy::RawCompress in -lsnappy... " >&6; }
+
+
+ ac_ext=cpp
+ac_cpp='$CXXCPP $CPPFLAGS'
+ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_cxx_compiler_gnu
+
+ save_LIBS="${LIBS}"
+ LIBS="-lsnappy"
+ save_CXXFLAGS="${CXXFLAGS}"
+ CXXFLAGS="${CXXFLAGS} -std=c++11"
+
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+ #include <stdlib.h>
+ #include <snappy.h>
+
+int
+main ()
+{
+
+ const char *input = "test";
+ size_t compressed_length;
+ char *buffer = (char *)malloc(5 * sizeof(char));
+ snappy::RawCompress(input, 4, buffer, &compressed_length);
+ free(buffer);
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_cxx_try_link "$LINENO"; then :
+
+ have_libsnappy=yes
+ SNAPPY_CFLAGS=""
+ SNAPPY_LIBS="-lsnappy"
+
+else
+ have_libsnappy=no
+
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+
+ LIBS="${save_LIBS}"
+ CXXFLAGS="${save_CXXFLAGS}"
+ ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: ${have_libsnappy}" >&5
+$as_echo "${have_libsnappy}" >&6; }
+
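+# snappy is probed with a raw C++ compile/link test above rather than
+# pkg-config, as it does not install a .pc file on most distributions at
+# the time of writing (an assumption worth re-checking). A weaker manual
+# presence check (illustrative; only verifies the library can be linked):
+#
+#   echo 'int main(){return 0;}' > t.cc && c++ t.cc -lsnappy -o t && echo ok
+#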
+# Extract the first word of "protoc", so it can be a program name with args.
+set dummy protoc; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_path_PROTOC+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ case $PROTOC in
+ [\\/]* | ?:[\\/]*)
+ ac_cv_path_PROTOC="$PROTOC" # Let the user override the test with a path.
+ ;;
+ *)
+ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_path_PROTOC="$as_dir/$ac_word$ac_exec_ext"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+ test -z "$ac_cv_path_PROTOC" && ac_cv_path_PROTOC="no"
+ ;;
+esac
+fi
+PROTOC=$ac_cv_path_PROTOC
+if test -n "$PROTOC"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $PROTOC" >&5
+$as_echo "$PROTOC" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+if test x"${PROTOC}" == x"no"; then :
+ have_protoc=no
+else
+ have_protoc=yes
+
+fi
+
+# Extract the first word of "${CXX}", so it can be a program name with args.
+set dummy ${CXX}; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_path_CXX_BINARY+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ case $CXX_BINARY in
+ [\\/]* | ?:[\\/]*)
+ ac_cv_path_CXX_BINARY="$CXX_BINARY" # Let the user override the test with a path.
+ ;;
+ *)
+ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_path_CXX_BINARY="$as_dir/$ac_word$ac_exec_ext"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+ test -z "$ac_cv_path_CXX_BINARY" && ac_cv_path_CXX_BINARY="no"
+ ;;
+esac
+fi
+CXX_BINARY=$ac_cv_path_CXX_BINARY
+if test -n "$CXX_BINARY"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CXX_BINARY" >&5
+$as_echo "$CXX_BINARY" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+if test x"${CXX_BINARY}" == x"no"; then :
+ have_CXX_compiler=no
+else
+ have_CXX_compiler=yes
+
+fi
+
+test "${enable_backend_prometheus_remote_write}" = "yes" -a "${have_libprotobuf}" != "yes" && \
+	as_fn_error $? "libprotobuf required but not found. Try installing protobuf" "$LINENO" 5
+
+test "${enable_backend_prometheus_remote_write}" = "yes" -a "${have_libsnappy}" != "yes" && \
+	as_fn_error $? "libsnappy required but not found. Try installing snappy" "$LINENO" 5
+
+test "${enable_backend_prometheus_remote_write}" = "yes" -a "${have_protoc}" != "yes" && \
+	as_fn_error $? "protoc compiler required but not found. Try installing protobuf" "$LINENO" 5
+
+test "${enable_backend_prometheus_remote_write}" = "yes" -a "${have_CXX_compiler}" != "yes" && \
+	as_fn_error $? "C++ compiler required but not found. Try installing g++" "$LINENO" 5
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if prometheus remote write backend should be enabled" >&5
+$as_echo_n "checking if prometheus remote write backend should be enabled... " >&6; }
+if test "${enable_backend_prometeus_remote_write}" != "no" -a "${have_libprotobuf}" = "yes" -a "${have_libsnappy}" = "yes" \
+ -a "${have_protoc}" = "yes" -a "${have_CXX_compiler}" = "yes"; then
+ enable_backend_prometheus_remote_write="yes"
+
+$as_echo "#define ENABLE_PROMETHEUS_REMOTE_WRITE 1" >>confdefs.h
+
+ OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS="${PROTOBUF_CFLAGS} ${SNAPPY_CFLAGS}"
+ CXX11FLAG="-std=c++11"
+ OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS="${PROTOBUF_LIBS} ${SNAPPY_LIBS}"
+else
+ enable_backend_prometheus_remote_write="no"
+fi
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: ${enable_backend_prometheus_remote_write}" >&5
+$as_echo "${enable_backend_prometheus_remote_write}" >&6; }
+ if test "${enable_backend_prometheus_remote_write}" = "yes"; then
+ ENABLE_BACKEND_PROMETHEUS_REMOTE_WRITE_TRUE=
+ ENABLE_BACKEND_PROMETHEUS_REMOTE_WRITE_FALSE='#'
+else
+ ENABLE_BACKEND_PROMETHEUS_REMOTE_WRITE_TRUE='#'
+ ENABLE_BACKEND_PROMETHEUS_REMOTE_WRITE_FALSE=
+fi
+
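+# A usage sketch, not normative: the flag name below is inferred from the
+# enable_backend_prometheus_remote_write variable via the usual autoconf
+# --enable-FEATURE mapping; confirm against ./configure --help.
+#
+#   ./configure --enable-backend-prometheus-remote-write
+#
+# With that flag, configure aborts unless libprotobuf, libsnappy, protoc and
+# a C++ compiler are all available (the four hard checks above).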
+
+
+# -----------------------------------------------------------------------------
+# MongoDB backend - libmongoc
+
+
+pkg_failed=no
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for LIBMONGOC" >&5
+$as_echo_n "checking for LIBMONGOC... " >&6; }
+
+if test -n "$LIBMONGOC_CFLAGS"; then
+ pkg_cv_LIBMONGOC_CFLAGS="$LIBMONGOC_CFLAGS"
+ elif test -n "$PKG_CONFIG"; then
+ if test -n "$PKG_CONFIG" && \
+ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"libmongoc-1.0 >= 1.7\""; } >&5
+ ($PKG_CONFIG --exists --print-errors "libmongoc-1.0 >= 1.7") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }; then
+ pkg_cv_LIBMONGOC_CFLAGS=`$PKG_CONFIG --cflags "libmongoc-1.0 >= 1.7" 2>/dev/null`
+ test "x$?" != "x0" && pkg_failed=yes
+else
+ pkg_failed=yes
+fi
+ else
+ pkg_failed=untried
+fi
+if test -n "$LIBMONGOC_LIBS"; then
+ pkg_cv_LIBMONGOC_LIBS="$LIBMONGOC_LIBS"
+ elif test -n "$PKG_CONFIG"; then
+ if test -n "$PKG_CONFIG" && \
+ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"libmongoc-1.0 >= 1.7\""; } >&5
+ ($PKG_CONFIG --exists --print-errors "libmongoc-1.0 >= 1.7") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }; then
+ pkg_cv_LIBMONGOC_LIBS=`$PKG_CONFIG --libs "libmongoc-1.0 >= 1.7" 2>/dev/null`
+ test "x$?" != "x0" && pkg_failed=yes
+else
+ pkg_failed=yes
+fi
+ else
+ pkg_failed=untried
+fi
+
+
+
+if test $pkg_failed = yes; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+
+if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then
+ _pkg_short_errors_supported=yes
+else
+ _pkg_short_errors_supported=no
+fi
+ if test $_pkg_short_errors_supported = yes; then
+ LIBMONGOC_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "libmongoc-1.0 >= 1.7" 2>&1`
+ else
+ LIBMONGOC_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "libmongoc-1.0 >= 1.7" 2>&1`
+ fi
+ # Put the nasty error message in config.log where it belongs
+ echo "$LIBMONGOC_PKG_ERRORS" >&5
+
+ have_libmongoc=no
+
+elif test $pkg_failed = untried; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+ have_libmongoc=no
+
+else
+ LIBMONGOC_CFLAGS=$pkg_cv_LIBMONGOC_CFLAGS
+ LIBMONGOC_LIBS=$pkg_cv_LIBMONGOC_LIBS
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+ have_libmongoc=yes
+fi
+
+test "${enable_backend_mongodb}" = "yes" -a "${have_libmongoc}" != "yes" && \
+ as_fn_error $? "libmongoc required but not found. Try installing \`mongoc\`." "$LINENO" 5
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if mongodb backend should be enabled" >&5
+$as_echo_n "checking if mongodb backend should be enabled... " >&6; }
+if test "${enable_backend_mongodb}" != "no" -a "${have_libmongoc}" = "yes"; then
+ enable_backend_mongodb="yes"
+
+$as_echo "#define HAVE_MONGOC 1" >>confdefs.h
+
+ OPTIONAL_MONGOC_CFLAGS="${LIBMONGOC_CFLAGS}"
+ OPTIONAL_MONGOC_LIBS="${LIBMONGOC_LIBS}"
+else
+ enable_backend_mongodb="no"
+fi
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: ${enable_backend_mongodb}" >&5
+$as_echo "${enable_backend_mongodb}" >&6; }
+ if test "${enable_backend_mongodb}" = "yes"; then
+ ENABLE_BACKEND_MONGODB_TRUE=
+ ENABLE_BACKEND_MONGODB_FALSE='#'
+else
+ ENABLE_BACKEND_MONGODB_TRUE='#'
+ ENABLE_BACKEND_MONGODB_FALSE=
+fi
+
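+# Likewise an illustrative sketch with inferred flag names (confirm with
+# ./configure --help):
+#
+#   ./configure --enable-backend-mongodb    # abort unless libmongoc-1.0 >= 1.7 is found
+#   ./configure --disable-backend-mongodb   # never build the backend, even if libmongoc exists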
+
+
+# -----------------------------------------------------------------------------
+# check for setns() - cgroup-network
+
+ac_fn_c_check_func "$LINENO" "setns" "ac_cv_func_setns"
+if test "x$ac_cv_func_setns" = xyes; then :
+
+fi
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if cgroup-network can be enabled" >&5
+$as_echo_n "checking if cgroup-network can be enabled... " >&6; }
+if test "$ac_cv_func_setns" = "yes" ; then
+ have_setns="yes"
+
+$as_echo "#define HAVE_SETNS 1" >>confdefs.h
+
+else
+ have_setns="no"
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: ${have_setns}" >&5
+$as_echo "${have_setns}" >&6; }
+ if test "${have_setns}" = "yes"; then
+ ENABLE_PLUGIN_CGROUP_NETWORK_TRUE=
+ ENABLE_PLUGIN_CGROUP_NETWORK_FALSE='#'
+else
+ ENABLE_PLUGIN_CGROUP_NETWORK_TRUE='#'
+ ENABLE_PLUGIN_CGROUP_NETWORK_FALSE=
+fi
+
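+# There is no --enable flag for cgroup-network: the plugin is built whenever
+# libc provides setns(), the call it needs to switch into another namespace.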
+
+
+# -----------------------------------------------------------------------------
+# Link-Time-Optimization
+
+if test "${enable_lto}" != "no"; then
+ opt="-flto"
+ as_CACHEVAR=`$as_echo "ax_cv_check_cflags__${opt}" | $as_tr_sh`
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether C compiler accepts ${opt}" >&5
+$as_echo_n "checking whether C compiler accepts ${opt}... " >&6; }
+if eval \${$as_CACHEVAR+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+
+ ax_check_save_flags=$CFLAGS
+ CFLAGS="$CFLAGS ${opt}"
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ eval "$as_CACHEVAR=yes"
+else
+ eval "$as_CACHEVAR=no"
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ CFLAGS=$ax_check_save_flags
+fi
+eval ac_res=\$$as_CACHEVAR
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
+$as_echo "$ac_res" >&6; }
+if test x"`eval 'as_val=${'$as_CACHEVAR'};$as_echo "$as_val"'`" = xyes; then :
+ have_lto=yes
+else
+ have_lto=no
+fi
+
+fi
+if test "${have_lto}" = "yes"; then
+ oCFLAGS="${CFLAGS}"
+ CFLAGS="${CFLAGS} -flto"
+ ac_cv_c_lto_cross_compile="${enable_lto}"
+ test "${ac_cv_c_lto_cross_compile}" != "yes" && ac_cv_c_lto_cross_compile="no"
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking if -flto builds executables" >&5
+$as_echo_n "checking if -flto builds executables... " >&6; }
+if ${ac_cv_c_lto+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test "$cross_compiling" = yes; then :
+ ac_cv_c_lto=${ac_cv_c_lto_cross_compile}
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <stdio.h>
+ int main(int argc, char **argv) {
+ return 0;
+ }
+
+_ACEOF
+if ac_fn_c_try_run "$LINENO"; then :
+ ac_cv_c_lto=yes
+else
+ ac_cv_c_lto=no
+fi
+rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \
+ conftest.$ac_objext conftest.beam conftest.$ac_ext
+fi
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_lto" >&5
+$as_echo "$ac_cv_c_lto" >&6; }
+if test "${ac_cv_c_lto}" = "yes"; then
+
+$as_echo "#define HAVE_LTO 1" >>confdefs.h
+
+fi
+
+ CFLAGS="${oCFLAGS}"
+ test "${ac_cv_c_lto}" != "yes" && have_lto="no"
+fi
+test "${enable_lto}" = "yes" -a "${have_lto}" != "yes" && \
+ as_fn_error $? "LTO is required but is not available." "$LINENO" 5
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if LTO should be enabled" >&5
+$as_echo_n "checking if LTO should be enabled... " >&6; }
+if test "${enable_lto}" != "no" -a "${have_lto}" = "yes"; then
+ enable_lto="yes"
+ CFLAGS="${CFLAGS} -flto"
+else
+ enable_lto="no"
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: ${enable_lto}" >&5
+$as_echo "${enable_lto}" >&6; }
+
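+# The LTO probe is two-stage on purpose: -flto may be accepted at compile time
+# yet still fail when linking a whole executable, so the second test actually
+# links and runs one. When cross-compiling that test cannot run, and an
+# explicit --enable-lto is trusted instead (ac_cv_c_lto_cross_compile above).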
+
+# -----------------------------------------------------------------------------
+
+ if test "${enable_backend_kinesis}" = "yes" -o "${enable_backend_prometheus_remote_write}" = "yes"; then
+ ENABLE_CXX_LINKER_TRUE=
+ ENABLE_CXX_LINKER_FALSE='#'
+else
+ ENABLE_CXX_LINKER_TRUE='#'
+ ENABLE_CXX_LINKER_FALSE=
+fi
+
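+# Kinesis and prometheus remote write are the two backends with C++ objects;
+# ENABLE_CXX_LINKER tells the automake files to link netdata with the C++
+# driver whenever either of them is enabled.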
+
+
+cat >>confdefs.h <<_ACEOF
+#define NETDATA_USER "${with_user}"
+_ACEOF
+
+
+varlibdir="${localstatedir}/lib/netdata"
+registrydir="${localstatedir}/lib/netdata/registry"
+cachedir="${localstatedir}/cache/netdata"
+chartsdir="${libexecdir}/netdata/charts.d"
+nodedir="${libexecdir}/netdata/node.d"
+pythondir="${libexecdir}/netdata/python.d"
+configdir="${sysconfdir}/netdata"
+libconfigdir="${libdir}/netdata/conf.d"
+logdir="${localstatedir}/log/netdata"
+pluginsdir="${libexecdir}/netdata/plugins.d"
+
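+# These paths hang off --prefix/--localstatedir/--libexecdir/--sysconfdir;
+# with the default prefix, for example, logdir resolves to
+# /usr/local/var/log/netdata. They are exported for the generated Makefiles
+# and baked into CPPFLAGS below.
+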
+CFLAGS="${CFLAGS} ${OPTIONAL_MATH_CFLAGS} ${OPTIONAL_NFACCT_CFLAGS} ${OPTIONAL_ZLIB_CFLAGS} ${OPTIONAL_UUID_CFLAGS} \
+ ${OPTIONAL_LIBCAP_CFLAGS} ${OPTIONAL_IPMIMONITORING_CFLAGS} ${OPTIONAL_CUPS_CFLAGS} ${OPTIONAL_XENSTAT_FLAGS} \
+ ${OPTIONAL_KINESIS_CFLAGS} ${OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS} ${OPTIONAL_MONGOC_CFLAGS}"
+
+CXXFLAGS="${CFLAGS} ${CXX11FLAG}"
+
+CPPFLAGS="\
+ -DTARGET_OS=${build_target_id} \
+ -DVARLIB_DIR=\"\\\"${varlibdir}\\\"\" \
+ -DCACHE_DIR=\"\\\"${cachedir}\\\"\" \
+ -DCONFIG_DIR=\"\\\"${configdir}\\\"\" \
+ -DLIBCONFIG_DIR=\"\\\"${libconfigdir}\\\"\" \
+ -DLOG_DIR=\"\\\"${logdir}\\\"\" \
+ -DPLUGINS_DIR=\"\\\"${pluginsdir}\\\"\" \
+ -DRUN_DIR=\"\\\"${localstatedir}/run/netdata\\\"\" \
+ -DWEB_DIR=\"\\\"${webdir}\\\"\" \
+"
+
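+# The tangled escaping above collapses once under configure's own quoting and
+# once more when make hands the flags to the shell, so the compiler finally
+# sees plain string literals, e.g. (under the default prefix):
+#   -DCACHE_DIR="/usr/local/var/cache/netdata"
+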
+ac_config_files="$ac_config_files Makefile netdata.spec backends/graphite/Makefile backends/json/Makefile backends/Makefile backends/opentsdb/Makefile backends/prometheus/Makefile backends/prometheus/remote_write/Makefile backends/aws_kinesis/Makefile backends/mongodb/Makefile collectors/Makefile collectors/apps.plugin/Makefile collectors/cgroups.plugin/Makefile collectors/charts.d.plugin/Makefile collectors/checks.plugin/Makefile collectors/diskspace.plugin/Makefile collectors/fping.plugin/Makefile collectors/ioping.plugin/Makefile collectors/freebsd.plugin/Makefile collectors/freeipmi.plugin/Makefile collectors/cups.plugin/Makefile collectors/idlejitter.plugin/Makefile collectors/macos.plugin/Makefile collectors/nfacct.plugin/Makefile collectors/node.d.plugin/Makefile collectors/plugins.d/Makefile collectors/proc.plugin/Makefile collectors/python.d.plugin/Makefile collectors/statsd.plugin/Makefile collectors/tc.plugin/Makefile collectors/xenstat.plugin/Makefile collectors/perf.plugin/Makefile daemon/Makefile database/Makefile database/engine/Makefile diagrams/Makefile health/Makefile health/notifications/Makefile libnetdata/Makefile libnetdata/adaptive_resortable_list/Makefile libnetdata/avl/Makefile libnetdata/buffer/Makefile libnetdata/clocks/Makefile libnetdata/config/Makefile libnetdata/dictionary/Makefile libnetdata/eval/Makefile libnetdata/locks/Makefile libnetdata/log/Makefile libnetdata/popen/Makefile libnetdata/procfile/Makefile libnetdata/simple_pattern/Makefile libnetdata/socket/Makefile libnetdata/statistical/Makefile libnetdata/storage_number/Makefile libnetdata/threads/Makefile libnetdata/url/Makefile libnetdata/json/Makefile libnetdata/health/Makefile registry/Makefile streaming/Makefile system/Makefile tests/Makefile web/Makefile web/api/Makefile web/api/badges/Makefile web/api/exporters/Makefile web/api/exporters/shell/Makefile web/api/exporters/prometheus/Makefile web/api/formatters/Makefile web/api/formatters/csv/Makefile web/api/formatters/json/Makefile web/api/formatters/ssv/Makefile web/api/formatters/value/Makefile web/api/queries/Makefile web/api/queries/average/Makefile web/api/queries/des/Makefile web/api/queries/incremental_sum/Makefile web/api/queries/max/Makefile web/api/queries/median/Makefile web/api/queries/min/Makefile web/api/queries/ses/Makefile web/api/queries/stddev/Makefile web/api/queries/sum/Makefile web/api/health/Makefile web/gui/Makefile web/server/Makefile web/server/static/Makefile"
+
+cat >confcache <<\_ACEOF
+# This file is a shell script that caches the results of configure
+# tests run on this system so they can be shared between configure
+# scripts and configure runs, see configure's option --config-cache.
+# It is not useful on other systems. If it contains results you don't
+# want to keep, you may remove or edit it.
+#
+# config.status only pays attention to the cache file if you give it
+# the --recheck option to rerun configure.
+#
+# `ac_cv_env_foo' variables (set or unset) will be overridden when
+# loading this file, other *unset* `ac_cv_foo' will be assigned the
+# following values.
+
+_ACEOF
+
+# The following way of writing the cache mishandles newlines in values,
+# but we know of no workaround that is simple, portable, and efficient.
+# So, we kill variables containing newlines.
+# Ultrix sh set writes to stderr and can't be redirected directly,
+# and sets the high bit in the cache file unless we assign to the vars.
+(
+ for ac_var in `(set) 2>&1 | sed -n 's/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'`; do
+ eval ac_val=\$$ac_var
+ case $ac_val in #(
+ *${as_nl}*)
+ case $ac_var in #(
+ *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5
+$as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;;
+ esac
+ case $ac_var in #(
+ _ | IFS | as_nl) ;; #(
+ BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #(
+ *) { eval $ac_var=; unset $ac_var;} ;;
+ esac ;;
+ esac
+ done
+
+ (set) 2>&1 |
+ case $as_nl`(ac_space=' '; set) 2>&1` in #(
+ *${as_nl}ac_space=\ *)
+ # `set' does not quote correctly, so add quotes: double-quote
+ # substitution turns \\\\ into \\, and sed turns \\ into \.
+ sed -n \
+ "s/'/'\\\\''/g;
+ s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\\2'/p"
+ ;; #(
+ *)
+ # `set' quotes correctly as required by POSIX, so do not add quotes.
+ sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p"
+ ;;
+ esac |
+ sort
+) |
+ sed '
+ /^ac_cv_env_/b end
+ t clear
+ :clear
+ s/^\([^=]*\)=\(.*[{}].*\)$/test "${\1+set}" = set || &/
+ t end
+ s/^\([^=]*\)=\(.*\)$/\1=${\1=\2}/
+ :end' >>confcache
+if diff "$cache_file" confcache >/dev/null 2>&1; then :; else
+ if test -w "$cache_file"; then
+ if test "x$cache_file" != "x/dev/null"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: updating cache $cache_file" >&5
+$as_echo "$as_me: updating cache $cache_file" >&6;}
+ if test ! -f "$cache_file" || test -h "$cache_file"; then
+ cat confcache >"$cache_file"
+ else
+ case $cache_file in #(
+ */* | ?:*)
+ mv -f confcache "$cache_file"$$ &&
+ mv -f "$cache_file"$$ "$cache_file" ;; #(
+ *)
+ mv -f confcache "$cache_file" ;;
+ esac
+ fi
+ fi
+ else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: not updating unwritable cache $cache_file" >&5
+$as_echo "$as_me: not updating unwritable cache $cache_file" >&6;}
+ fi
+fi
+rm -f confcache
+
+test "x$prefix" = xNONE && prefix=$ac_default_prefix
+# Let make expand exec_prefix.
+test "x$exec_prefix" = xNONE && exec_prefix='${prefix}'
+
+DEFS=-DHAVE_CONFIG_H
+
+ac_libobjs=
+ac_ltlibobjs=
+U=
+for ac_i in : $LIBOBJS; do test "x$ac_i" = x: && continue
+ # 1. Remove the extension, and $U if already installed.
+ ac_script='s/\$U\././;s/\.o$//;s/\.obj$//'
+ ac_i=`$as_echo "$ac_i" | sed "$ac_script"`
+ # 2. Prepend LIBOBJDIR. When used with automake>=1.10 LIBOBJDIR
+ # will be set to the directory where LIBOBJS objects are built.
+ as_fn_append ac_libobjs " \${LIBOBJDIR}$ac_i\$U.$ac_objext"
+ as_fn_append ac_ltlibobjs " \${LIBOBJDIR}$ac_i"'$U.lo'
+done
+LIBOBJS=$ac_libobjs
+
+LTLIBOBJS=$ac_ltlibobjs
+
+
+if test -z "${MAINTAINER_MODE_TRUE}" && test -z "${MAINTAINER_MODE_FALSE}"; then
+ as_fn_error $? "conditional \"MAINTAINER_MODE\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking that generated files are newer than configure" >&5
+$as_echo_n "checking that generated files are newer than configure... " >&6; }
+ if test -n "$am_sleep_pid"; then
+ # Hide warnings about reused PIDs.
+ wait $am_sleep_pid 2>/dev/null
+ fi
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: done" >&5
+$as_echo "done" >&6; }
+ if test -n "$EXEEXT"; then
+ am__EXEEXT_TRUE=
+ am__EXEEXT_FALSE='#'
+else
+ am__EXEEXT_TRUE='#'
+ am__EXEEXT_FALSE=
+fi
+
+if test -z "${AMDEP_TRUE}" && test -z "${AMDEP_FALSE}"; then
+ as_fn_error $? "conditional \"AMDEP\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
+if test -z "${am__fastdepCC_TRUE}" && test -z "${am__fastdepCC_FALSE}"; then
+ as_fn_error $? "conditional \"am__fastdepCC\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
+if test -z "${am__fastdepCXX_TRUE}" && test -z "${am__fastdepCXX_FALSE}"; then
+ as_fn_error $? "conditional \"am__fastdepCXX\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
+if test -z "${FREEBSD_TRUE}" && test -z "${FREEBSD_FALSE}"; then
+ as_fn_error $? "conditional \"FREEBSD\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
+if test -z "${MACOS_TRUE}" && test -z "${MACOS_FALSE}"; then
+ as_fn_error $? "conditional \"MACOS\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
+if test -z "${LINUX_TRUE}" && test -z "${LINUX_FALSE}"; then
+ as_fn_error $? "conditional \"LINUX\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
+if test -z "${ENABLE_DBENGINE_TRUE}" && test -z "${ENABLE_DBENGINE_FALSE}"; then
+ as_fn_error $? "conditional \"ENABLE_DBENGINE\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
+if test -z "${ENABLE_HTTPS_TRUE}" && test -z "${ENABLE_HTTPS_FALSE}"; then
+ as_fn_error $? "conditional \"ENABLE_HTTPS\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
+if test -z "${ENABLE_JSONC_TRUE}" && test -z "${ENABLE_JSONC_FALSE}"; then
+ as_fn_error $? "conditional \"ENABLE_JSONC\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
+if test -z "${ENABLE_CAPABILITY_TRUE}" && test -z "${ENABLE_CAPABILITY_FALSE}"; then
+ as_fn_error $? "conditional \"ENABLE_CAPABILITY\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
+if test -z "${ENABLE_PLUGIN_APPS_TRUE}" && test -z "${ENABLE_PLUGIN_APPS_FALSE}"; then
+ as_fn_error $? "conditional \"ENABLE_PLUGIN_APPS\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
+if test -z "${ENABLE_PLUGIN_FREEIPMI_TRUE}" && test -z "${ENABLE_PLUGIN_FREEIPMI_FALSE}"; then
+ as_fn_error $? "conditional \"ENABLE_PLUGIN_FREEIPMI\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
+if test -z "${ENABLE_PLUGIN_CUPS_TRUE}" && test -z "${ENABLE_PLUGIN_CUPS_FALSE}"; then
+ as_fn_error $? "conditional \"ENABLE_PLUGIN_CUPS\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
+if test -z "${ENABLE_PLUGIN_NFACCT_TRUE}" && test -z "${ENABLE_PLUGIN_NFACCT_FALSE}"; then
+ as_fn_error $? "conditional \"ENABLE_PLUGIN_NFACCT\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
+if test -z "${ENABLE_PLUGIN_XENSTAT_TRUE}" && test -z "${ENABLE_PLUGIN_XENSTAT_FALSE}"; then
+ as_fn_error $? "conditional \"ENABLE_PLUGIN_XENSTAT\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
+if test -z "${ENABLE_PLUGIN_PERF_TRUE}" && test -z "${ENABLE_PLUGIN_PERF_FALSE}"; then
+ as_fn_error $? "conditional \"ENABLE_PLUGIN_PERF\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
+if test -z "${ENABLE_BACKEND_KINESIS_TRUE}" && test -z "${ENABLE_BACKEND_KINESIS_FALSE}"; then
+ as_fn_error $? "conditional \"ENABLE_BACKEND_KINESIS\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
+if test -z "${ENABLE_BACKEND_PROMETHEUS_REMOTE_WRITE_TRUE}" && test -z "${ENABLE_BACKEND_PROMETHEUS_REMOTE_WRITE_FALSE}"; then
+ as_fn_error $? "conditional \"ENABLE_BACKEND_PROMETHEUS_REMOTE_WRITE\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
+if test -z "${ENABLE_BACKEND_MONGODB_TRUE}" && test -z "${ENABLE_BACKEND_MONGODB_FALSE}"; then
+ as_fn_error $? "conditional \"ENABLE_BACKEND_MONGODB\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
+if test -z "${ENABLE_PLUGIN_CGROUP_NETWORK_TRUE}" && test -z "${ENABLE_PLUGIN_CGROUP_NETWORK_FALSE}"; then
+ as_fn_error $? "conditional \"ENABLE_PLUGIN_CGROUP_NETWORK\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
+if test -z "${ENABLE_CXX_LINKER_TRUE}" && test -z "${ENABLE_CXX_LINKER_FALSE}"; then
+ as_fn_error $? "conditional \"ENABLE_CXX_LINKER\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
+
+: "${CONFIG_STATUS=./config.status}"
+ac_write_fail=0
+ac_clean_files_save=$ac_clean_files
+ac_clean_files="$ac_clean_files $CONFIG_STATUS"
+{ $as_echo "$as_me:${as_lineno-$LINENO}: creating $CONFIG_STATUS" >&5
+$as_echo "$as_me: creating $CONFIG_STATUS" >&6;}
+as_write_fail=0
+cat >$CONFIG_STATUS <<_ASEOF || as_write_fail=1
+#! $SHELL
+# Generated by $as_me.
+# Run this file to recreate the current configuration.
+# Compiler output produced by configure, useful for debugging
+# configure, is in config.log if it exists.
+
+debug=false
+ac_cs_recheck=false
+ac_cs_silent=false
+
+SHELL=\${CONFIG_SHELL-$SHELL}
+export SHELL
+_ASEOF
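+# config.status is stitched together from alternating here-documents: the
+# unquoted ones (<<_ASEOF / <<_ACEOF) interpolate configure-time values into
+# the script, while the quoted ones (<<\_ASEOF) are copied verbatim and only
+# evaluated when config.status itself runs.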
+cat >>$CONFIG_STATUS <<\_ASEOF || as_write_fail=1
+## -------------------- ##
+## M4sh Initialization. ##
+## -------------------- ##
+
+# Be more Bourne compatible
+DUALCASE=1; export DUALCASE # for MKS sh
+if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then :
+ emulate sh
+ NULLCMD=:
+ # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which
+ # is contrary to our usage. Disable this feature.
+ alias -g '${1+"$@"}'='"$@"'
+ setopt NO_GLOB_SUBST
+else
+ case `(set -o) 2>/dev/null` in #(
+ *posix*) :
+ set -o posix ;; #(
+ *) :
+ ;;
+esac
+fi
+
+
+as_nl='
+'
+export as_nl
+# Printing a long string crashes Solaris 7 /usr/bin/printf.
+as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\'
+as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo
+as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo
+# Prefer a ksh shell builtin over an external printf program on Solaris,
+# but without wasting forks for bash or zsh.
+if test -z "$BASH_VERSION$ZSH_VERSION" \
+ && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then
+ as_echo='print -r --'
+ as_echo_n='print -rn --'
+elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then
+ as_echo='printf %s\n'
+ as_echo_n='printf %s'
+else
+ if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then
+ as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"'
+ as_echo_n='/usr/ucb/echo -n'
+ else
+ as_echo_body='eval expr "X$1" : "X\\(.*\\)"'
+ as_echo_n_body='eval
+ arg=$1;
+ case $arg in #(
+ *"$as_nl"*)
+ expr "X$arg" : "X\\(.*\\)$as_nl";
+ arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;;
+ esac;
+ expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl"
+ '
+ export as_echo_n_body
+ as_echo_n='sh -c $as_echo_n_body as_echo'
+ fi
+ export as_echo_body
+ as_echo='sh -c $as_echo_body as_echo'
+fi
+
+# The user is always right.
+if test "${PATH_SEPARATOR+set}" != set; then
+ PATH_SEPARATOR=:
+ (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && {
+ (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 ||
+ PATH_SEPARATOR=';'
+ }
+fi
+
+
+# IFS
+# We need space, tab and new line, in precisely that order. Quoting is
+# there to prevent editors from complaining about space-tab.
+# (If _AS_PATH_WALK were called with IFS unset, it would disable word
+# splitting by setting IFS to empty value.)
+IFS=" "" $as_nl"
+
+# Find who we are. Look in the path if we contain no directory separator.
+as_myself=
+case $0 in #((
+ *[\\/]* ) as_myself=$0 ;;
+ *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break
+ done
+IFS=$as_save_IFS
+
+ ;;
+esac
+# We did not find ourselves, most probably we were run as `sh COMMAND'
+# in which case we are not to be found in the path.
+if test "x$as_myself" = x; then
+ as_myself=$0
+fi
+if test ! -f "$as_myself"; then
+ $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2
+ exit 1
+fi
+
+# Unset variables that we do not need and which cause bugs (e.g. in
+# pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1"
+# suppresses any "Segmentation fault" message there. '((' could
+# trigger a bug in pdksh 5.2.14.
+for as_var in BASH_ENV ENV MAIL MAILPATH
+do eval test x\${$as_var+set} = xset \
+ && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || :
+done
+PS1='$ '
+PS2='> '
+PS4='+ '
+
+# NLS nuisances.
+LC_ALL=C
+export LC_ALL
+LANGUAGE=C
+export LANGUAGE
+
+# CDPATH.
+(unset CDPATH) >/dev/null 2>&1 && unset CDPATH
+
+
+# as_fn_error STATUS ERROR [LINENO LOG_FD]
+# ----------------------------------------
+# Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are
+# provided, also output the error to LOG_FD, referencing LINENO. Then exit the
+# script with STATUS, using 1 if that was 0.
+as_fn_error ()
+{
+ as_status=$1; test $as_status -eq 0 && as_status=1
+ if test "$4"; then
+ as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+ $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4
+ fi
+ $as_echo "$as_me: error: $2" >&2
+ as_fn_exit $as_status
+} # as_fn_error
+
+
+# as_fn_set_status STATUS
+# -----------------------
+# Set $? to STATUS, without forking.
+as_fn_set_status ()
+{
+ return $1
+} # as_fn_set_status
+
+# as_fn_exit STATUS
+# -----------------
+# Exit the shell with STATUS, even in a "trap 0" or "set -e" context.
+as_fn_exit ()
+{
+ set +e
+ as_fn_set_status $1
+ exit $1
+} # as_fn_exit
+
+# as_fn_unset VAR
+# ---------------
+# Portably unset VAR.
+as_fn_unset ()
+{
+ { eval $1=; unset $1;}
+}
+as_unset=as_fn_unset
+# as_fn_append VAR VALUE
+# ----------------------
+# Append the text in VALUE to the end of the definition contained in VAR. Take
+# advantage of any shell optimizations that allow amortized linear growth over
+# repeated appends, instead of the typical quadratic growth present in naive
+# implementations.
+if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then :
+ eval 'as_fn_append ()
+ {
+ eval $1+=\$2
+ }'
+else
+ as_fn_append ()
+ {
+ eval $1=\$$1\$2
+ }
+fi # as_fn_append
+
+# as_fn_arith ARG...
+# ------------------
+# Perform arithmetic evaluation on the ARGs, and store the result in the
+# global $as_val. Take advantage of shells that can avoid forks. The arguments
+# must be portable across $(()) and expr.
+if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then :
+ eval 'as_fn_arith ()
+ {
+ as_val=$(( $* ))
+ }'
+else
+ as_fn_arith ()
+ {
+ as_val=`expr "$@" || test $? -eq 1`
+ }
+fi # as_fn_arith
+
+
+if expr a : '\(a\)' >/dev/null 2>&1 &&
+ test "X`expr 00001 : '.*\(...\)'`" = X001; then
+ as_expr=expr
+else
+ as_expr=false
+fi
+
+if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then
+ as_basename=basename
+else
+ as_basename=false
+fi
+
+if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then
+ as_dirname=dirname
+else
+ as_dirname=false
+fi
+
+as_me=`$as_basename -- "$0" ||
+$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \
+ X"$0" : 'X\(//\)$' \| \
+ X"$0" : 'X\(/\)' \| . 2>/dev/null ||
+$as_echo X/"$0" |
+ sed '/^.*\/\([^/][^/]*\)\/*$/{
+ s//\1/
+ q
+ }
+ /^X\/\(\/\/\)$/{
+ s//\1/
+ q
+ }
+ /^X\/\(\/\).*/{
+ s//\1/
+ q
+ }
+ s/.*/./; q'`
+
+# Avoid depending upon Character Ranges.
+as_cr_letters='abcdefghijklmnopqrstuvwxyz'
+as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ'
+as_cr_Letters=$as_cr_letters$as_cr_LETTERS
+as_cr_digits='0123456789'
+as_cr_alnum=$as_cr_Letters$as_cr_digits
+
+ECHO_C= ECHO_N= ECHO_T=
+case `echo -n x` in #(((((
+-n*)
+ case `echo 'xy\c'` in
+ *c*) ECHO_T=' ';; # ECHO_T is single tab character.
+ xy) ECHO_C='\c';;
+ *) echo `echo ksh88 bug on AIX 6.1` > /dev/null
+ ECHO_T=' ';;
+ esac;;
+*)
+ ECHO_N='-n';;
+esac
+
+rm -f conf$$ conf$$.exe conf$$.file
+if test -d conf$$.dir; then
+ rm -f conf$$.dir/conf$$.file
+else
+ rm -f conf$$.dir
+ mkdir conf$$.dir 2>/dev/null
+fi
+if (echo >conf$$.file) 2>/dev/null; then
+ if ln -s conf$$.file conf$$ 2>/dev/null; then
+ as_ln_s='ln -s'
+ # ... but there are two gotchas:
+ # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail.
+ # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable.
+ # In both cases, we have to default to `cp -pR'.
+ ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe ||
+ as_ln_s='cp -pR'
+ elif ln conf$$.file conf$$ 2>/dev/null; then
+ as_ln_s=ln
+ else
+ as_ln_s='cp -pR'
+ fi
+else
+ as_ln_s='cp -pR'
+fi
+rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file
+rmdir conf$$.dir 2>/dev/null
+
+
+# as_fn_mkdir_p
+# -------------
+# Create "$as_dir" as a directory, including parents if necessary.
+as_fn_mkdir_p ()
+{
+
+ case $as_dir in #(
+ -*) as_dir=./$as_dir;;
+ esac
+ test -d "$as_dir" || eval $as_mkdir_p || {
+ as_dirs=
+ while :; do
+ case $as_dir in #(
+ *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'(
+ *) as_qdir=$as_dir;;
+ esac
+ as_dirs="'$as_qdir' $as_dirs"
+ as_dir=`$as_dirname -- "$as_dir" ||
+$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+ X"$as_dir" : 'X\(//\)[^/]' \| \
+ X"$as_dir" : 'X\(//\)$' \| \
+ X"$as_dir" : 'X\(/\)' \| . 2>/dev/null ||
+$as_echo X"$as_dir" |
+ sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)[^/].*/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\).*/{
+ s//\1/
+ q
+ }
+ s/.*/./; q'`
+ test -d "$as_dir" && break
+ done
+ test -z "$as_dirs" || eval "mkdir $as_dirs"
+ } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir"
+
+
+} # as_fn_mkdir_p
+if mkdir -p . 2>/dev/null; then
+ as_mkdir_p='mkdir -p "$as_dir"'
+else
+ test -d ./-p && rmdir ./-p
+ as_mkdir_p=false
+fi
+
+
+# as_fn_executable_p FILE
+# -----------------------
+# Test if FILE is an executable regular file.
+as_fn_executable_p ()
+{
+ test -f "$1" && test -x "$1"
+} # as_fn_executable_p
+as_test_x='test -x'
+as_executable_p=as_fn_executable_p
+
+# Sed expression to map a string onto a valid CPP name.
+as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'"
+
+# Sed expression to map a string onto a valid variable name.
+as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'"
+
+
+exec 6>&1
+## ----------------------------------- ##
+## Main body of $CONFIG_STATUS script. ##
+## ----------------------------------- ##
+_ASEOF
+test $as_write_fail = 0 && chmod +x $CONFIG_STATUS || ac_write_fail=1
+
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+# Save the log message, to keep $0 and so on meaningful, and to
+# report actual input values of CONFIG_FILES etc. instead of their
+# values after options handling.
+ac_log="
+This file was extended by netdata $as_me v1.17.0, which was
+generated by GNU Autoconf 2.69. Invocation command line was
+
+ CONFIG_FILES = $CONFIG_FILES
+ CONFIG_HEADERS = $CONFIG_HEADERS
+ CONFIG_LINKS = $CONFIG_LINKS
+ CONFIG_COMMANDS = $CONFIG_COMMANDS
+ $ $0 $@
+
+on `(hostname || uname -n) 2>/dev/null | sed 1q`
+"
+
+_ACEOF
+
+case $ac_config_files in *"
+"*) set x $ac_config_files; shift; ac_config_files=$*;;
+esac
+
+case $ac_config_headers in *"
+"*) set x $ac_config_headers; shift; ac_config_headers=$*;;
+esac
+
+
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+# Files that config.status was made for.
+config_files="$ac_config_files"
+config_headers="$ac_config_headers"
+config_commands="$ac_config_commands"
+
+_ACEOF
+
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+ac_cs_usage="\
+\`$as_me' instantiates files and other configuration actions
+from templates according to the current configuration. Unless the files
+and actions are specified as TAGs, all are instantiated by default.
+
+Usage: $0 [OPTION]... [TAG]...
+
+ -h, --help print this help, then exit
+ -V, --version print version number and configuration settings, then exit
+ --config print configuration, then exit
+ -q, --quiet, --silent
+ do not print progress messages
+ -d, --debug don't remove temporary files
+ --recheck update $as_me by reconfiguring in the same conditions
+ --file=FILE[:TEMPLATE]
+ instantiate the configuration file FILE
+ --header=FILE[:TEMPLATE]
+ instantiate the configuration header FILE
+
+Configuration files:
+$config_files
+
+Configuration headers:
+$config_headers
+
+Configuration commands:
+$config_commands
+
+Report bugs to the package provider."
+
+_ACEOF
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`"
+ac_cs_version="\\
+netdata config.status v1.17.0
+configured by $0, generated by GNU Autoconf 2.69,
+ with options \\"\$ac_cs_config\\"
+
+Copyright (C) 2012 Free Software Foundation, Inc.
+This config.status script is free software; the Free Software Foundation
+gives unlimited permission to copy, distribute and modify it."
+
+ac_pwd='$ac_pwd'
+srcdir='$srcdir'
+INSTALL='$INSTALL'
+MKDIR_P='$MKDIR_P'
+AWK='$AWK'
+test -n "\$AWK" || AWK=awk
+_ACEOF
+
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+# The default lists apply if the user does not specify any file.
+ac_need_defaults=:
+while test $# != 0
+do
+ case $1 in
+ --*=?*)
+ ac_option=`expr "X$1" : 'X\([^=]*\)='`
+ ac_optarg=`expr "X$1" : 'X[^=]*=\(.*\)'`
+ ac_shift=:
+ ;;
+ --*=)
+ ac_option=`expr "X$1" : 'X\([^=]*\)='`
+ ac_optarg=
+ ac_shift=:
+ ;;
+ *)
+ ac_option=$1
+ ac_optarg=$2
+ ac_shift=shift
+ ;;
+ esac
+
+ case $ac_option in
+ # Handling of the options.
+ -recheck | --recheck | --rechec | --reche | --rech | --rec | --re | --r)
+ ac_cs_recheck=: ;;
+ --version | --versio | --versi | --vers | --ver | --ve | --v | -V )
+ $as_echo "$ac_cs_version"; exit ;;
+ --config | --confi | --conf | --con | --co | --c )
+ $as_echo "$ac_cs_config"; exit ;;
+ --debug | --debu | --deb | --de | --d | -d )
+ debug=: ;;
+ --file | --fil | --fi | --f )
+ $ac_shift
+ case $ac_optarg in
+ *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;;
+ '') as_fn_error $? "missing file argument" ;;
+ esac
+ as_fn_append CONFIG_FILES " '$ac_optarg'"
+ ac_need_defaults=false;;
+ --header | --heade | --head | --hea )
+ $ac_shift
+ case $ac_optarg in
+ *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;;
+ esac
+ as_fn_append CONFIG_HEADERS " '$ac_optarg'"
+ ac_need_defaults=false;;
+ --he | --h)
+ # Conflict between --help and --header
+ as_fn_error $? "ambiguous option: \`$1'
+Try \`$0 --help' for more information.";;
+ --help | --hel | -h )
+ $as_echo "$ac_cs_usage"; exit ;;
+ -q | -quiet | --quiet | --quie | --qui | --qu | --q \
+ | -silent | --silent | --silen | --sile | --sil | --si | --s)
+ ac_cs_silent=: ;;
+
+ # This is an error.
+ -*) as_fn_error $? "unrecognized option: \`$1'
+Try \`$0 --help' for more information." ;;
+
+ *) as_fn_append ac_config_targets " $1"
+ ac_need_defaults=false ;;
+
+ esac
+ shift
+done
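+# Note the hand-rolled prefix matching above (--rech, --rec, ... all mean
+# --recheck): config.status avoids getopt entirely so that it stays runnable
+# by any POSIX-ish /bin/sh.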
+
+ac_configure_extra_args=
+
+if $ac_cs_silent; then
+ exec 6>/dev/null
+ ac_configure_extra_args="$ac_configure_extra_args --silent"
+fi
+
+_ACEOF
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+if \$ac_cs_recheck; then
+ set X $SHELL '$0' $ac_configure_args \$ac_configure_extra_args --no-create --no-recursion
+ shift
+ \$as_echo "running CONFIG_SHELL=$SHELL \$*" >&6
+ CONFIG_SHELL='$SHELL'
+ export CONFIG_SHELL
+ exec "\$@"
+fi
+
+_ACEOF
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+exec 5>>config.log
+{
+ echo
+ sed 'h;s/./-/g;s/^.../## /;s/...$/ ##/;p;x;p;x' <<_ASBOX
+## Running $as_me. ##
+_ASBOX
+ $as_echo "$ac_log"
+} >&5
+
+_ACEOF
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+#
+# INIT-COMMANDS
+#
+AMDEP_TRUE="$AMDEP_TRUE" ac_aux_dir="$ac_aux_dir"
+
+_ACEOF
+
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+
+# Handling of arguments.
+for ac_config_target in $ac_config_targets
+do
+ case $ac_config_target in
+ "config.h") CONFIG_HEADERS="$CONFIG_HEADERS config.h" ;;
+ "depfiles") CONFIG_COMMANDS="$CONFIG_COMMANDS depfiles" ;;
+ "Makefile") CONFIG_FILES="$CONFIG_FILES Makefile" ;;
+ "netdata.spec") CONFIG_FILES="$CONFIG_FILES netdata.spec" ;;
+ "backends/graphite/Makefile") CONFIG_FILES="$CONFIG_FILES backends/graphite/Makefile" ;;
+ "backends/json/Makefile") CONFIG_FILES="$CONFIG_FILES backends/json/Makefile" ;;
+ "backends/Makefile") CONFIG_FILES="$CONFIG_FILES backends/Makefile" ;;
+ "backends/opentsdb/Makefile") CONFIG_FILES="$CONFIG_FILES backends/opentsdb/Makefile" ;;
+ "backends/prometheus/Makefile") CONFIG_FILES="$CONFIG_FILES backends/prometheus/Makefile" ;;
+ "backends/prometheus/remote_write/Makefile") CONFIG_FILES="$CONFIG_FILES backends/prometheus/remote_write/Makefile" ;;
+ "backends/aws_kinesis/Makefile") CONFIG_FILES="$CONFIG_FILES backends/aws_kinesis/Makefile" ;;
+ "backends/mongodb/Makefile") CONFIG_FILES="$CONFIG_FILES backends/mongodb/Makefile" ;;
+ "collectors/Makefile") CONFIG_FILES="$CONFIG_FILES collectors/Makefile" ;;
+ "collectors/apps.plugin/Makefile") CONFIG_FILES="$CONFIG_FILES collectors/apps.plugin/Makefile" ;;
+ "collectors/cgroups.plugin/Makefile") CONFIG_FILES="$CONFIG_FILES collectors/cgroups.plugin/Makefile" ;;
+ "collectors/charts.d.plugin/Makefile") CONFIG_FILES="$CONFIG_FILES collectors/charts.d.plugin/Makefile" ;;
+ "collectors/checks.plugin/Makefile") CONFIG_FILES="$CONFIG_FILES collectors/checks.plugin/Makefile" ;;
+ "collectors/diskspace.plugin/Makefile") CONFIG_FILES="$CONFIG_FILES collectors/diskspace.plugin/Makefile" ;;
+ "collectors/fping.plugin/Makefile") CONFIG_FILES="$CONFIG_FILES collectors/fping.plugin/Makefile" ;;
+ "collectors/ioping.plugin/Makefile") CONFIG_FILES="$CONFIG_FILES collectors/ioping.plugin/Makefile" ;;
+ "collectors/freebsd.plugin/Makefile") CONFIG_FILES="$CONFIG_FILES collectors/freebsd.plugin/Makefile" ;;
+ "collectors/freeipmi.plugin/Makefile") CONFIG_FILES="$CONFIG_FILES collectors/freeipmi.plugin/Makefile" ;;
+ "collectors/cups.plugin/Makefile") CONFIG_FILES="$CONFIG_FILES collectors/cups.plugin/Makefile" ;;
+ "collectors/idlejitter.plugin/Makefile") CONFIG_FILES="$CONFIG_FILES collectors/idlejitter.plugin/Makefile" ;;
+ "collectors/macos.plugin/Makefile") CONFIG_FILES="$CONFIG_FILES collectors/macos.plugin/Makefile" ;;
+ "collectors/nfacct.plugin/Makefile") CONFIG_FILES="$CONFIG_FILES collectors/nfacct.plugin/Makefile" ;;
+ "collectors/node.d.plugin/Makefile") CONFIG_FILES="$CONFIG_FILES collectors/node.d.plugin/Makefile" ;;
+ "collectors/plugins.d/Makefile") CONFIG_FILES="$CONFIG_FILES collectors/plugins.d/Makefile" ;;
+ "collectors/proc.plugin/Makefile") CONFIG_FILES="$CONFIG_FILES collectors/proc.plugin/Makefile" ;;
+ "collectors/python.d.plugin/Makefile") CONFIG_FILES="$CONFIG_FILES collectors/python.d.plugin/Makefile" ;;
+ "collectors/statsd.plugin/Makefile") CONFIG_FILES="$CONFIG_FILES collectors/statsd.plugin/Makefile" ;;
+ "collectors/tc.plugin/Makefile") CONFIG_FILES="$CONFIG_FILES collectors/tc.plugin/Makefile" ;;
+ "collectors/xenstat.plugin/Makefile") CONFIG_FILES="$CONFIG_FILES collectors/xenstat.plugin/Makefile" ;;
+ "collectors/perf.plugin/Makefile") CONFIG_FILES="$CONFIG_FILES collectors/perf.plugin/Makefile" ;;
+ "daemon/Makefile") CONFIG_FILES="$CONFIG_FILES daemon/Makefile" ;;
+ "database/Makefile") CONFIG_FILES="$CONFIG_FILES database/Makefile" ;;
+ "database/engine/Makefile") CONFIG_FILES="$CONFIG_FILES database/engine/Makefile" ;;
+ "diagrams/Makefile") CONFIG_FILES="$CONFIG_FILES diagrams/Makefile" ;;
+ "health/Makefile") CONFIG_FILES="$CONFIG_FILES health/Makefile" ;;
+ "health/notifications/Makefile") CONFIG_FILES="$CONFIG_FILES health/notifications/Makefile" ;;
+ "libnetdata/Makefile") CONFIG_FILES="$CONFIG_FILES libnetdata/Makefile" ;;
+ "libnetdata/adaptive_resortable_list/Makefile") CONFIG_FILES="$CONFIG_FILES libnetdata/adaptive_resortable_list/Makefile" ;;
+ "libnetdata/avl/Makefile") CONFIG_FILES="$CONFIG_FILES libnetdata/avl/Makefile" ;;
+ "libnetdata/buffer/Makefile") CONFIG_FILES="$CONFIG_FILES libnetdata/buffer/Makefile" ;;
+ "libnetdata/clocks/Makefile") CONFIG_FILES="$CONFIG_FILES libnetdata/clocks/Makefile" ;;
+ "libnetdata/config/Makefile") CONFIG_FILES="$CONFIG_FILES libnetdata/config/Makefile" ;;
+ "libnetdata/dictionary/Makefile") CONFIG_FILES="$CONFIG_FILES libnetdata/dictionary/Makefile" ;;
+ "libnetdata/eval/Makefile") CONFIG_FILES="$CONFIG_FILES libnetdata/eval/Makefile" ;;
+ "libnetdata/locks/Makefile") CONFIG_FILES="$CONFIG_FILES libnetdata/locks/Makefile" ;;
+ "libnetdata/log/Makefile") CONFIG_FILES="$CONFIG_FILES libnetdata/log/Makefile" ;;
+ "libnetdata/popen/Makefile") CONFIG_FILES="$CONFIG_FILES libnetdata/popen/Makefile" ;;
+ "libnetdata/procfile/Makefile") CONFIG_FILES="$CONFIG_FILES libnetdata/procfile/Makefile" ;;
+ "libnetdata/simple_pattern/Makefile") CONFIG_FILES="$CONFIG_FILES libnetdata/simple_pattern/Makefile" ;;
+ "libnetdata/socket/Makefile") CONFIG_FILES="$CONFIG_FILES libnetdata/socket/Makefile" ;;
+ "libnetdata/statistical/Makefile") CONFIG_FILES="$CONFIG_FILES libnetdata/statistical/Makefile" ;;
+ "libnetdata/storage_number/Makefile") CONFIG_FILES="$CONFIG_FILES libnetdata/storage_number/Makefile" ;;
+ "libnetdata/threads/Makefile") CONFIG_FILES="$CONFIG_FILES libnetdata/threads/Makefile" ;;
+ "libnetdata/url/Makefile") CONFIG_FILES="$CONFIG_FILES libnetdata/url/Makefile" ;;
+ "libnetdata/json/Makefile") CONFIG_FILES="$CONFIG_FILES libnetdata/json/Makefile" ;;
+ "libnetdata/health/Makefile") CONFIG_FILES="$CONFIG_FILES libnetdata/health/Makefile" ;;
+ "registry/Makefile") CONFIG_FILES="$CONFIG_FILES registry/Makefile" ;;
+ "streaming/Makefile") CONFIG_FILES="$CONFIG_FILES streaming/Makefile" ;;
+ "system/Makefile") CONFIG_FILES="$CONFIG_FILES system/Makefile" ;;
+ "tests/Makefile") CONFIG_FILES="$CONFIG_FILES tests/Makefile" ;;
+ "web/Makefile") CONFIG_FILES="$CONFIG_FILES web/Makefile" ;;
+ "web/api/Makefile") CONFIG_FILES="$CONFIG_FILES web/api/Makefile" ;;
+ "web/api/badges/Makefile") CONFIG_FILES="$CONFIG_FILES web/api/badges/Makefile" ;;
+ "web/api/exporters/Makefile") CONFIG_FILES="$CONFIG_FILES web/api/exporters/Makefile" ;;
+ "web/api/exporters/shell/Makefile") CONFIG_FILES="$CONFIG_FILES web/api/exporters/shell/Makefile" ;;
+ "web/api/exporters/prometheus/Makefile") CONFIG_FILES="$CONFIG_FILES web/api/exporters/prometheus/Makefile" ;;
+ "web/api/formatters/Makefile") CONFIG_FILES="$CONFIG_FILES web/api/formatters/Makefile" ;;
+ "web/api/formatters/csv/Makefile") CONFIG_FILES="$CONFIG_FILES web/api/formatters/csv/Makefile" ;;
+ "web/api/formatters/json/Makefile") CONFIG_FILES="$CONFIG_FILES web/api/formatters/json/Makefile" ;;
+ "web/api/formatters/ssv/Makefile") CONFIG_FILES="$CONFIG_FILES web/api/formatters/ssv/Makefile" ;;
+ "web/api/formatters/value/Makefile") CONFIG_FILES="$CONFIG_FILES web/api/formatters/value/Makefile" ;;
+ "web/api/queries/Makefile") CONFIG_FILES="$CONFIG_FILES web/api/queries/Makefile" ;;
+ "web/api/queries/average/Makefile") CONFIG_FILES="$CONFIG_FILES web/api/queries/average/Makefile" ;;
+ "web/api/queries/des/Makefile") CONFIG_FILES="$CONFIG_FILES web/api/queries/des/Makefile" ;;
+ "web/api/queries/incremental_sum/Makefile") CONFIG_FILES="$CONFIG_FILES web/api/queries/incremental_sum/Makefile" ;;
+ "web/api/queries/max/Makefile") CONFIG_FILES="$CONFIG_FILES web/api/queries/max/Makefile" ;;
+ "web/api/queries/median/Makefile") CONFIG_FILES="$CONFIG_FILES web/api/queries/median/Makefile" ;;
+ "web/api/queries/min/Makefile") CONFIG_FILES="$CONFIG_FILES web/api/queries/min/Makefile" ;;
+ "web/api/queries/ses/Makefile") CONFIG_FILES="$CONFIG_FILES web/api/queries/ses/Makefile" ;;
+ "web/api/queries/stddev/Makefile") CONFIG_FILES="$CONFIG_FILES web/api/queries/stddev/Makefile" ;;
+ "web/api/queries/sum/Makefile") CONFIG_FILES="$CONFIG_FILES web/api/queries/sum/Makefile" ;;
+ "web/api/health/Makefile") CONFIG_FILES="$CONFIG_FILES web/api/health/Makefile" ;;
+ "web/gui/Makefile") CONFIG_FILES="$CONFIG_FILES web/gui/Makefile" ;;
+ "web/server/Makefile") CONFIG_FILES="$CONFIG_FILES web/server/Makefile" ;;
+ "web/server/static/Makefile") CONFIG_FILES="$CONFIG_FILES web/server/static/Makefile" ;;
+
+ *) as_fn_error $? "invalid argument: \`$ac_config_target'" "$LINENO" 5;;
+ esac
+done
+
+
+# If the user did not use the arguments to specify the items to instantiate,
+# then the envvar interface is used. Set only those that are not.
+# We use the long form for the default assignment because of an extremely
+# bizarre bug on SunOS 4.1.3.
+if $ac_need_defaults; then
+ test "${CONFIG_FILES+set}" = set || CONFIG_FILES=$config_files
+ test "${CONFIG_HEADERS+set}" = set || CONFIG_HEADERS=$config_headers
+ test "${CONFIG_COMMANDS+set}" = set || CONFIG_COMMANDS=$config_commands
+fi
+
+# Have a temporary directory for convenience. Make it in the build tree
+# simply because there is no reason against having it here, and in addition,
+# creating and moving files from /tmp can sometimes cause problems.
+# Hook for its removal unless debugging.
+# Note that there is a small window in which the directory will not be cleaned:
+# after its creation but before its name has been assigned to `$tmp'.
+$debug ||
+{
+ tmp= ac_tmp=
+ trap 'exit_status=$?
+ : "${ac_tmp:=$tmp}"
+ { test ! -d "$ac_tmp" || rm -fr "$ac_tmp"; } && exit $exit_status
+' 0
+ trap 'as_fn_exit 1' 1 2 13 15
+}
+# Create a (secure) tmp directory for tmp files.
+
+{
+ tmp=`(umask 077 && mktemp -d "./confXXXXXX") 2>/dev/null` &&
+ test -d "$tmp"
+} ||
+{
+ tmp=./conf$$-$RANDOM
+ (umask 077 && mkdir "$tmp")
+} || as_fn_error $? "cannot create a temporary directory in ." "$LINENO" 5
+ac_tmp=$tmp
+
+# Set up the scripts for CONFIG_FILES section.
+# No need to generate them if there are no CONFIG_FILES.
+# This happens for instance with `./config.status config.h'.
+if test -n "$CONFIG_FILES"; then
+
+
+ac_cr=`echo X | tr X '\015'`
+# On cygwin, bash can eat \r inside `` if the user requested igncr.
+# But we know of no other shell where ac_cr would be empty at this
+# point, so we can use a bashism as a fallback.
+if test "x$ac_cr" = x; then
+ eval ac_cr=\$\'\\r\'
+fi
+ac_cs_awk_cr=`$AWK 'BEGIN { print "a\rb" }' </dev/null 2>/dev/null`
+if test "$ac_cs_awk_cr" = "a${ac_cr}b"; then
+ ac_cs_awk_cr='\\r'
+else
+ ac_cs_awk_cr=$ac_cr
+fi
+
+echo 'BEGIN {' >"$ac_tmp/subs1.awk" &&
+_ACEOF
+
+
+{
+ echo "cat >conf$$subs.awk <<_ACEOF" &&
+ echo "$ac_subst_vars" | sed 's/.*/&!$&$ac_delim/' &&
+ echo "_ACEOF"
+} >conf$$subs.sh ||
+ as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5
+ac_delim_num=`echo "$ac_subst_vars" | grep -c '^'`
+ac_delim='%!_!# '
+for ac_last_try in false false false false false :; do
+ . ./conf$$subs.sh ||
+ as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5
+
+ ac_delim_n=`sed -n "s/.*$ac_delim\$/X/p" conf$$subs.awk | grep -c X`
+ if test $ac_delim_n = $ac_delim_num; then
+ break
+ elif $ac_last_try; then
+ as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5
+ else
+ ac_delim="$ac_delim!$ac_delim _$ac_delim!! "
+ fi
+done
+rm -f conf$$subs.sh
+
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+cat >>"\$ac_tmp/subs1.awk" <<\\_ACAWK &&
+_ACEOF
+sed -n '
+h
+s/^/S["/; s/!.*/"]=/
+p
+g
+s/^[^!]*!//
+:repl
+t repl
+s/'"$ac_delim"'$//
+t delim
+:nl
+h
+s/\(.\{148\}\)..*/\1/
+t more1
+s/["\\]/\\&/g; s/^/"/; s/$/\\n"\\/
+p
+n
+b repl
+:more1
+s/["\\]/\\&/g; s/^/"/; s/$/"\\/
+p
+g
+s/.\{148\}//
+t nl
+:delim
+h
+s/\(.\{148\}\)..*/\1/
+t more2
+s/["\\]/\\&/g; s/^/"/; s/$/"/
+p
+b
+:more2
+s/["\\]/\\&/g; s/^/"/; s/$/"\\/
+p
+g
+s/.\{148\}//
+t delim
+' <conf$$subs.awk | sed '
+/^[^""]/{
+ N
+ s/\n//
+}
+' >>$CONFIG_STATUS || ac_write_fail=1
+rm -f conf$$subs.awk
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+_ACAWK
+cat >>"\$ac_tmp/subs1.awk" <<_ACAWK &&
+ for (key in S) S_is_set[key] = 1
+ FS = ""
+
+}
+{
+ line = $ 0
+ nfields = split(line, field, "@")
+ substed = 0
+ len = length(field[1])
+ for (i = 2; i < nfields; i++) {
+ key = field[i]
+ keylen = length(key)
+ if (S_is_set[key]) {
+ value = S[key]
+ line = substr(line, 1, len) "" value "" substr(line, len + keylen + 3)
+ len += length(value) + length(field[++i])
+ substed = 1
+ } else
+ len += 1 + keylen
+ }
+
+ print line
+}
+
+_ACAWK
+_ACEOF
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+if sed "s/$ac_cr//" < /dev/null > /dev/null 2>&1; then
+ sed "s/$ac_cr\$//; s/$ac_cr/$ac_cs_awk_cr/g"
+else
+ cat
+fi < "$ac_tmp/subs1.awk" > "$ac_tmp/subs.awk" \
+ || as_fn_error $? "could not setup config files machinery" "$LINENO" 5
+_ACEOF
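+# subs.awk, assembled above, does the real templating: every input line is
+# split on "@" and each @VAR@ whose name is a key of S (loaded from
+# $ac_subst_vars) is replaced inline; unrecognized @...@ tokens pass through
+# untouched.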
+
+# VPATH may cause trouble with some makes, so we remove sole $(srcdir),
+# ${srcdir} and @srcdir@ entries from VPATH if srcdir is ".", strip leading and
+# trailing colons and then remove the whole line if VPATH becomes empty
+# (actually we leave an empty line to preserve line numbers).
+if test "x$srcdir" = x.; then
+ ac_vpsub='/^[ ]*VPATH[ ]*=[ ]*/{
+h
+s///
+s/^/:/
+s/[ ]*$/:/
+s/:\$(srcdir):/:/g
+s/:\${srcdir}:/:/g
+s/:@srcdir@:/:/g
+s/^:*//
+s/:*$//
+x
+s/\(=[ ]*\).*/\1/
+G
+s/\n//
+s/^[^=]*=[ ]*$//
+}'
+fi
+
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+fi # test -n "$CONFIG_FILES"
+
+# Set up the scripts for CONFIG_HEADERS section.
+# No need to generate them if there are no CONFIG_HEADERS.
+# This happens for instance with `./config.status Makefile'.
+if test -n "$CONFIG_HEADERS"; then
+cat >"$ac_tmp/defines.awk" <<\_ACAWK ||
+BEGIN {
+_ACEOF
+
+# Transform confdefs.h into an awk script `defines.awk', embedded as
+# here-document in config.status, that substitutes the proper values into
+# config.h.in to produce config.h.
+
+# Create a delimiter string that does not exist in confdefs.h, to ease
+# handling of long lines.
+ac_delim='%!_!# '
+for ac_last_try in false false :; do
+ ac_tt=`sed -n "/$ac_delim/p" confdefs.h`
+ if test -z "$ac_tt"; then
+ break
+ elif $ac_last_try; then
+ as_fn_error $? "could not make $CONFIG_HEADERS" "$LINENO" 5
+ else
+ ac_delim="$ac_delim!$ac_delim _$ac_delim!! "
+ fi
+done
+
+# For the awk script, D is an array of macro values keyed by name,
+# likewise P contains macro parameters if any. Preserve backslash
+# newline sequences.
+
+ac_word_re=[_$as_cr_Letters][_$as_cr_alnum]*
+sed -n '
+s/.\{148\}/&'"$ac_delim"'/g
+t rset
+:rset
+s/^[ ]*#[ ]*define[ ][ ]*/ /
+t def
+d
+:def
+s/\\$//
+t bsnl
+s/["\\]/\\&/g
+s/^ \('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/P["\1"]="\2"\
+D["\1"]=" \3"/p
+s/^ \('"$ac_word_re"'\)[ ]*\(.*\)/D["\1"]=" \2"/p
+d
+:bsnl
+s/["\\]/\\&/g
+s/^ \('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/P["\1"]="\2"\
+D["\1"]=" \3\\\\\\n"\\/p
+t cont
+s/^ \('"$ac_word_re"'\)[ ]*\(.*\)/D["\1"]=" \2\\\\\\n"\\/p
+t cont
+d
+:cont
+n
+s/.\{148\}/&'"$ac_delim"'/g
+t clear
+:clear
+s/\\$//
+t bsnlc
+s/["\\]/\\&/g; s/^/"/; s/$/"/p
+d
+:bsnlc
+s/["\\]/\\&/g; s/^/"/; s/$/\\\\\\n"\\/p
+b cont
+' <confdefs.h | sed '
+s/'"$ac_delim"'/"\\\
+"/g' >>$CONFIG_STATUS || ac_write_fail=1
+
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+ for (key in D) D_is_set[key] = 1
+ FS = ""
+}
+/^[\t ]*#[\t ]*(define|undef)[\t ]+$ac_word_re([\t (]|\$)/ {
+ line = \$ 0
+ split(line, arg, " ")
+ if (arg[1] == "#") {
+ defundef = arg[2]
+ mac1 = arg[3]
+ } else {
+ defundef = substr(arg[1], 2)
+ mac1 = arg[2]
+ }
+ split(mac1, mac2, "(") #)
+ macro = mac2[1]
+ prefix = substr(line, 1, index(line, defundef) - 1)
+ if (D_is_set[macro]) {
+ # Preserve the white space surrounding the "#".
+ print prefix "define", macro P[macro] D[macro]
+ next
+ } else {
+ # Replace #undef with comments. This is necessary, for example,
+ # in the case of _POSIX_SOURCE, which is predefined and required
+ # on some systems where configure will not decide to define it.
+ if (defundef == "undef") {
+ print "/*", prefix defundef, macro, "*/"
+ next
+ }
+ }
+}
+{ print }
+_ACAWK
+_ACEOF
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+ as_fn_error $? "could not setup config headers machinery" "$LINENO" 5
+fi # test -n "$CONFIG_HEADERS"
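+# defines.awk (built above) rewrites each "#define"/"#undef" line of
+# config.h.in: macros recorded in confdefs.h get their configured values, and
+# any remaining #undef is emitted as a /* #undef NAME */ comment, so config.h
+# still documents what was probed but left undefined.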
+
+
+eval set X " :F $CONFIG_FILES :H $CONFIG_HEADERS :C $CONFIG_COMMANDS"
+shift
+for ac_tag
+do
+ case $ac_tag in
+ :[FHLC]) ac_mode=$ac_tag; continue;;
+ esac
+ case $ac_mode$ac_tag in
+ :[FHL]*:*);;
+ :L* | :C*:*) as_fn_error $? "invalid tag \`$ac_tag'" "$LINENO" 5;;
+ :[FH]-) ac_tag=-:-;;
+ :[FH]*) ac_tag=$ac_tag:$ac_tag.in;;
+ esac
+ ac_save_IFS=$IFS
+ IFS=:
+ set x $ac_tag
+ IFS=$ac_save_IFS
+ shift
+ ac_file=$1
+ shift
+
+ case $ac_mode in
+ :L) ac_source=$1;;
+ :[FH])
+ ac_file_inputs=
+ for ac_f
+ do
+ case $ac_f in
+ -) ac_f="$ac_tmp/stdin";;
+ *) # Look for the file first in the build tree, then in the source tree
+ # (if the path is not absolute). The absolute path cannot be DOS-style,
+ # because $ac_f cannot contain `:'.
+ test -f "$ac_f" ||
+ case $ac_f in
+ [\\/$]*) false;;
+ *) test -f "$srcdir/$ac_f" && ac_f="$srcdir/$ac_f";;
+ esac ||
+ as_fn_error 1 "cannot find input file: \`$ac_f'" "$LINENO" 5;;
+ esac
+ case $ac_f in *\'*) ac_f=`$as_echo "$ac_f" | sed "s/'/'\\\\\\\\''/g"`;; esac
+ as_fn_append ac_file_inputs " '$ac_f'"
+ done
+
+ # Let's still pretend it is `configure' which instantiates (i.e., don't
+ # use $as_me), people would be surprised to read:
+ # /* config.h. Generated by config.status. */
+ configure_input='Generated from '`
+ $as_echo "$*" | sed 's|^[^:]*/||;s|:[^:]*/|, |g'
+ `' by configure.'
+ if test x"$ac_file" != x-; then
+ configure_input="$ac_file. $configure_input"
+ { $as_echo "$as_me:${as_lineno-$LINENO}: creating $ac_file" >&5
+$as_echo "$as_me: creating $ac_file" >&6;}
+ fi
+ # Neutralize special characters interpreted by sed in replacement strings.
+ case $configure_input in #(
+ *\&* | *\|* | *\\* )
+ ac_sed_conf_input=`$as_echo "$configure_input" |
+ sed 's/[\\\\&|]/\\\\&/g'`;; #(
+ *) ac_sed_conf_input=$configure_input;;
+ esac
+
+ case $ac_tag in
+ *:-:* | *:-) cat >"$ac_tmp/stdin" \
+ || as_fn_error $? "could not create $ac_file" "$LINENO" 5 ;;
+ esac
+ ;;
+ esac
+
+ ac_dir=`$as_dirname -- "$ac_file" ||
+$as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+ X"$ac_file" : 'X\(//\)[^/]' \| \
+ X"$ac_file" : 'X\(//\)$' \| \
+ X"$ac_file" : 'X\(/\)' \| . 2>/dev/null ||
+$as_echo X"$ac_file" |
+ sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)[^/].*/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\).*/{
+ s//\1/
+ q
+ }
+ s/.*/./; q'`
+ as_dir="$ac_dir"; as_fn_mkdir_p
+ ac_builddir=.
+
+case "$ac_dir" in
+.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;;
+*)
+ ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'`
+ # A ".." for each directory in $ac_dir_suffix.
+ ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'`
+ case $ac_top_builddir_sub in
+ "") ac_top_builddir_sub=. ac_top_build_prefix= ;;
+ *) ac_top_build_prefix=$ac_top_builddir_sub/ ;;
+ esac ;;
+esac
+ac_abs_top_builddir=$ac_pwd
+ac_abs_builddir=$ac_pwd$ac_dir_suffix
+# for backward compatibility:
+ac_top_builddir=$ac_top_build_prefix
+
+case $srcdir in
+ .) # We are building in place.
+ ac_srcdir=.
+ ac_top_srcdir=$ac_top_builddir_sub
+ ac_abs_top_srcdir=$ac_pwd ;;
+ [\\/]* | ?:[\\/]* ) # Absolute name.
+ ac_srcdir=$srcdir$ac_dir_suffix;
+ ac_top_srcdir=$srcdir
+ ac_abs_top_srcdir=$srcdir ;;
+ *) # Relative name.
+ ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix
+ ac_top_srcdir=$ac_top_build_prefix$srcdir
+ ac_abs_top_srcdir=$ac_pwd/$srcdir ;;
+esac
+ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix
+
+
+ case $ac_mode in
+ :F)
+ #
+ # CONFIG_FILE
+ #
+
+ case $INSTALL in
+ [\\/$]* | ?:[\\/]* ) ac_INSTALL=$INSTALL ;;
+ *) ac_INSTALL=$ac_top_build_prefix$INSTALL ;;
+ esac
+ ac_MKDIR_P=$MKDIR_P
+ case $MKDIR_P in
+ [\\/$]* | ?:[\\/]* ) ;;
+ */*) ac_MKDIR_P=$ac_top_build_prefix$MKDIR_P ;;
+ esac
+_ACEOF
+
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+# If the template does not know about datarootdir, expand it.
+# FIXME: This hack should be removed a few years after 2.60.
+ac_datarootdir_hack=; ac_datarootdir_seen=
+ac_sed_dataroot='
+/datarootdir/ {
+ p
+ q
+}
+/@datadir@/p
+/@docdir@/p
+/@infodir@/p
+/@localedir@/p
+/@mandir@/p'
+case `eval "sed -n \"\$ac_sed_dataroot\" $ac_file_inputs"` in
+*datarootdir*) ac_datarootdir_seen=yes;;
+*@datadir@*|*@docdir@*|*@infodir@*|*@localedir@*|*@mandir@*)
+ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&5
+$as_echo "$as_me: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&2;}
+_ACEOF
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+ ac_datarootdir_hack='
+ s&@datadir@&$datadir&g
+ s&@docdir@&$docdir&g
+ s&@infodir@&$infodir&g
+ s&@localedir@&$localedir&g
+ s&@mandir@&$mandir&g
+ s&\\\${datarootdir}&$datarootdir&g' ;;
+esac
+_ACEOF
+
+# Neutralize VPATH when `$srcdir' = `.'.
+# Shell code in configure.ac might set extrasub.
+# FIXME: do we really want to maintain this feature?
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+ac_sed_extra="$ac_vpsub
+$extrasub
+_ACEOF
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+:t
+/@[a-zA-Z_][a-zA-Z_0-9]*@/!b
+s|@configure_input@|$ac_sed_conf_input|;t t
+s&@top_builddir@&$ac_top_builddir_sub&;t t
+s&@top_build_prefix@&$ac_top_build_prefix&;t t
+s&@srcdir@&$ac_srcdir&;t t
+s&@abs_srcdir@&$ac_abs_srcdir&;t t
+s&@top_srcdir@&$ac_top_srcdir&;t t
+s&@abs_top_srcdir@&$ac_abs_top_srcdir&;t t
+s&@builddir@&$ac_builddir&;t t
+s&@abs_builddir@&$ac_abs_builddir&;t t
+s&@abs_top_builddir@&$ac_abs_top_builddir&;t t
+s&@INSTALL@&$ac_INSTALL&;t t
+s&@MKDIR_P@&$ac_MKDIR_P&;t t
+$ac_datarootdir_hack
+"
+eval sed \"\$ac_sed_extra\" "$ac_file_inputs" | $AWK -f "$ac_tmp/subs.awk" \
+ >$ac_tmp/out || as_fn_error $? "could not create $ac_file" "$LINENO" 5
+
+test -z "$ac_datarootdir_hack$ac_datarootdir_seen" &&
+ { ac_out=`sed -n '/\${datarootdir}/p' "$ac_tmp/out"`; test -n "$ac_out"; } &&
+ { ac_out=`sed -n '/^[ ]*datarootdir[ ]*:*=/p' \
+ "$ac_tmp/out"`; test -z "$ac_out"; } &&
+ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file contains a reference to the variable \`datarootdir'
+which seems to be undefined. Please make sure it is defined" >&5
+$as_echo "$as_me: WARNING: $ac_file contains a reference to the variable \`datarootdir'
+which seems to be undefined. Please make sure it is defined" >&2;}
+
+ rm -f "$ac_tmp/stdin"
+ case $ac_file in
+ -) cat "$ac_tmp/out" && rm -f "$ac_tmp/out";;
+ *) rm -f "$ac_file" && mv "$ac_tmp/out" "$ac_file";;
+ esac \
+ || as_fn_error $? "could not create $ac_file" "$LINENO" 5
+ ;;
+ :H)
+ #
+ # CONFIG_HEADER
+ #
+ if test x"$ac_file" != x-; then
+ {
+ $as_echo "/* $configure_input */" \
+ && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs"
+ } >"$ac_tmp/config.h" \
+ || as_fn_error $? "could not create $ac_file" "$LINENO" 5
+ if diff "$ac_file" "$ac_tmp/config.h" >/dev/null 2>&1; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: $ac_file is unchanged" >&5
+$as_echo "$as_me: $ac_file is unchanged" >&6;}
+ else
+ rm -f "$ac_file"
+ mv "$ac_tmp/config.h" "$ac_file" \
+ || as_fn_error $? "could not create $ac_file" "$LINENO" 5
+ fi
+ else
+ $as_echo "/* $configure_input */" \
+ && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs" \
+ || as_fn_error $? "could not create -" "$LINENO" 5
+ fi
+# Compute "$ac_file"'s index in $config_headers.
+_am_arg="$ac_file"
+_am_stamp_count=1
+for _am_header in $config_headers :; do
+ case $_am_header in
+ $_am_arg | $_am_arg:* )
+ break ;;
+ * )
+ _am_stamp_count=`expr $_am_stamp_count + 1` ;;
+ esac
+done
+echo "timestamp for $_am_arg" >`$as_dirname -- "$_am_arg" ||
+$as_expr X"$_am_arg" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+ X"$_am_arg" : 'X\(//\)[^/]' \| \
+ X"$_am_arg" : 'X\(//\)$' \| \
+ X"$_am_arg" : 'X\(/\)' \| . 2>/dev/null ||
+$as_echo X"$_am_arg" |
+ sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)[^/].*/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\).*/{
+ s//\1/
+ q
+ }
+ s/.*/./; q'`/stamp-h$_am_stamp_count
+ ;;
+
+ :C) { $as_echo "$as_me:${as_lineno-$LINENO}: executing $ac_file commands" >&5
+$as_echo "$as_me: executing $ac_file commands" >&6;}
+ ;;
+ esac
+
+
+ case $ac_file$ac_mode in
+ "depfiles":C) test x"$AMDEP_TRUE" != x"" || {
+ # Older Autoconf quotes --file arguments for eval, but not when files
+ # are listed without --file. Let's play safe and only enable the eval
+ # if we detect the quoting.
+ case $CONFIG_FILES in
+ *\'*) eval set x "$CONFIG_FILES" ;;
+ *) set x $CONFIG_FILES ;;
+ esac
+ shift
+ for mf
+ do
+ # Strip MF so we end up with the name of the file.
+ mf=`echo "$mf" | sed -e 's/:.*$//'`
+ # Check whether this is an Automake generated Makefile or not.
+ # We used to match only the files named 'Makefile.in', but
+ # some people rename them; so instead we look at the file content.
+ # Grep'ing the first line is not enough: some people post-process
+ # each Makefile.in and add a new line on top of each file to say so.
+ # Grep'ing the whole file is not good either: AIX grep has a line
+	# limit of 2048, but all sed's we know understand at least 4000.
+ if sed -n 's,^#.*generated by automake.*,X,p' "$mf" | grep X >/dev/null 2>&1; then
+ dirpart=`$as_dirname -- "$mf" ||
+$as_expr X"$mf" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+ X"$mf" : 'X\(//\)[^/]' \| \
+ X"$mf" : 'X\(//\)$' \| \
+ X"$mf" : 'X\(/\)' \| . 2>/dev/null ||
+$as_echo X"$mf" |
+ sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)[^/].*/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\).*/{
+ s//\1/
+ q
+ }
+ s/.*/./; q'`
+ else
+ continue
+ fi
+ # Extract the definition of DEPDIR, am__include, and am__quote
+ # from the Makefile without running 'make'.
+ DEPDIR=`sed -n 's/^DEPDIR = //p' < "$mf"`
+ test -z "$DEPDIR" && continue
+ am__include=`sed -n 's/^am__include = //p' < "$mf"`
+ test -z "$am__include" && continue
+ am__quote=`sed -n 's/^am__quote = //p' < "$mf"`
+ # Find all dependency output files, they are included files with
+ # $(DEPDIR) in their names. We invoke sed twice because it is the
+ # simplest approach to changing $(DEPDIR) to its actual value in the
+ # expansion.
+ for file in `sed -n "
+ s/^$am__include $am__quote\(.*(DEPDIR).*\)$am__quote"'$/\1/p' <"$mf" | \
+ sed -e 's/\$(DEPDIR)/'"$DEPDIR"'/g'`; do
+ # Make sure the directory exists.
+ test -f "$dirpart/$file" && continue
+ fdir=`$as_dirname -- "$file" ||
+$as_expr X"$file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+ X"$file" : 'X\(//\)[^/]' \| \
+ X"$file" : 'X\(//\)$' \| \
+ X"$file" : 'X\(/\)' \| . 2>/dev/null ||
+$as_echo X"$file" |
+ sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)[^/].*/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\).*/{
+ s//\1/
+ q
+ }
+ s/.*/./; q'`
+ as_dir=$dirpart/$fdir; as_fn_mkdir_p
+ # echo "creating $dirpart/$file"
+ echo '# dummy' > "$dirpart/$file"
+ done
+ done
+}
+ ;;
+
+ esac
+done # for ac_tag
+
+
+as_fn_exit 0
+_ACEOF
+ac_clean_files=$ac_clean_files_save
+
+test $ac_write_fail = 0 ||
+ as_fn_error $? "write failure creating $CONFIG_STATUS" "$LINENO" 5
+
+
+# configure is writing to config.log, and then calls config.status.
+# config.status does its own redirection, appending to config.log.
+# Unfortunately, on DOS this fails, as config.log is still kept open
+# by configure, so config.status won't be able to write to it; its
+# output is simply discarded. So we exec the FD to /dev/null,
+# effectively closing config.log, so it can be properly (re)opened and
+# appended to by config.status. When coming back to configure, we
+# need to make the FD available again.
+if test "$no_create" != yes; then
+ ac_cs_success=:
+ ac_config_status_args=
+ test "$silent" = yes &&
+ ac_config_status_args="$ac_config_status_args --quiet"
+ exec 5>/dev/null
+ $SHELL $CONFIG_STATUS $ac_config_status_args || ac_cs_success=false
+ exec 5>>config.log
+ # Use ||, not &&, to avoid exiting from the if with $? = 1, which
+ # would make configure fail if this is the last instruction.
+ $ac_cs_success || as_fn_exit 1
+fi
+if test -n "$ac_unrecognized_opts" && test "$enable_option_checking" != no; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: unrecognized options: $ac_unrecognized_opts" >&5
+$as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2;}
+fi
+
+
+test "${with_math}" != "yes" && { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: You are building without math. math allows accurate calculations. It should be enabled." >&5
+$as_echo "$as_me: WARNING: You are building without math. math allows accurate calculations. It should be enabled." >&2;} || :
+test "${with_zlib}" != "yes" && { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: You are building without zlib. zlib allows netdata to transfer a lot less data with web clients. It should be enabled." >&5
+$as_echo "$as_me: WARNING: You are building without zlib. zlib allows netdata to transfer a lot less data with web clients. It should be enabled." >&2;} || :
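+
The two warnings above correspond to the `--with-math` and `--with-zlib` configure switches; the debian rules hunk later in this diff passes both. A minimal sketch of a configure run that avoids them, assuming a checked-out source tree:

```sh
# A sketch only: enable math and zlib support so configure does not warn.
# Flag names are taken from the warning text and the debian/rules hunk below.
autoreconf -ivf
./configure --with-math --with-zlib
```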
diff --git a/configure.ac b/configure.ac
index c65e406f..deb71400 100644
--- a/configure.ac
+++ b/configure.ac
@@ -83,6 +83,12 @@ AC_ARG_ENABLE(
[enable_backend_prometheus_remote_write="detect"]
)
AC_ARG_ENABLE(
+ [backend-mongodb],
+ [AS_HELP_STRING([--enable-backend-mongodb], [enable mongodb backend @<:@default autodetect@:>@])],
+ ,
+ [enable_backend_mongodb="detect"]
+)
+AC_ARG_ENABLE(
[pedantic],
[AS_HELP_STRING([--enable-pedantic], [enable pedantic compiler warnings @<:@default disabled@:>@])],
,
@@ -283,7 +289,7 @@ OPTIONAL_UV_LIBS="${UV_LIBS}"
AC_CHECK_LIB(
[lz4],
- [LZ4_decompress_safe],
+ [LZ4_compress_default],
[LZ4_LIBS="-llz4"]
)
@@ -378,8 +384,11 @@ test "${enable_dbengine}" = "yes" -a -z "${LZ4_LIBS}" && \
test "${enable_dbengine}" = "yes" -a -z "${JUDY_LIBS}" && \
AC_MSG_ERROR([libJudy required but not found. Try installing 'libjudy-dev' or 'Judy-devel'.])
-test "${enable_dbengine}" = "yes" -o "${enable_https}" = "yes" -a -z "${SSL_LIBS}" && \
- AC_MSG_ERROR([OpenSSL required but not found. Try installing 'libssl-dev' or 'openssl-devel'.])
+test "${enable_https}" = "yes" -a -z "${SSL_LIBS}" && \
+ AC_MSG_ERROR([OpenSSL required for HTTPS but not found. Try installing 'libssl-dev' or 'openssl-devel'.])
+
+test "${enable_dbengine}" = "yes" -a -z "${SSL_LIBS}" && \
+ AC_MSG_ERROR([OpenSSL required for DBENGINE but not found. Try installing 'libssl-dev' or 'openssl-devel'.])
AC_MSG_CHECKING([if netdata dbengine should be used])
if test "${enable_dbengine}" != "no" -a "${UV_LIBS}" -a "${LZ4_LIBS}" -a "${JUDY_LIBS}" -a "${SSL_LIBS}"; then
@@ -882,7 +891,7 @@ AM_CONDITIONAL([ENABLE_BACKEND_KINESIS], [test "${enable_backend_kinesis}" = "ye
PKG_CHECK_MODULES(
[PROTOBUF],
- [protobuf],
+ [protobuf >= 3],
[have_libprotobuf=yes],
[have_libprotobuf=no]
)
@@ -965,6 +974,33 @@ AM_CONDITIONAL([ENABLE_BACKEND_PROMETHEUS_REMOTE_WRITE], [test "${enable_backend
# -----------------------------------------------------------------------------
+# MongoDB backend - libmongoc
+
+PKG_CHECK_MODULES(
+ [LIBMONGOC],
+ [libmongoc-1.0 >= 1.7],
+ [have_libmongoc=yes],
+ [have_libmongoc=no]
+)
+
+test "${enable_backend_mongodb}" = "yes" -a "${have_libmongoc}" != "yes" && \
+	AC_MSG_ERROR([libmongoc required but not found. Try installing 'mongoc'.])
+
+AC_MSG_CHECKING([if mongodb backend should be enabled])
+if test "${enable_backend_mongodb}" != "no" -a "${have_libmongoc}" = "yes"; then
+ enable_backend_mongodb="yes"
+ AC_DEFINE([HAVE_MONGOC], [1], [libmongoc usability])
+ OPTIONAL_MONGOC_CFLAGS="${LIBMONGOC_CFLAGS}"
+ OPTIONAL_MONGOC_LIBS="${LIBMONGOC_LIBS}"
+else
+ enable_backend_mongodb="no"
+fi
+
+AC_MSG_RESULT([${enable_backend_mongodb}])
+AM_CONDITIONAL([ENABLE_BACKEND_MONGODB], [test "${enable_backend_mongodb}" = "yes"])
+
+
+# -----------------------------------------------------------------------------
# check for setns() - cgroup-network
AC_CHECK_FUNC([setns])
@@ -988,7 +1024,7 @@ if test "${enable_lto}" != "no"; then
fi
if test "${have_lto}" = "yes"; then
oCFLAGS="${CFLAGS}"
- CFLAGS="${CFLAGS} -flto ${OPTIONAL_MATH_CFLAGS} ${OPTIONAL_NFACCT_CFLAGS} ${OPTIONAL_ZLIB_CFLAGS} ${OPTIONAL_UUID_CFLAGS} ${OPTIONAL_LIBCAP_CFLAGS} ${OPTIONAL_IPMIMONITORING_CFLAGS} ${OPTIONAL_CUPS_CFLAGS} ${OPTIONAL_XENSTAT_FLAGS} ${OPTIONAL_KINESIS_CFLAGS} ${OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS}"
+ CFLAGS="${CFLAGS} -flto"
ac_cv_c_lto_cross_compile="${enable_lto}"
test "${ac_cv_c_lto_cross_compile}" != "yes" && ac_cv_c_lto_cross_compile="no"
AC_C_LTO
@@ -1037,6 +1073,10 @@ AC_SUBST([logdir])
AC_SUBST([pluginsdir])
AC_SUBST([webdir])
+CFLAGS="${CFLAGS} ${OPTIONAL_MATH_CFLAGS} ${OPTIONAL_NFACCT_CFLAGS} ${OPTIONAL_ZLIB_CFLAGS} ${OPTIONAL_UUID_CFLAGS} \
+ ${OPTIONAL_LIBCAP_CFLAGS} ${OPTIONAL_IPMIMONITORING_CFLAGS} ${OPTIONAL_CUPS_CFLAGS} ${OPTIONAL_XENSTAT_FLAGS} \
+ ${OPTIONAL_KINESIS_CFLAGS} ${OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS} ${OPTIONAL_MONGOC_CFLAGS}"
+
CXXFLAGS="${CFLAGS} ${CXX11FLAG}"
CPPFLAGS="\
@@ -1076,6 +1116,8 @@ AC_SUBST([OPTIONAL_KINESIS_CFLAGS])
AC_SUBST([OPTIONAL_KINESIS_LIBS])
AC_SUBST([OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS])
AC_SUBST([OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS])
+AC_SUBST([OPTIONAL_MONGOC_CFLAGS])
+AC_SUBST([OPTIONAL_MONGOC_LIBS])
AC_CONFIG_FILES([
@@ -1088,6 +1130,7 @@ AC_CONFIG_FILES([
backends/prometheus/Makefile
backends/prometheus/remote_write/Makefile
backends/aws_kinesis/Makefile
+ backends/mongodb/Makefile
collectors/Makefile
collectors/apps.plugin/Makefile
collectors/cgroups.plugin/Makefile
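The hunk above autodetects the MongoDB backend through pkg-config, requiring `libmongoc-1.0 >= 1.7`, and aborts only when `--enable-backend-mongodb` was requested explicitly but the library is missing. A hedged sketch of both sides of that contract:

```sh
# A sketch only: reproduce configure's libmongoc check by hand, then
# force the backend on (it defaults to autodetect, per the hunk above).
pkg-config --exists 'libmongoc-1.0 >= 1.7' \
    && echo "libmongoc is new enough" \
    || echo "libmongoc missing: --enable-backend-mongodb would abort configure"
./configure --enable-backend-mongodb
```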
diff --git a/contrib/README.md b/contrib/README.md
index c5ce873a..8f28282a 100644
--- a/contrib/README.md
+++ b/contrib/README.md
@@ -1,4 +1,4 @@
-# netdata contrib
+# Netdata contrib
## Building .deb packages
@@ -7,23 +7,28 @@ Debian package. It has been tested on Debian Jessie and Wheezy,
but should work, possibly with minor changes, if you have other
dpkg-based systems such as Ubuntu or Mint.
-To build netdata for a Debian Jessie system, the debian directory
-has to be available in the root of the netdata source. The easiest
+To build Netdata for a Debian Jessie system, the debian directory
+has to be available in the root of the Netdata source. The easiest
way to do this is with a symlink:
- ~/netdata$ ln -s contrib/debian
+```
+~/netdata$ ln -s contrib/debian
+```
Then build the debian package:
- ~/netdata$ dpkg-buildpackage -us -uc -rfakeroot
+```
+~/netdata$ dpkg-buildpackage -us -uc -rfakeroot
+```
This should place a package in the parent directory, which you can
then install manually with dpkg.
- ~/netdata$ ls ../*.deb
- ../netdata_1.0.0_amd64.deb
- ~/netdata$ sudo dpkg -i ../netdata_1.0.0_amd64.deb
-
+```
+~/netdata$ ls ../*.deb
+../netdata_1.0.0_amd64.deb
+~/netdata$ sudo dpkg -i ../netdata_1.0.0_amd64.deb
+```
### Building for a Debian system without systemd
@@ -32,29 +37,29 @@ are based on systemd. To build non-systemd packages (for example,
for Debian wheezy), you will need to make a couple of minor
updates first.
-* edit `contrib/debian/rules` and adjust the `dh` rule near the
- top to remove systemd (see comments in that file).
+- edit `contrib/debian/rules` and adjust the `dh` rule near the
+ top to remove systemd (see comments in that file).
-* rename `contrib/debian/control.wheezy` to `contrib/debian/control`.
+- rename `contrib/debian/control.wheezy` to `contrib/debian/control`.
-* change `control.wheezy from contrib/Makefile* to control`.
+- change `control.wheezy` to `control` in `contrib/Makefile*`.
-* uncomment `EXTRA_OPTS="-P /var/run/netdata.pid"` in
- `contrib/debian/netdata.default`
+- uncomment `EXTRA_OPTS="-P /var/run/netdata.pid"` in
+ `contrib/debian/netdata.default`
-* edit `contrib/debian/netdata.init` and change `PIDFILE` to
- `/var/run/netdata.pid`
+- edit `contrib/debian/netdata.init` and change `PIDFILE` to
+ `/var/run/netdata.pid`
-* remove `dpkg-statoverride --update --add --force root netdata 0775 /var/lib/netdata/registry` from
- `contrib/debian/netdata.postinst.in`. If you are going to handle the unique id file differently.
+- remove `dpkg-statoverride --update --add --force root netdata 0775 /var/lib/netdata/registry` from
+  `contrib/debian/netdata.postinst.in`, if you are going to handle the unique id file differently.
Then proceed as in the main instructions above.
-### Reinstalling netdata
+### Reinstalling Netdata
-The recommended way to upgrade netdata packages built from this
+The recommended way to upgrade Netdata packages built from this
source is to remove the current package from your system, then
install the new package. Upgrading on wheezy is known to not
work cleanly; Jessie may behave as expected.
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcontrib%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcontrib%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
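The non-systemd steps listed in this README can be approximated with the commands below; the `sed` patterns are assumptions, so inspect each file before running them:

```sh
# A sketch only: the manual wheezy adjustments described in the README above.
# The sed patterns are illustrative assumptions; verify against the files.
mv contrib/debian/control.wheezy contrib/debian/control
sed -i 's/control\.wheezy/control/g' contrib/Makefile*
sed -i 's|^#[[:space:]]*\(EXTRA_OPTS\)|\1|' contrib/debian/netdata.default
```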
diff --git a/contrib/debian/changelog b/contrib/debian/changelog
index d9cf8bdb..ddfffe4e 100644
--- a/contrib/debian/changelog
+++ b/contrib/debian/changelog
@@ -3,3 +3,4 @@ netdata (PREVIOUS_PACKAGE_VERSION) unstable; urgency=medium
* Initial Release
-- Netdata Builder <bot@netdata.cloud> PREVIOUS_PACKAGE_DATE
+
diff --git a/contrib/debian/control.trusty b/contrib/debian/control.trusty
new file mode 100644
index 00000000..cb8a52e0
--- /dev/null
+++ b/contrib/debian/control.trusty
@@ -0,0 +1,56 @@
+Source: netdata
+Build-Depends: debhelper (>= 9),
+ dh-autoreconf,
+ dh-systemd (>= 1.5),
+ dpkg-dev (>= 1.13.19),
+ zlib1g-dev,
+ uuid-dev,
+ liblz4-dev,
+ libjudy-dev,
+ libssl-dev,
+ libmnl-dev,
+ libjson-c-dev,
+ libcups2-dev,
+ libipmimonitoring-dev,
+ libnetfilter-acct-dev,
+ libsnappy-dev,
+ libprotobuf-dev,
+ libprotoc-dev,
+ autogen,
+ autoconf,
+ automake,
+ pkg-config,
+ curl,
+ gcc,
+ g++
+Section: net
+Priority: optional
+Maintainer: Netdata Builder <bot@netdata.cloud>
+Standards-Version: 3.9.6
+Homepage: https://netdata.cloud
+
+Package: netdata
+Architecture: any
+Depends: adduser,
+ libcap2-bin (>= 1:2.0),
+ lsb-base (>= 3.1-23.2),
+ zlib1g,
+ libuuid1,
+ liblz4-1,
+ libjudydebian1,
+ openssl,
+ libmnl0,
+ libjson-c2,
+ cups,
+ freeipmi,
+ libnetfilter-acct1,
+ libprotobuf-c0,
+ libsnappy1,
+ libprotoc8,
+ ${misc:Depends},
+ ${shlibs:Depends}
+Description: real-time charts for system monitoring
+ Netdata is a daemon that collects data in real time (per second)
+ and presents a web site to view and analyze it. The presentation
+ is also real-time and full of interactive charts that precisely
+ render all collected values.
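This control file is presumably swapped in the same way as `control.wheezy` in the README above. A hypothetical build sequence from the source root:

```sh
# A sketch only: build a Trusty package with the alternate control file,
# mirroring the control.wheezy workflow from contrib/README.md.
ln -s contrib/debian debian
cp contrib/debian/control.trusty contrib/debian/control
dpkg-buildpackage -us -uc -rfakeroot
```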
diff --git a/contrib/debian/install_go.sh b/contrib/debian/install_go.sh
index 17a3b409..22ff691f 100755
--- a/contrib/debian/install_go.sh
+++ b/contrib/debian/install_go.sh
@@ -1,7 +1,8 @@
#!/usr/bin/env bash
-LIB_DIR="$1"
-LIBEXEC_DIR="$2"
+GO_PACKAGE_VERSION="$1"
+LIB_DIR="$2"
+LIBEXEC_DIR="$3"
# ############################################################
# Package Go within netdata (TBD: Package it separately)
@@ -35,8 +36,6 @@ download_go() {
}
install_go() {
- # When updating this value, ensure correct checksums in packaging/go.d.checksums
- GO_PACKAGE_VERSION="v0.7.0"
ARCH_MAP=(
'i386::386'
'i686::386'
@@ -62,7 +61,7 @@ install_go() {
fi
done
tmp=$(mktemp -d /tmp/netdata-go-XXXXXX)
- GO_PACKAGE_BASENAME="go.d.plugin-${GO_PACKAGE_VERSION}.${OS}-${ARCH}"
+ GO_PACKAGE_BASENAME="go.d.plugin-${GO_PACKAGE_VERSION}.${OS}-${ARCH}.tar.gz"
download_go "https://github.com/netdata/go.d.plugin/releases/download/${GO_PACKAGE_VERSION}/${GO_PACKAGE_BASENAME}" "${tmp}/${GO_PACKAGE_BASENAME}"
download_go "https://github.com/netdata/go.d.plugin/releases/download/${GO_PACKAGE_VERSION}/config.tar.gz" "${tmp}/config.tar.gz"
@@ -88,7 +87,10 @@ install_go() {
# Install files
tar -xf "${tmp}/config.tar.gz" -C "${LIB_DIR}/conf.d/"
- mv "${tmp}/$GO_PACKAGE_BASENAME" "${LIBEXEC_DIR}/plugins.d/go.d.plugin"
+ tar xf "${tmp}/${GO_PACKAGE_BASENAME}"
+ mv "${GO_PACKAGE_BASENAME/\.tar\.gz/}" "${LIBEXEC_DIR}/plugins.d/go.d.plugin"
+
+ rm -rf "${tmp}"
fi
return 0
}
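With this change the caller supplies the go.d.plugin version as the first argument instead of the script hardcoding it; the `debian/rules` hunk later in this diff reads it from `packaging/go.d.version`. A hedged sketch of a manual invocation, with example target directories:

```sh
# A sketch only: install_go.sh with its new argument order.
# The LIB_DIR and LIBEXEC_DIR values here are illustrative examples.
GO_D_VERSION="$(cat packaging/go.d.version)"
contrib/debian/install_go.sh "${GO_D_VERSION}" /var/lib/netdata /usr/libexec/netdata
```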
diff --git a/contrib/debian/rules b/contrib/debian/rules
index 88a8ab36..036e0108 100755
--- a/contrib/debian/rules
+++ b/contrib/debian/rules
@@ -18,8 +18,8 @@ TOP = $(CURDIR)/debian/netdata
override_dh_auto_configure:
autoreconf -ivf
- dh_auto_configure -- --prefix=/usr --sysconfdir=/etc --localstatedir=/var \
- --libexecdir=/usr/libexec --with-user=netdata --with-math --with-webdir=/var/lib/netdata/www
+ dh_auto_configure -- --prefix=/usr --sysconfdir=/etc --localstatedir=/var --libdir=/usr/lib \
+ --libexecdir=/usr/libexec --with-user=netdata --with-math --with-zlib --with-webdir=/var/lib/netdata/www
debian/%.postinst: debian/%.postinst.in
sed 's/@DEB_HOST_MULTIARCH@/$(DEB_HOST_MULTIARCH)/g' $< > $@
@@ -55,7 +55,7 @@ override_dh_install: debian/netdata.postinst
# Install go
#
- debian/install_go.sh $(TOP)/usr/lib/$(DEB_HOST_MULTIARCH)/netdata/ $(TOP)/usr/libexec/netdata
+ debian/install_go.sh $$(cat ${CURDIR}/packaging/go.d.version) $(TOP)/usr/lib/$(DEB_HOST_MULTIARCH)/netdata/ $(TOP)/usr/libexec/netdata
override_dh_installdocs:
dh_installdocs
diff --git a/contrib/sles11/README.md b/contrib/sles11/README.md
index d052b945..2c938001 100644
--- a/contrib/sles11/README.md
+++ b/contrib/sles11/README.md
@@ -1,11 +1,12 @@
-# spec to build netdata RPM for sles 11
+# Spec to build Netdata RPM for sles 11
Based on [opensuse rpm spec](https://build.opensuse.org/package/show/network/netdata) with some
changes and additions for sles 11 backport, namely:
-- init.d script
-- run-time dependency on python ordereddict backport
-- patch for netdata python.d plugin to work with older python
-- crude hack of notification script to work with bash 3 (email and syslog only, one destination,
- see comments at the top)
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcontrib%2Fsles11%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+- init.d script
+- run-time dependency on python ordereddict backport
+- patch for Netdata python.d plugin to work with older python
+- crude hack of notification script to work with bash 3 (email and syslog only, one destination,
+ see comments at the top)
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcontrib%2Fsles11%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/coverity-install.sh b/coverity-install.sh
index 99d2f2fe..83d2ad92 100755
--- a/coverity-install.sh
+++ b/coverity-install.sh
@@ -5,38 +5,4 @@
#
# Author: Pavlos Emm. Katsoulakis (paul@netdata.cloud)
-token="${COVERITY_SCAN_TOKEN}"
-([ -z "${token}" ] && [ -f .coverity-token ]) && token="$(<.coverity-token)"
-if [ -z "${token}" ]; then
- echo >&2 "Save the coverity token to .coverity-token or export it as COVERITY_SCAN_TOKEN."
- exit 1
-fi
-
-covbuild="$(which cov-build 2>/dev/null || command -v cov-build 2>/dev/null)"
-([ -z "${covbuild}" ] && [ -f .coverity-build ]) && covbuild="$(<.coverity-build)"
-if [ ! -z "${covbuild}" ]; then
- echo >&2 "Coverity already installed, nothing to do!"
- exit 0
-fi
-
-echo >&2 "Installing coverity..."
-WORKDIR="/opt/coverity-source"
-mkdir -p "${WORKDIR}"
-
-curl -SL --data "token=${token}&project=${REPOSITORY}" https://scan.coverity.com/download/linux64 > "${WORKDIR}/coverity_tool.tar.gz"
-if [ -f "${WORKDIR}/coverity_tool.tar.gz" ]; then
- tar -x -C "${WORKDIR}" -f "${WORKDIR}/coverity_tool.tar.gz"
- sudo mv "${WORKDIR}/cov-analysis-linux64-2017.07" /opt/coverity
- export PATH=${PATH}:/opt/coverity/bin/
-else
- echo "Failed to download coverity tool tarball!"
-fi
-
-# Validate the installation
-covbuild="$(which cov-build 2>/dev/null || command -v cov-build 2>/dev/null)"
-if [ -z "$covbuild" ]; then
- echo "Failed to install coverity!"
- exit 1
-else
- echo >&2 "Coverity scan installed!"
-fi
+exec ./coverity-scan.sh install "${@}"
diff --git a/coverity-scan.sh b/coverity-scan.sh
index 977a2c29..ee8f19e7 100755
--- a/coverity-scan.sh
+++ b/coverity-scan.sh
@@ -1,62 +1,167 @@
#!/usr/bin/env bash
# Coverity scan script
#
-# To run this script you need to provide API token. This can be done either by:
-# - Putting token in ".coverity-token" file
-# - Assigning token value to COVERITY_SCAN_TOKEN environment variable
-#
# Copyright: SPDX-License-Identifier: GPL-3.0-or-later
#
# Author : Costa Tsaousis (costa@netdata.cloud)
# Author : Pawel Krupa (paulfantom)
# Author : Pavlos Emm. Katsoulakis (paul@netdata.cloud)
-cpus=$(grep -c ^processor </proc/cpuinfo)
+# To run manually, save configuration to .coverity-scan.conf like this:
+#
+# the repository to report to coverity - devs can set their own fork here
+# REPOSITORY="netdata/netdata"
+#
+# the email of the developer, as given to coverity
+# COVERITY_SCAN_SUBMIT_MAIL="you@example.com"
+#
+# the token given by coverity to the developer
+# COVERITY_SCAN_TOKEN="TOKEN taken from Coverity site"
+#
+# the absolute path of the cov-build - optional
+# COVERITY_BUILD_PATH="/opt/cov-analysis-linux64-2019.03/bin/cov-build"
+#
+# when set, the script will print on screen the curl command that submits the build to coverity
+# this includes the token, so the default is not to print it.
+# COVERITY_SUBMIT_DEBUG=1
+#
+# All these variables can also be exported before running this script.
+#
+# If the first parameter of this script is "install",
+# coverity build tools will be downloaded and installed in /opt/coverity
+
+# the version of coverity to use
+COVERITY_BUILD_VERSION="cov-analysis-linux64-2019.03"
+
+source packaging/installer/functions.sh || exit 1
+
+cpus=$(find_processors)
[ -z "${cpus}" ] && cpus=1
+if [ -f ".coverity-scan.conf" ]
+then
+ source ".coverity-scan.conf" || exit 1
+fi
+
+repo="${REPOSITORY}"
+if [ -z "${repo}" ]; then
+ fatal "export variable REPOSITORY or set it in .coverity-scan.conf"
+fi
+repo="${repo//\//%2F}"
+
+email="${COVERITY_SCAN_SUBMIT_MAIL}"
+if [ -z "${email}" ]; then
+ fatal "export variable COVERITY_SCAN_SUBMIT_MAIL or set it in .coverity-scan.conf"
+fi
+
token="${COVERITY_SCAN_TOKEN}"
-([ -z "${token}" ] && [ -f .coverity-token ]) && token="$(<.coverity-token)"
if [ -z "${token}" ]; then
- echo >&2 "Save the coverity token to .coverity-token or export it as COVERITY_SCAN_TOKEN."
- exit 1
+ fatal "export variable COVERITY_SCAN_TOKEN or set it in .coverity-scan.conf"
fi
-export PATH=${PATH}:/opt/coverity/bin/
-covbuild="$(which cov-build 2>/dev/null || command -v cov-build 2>/dev/null)"
-([ -z "${covbuild}" ] && [ -f .coverity-build ]) && covbuild="$(<.coverity-build)"
-if [ -z "${covbuild}" ]; then
- echo >&2 "Cannot find 'cov-build' binary in \$PATH."
- exit 1
-elif [ ! -x "${covbuild}" ]; then
- echo >&2 "The command ${covbuild} is not executable. Save command the full filename of cov-build in .coverity-build"
- exit 1
-fi
+# print the command being executed only
+# when debugging is enabled
+# used to hide the token when debugging is not enabled
+debugrun() {
+ if [ "${COVERITY_SUBMIT_DEBUG}" = "1" ]
+ then
+ run "${@}"
+ return $?
+ else
+ "${@}"
+ return $?
+ fi
+}
+
+scanit() {
+ export PATH="${PATH}:/opt/${COVERITY_BUILD_VERSION}/bin/"
+ covbuild="${COVERITY_BUILD_PATH}"
+ [ -z "${covbuild}" ] && covbuild="$(which cov-build 2>/dev/null || command -v cov-build 2>/dev/null)"
+ if [ -z "${covbuild}" ]; then
+ fatal "Cannot find 'cov-build' binary in \$PATH. Export variable COVERITY_BUILD_PATH or set it in .coverity-scan.conf"
+ elif [ ! -x "${covbuild}" ]; then
+ fatal "The command '${covbuild}' is not executable. Export variable COVERITY_BUILD_PATH or set it in .coverity-scan.conf"
+ fi
+
+ version="$(grep "^#define PACKAGE_VERSION" config.h | cut -d '"' -f 2)"
+ progress "Working on netdata version: ${version}"
+
+ progress "Cleaning up old builds..."
+ run make clean || echo >&2 "Nothing to clean"
-version="$(grep "^#define PACKAGE_VERSION" config.h | cut -d '"' -f 2)"
-echo >&2 "Working on netdata version: ${version}"
+ [ -d "cov-int" ] && rm -rf "cov-int"
-echo >&2 "Cleaning up old builds..."
-make clean || echo >&2 "Nothing to clean"
+ [ -f netdata-coverity-analysis.tgz ] && run rm netdata-coverity-analysis.tgz
-[ -d "cov-int" ] && rm -rf "cov-int"
+ progress "Configuring netdata source..."
+ run autoreconf -ivf
+ run ./configure --disable-lto \
+ --enable-https \
+ --enable-jsonc \
+ --enable-plugin-nfacct \
+ --enable-plugin-freeipmi \
+ --enable-plugin-cups \
+ --enable-backend-prometheus-remote-write \
+ ${NULL}
-[ -f netdata-coverity-analysis.tgz ] && rm netdata-coverity-analysis.tgz
+ # TODO: enable these plugins too
+ # --enable-plugin-xenstat \
+ # --enable-backend-kinesis \
+ # --enable-backend-mongodb \
-autoreconf -ivf
-./configure --enable-plugin-nfacct --enable-plugin-freeipmi
-"${covbuild}" --dir cov-int make -j${cpus} || exit 1
+ progress "Analyzing netdata..."
+ run "${covbuild}" --dir cov-int make -j${cpus} || exit 1
-echo >&2 "Compressing data..."
-tar czvf netdata-coverity-analysis.tgz cov-int || exit 1
+ echo >&2 "Compressing analysis..."
+ run tar czvf netdata-coverity-analysis.tgz cov-int || exit 1
-echo >&2 "Sending analysis for version ${version} ..."
-COVERITY_SUBMIT_RESULT=$(curl --progress-bar --form token="${token}" \
- --form email=${COVERITY_SCAN_SUBMIT_MAIL} \
- --form file=@netdata-coverity-analysis.tgz \
- --form version="${version}" \
- --form description="netdata, real-time performance monitoring, done right." \
- https://scan.coverity.com/builds?project=${REPOSITORY})
+ echo >&2 "Sending analysis to coverity for netdata version ${version} ..."
+ COVERITY_SUBMIT_RESULT=$(debugrun curl --progress-bar \
+ --form token="${token}" \
+ --form email=${email} \
+ --form file=@netdata-coverity-analysis.tgz \
+ --form version="${version}" \
+ --form description="netdata, monitor everything, in real-time." \
+ https://scan.coverity.com/builds?project=${repo})
-echo ${COVERITY_SUBMIT_RESULT} | grep -q -e 'Build successfully submitted' || echo >&2 "scan results were not pushed to coverity. Message was: ${COVERITY_SUBMIT_RESULT}"
+ echo ${COVERITY_SUBMIT_RESULT} | grep -q -e 'Build successfully submitted' || echo >&2 "scan results were not pushed to coverity. Message was: ${COVERITY_SUBMIT_RESULT}"
-echo >&2 "Coverity scan mechanism completed"
+ progress "Coverity scan completed"
+}
+
+installit() {
+ progress "Downloading coverity..."
+ cd /tmp || exit 1
+
+ [ -f "${COVERITY_BUILD_VERSION}.tar.gz" ] && run rm -f "${COVERITY_BUILD_VERSION}.tar.gz"
+ debugrun curl --remote-name --remote-header-name --show-error --location --data "token=${token}&project=${repo}" https://scan.coverity.com/download/linux64
+
+ if [ -f "${COVERITY_BUILD_VERSION}.tar.gz" ]; then
+ progress "Installing coverity..."
+ cd /opt || exit 1
+ run sudo tar -z -x -f "/tmp/${COVERITY_BUILD_VERSION}.tar.gz" || exit 1
+ rm "/tmp/${COVERITY_BUILD_VERSION}.tar.gz"
+ export PATH=${PATH}:/opt/${COVERITY_BUILD_VERSION}/bin/
+ else
+ fatal "Failed to download coverity tool tarball!"
+ fi
+
+ # Validate the installation
+ covbuild="$(which cov-build 2>/dev/null || command -v cov-build 2>/dev/null)"
+ if [ -z "$covbuild" ]; then
+ fatal "Failed to install coverity."
+ fi
+
+ progress "Coverity scan tools are installed."
+ return 0
+}
+
+if [ "${1}" = "install" ]
+then
+ shift 1
+ installit "${@}"
+ exit $?
+else
+ scanit "${@}"
+ exit $?
+fi
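Per the comment block at the top of the rewritten script, a manual run needs a `.coverity-scan.conf`. A sketch assembled only from those comments:

```sh
# A sketch only: .coverity-scan.conf values, taken from the script's comments.
REPOSITORY="your-fork/netdata"               # devs can point this at their own fork
COVERITY_SCAN_SUBMIT_MAIL="you@example.com"  # the email given to coverity
COVERITY_SCAN_TOKEN="token-from-the-coverity-site"
# optional: absolute path to cov-build
#COVERITY_BUILD_PATH="/opt/cov-analysis-linux64-2019.03/bin/cov-build"
```

With that file in place, `./coverity-scan.sh install` downloads and installs the tools, and a plain `./coverity-scan.sh` runs the scan, as the dispatch at the bottom of the script shows.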
diff --git a/daemon/Makefile.in b/daemon/Makefile.in
new file mode 100644
index 00000000..e53dbbca
--- /dev/null
+++ b/daemon/Makefile.in
@@ -0,0 +1,614 @@
+# Makefile.in generated by automake 1.15.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2017 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+
+VPATH = @srcdir@
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = daemon
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(dist_plugins_SCRIPTS) \
+ $(dist_noinst_DATA) $(am__DIST_COMMON)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
+am__vpath_adj = case $$p in \
+ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
+ *) f=$$p;; \
+ esac;
+am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
+am__install_max = 40
+am__nobase_strip_setup = \
+ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
+am__nobase_strip = \
+ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
+am__nobase_list = $(am__nobase_strip_setup); \
+ for p in $$list; do echo "$$p $$p"; done | \
+ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
+ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
+ if (++n[$$2] == $(am__install_max)) \
+ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
+ END { for (dir in files) print dir, files[dir] }'
+am__base_list = \
+ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
+ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
+am__uninstall_files_from_dir = { \
+ test -z "$$files" \
+ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \
+ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \
+ $(am__cd) "$$dir" && rm -f $$files; }; \
+ }
+am__installdirs = "$(DESTDIR)$(pluginsdir)"
+SCRIPTS = $(dist_plugins_SCRIPTS)
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_noinst_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+am__DIST_COMMON = $(srcdir)/Makefile.in $(top_srcdir)/build/subst.inc
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CUPSCONFIG = @CUPSCONFIG@
+CXX = @CXX@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CXX_BINARY = @CXX_BINARY@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+JSON_CFLAGS = @JSON_CFLAGS@
+JSON_LIBS = @JSON_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
+LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
+LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
+LIBCURL_LIBS = @LIBCURL_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
+LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
+LIBSSL_LIBS = @LIBSSL_LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
+OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
+OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
+OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
+OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
+OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
+OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
+OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
+OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
+OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
+OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
+OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
+OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
+PROTOBUF_LIBS = @PROTOBUF_LIBS@
+PROTOC = @PROTOC@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
+XENLIGHT_LIBS = @XENLIGHT_LIBS@
+YAJL_CFLAGS = @YAJL_CFLAGS@
+YAJL_LIBS = @YAJL_LIBS@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+CLEANFILES = \
+ anonymous-statistics.sh \
+ $(NULL)
+
+SUFFIXES = .in
+dist_noinst_DATA = \
+ README.md \
+ config/README.md \
+ anonymous-statistics.sh.in \
+ $(NULL)
+
+dist_plugins_SCRIPTS = \
+ anonymous-statistics.sh \
+ system-info.sh \
+ $(NULL)
+
+all: all-am
+
+.SUFFIXES:
+.SUFFIXES: .in
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(top_srcdir)/build/subst.inc $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu daemon/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu daemon/Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+$(top_srcdir)/build/subst.inc $(am__empty):
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+install-dist_pluginsSCRIPTS: $(dist_plugins_SCRIPTS)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(pluginsdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(pluginsdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \
+ done | \
+ sed -e 'p;s,.*/,,;n' \
+ -e 'h;s|.*|.|' \
+ -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \
+ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \
+ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \
+ if ($$2 == $$4) { files[d] = files[d] " " $$1; \
+ if (++n[d] == $(am__install_max)) { \
+ print "f", d, files[d]; n[d] = 0; files[d] = "" } } \
+ else { print "f", d "/" $$4, $$1 } } \
+ END { for (d in files) print "f", d, files[d] }' | \
+ while read type dir files; do \
+ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \
+ test -z "$$files" || { \
+ echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pluginsdir)$$dir'"; \
+ $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pluginsdir)$$dir" || exit $$?; \
+ } \
+ ; done
+
+uninstall-dist_pluginsSCRIPTS:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || exit 0; \
+ files=`for p in $$list; do echo "$$p"; done | \
+ sed -e 's,.*/,,;$(transform)'`; \
+ dir='$(DESTDIR)$(pluginsdir)'; $(am__uninstall_files_from_dir)
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(SCRIPTS) $(DATA)
+installdirs:
+ for dir in "$(DESTDIR)$(pluginsdir)"; do \
+ test -z "$$dir" || $(MKDIR_P) "$$dir"; \
+ done
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+ -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES)
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am: install-dist_pluginsSCRIPTS
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am: uninstall-dist_pluginsSCRIPTS
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dist_pluginsSCRIPTS install-dvi \
+ install-dvi-am install-exec install-exec-am install-html \
+ install-html-am install-info install-info-am install-man \
+ install-pdf install-pdf-am install-ps install-ps-am \
+ install-strip installcheck installcheck-am installdirs \
+ maintainer-clean maintainer-clean-generic mostlyclean \
+ mostlyclean-generic pdf pdf-am ps ps-am tags-am uninstall \
+ uninstall-am uninstall-dist_pluginsSCRIPTS
+
+.PRECIOUS: Makefile
+
+.in:
+ if sed \
+ -e 's#[@]localstatedir_POST@#$(localstatedir)#g' \
+ -e 's#[@]sbindir_POST@#$(sbindir)#g' \
+ -e 's#[@]pluginsdir_POST@#$(pluginsdir)#g' \
+ -e 's#[@]configdir_POST@#$(configdir)#g' \
+ -e 's#[@]libconfigdir_POST@#$(libconfigdir)#g' \
+ -e 's#[@]cachedir_POST@#$(cachedir)#g' \
+ -e 's#[@]registrydir_POST@#$(registrydir)#g' \
+ -e 's#[@]varlibdir_POST@#$(varlibdir)#g' \
+ $< > $@.tmp; then \
+ mv "$@.tmp" "$@"; \
+ else \
+ rm -f "$@.tmp"; \
+ false; \
+ fi
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
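The `.in` suffix rule at the end of this Makefile renders templates such as `anonymous-statistics.sh.in` by substituting the `@..._POST@` placeholders with configured paths. A hedged sketch of the same transformation done by hand, with example paths:

```sh
# A sketch only: what the .in suffix rule does, applied manually.
# The substituted paths are examples, not the configured defaults.
sed -e 's#[@]pluginsdir_POST@#/usr/libexec/netdata/plugins.d#g' \
    -e 's#[@]configdir_POST@#/etc/netdata#g' \
    daemon/anonymous-statistics.sh.in > daemon/anonymous-statistics.sh
```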
diff --git a/daemon/README.md b/daemon/README.md
index 62cc8c3b..2facbdda 100644
--- a/daemon/README.md
+++ b/daemon/README.md
@@ -2,21 +2,21 @@
## Starting netdata
-- You can start netdata by executing it with `/usr/sbin/netdata` (the installer will also start it).
+- You can start Netdata by executing it with `/usr/sbin/netdata` (the installer will also start it).
-- You can stop netdata by killing it with `killall netdata`.
- You can stop and start netdata at any point. Netdata saves on exit its round robbin
- database to `/var/cache/netdata` so that it will continue from where it stopped the last time.
+- You can stop Netdata by killing it with `killall netdata`.
+  You can stop and start Netdata at any point. On exit, Netdata saves its round robin
+  database to `/var/cache/netdata` so that it will continue from where it stopped the last time.
Access to the web site, for all graphs, is by default on port `19999`, so go to:
- ```
- http://127.0.0.1:19999/
- ```
+```
+http://127.0.0.1:19999/
+```
You can get the running config file at any time, by accessing `http://127.0.0.1:19999/netdata.conf`.
-### Starting netdata at boot
+### Starting Netdata at boot
In the `system` directory you can find scripts and configurations for the various distros.
@@ -27,7 +27,7 @@ The installer already installs `netdata.service` if it detects a systemd system.
To install `netdata.service` by hand, run:
```sh
-# stop netdata
+# stop Netdata
killall netdata
# copy netdata.service to systemd
@@ -36,10 +36,10 @@ cp system/netdata.service /etc/systemd/system/
# let systemd know there is a new service
systemctl daemon-reload
-# enable netdata at boot
+# enable Netdata at boot
systemctl enable netdata
-# start netdata
+# start Netdata
systemctl start netdata
```
@@ -48,7 +48,7 @@ systemctl start netdata
In the system directory you can find `netdata-lsb`. Copy it to the proper place according to your distribution documentation. For Ubuntu, this can be done via running the following commands as root.
```sh
-# copy the netdata startup file to /etc/init.d
+# copy the Netdata startup file to /etc/init.d
cp system/netdata-lsb /etc/init.d/netdata
# make sure it is executable
@@ -67,7 +67,7 @@ In the `system` directory you can find `netdata-openrc`. Copy it to the proper p
For older versions of RHEL/CentOS that don't have systemd, an init script is included in the system directory. This can be installed by running the following commands as root.
```sh
-# copy the netdata startup file to /etc/init.d
+# copy the Netdata startup file to /etc/init.d
cp system/netdata-init-d /etc/init.d/netdata
# make sure it is executable
@@ -77,11 +77,11 @@ chmod +x /etc/init.d/netdata
chkconfig --add netdata
```
-_There have been some recent work on the init script, see PR https://github.com/netdata/netdata/pull/403_
+_There has been some recent work on the init script; see PR <https://github.com/netdata/netdata/pull/403>_
#### other systems
-You can start netdata by running it from `/etc/rc.local` or equivalent.
+You can start Netdata by running it from `/etc/rc.local` or equivalent.
## Command line options
@@ -97,9 +97,9 @@ netdata -h
The program will print the supported command line parameters.
-The command line options of the netdata 1.10.0 version are the following:
-```
+The command line options of Netdata 1.10.0 are the following:
+```
^
|.-. .-. .-. .-. . netdata
| '-' '-' '-' '-' real-time performance monitoring, done right!
@@ -182,38 +182,38 @@ The command line options of the netdata 1.10.0 version are the following:
## Log files
-netdata uses 3 log files:
+Netdata uses 3 log files:
-1. `error.log`
-2. `access.log`
-3. `debug.log`
+1. `error.log`
+2. `access.log`
+3. `debug.log`
Any of them can be disabled by setting it to `/dev/null` or `none` in `netdata.conf`.
By default `error.log` and `access.log` are enabled. `debug.log` is only enabled if
-debugging/tracing is also enabled (netdata needs to be compiled with debugging enabled).
+debugging/tracing is also enabled (Netdata needs to be compiled with debugging enabled).
Log files are stored in `/var/log/netdata/` by default.
#### error.log
-The `error.log` is the `stderr` of the netdata daemon and all external plugins run by netdata.
+The `error.log` is the `stderr` of the `netdata` daemon and all external plugins run by Netdata.
-So if any process, in the netdata process tree, writes anything to its standard error,
+So if any process, in the Netdata process tree, writes anything to its standard error,
it will appear in `error.log`.
-For most netdata programs (including standard external plugins shipped by netdata), the
+For most Netdata programs (including standard external plugins shipped by Netdata), the
following lines may appear:
-tag|description
-:---:|:----
-`INFO`|Something important the user should know.
-`ERROR`|Something that might disable a part of netdata.<br/>The log line includes `errno` (if it is not zero).
-`FATAL`|Something prevented a program from running.<br/>The log line includes `errno` (if it is not zero) and the program exited.
+| tag|description|
+|:-:|:----------|
+| `INFO`|Something important the user should know.|
+| `ERROR`|Something that might disable a part of Netdata.<br/>The log line includes `errno` (if it is not zero).|
+| `FATAL`|Something prevented a program from running.<br/>The log line includes `errno` (if it is not zero) and the program exited.|
So, when auto-detection of data collection fails, `ERROR` lines are logged and the relevant modules
are disabled, but the program continues to run.
-When a netdata program cannot run at all, a `FATAL` line is logged.
+When a Netdata program cannot run at all, a `FATAL` line is logged.
#### access.log
@@ -225,34 +225,32 @@ DATE: ID: (sent/all = SENT_BYTES/ALL_BYTES bytes PERCENT_COMPRESSION%, prep/sent
where:
- - `ID` is the client ID. Client IDs are auto-incremented every time a client connects to netdata.
- - `SENT_BYTES` is the number of bytes sent to the client, without the HTTP response header.
- - `ALL_BYTES` is the number of bytes of the response, before compression.
- - `PERCENT_COMPRESSION` is the percentage of traffic saved due to compression.
- - `PREP_TIME` is the time in milliseconds needed to prepared the response.
- - `SENT_TIME` is the time in milliseconds needed to sent the response to the client.
- - `TOTAL_TIME` is the total time the request was inside netdata (from the first byte of the request to the last byte of the response).
- - `ACTION` can be `filecopy`, `options` (used in CORS), `data` (API call).
-
+- `ID` is the client ID. Client IDs are auto-incremented every time a client connects to Netdata.
+- `SENT_BYTES` is the number of bytes sent to the client, without the HTTP response header.
+- `ALL_BYTES` is the number of bytes of the response, before compression.
+- `PERCENT_COMPRESSION` is the percentage of traffic saved due to compression.
+- `PREP_TIME` is the time in milliseconds needed to prepare the response.
+- `SENT_TIME` is the time in milliseconds needed to send the response to the client.
+- `TOTAL_TIME` is the total time the request was inside Netdata (from the first byte of the request to the last byte of the response).
+- `ACTION` can be `filecopy`, `options` (used in CORS), `data` (API call).
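As a quick, hypothetical illustration (assuming the default log location), the entries for API calls can be isolated like this:

```sh
# keep only "data" actions (API calls) from the web server log
grep ' data ' /var/log/netdata/access.log | tail
```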
#### debug.log
See [debugging](#debugging).
-
## OOM Score
-netdata runs with `OOMScore = 1000`. This means netdata will be the first to be killed when your
+Netdata runs with `OOMScore = 1000`. This means Netdata will be the first to be killed when your
server runs out of memory.
-You can set netdata OOMScore in `netdata.conf`, like this:
+You can set Netdata OOMScore in `netdata.conf`, like this:
```
[global]
OOM score = 1000
```
-netdata logs its OOM score when it starts:
+Netdata logs its OOM score when it starts:
```sh
# grep OOM /var/log/netdata/error.log
@@ -261,16 +259,16 @@ netdata logs its OOM score when it starts:
#### OOM score and systemd
-netdata will not be able to lower its OOM Score below zero, when it is started as the `netdata`
+Netdata will not be able to lower its OOM Score below zero, when it is started as the `netdata`
user (systemd case).
-To allow netdata control its OOM Score in such cases, you will need to edit
+To allow Netdata to control its OOM Score in such cases, you will need to edit
`netdata.service` and set:
```
[Service]
-# The minimum netdata Out-Of-Memory (OOM) score.
-# netdata (via [global].OOM score in netdata.conf) can only increase the value set here.
+# The minimum Netdata Out-Of-Memory (OOM) score.
+# Netdata (via [global].OOM score in netdata.conf) can only increase the value set here.
# To decrease it, set the minimum here and set the same or a higher value in netdata.conf.
# Valid values: -1000 (never kill netdata) to 1000 (always kill netdata).
OOMScoreAdjust=-1000
@@ -278,7 +276,7 @@ OOMScoreAdjust=-1000
Run `systemctl daemon-reload` to reload these changes.
-The above, sets and OOMScore for netdata to `-1000`, so that netdata can increase it via
+The above sets the OOMScore for Netdata to `-1000`, so that Netdata can increase it via
`netdata.conf`.
If you want to control it entirely via systemd, you can set in `netdata.conf`:
@@ -290,12 +288,11 @@ If you want to control it entirely via systemd, you can set in `netdata.conf`:
Using the above, whatever OOM Score you have set at `netdata.service` will be maintained by netdata.
-
## Netdata process scheduling policy
-By default netdata runs with the `idle` process scheduling policy, so that it uses CPU resources, only when there is idle CPU to spare. On very busy servers (or weak servers), this can lead to gaps on the charts.
+By default Netdata runs with the `idle` process scheduling policy, so that it uses CPU resources only when there is idle CPU to spare. On very busy servers (or weak servers), this can lead to gaps on the charts.
-You can set netdata scheduling policy in `netdata.conf`, like this:
+You can set Netdata scheduling policy in `netdata.conf`, like this:
```
[global]
@@ -304,14 +301,14 @@ You can set netdata scheduling policy in `netdata.conf`, like this:
You can use the following:
-policy|description
-:-----:|:--------
-`idle`|use CPU only when there is spare - this is lower than nice 19 - it is the default for netdata and it is so low that netdata will run in "slow motion" under extreme system load, resulting in short (1-2 seconds) gaps at the charts.
-`other`<br/>or<br/>`nice`|this is the default policy for all processes under Linux. It provides dynamic priorities based on the `nice` level of each process. Check below for setting this `nice` level for netdata.
-`batch`|This policy is similar to `other` in that it schedules the thread according to its dynamic priority (based on the `nice` value). The difference is that this policy will cause the scheduler to always assume that the thread is CPU-intensive. Consequently, the scheduler will apply a small scheduling penalty with respect to wake-up behavior, so that this thread is mildly disfavored in scheduling decisions.
-`fifo`|`fifo` can be used only with static priorities higher than 0, which means that when a `fifo` threads becomes runnable, it will always immediately preempt any currently running `other`, `batch`, or `idle` thread. `fifo` is a simple scheduling algorithm without time slicing.
-`rr`|a simple enhancement of `fifo`. Everything described above for `fifo` also applies to `rr`, except that each thread is allowed to run only for a maximum time quantum.
-`keep`<br/>or<br/>`none`|do not set scheduling policy, priority or nice level - i.e. keep running with whatever it is set already (e.g. by systemd).
+| policy|description|
+|:----:|:----------|
+| `idle`|use CPU only when there is spare - this is lower than nice 19 - it is the default for Netdata and it is so low that Netdata will run in "slow motion" under extreme system load, resulting in short (1-2 seconds) gaps in the charts.|
+| `other`<br/>or<br/>`nice`|this is the default policy for all processes under Linux. It provides dynamic priorities based on the `nice` level of each process. Check below for setting this `nice` level for Netdata.|
+| `batch`|This policy is similar to `other` in that it schedules the thread according to its dynamic priority (based on the `nice` value). The difference is that this policy will cause the scheduler to always assume that the thread is CPU-intensive. Consequently, the scheduler will apply a small scheduling penalty with respect to wake-up behavior, so that this thread is mildly disfavored in scheduling decisions.|
+| `fifo`|`fifo` can be used only with static priorities higher than 0, which means that when a `fifo` thread becomes runnable, it will always immediately preempt any currently running `other`, `batch`, or `idle` thread. `fifo` is a simple scheduling algorithm without time slicing.|
+| `rr`|a simple enhancement of `fifo`. Everything described above for `fifo` also applies to `rr`, except that each thread is allowed to run only for a maximum time quantum.|
+| `keep`<br/>or<br/>`none`|do not set scheduling policy, priority or nice level - i.e. keep running with whatever it is set already (e.g. by systemd).|
For more information see `man sched`.
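To verify which policy the running daemon actually got, something like the following works on most Linux systems (an illustrative check; `chrt` is part of util-linux):

```sh
# print the scheduling policy and priority of the netdata daemon
chrt -p $(pidof netdata | cut -d' ' -f1)
```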
@@ -337,30 +334,30 @@ When the policy is set to `other`, `nice`, or `batch`, the following will appear
## scheduling settings and systemd
-netdata will not be able to set its scheduling policy and priority to more important values when it is started as the `netdata` user (systemd case).
+Netdata will not be able to set its scheduling policy and priority to more important values when it is started as the `netdata` user (systemd case).
You can set these settings at `/etc/systemd/system/netdata.service`:
```
[Service]
-# By default netdata switches to scheduling policy idle, which makes it use CPU, only
+# By default Netdata switches to scheduling policy idle, which makes it use CPU, only
# when there is spare available.
# Valid policies: other (the system default) | batch | idle | fifo | rr
#CPUSchedulingPolicy=other
-# This sets the maximum scheduling priority netdata can set (for policies: rr and fifo).
-# netdata (via [global].process scheduling priority in netdata.conf) can only lower this value.
+# This sets the maximum scheduling priority Netdata can set (for policies: rr and fifo).
+# Netdata (via [global].process scheduling priority in netdata.conf) can only lower this value.
# Priority gets values 1 (lowest) to 99 (highest).
#CPUSchedulingPriority=1
# For scheduling policy 'other' and 'batch', this sets the lowest niceness of netdata.
-# netdata (via [global].process nice level in netdata.conf) can only increase the value set here.
+# Netdata (via [global].process nice level in netdata.conf) can only increase the value set here.
#Nice=0
```
Run `systemctl daemon-reload` to reload these changes.
-Now, tell netdata to keep these settings, as set by systemd, by editing `netdata.conf` and setting:
+Now, tell Netdata to keep these settings, as set by systemd, by editing `netdata.conf` and setting:
```
[global]
@@ -369,10 +366,9 @@ Now, tell netdata to keep these settings, as set by systemd, by editing `netdata
Using the above, whatever scheduling settings you have set at `netdata.service` will be maintained by netdata.
+#### Example 1: Netdata with nice -1 on non-systemd systems
-#### Example 1: netdata with nice -1 on non-systemd systems
-
-On a system that is not based on systemd, to make netdata run with nice level -1 (a little bit higher to the default for all programs), edit netdata.conf and set:
+On a system that is not based on systemd, to make Netdata run with nice level -1 (a little bit higher than the default for all programs), edit `netdata.conf` and set:
```
[global]
@@ -386,10 +382,9 @@ then execute this to restart netdata:
sudo service netdata restart
```
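To confirm the new nice level after the restart (an illustrative check; column names vary slightly between `ps` implementations):

```sh
# show the pid, nice level and command of the running netdata processes
ps -o pid,ni,comm -C netdata
```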
+#### Example 2: Netdata with nice -1 on systemd systems
-#### Example 2: netdata with nice -1 on systemd systems
-
-On a system that is based on systemd, to make netdata run with nice level -1 (a little bit higher to the default for all programs), edit netdata.conf and set:
+On a system that is based on systemd, to make Netdata run with nice level -1 (a little bit higher than the default for all programs), edit `netdata.conf` and set:
```
[global]
@@ -415,9 +410,9 @@ sudo systemctl restart netdata
You may notice that netdata's virtual memory size, as reported by `ps` or `/proc/pid/status` (or even netdata's applications virtual memory chart) is unrealistically high.
-For example, it may be reported to be 150+MB, even if the resident memory size is just 25MB. Similar values may be reported for netdata plugins too.
+For example, it may be reported to be 150+MB, even if the resident memory size is just 25MB. Similar values may be reported for Netdata plugins too.
-Check this for example: A netdata installation with default settings on Ubuntu 16.04LTS. The top chart is **real memory used**, while the bottom one is **virtual memory**:
+Check this example: a Netdata installation with default settings on Ubuntu 16.04 LTS. The top chart is **real memory used**, while the bottom one is **virtual memory**:
![image](https://cloud.githubusercontent.com/assets/2662304/19013772/5eb7173e-87e3-11e6-8f2b-a2ccfeb06faf.png)
@@ -431,19 +426,18 @@ number of threads running.
The system does this for speed. Having a separate memory arena for each thread, allows the
threads to run in parallel in multi-core systems, without any locks between them.
-This behaviour is system specific. For example, the chart above when running netdata on alpine
-linux (that uses **musl** instead of **glibc**) is this:
+This behaviour is system specific. For example, the chart above when running Netdata on Alpine Linux (that uses **musl** instead of **glibc**) is this:
![image](https://cloud.githubusercontent.com/assets/2662304/19013807/7cf5878e-87e4-11e6-9651-082e68701eab.png)
**Can we do anything to lower it?**
-Since netdata already uses minimal memory allocations while it runs (i.e. it adapts its memory on start, so that while repeatedly collects data it does not do memory allocations), it already instructs the system memory allocator to minimize the memory arenas for each thread. We have also added [2 configuration options](https://github.com/netdata/netdata/blob/5645b1ee35248d94e6931b64a8688f7f0d865ec6/src/main.c#L410-L418)
+Since Netdata already uses minimal memory allocations while it runs (i.e. it adapts its memory on start, so that while it repeatedly collects data it does not do memory allocations), it already instructs the system memory allocator to minimize the memory arenas for each thread. We have also added [2 configuration options](https://github.com/netdata/netdata/blob/5645b1ee35248d94e6931b64a8688f7f0d865ec6/src/main.c#L410-L418)
to allow you to tweak these settings: `glibc malloc arena max for plugins` and `glibc malloc arena max for netdata`.
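For reference, both options can be set in `netdata.conf` (a sketch; `1` is already the default for each):

```
[global]
    glibc malloc arena max for netdata = 1
    glibc malloc arena max for plugins = 1
```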
However, even if we instructed the memory allocator to use just one arena, it seems it allocates an arena per thread.
-netdata also supports `jemalloc` and `tcmalloc`, however both behave exactly the same to the glibc memory allocator in this aspect.
+Netdata also supports `jemalloc` and `tcmalloc`; however, both behave exactly the same as the glibc memory allocator in this aspect.
**Is this a problem?**
@@ -452,53 +446,52 @@ No, it is not.
Linux reserves real memory (physical RAM) in pages (on x86 machines pages are 4KB each).
So even if the system memory allocator is allocating huge amounts of virtual memory,
only the 4KB pages that are actually used are reserving physical RAM. The **real memory** chart
-on netdata application section, shows the amount of physical memory these pages occupy(it
+on the Netdata applications section shows the amount of physical memory these pages occupy (it
accounts for the whole pages, even if parts of them are actually used).
-
## Debugging
-When you compile netdata with debugging:
+When you compile Netdata with debugging:
-1. compiler optimizations for your CPU are disabled (netdata will run somewhat slower)
+1. compiler optimizations for your CPU are disabled (Netdata will run somewhat slower)
-2. a lot of code is added all over netdata, to log debug messages to `/var/log/netdata/debug.log`. However, nothing is printed by default. netdata allows you to select which sections of netdata you want to trace. Tracing is activated via the config option `debug flags`. It accepts a hex number, to enable or disable specific sections. You can find the options supported at [log.h](../libnetdata/log/log.h). They are the `D_*` defines. The value `0xffffffffffffffff` will enable all possible debug flags.
+2. a lot of code is added all over Netdata, to log debug messages to `/var/log/netdata/debug.log`. However, nothing is printed by default. Netdata allows you to select which sections of Netdata you want to trace. Tracing is activated via the config option `debug flags`. It accepts a hex number to enable or disable specific sections. You can find the options supported at [log.h](../libnetdata/log/log.h). They are the `D_*` defines. The value `0xffffffffffffffff` will enable all possible debug flags.
-Once netdata is compiled with debugging and tracing is enabled for a few sections, the file `/var/log/netdata/debug.log` will contain the messages.
+Once Netdata is compiled with debugging and tracing is enabled for a few sections, the file `/var/log/netdata/debug.log` will contain the messages.
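For example, to enable every debug section at once (a sketch; see the warning right below about turning it off again):

```
[global]
    debug flags = 0xffffffffffffffff
```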
> Do not forget to disable tracing (`debug flags = 0`) when you are done tracing. The file `debug.log` can grow too fast.
-#### compiling netdata with debugging
+#### compiling Netdata with debugging
-To compile netdata with debugging, use this:
+To compile Netdata with debugging, use this:
```sh
-# step into the netdata source directory
+# step into the Netdata source directory
cd /usr/src/netdata.git
# run the installer with debugging enabled
CFLAGS="-O1 -ggdb -DNETDATA_INTERNAL_CHECKS=1" ./netdata-installer.sh
```
-The above will compile and install netdata with debugging info embedded. You can now use `debug flags` to set the section(s) you need to trace.
+The above will compile and install Netdata with debugging info embedded. You can now use `debug flags` to set the section(s) you need to trace.
#### debugging crashes
-We have made the most to make netdata crash free. If however, netdata crashes on your system, it would be very helpful to provide stack traces of the crash. Without them, is will be almost impossible to find the issue (the code base is quite large to find such an issue by just objerving it).
+We have done our best to make Netdata crash free. If, however, Netdata crashes on your system, it would be very helpful to provide stack traces of the crash. Without them, it will be almost impossible to find the issue (the code base is quite large, so finding such an issue by just observing the code is unlikely).
-To provide stack traces, **you need to have netdata compiled with debugging**. There is no need to enable any tracing (`debug flags`).
+To provide stack traces, **you need to have Netdata compiled with debugging**. There is no need to enable any tracing (`debug flags`).
Then you need to be in one of the following 2 cases:
-1. netdata crashes and you have a core dump
+1. Netdata crashes and you have a core dump
-2. you can reproduce the crash
+2. you can reproduce the crash
If you are not in one of these cases, you need to find a way to be (i.e. if your system does not produce core dumps, check your distro documentation to enable them).
-#### netdata crashes and you have a core dump
+#### Netdata crashes and you have a core dump
-> you need to have netdata compiled with debugging info for this to work (check above)
+> you need to have Netdata compiled with debugging info for this to work (check above)
Run the following command and post the output on a GitHub issue.
@@ -506,9 +499,9 @@ Run the following command and post the output on a github issue.
gdb $(which netdata) /path/to/core/dump
```
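Once inside `gdb`, a backtrace of every thread can usually be obtained with (illustrative):

```
thread apply all bt
```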
-#### you can reproduce a netdata crash on your system
+#### you can reproduce a Netdata crash on your system
-> you need to have netdata compiled with debugging info for this to work (check above)
+> you need to have Netdata compiled with debugging info for this to work (check above)
Install the package `valgrind` and run:
@@ -516,7 +509,6 @@ Install the package `valgrind` and run:
valgrind $(which netdata) -D
```
-netdata will start and it will be a lot slower. Now reproduce the crash and `valgrind` will dump on your console the stack trace. Open a new github issue and post the output.
-
+Netdata will start and it will be a lot slower. Now reproduce the crash and `valgrind` will dump the stack trace on your console. Open a new GitHub issue and post the output.
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdaemon%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdaemon%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/daemon/anonymous-statistics.sh b/daemon/anonymous-statistics.sh
new file mode 100644
index 00000000..8c5a77f5
--- /dev/null
+++ b/daemon/anonymous-statistics.sh
@@ -0,0 +1,89 @@
+#!/usr/bin/env sh
+
+# Valid actions:
+
+# - FATAL - netdata exited due to a fatal condition
+# ACTION_RESULT -- program name and thread tag
+# ACTION_DATA -- fmt, args passed to fatal
+# - START - netdata started
+# ACTION_DATA -- nan
+# - EXIT - installation action
+# ACTION_DATA -- ret value of
+
+ACTION="${1}"
+ACTION_RESULT="${2}"
+ACTION_DATA="${3}"
+ACTION_DATA=$(echo "${ACTION_DATA}" | tr '"' "'")
+
+# -------------------------------------------------------------------------------------------------
+# check opt-out
+
+if [ -f "/etc/netdata/.opt-out-from-anonymous-statistics" ]; then
+ exit 0
+fi
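+
+# Opting out is just a matter of creating the flag file checked above, e.g.:
+#   touch /etc/netdata/.opt-out-from-anonymous-statistics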
+
+# Shorten version for easier reporting
+NETDATA_VERSION=$(echo "${NETDATA_VERSION}" | sed 's/-.*//g' | tr -d 'v')
+
+# -------------------------------------------------------------------------------------------------
+# send the anonymous statistics to GA
+# https://developers.google.com/analytics/devguides/collection/protocol/v1/parameters
+if [ -n "$(command -v curl 2>/dev/null)" ]; then
+ curl -X POST -Ss --max-time 2 \
+ --data "v=1" \
+ --data "tid=UA-64295674-3" \
+ --data "aip=1" \
+ --data "ds=shell" \
+ --data-urlencode "cid=${NETDATA_REGISTRY_UNIQUE_ID}" \
+ --data-urlencode "cs=${NETDATA_REGISTRY_UNIQUE_ID}" \
+ --data "t=event" \
+ --data "ni=1" \
+ --data "an=anonymous-statistics" \
+ --data-urlencode "av=${NETDATA_VERSION}" \
+ --data-urlencode "ec=${ACTION}" \
+ --data-urlencode "ea=${ACTION_RESULT}" \
+ --data-urlencode "el=${ACTION_DATA}" \
+ --data-urlencode "cd1=${NETDATA_SYSTEM_OS_NAME}" \
+ --data-urlencode "cd2=${NETDATA_SYSTEM_OS_ID}" \
+ --data-urlencode "cd3=${NETDATA_SYSTEM_OS_ID_LIKE}" \
+ --data-urlencode "cd4=${NETDATA_SYSTEM_OS_VERSION}" \
+ --data-urlencode "cd5=${NETDATA_SYSTEM_OS_VERSION_ID}" \
+ --data-urlencode "cd6=${NETDATA_SYSTEM_OS_DETECTION}" \
+ --data-urlencode "cd7=${NETDATA_SYSTEM_KERNEL_NAME}" \
+ --data-urlencode "cd8=${NETDATA_SYSTEM_KERNEL_VERSION}" \
+ --data-urlencode "cd9=${NETDATA_SYSTEM_ARCHITECTURE}" \
+ --data-urlencode "cd10=${NETDATA_SYSTEM_VIRTUALIZATION}" \
+ --data-urlencode "cd11=${NETDATA_SYSTEM_VIRT_DETECTION}" \
+ --data-urlencode "cd12=${NETDATA_SYSTEM_CONTAINER}" \
+ --data-urlencode "cd13=${NETDATA_SYSTEM_CONTAINER_DETECTION}" \
+ "https://www.google-analytics.com/collect" >/dev/null 2>&1
+else
+ wget -q -O - --timeout=1 "https://www.google-analytics.com/collect?\
+&v=1\
+&tid=UA-64295674-3\
+&aip=1\
+&ds=shell\
+&cid=${NETDATA_REGISTRY_UNIQUE_ID}\
+&cs=${NETDATA_REGISTRY_UNIQUE_ID}\
+&t=event\
+&ni=1\
+&an=anonymous-statistics\
+&av=${NETDATA_VERSION}\
+&ec=${ACTION}\
+&ea=${ACTION_RESULT}\
+&el=${ACTION_DATA}\
+&cd1=${NETDATA_SYSTEM_OS_NAME}\
+&cd2=${NETDATA_SYSTEM_OS_ID}\
+&cd3=${NETDATA_SYSTEM_OS_ID_LIKE}\
+&cd4=${NETDATA_SYSTEM_OS_VERSION}\
+&cd5=${NETDATA_SYSTEM_OS_VERSION_ID}\
+&cd6=${NETDATA_SYSTEM_OS_DETECTION}\
+&cd7=${NETDATA_SYSTEM_KERNEL_NAME}\
+&cd8=${NETDATA_SYSTEM_KERNEL_VERSION}\
+&cd9=${NETDATA_SYSTEM_ARCHITECTURE}\
+&cd10=${NETDATA_SYSTEM_VIRTUALIZATION}\
+&cd11=${NETDATA_SYSTEM_VIRT_DETECTION}\
+&cd12=${NETDATA_SYSTEM_CONTAINER}\
+&cd13=${NETDATA_SYSTEM_CONTAINER_DETECTION}\
+" > /dev/null 2>&1
+fi
diff --git a/daemon/config/README.md b/daemon/config/README.md
index c36a5b6d..dd032dbb 100644
--- a/daemon/config/README.md
+++ b/daemon/config/README.md
@@ -1,6 +1,5 @@
# Daemon configuration
-
<details markdown="1"><summary>The daemon configuration file is read from `/etc/netdata/netdata.conf`.</summary>
Depending on your installation method, Netdata will have been installed either directly under `/`, or under `/opt/netdata`. The paths mentioned here and in the documentation in general assume that your installation is under `/`. If it is not, you will find the exact same paths under `/opt/netdata` as well. (i.e. `/etc/netdata` will be `/opt/netdata/etc/netdata`).</details>
@@ -8,21 +7,21 @@ This config file **is not needed by default**. Netdata works fine out of the box
`netdata.conf` has sections stated with `[section]`. You will see the following sections:
-1. `[global]` to [configure](#global-section-options) the [netdata daemon](../).
-2. `[web]` to [configure the web server](../../web/server).
-3. `[plugins]` to [configure](#plugins-section-options) which [collectors](../../collectors) to use and PATH settings.
-4. `[health]` to [configure](#health-section-options) general settings for [health monitoring](../../health)
-5. `[registry]` for the [netdata registry](../../registry).
-6. `[backend]` to set up [streaming and replication](../../streaming) options.
-7. `[statsd]` for the general settings of the [stats.d.plugin](../../collectors/statsd.plugin).
-8. `[plugin:NAME]` sections for each collector plugin, under the comment [Per plugin configuration](#per-plugin-configuration).
-9. `[CHART_NAME]` sections for each chart defined, under the comment [Per chart configuration](#per-chart-configuration).
+1. `[global]` to [configure](#global-section-options) the [Netdata daemon](../).
+2. `[web]` to [configure the web server](../../web/server).
+3. `[plugins]` to [configure](#plugins-section-options) which [collectors](../../collectors) to use and PATH settings.
+4. `[health]` to [configure](#health-section-options) general settings for [health monitoring](../../health)
+5. `[registry]` for the [Netdata registry](../../registry).
+6. `[backend]` to set up [streaming and replication](../../streaming) options.
+7. `[statsd]` for the general settings of the [stats.d.plugin](../../collectors/statsd.plugin).
+8. `[plugin:NAME]` sections for each collector plugin, under the comment [Per plugin configuration](#per-plugin-configuration).
+9. `[CHART_NAME]` sections for each chart defined, under the comment [Per chart configuration](#per-chart-configuration).
-The configuration file is a `name = value` dictionary. Netdata will not complain if you set options unknown to it. When you check the running configuration by accessing the URL `/netdata.conf` on your netdata server, netdata will add a comment on settings it does not currently use.
+The configuration file is a `name = value` dictionary. Netdata will not complain if you set options unknown to it. When you check the running configuration by accessing the URL `/netdata.conf` on your Netdata server, Netdata will add a comment on settings it does not currently use.
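For example, assuming the default port (an illustrative check):

```sh
# dump the effective configuration of a local Netdata server
curl http://localhost:19999/netdata.conf
```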
## Applying changes
-After `netdata.conf` has been modified, netdata needs to be restarted for changes to apply:
+After `netdata.conf` has been modified, Netdata needs to be restarted for changes to apply:
```bash
sudo service netdata restart
@@ -40,41 +39,41 @@ Please note that your data history will be lost if you have modified `history` p
### [global] section options
-setting | default | info
-:------:|:-------:|:----
-process scheduling policy | `keep` | See [netdata process scheduling policy](../#netdata-process-scheduling-policy)
-OOM score | `1000` | See [OOM score](../#oom-score)
-glibc malloc arena max for plugins | `1` | See [Virtual memory](../#virtual-memory).
-glibc malloc arena max for netdata | `1` | See [Virtual memory](../#virtual-memory).
-hostname | auto-detected | The hostname of the computer running netdata.
-history | `3996` | The number of entries the netdata daemon will by default keep in memory for each chart dimension. This setting can also be configured per chart. Check [Memory Requirements](../../database/#database) for more information.
-update every | `1` | The frequency in seconds, for data collection. For more information see [Performance](../../docs/Performance.md#performance).
-config directory | `/etc/netdata` | The directory configuration files are kept.
-stock config directory | `/usr/lib/netdata/conf.d` |
-log directory | `/var/log/netdata` | The directory in which the [log files](../#log-files) are kept.
-web files directory | `/usr/share/netdata/web` | The directory the web static files are kept.
-cache directory | `/var/cache/netdata` | The directory the memory database will be stored if and when netdata exits. Netdata will re-read the database when it will start again, to continue from the same point.
-lib directory | `/var/lib/netdata` | Contains the alarm log and the netdata instance guid.
-home directory | `/var/cache/netdata` | Contains the db files for the collected metrics
-plugins directory | `"/usr/libexec/netdata/plugins.d" "/etc/netdata/custom-plugins.d"` | The directory plugin programs are kept. This setting supports multiple directories, space separated. If any directory path contains spaces, enclose it in single or double quotes.
-memory mode | `save` | When set to `save` netdata will save its round robin database on exit and load it on startup. When set to `map` the cache files will be updated in real time (check `man mmap` - do not set this on systems with heavy load or slow disks - the disks will continuously sync the in-memory database of netdata). When set to `dbengine` it behaves similarly to `map` but with much better disk and memory efficiency, however, with higher overhead. When set to `ram` the round robin database will be temporary and it will be lost when netdata exits. `none` disables the database at this host. This also disables health monitoring (there cannot be health monitoring without a database). host access prefix | | This is used in docker environments where /proc, /sys, etc have to be accessed via another path. You may also have to set SYS_PTRACE capability on the docker for this work. Check [issue 43](https://github.com/netdata/netdata/issues/43).
-memory deduplication (ksm) | `yes` | When set to `yes`, netdata will offer its in-memory round robin database to kernel same page merging (KSM) for deduplication. For more information check [Memory Deduplication - Kernel Same Page Merging - KSM](../../database/#ksm)
-TZ environment variable | `:/etc/localtime` | Where to find the timezone
-timezone | auto-detected | The timezone retrieved from the environment variable
-debug flags | `0x0000000000000000` | Bitmap of debug options to enable. For more information check [Tracing Options](../#debugging).
-debug log | `/var/log/netdata/debug.log` | The filename to save debug information. This file will not be created if debugging is not enabled. You can also set it to `syslog` to send the debug messages to syslog, or `none` to disable this log. For more information check [Tracing Options](../#debugging).
-error log | `/var/log/netdata/error.log` | The filename to save error messages for netdata daemon and all plugins (`stderr` is sent here for all netdata programs, including the plugins). You can also set it to `syslog` to send the errors to syslog, or `none` to disable this log.
-access log | `/var/log/netdata/access.log` | The filename to save the log of web clients accessing netdata charts. You can also set it to `syslog` to send the access log to syslog, or `none` to disable this log.
-errors flood protection period | `1200` | UNUSED - Length of period (in sec) during which the number of errors should not exceed the `errors to trigger flood protection`.
-errors to trigger flood protection | `200` | UNUSED - Number of errors written to the log in `errors flood protection period` sec before flood protection is activated.
-run as user | `netdata` | The user netdata will run as.
-pthread stack size | auto-detected |
-cleanup obsolete charts after seconds | `3600` | See [monitoring ephemeral containers](../../collectors/cgroups.plugin/#monitoring-ephemeral-containers), also sets the timeout for cleaning up obsolete dimensions
-gap when lost iterations above | `1` |
-cleanup orphan hosts after seconds | `3600` | How long to wait until automatically removing from the DB a remote netdata host (slave) that is no longer sending data.
-delete obsolete charts files | `yes` | See [monitoring ephemeral containers](../../collectors/cgroups.plugin/#monitoring-ephemeral-containers), also affects the deletion of files for obsolete dimensions
-delete orphan hosts files | `yes` | Set to `no` to disable non-responsive host removal.
-enable zero metrics | `no` | Set to `yes` to show charts when all their metrics are zero.
+| setting | default | info |
+|:-------:|:-------:|:-----|
+| process scheduling policy | `keep` | See [Netdata process scheduling policy](../#netdata-process-scheduling-policy) |
+| OOM score | `1000` | See [OOM score](../#oom-score). |
+| glibc malloc arena max for plugins | `1` | See [Virtual memory](../#virtual-memory). |
+| glibc malloc arena max for netdata | `1` | See [Virtual memory](../#virtual-memory). |
+| hostname | auto-detected | The hostname of the computer running Netdata. |
+| history | `3996` | The number of entries the `netdata` daemon will by default keep in memory for each chart dimension. This setting can also be configured per chart. Check [Memory Requirements](../../database/#database) for more information. |
+| update every | `1` | The frequency in seconds, for data collection. For more information see [Performance](../../docs/Performance.md#performance). |
+| config directory | `/etc/netdata` | The directory in which configuration files are kept. |
+| stock config directory | `/usr/lib/netdata/conf.d` | |
+| log directory | `/var/log/netdata` | The directory in which the [log files](../#log-files) are kept. |
+| web files directory | `/usr/share/netdata/web` | The directory in which the web static files are kept. |
+| cache directory | `/var/cache/netdata` | The directory in which the memory database will be stored if and when Netdata exits. Netdata will re-read the database when it starts again, to continue from the same point. |
+| lib directory | `/var/lib/netdata` | Contains the alarm log and the Netdata instance GUID. |
+| home directory | `/var/cache/netdata` | Contains the db files for the collected metrics. |
+| plugins directory | `"/usr/libexec/netdata/plugins.d" "/etc/netdata/custom-plugins.d"` | The directory in which plugin programs are kept. This setting supports multiple directories, space separated. If any directory path contains spaces, enclose it in single or double quotes. |
+| memory mode | `save` | When set to `save` Netdata will save its round robin database on exit and load it on startup. When set to `map` the cache files will be updated in real time (check `man mmap` - do not set this on systems with heavy load or slow disks - the disks will continuously sync the in-memory database of Netdata). When set to `dbengine` it behaves similarly to `map` but with much better disk and memory efficiency, however, with higher overhead. When set to `ram` the round robin database will be temporary and it will be lost when Netdata exits. `none` disables the database at this host. This also disables health monitoring (there cannot be health monitoring without a database). |
+| host access prefix | | This is used in docker environments where /proc, /sys, etc have to be accessed via another path. You may also have to set SYS_PTRACE capability on the docker for this to work. Check [issue 43](https://github.com/netdata/netdata/issues/43). |
+| memory deduplication (ksm) | `yes` | When set to `yes`, Netdata will offer its in-memory round robin database to kernel same page merging (KSM) for deduplication. For more information check [Memory Deduplication - Kernel Same Page Merging - KSM](../../database/#ksm). |
+| TZ environment variable | `:/etc/localtime` | Where to find the timezone. |
+| timezone | auto-detected | The timezone retrieved from the environment variable. |
+| debug flags | `0x0000000000000000` | Bitmap of debug options to enable. For more information check [Tracing Options](../#debugging). |
+| debug log | `/var/log/netdata/debug.log` | The filename to save debug information. This file will not be created if debugging is not enabled. You can also set it to `syslog` to send the debug messages to syslog, or `none` to disable this log. For more information check [Tracing Options](../#debugging). |
+| error log | `/var/log/netdata/error.log` | The filename to save error messages for the Netdata daemon and all plugins (`stderr` is sent here for all Netdata programs, including the plugins). You can also set it to `syslog` to send the errors to syslog, or `none` to disable this log. |
+| access log | `/var/log/netdata/access.log` | The filename to save the log of web clients accessing Netdata charts. You can also set it to `syslog` to send the access log to syslog, or `none` to disable this log. |
+| errors flood protection period | `1200` | UNUSED - Length of period (in sec) during which the number of errors should not exceed the `errors to trigger flood protection`. |
+| errors to trigger flood protection | `200` | UNUSED - Number of errors written to the log in `errors flood protection period` sec before flood protection is activated. |
+| run as user | `netdata` | The user Netdata will run as. |
+| pthread stack size | auto-detected | |
+| cleanup obsolete charts after seconds | `3600` | See [monitoring ephemeral containers](../../collectors/cgroups.plugin/#monitoring-ephemeral-containers); also sets the timeout for cleaning up obsolete dimensions. |
+| gap when lost iterations above | `1` | |
+| cleanup orphan hosts after seconds | `3600` | How long to wait until automatically removing from the DB a remote Netdata host (slave) that is no longer sending data. |
+| delete obsolete charts files | `yes` | See [monitoring ephemeral containers](../../collectors/cgroups.plugin/#monitoring-ephemeral-containers); also affects the deletion of files for obsolete dimensions. |
+| delete orphan hosts files | `yes` | Set to `no` to disable non-responsive host removal. |
+| enable zero metrics | `no` | Set to `yes` to show charts when all their metrics are zero. |
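For instance, to switch to the `dbengine` memory mode described above, a `netdata.conf` fragment could look like this (a sketch; all other options keep their defaults):

```
[global]
    memory mode = dbengine
```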
### [web] section options
@@ -86,13 +85,13 @@ In this section you will see be a boolean (`yes`/`no`) option for each plugin (e
Additionally, there will be the following options:
-setting | default | info
-:------:|:-------:|:----
-PATH environment variable | `auto-detected` |
-PYTHONPATH environment variable | | Used to set a custom python path
-enable running new plugins | `yes` | When set to `yes`, netdata will enable detected plugins, even if they are not configured explicitly. Setting this to `no` will only enable plugins explicitly configirued in this file with a `yes`
-check for new plugins every | 60 | The time in seconds to check for new plugins in the plugins directory. This allows having other applications dynamically creating plugins for netdata.
-checks | `no` | This is a debugging plugin for the internal latency
+| setting|default|info|
+|:-----:|:-----:|:---|
+| PATH environment variable|`auto-detected`||
+| PYTHONPATH environment variable||Used to set a custom python path|
+| enable running new plugins|`yes`|When set to `yes`, Netdata will enable detected plugins, even if they are not configured explicitly. Setting this to `no` will only enable plugins explicitly configured in this file with a `yes`|
+| check for new plugins every|60|The time in seconds to check for new plugins in the plugins directory. This allows having other applications dynamically creating plugins for Netdata.|
+| checks|`no`|This is a debugging plugin for the internal latency|
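For example, to run only plugins that are explicitly enabled in this file (sketch):

```
[plugins]
    enable running new plugins = no
```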
### [health] section options
@@ -102,16 +101,16 @@ Specific alarms are configured in per-collector config files under the `health.d
[Alarm notifications](../../health/notifications/#netdata-alarm-notifications) are configured in `health_alarm_notify.conf`.
-setting | default | info
-:------:|:-------:|:----
-enabled | `yes` | Set to `no` to disable all alarms and notifications
-in memory max health log entries | 1000 | Size of the alarm history held in RAM
-script to execute on alarm | `/usr/libexec/netdata/plugins.d/alarm-notify.sh` | The script that sends alarm notifications. Note that in versions before 1.16, the plugins.d directory may be installed in a different location in certain OSs (e.g. under `/usr/lib/netdata`).
-stock health configuration directory | `/usr/lib/netdata/conf.d/health.d` | Contains the stock alarm configuration files for each collector
-health configuration directory | `/etc/netdata/health.d` | The directory containing the user alarm configuration files, to override the stock configurations
-run at least every seconds | `10` | Controls how often all alarm conditions should be evaluated.
-postpone alarms during hibernation for seconds | `60` | Prevents false alarms. May need to be increased if you get alarms during hibernation.
-rotate log every lines | 2000 | Controls the number of alarm log entries stored in `<lib directory>/health-log.db`, where `<lib directory>` is the one configured in the [[global] section](#global-section-options)
+| setting|default|info|
+|:-----:|:-----:|:---|
+| enabled|`yes`|Set to `no` to disable all alarms and notifications|
+| in memory max health log entries|1000|Size of the alarm history held in RAM|
+| script to execute on alarm|`/usr/libexec/netdata/plugins.d/alarm-notify.sh`|The script that sends alarm notifications. Note that in versions before 1.16, the plugins.d directory may be installed in a different location in certain OSs (e.g. under `/usr/lib/netdata`).|
+| stock health configuration directory|`/usr/lib/netdata/conf.d/health.d`|Contains the stock alarm configuration files for each collector|
+| health configuration directory|`/etc/netdata/health.d`|The directory containing the user alarm configuration files, to override the stock configurations|
+| run at least every seconds|`10`|Controls how often all alarm conditions should be evaluated.|
+| postpone alarms during hibernation for seconds|`60`|Prevents false alarms. May need to be increased if you get alarms during hibernation.|
+| rotate log every lines|2000|Controls the number of alarm log entries stored in `<lib directory>/health-log.db`, where `<lib directory>` is the one configured in the [\[global\] section](#global-section-options)|
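For example, to disable all alarms and notifications (sketch):

```
[health]
    enabled = no
```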
### [registry] section options
@@ -129,16 +128,16 @@ The configuration options for plugins appear in sections following the pattern `
Most internal plugins will provide additional options. Check [Internal Plugins](../../collectors/) for more information.
-Please note, that by default Netdata will enable monitoring metrics for disks, memory, and network only when they are not zero. If they are constantly zero they are ignored. Metrics that will start having values, after netdata is started, will be detected and charts will be automatically added to the dashboard (a refresh of the dashboard is needed for them to appear though). Use `yes` instead of `auto` in plugin configuration sections to enable these charts permanently. You can also set the `enable zero metrics` option to `yes` in the `[global]` section which enables charts with zero metrics for all internal Netdata plugins.
+Please note that, by default, Netdata will enable monitoring metrics for disks, memory, and network only when they are not zero. If they are constantly zero they are ignored. Metrics that start having values after Netdata is started will be detected and charts will be automatically added to the dashboard (a refresh of the dashboard is needed for them to appear though). Use `yes` instead of `auto` in plugin configuration sections to enable these charts permanently. You can also set the `enable zero metrics` option to `yes` in the `[global]` section which enables charts with zero metrics for all internal Netdata plugins.
#### External plugins
External plugins will have only 2 options at `netdata.conf`:
-setting | default | info
-:------:|:-------:|:----
-update every|the value of `[global].update every` setting|The frequency in seconds the plugin should collect values. For more information check [Performance](../../docs/Performance.md#performance).
-command options|*empty*|Additional command line options to pass to the plugin.
+| setting | default | info |
+| :-----:|:-----:|:---|
+| update every | the value of `[global].update every` setting|The frequency in seconds the plugin should collect values. For more information check [Performance](../../docs/Performance.md#performance).|
+| command options | _empty_ | Additional command line options to pass to the plugin.|
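A hypothetical section for an external plugin named `example` could then look like this (illustrative; the plugin name is made up):

```
[plugin:example]
    update every = 10
```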
External plugins that need additional configuration may support a dedicated file in `/etc/netdata`. Check their documentation.
@@ -146,4 +145,4 @@ External plugins that need additional configuration may support a dedicated file
In this section you will find a separate subsection for each chart shown on the dashboard. You can control all aspects of a specific chart here. You can understand what each option does by reading [how charts are defined](../../collectors/plugins.d/#chart). If you don't know how to find the name of a chart, you can learn about it [here](../../docs/Charts.md).
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdaemon%2Fconfig%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdaemon%2Fconfig%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/daemon/system-info.sh b/daemon/system-info.sh
index 1f5bc52b..6fab741f 100755
--- a/daemon/system-info.sh
+++ b/daemon/system-info.sh
@@ -22,8 +22,8 @@ if [ "${KERNEL_NAME}" = "Darwin" ]; then
OIFS="$IFS"
IFS=$'\n'
set $(sw_vers) > /dev/null
- NAME=$(echo $1 | tr "\n" ' ' | sed 's/ProductName:[ ]*//')
- VERSION=$(echo $2 | tr "\n" ' ' | sed 's/ProductVersion:[ ]*//')
+ NAME=$(echo $1 | tr "\n\t" ' ' | sed -e 's/ProductName:[ ]*//' -e 's/[ ]*$//')
+ VERSION=$(echo $2 | tr "\n\t" ' ' | sed -e 's/ProductVersion:[ ]*//' -e 's/[ ]*$//')
ID="mac"
ID_LIKE="mac"
OS_DETECTION="sw_vers"
@@ -44,7 +44,8 @@ else
if [ "${NAME}" = "unknown" ]; then NAME="${DISTRIB_ID}"; fi
if [ "${VERSION}" = "unknown" ]; then VERSION="${DISTRIB_RELEASE}"; fi
if [ "${ID}" = "unknown" ]; then ID="${DISTRIB_CODENAME}"; fi
- elif [ -n "$(command -v lsb_release 2>/dev/null)" ]; then
+ fi
+ if [ -n "$(command -v lsb_release 2>/dev/null)" ]; then
if [ "${OS_DETECTION}" = "unknown" ]; then OS_DETECTION="lsb_release"; else OS_DETECTION="Mixed"; fi
if [ "${NAME}" = "unknown" ]; then NAME="$(lsb_release -is 2>/dev/null)"; fi
if [ "${VERSION}" = "unknown" ]; then VERSION="$(lsb_release -rs 2>/dev/null)"; fi
diff --git a/daemon/unit_test.c b/daemon/unit_test.c
index f9b58ce6..1c84022c 100644
--- a/daemon/unit_test.c
+++ b/daemon/unit_test.c
@@ -1581,34 +1581,12 @@ static inline void rrddim_set_by_pointer_fake_time(RRDDIM *rd, collected_number
if(unlikely(v > rd->collected_value_max)) rd->collected_value_max = v;
}
-int test_dbengine(void)
+static RRDHOST *dbengine_rrdhost_find_or_create(char *name)
{
- const int CHARTS = 128;
- const int DIMS = 16; /* That gives us 2048 metrics */
- const int POINTS = 16384; /* This produces 128MiB of metric data */
- const int QUERY_BATCH = 4096;
- uint8_t same;
- int i, j, k, c, errors;
- RRDHOST *host = NULL;
- RRDSET *st[CHARTS];
- RRDDIM *rd[CHARTS][DIMS];
- char name[101];
- time_t time_now;
- collected_number last;
- struct rrddim_query_handle handle;
- calculated_number value, expected;
- storage_number n;
-
- error_log_limit_unlimited();
- fprintf(stderr, "\nRunning DB-engine test\n");
-
- default_rrd_memory_mode = RRD_MEMORY_MODE_DBENGINE;
-
- debug(D_RRDHOST, "Initializing localhost with hostname 'unittest-dbengine'");
- host = rrdhost_find_or_create(
- "unittest-dbengine"
- , "unittest-dbengine"
- , "unittest-dbengine"
+ return rrdhost_find_or_create(
+ name
+ , name
+ , name
, os_type
, netdata_configured_timezone
, config_get(CONFIG_SECTION_BACKEND, "host tags", "")
@@ -1624,15 +1602,33 @@ int test_dbengine(void)
, default_rrdpush_send_charts_matching
, NULL
);
- if (NULL == host)
- return 1;
+}
+
+// constants for test_dbengine
+static const int CHARTS = 64;
+static const int DIMS = 16; // That gives us 64 * 16 = 1024 metrics
+#define REGIONS (3) // 3 regions of update_every
+// first region update_every is 2, second is 3, third is 1
+static const int REGION_UPDATE_EVERY[REGIONS] = {2, 3, 1};
+static const int REGION_POINTS[REGIONS] = {
+ 16384, // This produces 64MiB of metric data for the first region: update_every = 2
+ 16384, // This produces 64MiB of metric data for the second region: update_every = 3
+ 16384, // This produces 64MiB of metric data for the third region: update_every = 1
+};
+static const int QUERY_BATCH = 4096;
+
+static void test_dbengine_create_charts(RRDHOST *host, RRDSET *st[CHARTS], RRDDIM *rd[CHARTS][DIMS],
+ int update_every)
+{
+ int i, j;
+ char name[101];
for (i = 0 ; i < CHARTS ; ++i) {
snprintfz(name, 100, "dbengine-chart-%d", i);
// create the chart
st[i] = rrdset_create(host, "netdata", name, name, "netdata", NULL, "Unit Testing", "a value", "unittest",
- NULL, 1, 1, RRDSET_TYPE_LINE);
+ NULL, 1, update_every, RRDSET_TYPE_LINE);
rrdset_flag_set(st[i], RRDSET_FLAG_DEBUG);
rrdset_flag_set(st[i], RRDSET_FLAG_STORE_FIRST);
for (j = 0 ; j < DIMS ; ++j) {
@@ -1642,50 +1638,103 @@ int test_dbengine(void)
}
}
+ // Initialize DB with the very first entries
+ for (i = 0 ; i < CHARTS ; ++i) {
+ for (j = 0 ; j < DIMS ; ++j) {
+ rd[i][j]->last_collected_time.tv_sec =
+ st[i]->last_collected_time.tv_sec = st[i]->last_updated.tv_sec = 2 * API_RELATIVE_TIME_MAX - 1;
+ rd[i][j]->last_collected_time.tv_usec =
+ st[i]->last_collected_time.tv_usec = st[i]->last_updated.tv_usec = 0;
+ }
+ }
+ for (i = 0 ; i < CHARTS ; ++i) {
+ st[i]->usec_since_last_update = USEC_PER_SEC;
+
+ for (j = 0; j < DIMS; ++j) {
+ rrddim_set_by_pointer_fake_time(rd[i][j], 69, 2 * API_RELATIVE_TIME_MAX); // set first value to 69
+ }
+ rrdset_done(st[i]);
+ }
+ // Flush pages for subsequent real values
+ for (i = 0 ; i < CHARTS ; ++i) {
+ for (j = 0; j < DIMS; ++j) {
+ rrdeng_store_metric_flush_current_page(rd[i][j]);
+ }
+ }
+}
+
+// Feeds the database region with test data, returns last timestamp of region
+static time_t test_dbengine_create_metrics(RRDSET *st[CHARTS], RRDDIM *rd[CHARTS][DIMS],
+ int current_region, time_t time_start)
+{
+ time_t time_now;
+ int i, j, c, update_every;
+ collected_number next;
+
+ update_every = REGION_UPDATE_EVERY[current_region];
+ time_now = time_start + update_every;
// feed it with the test data
- time_now = 1;
- last = 0;
for (i = 0 ; i < CHARTS ; ++i) {
for (j = 0 ; j < DIMS ; ++j) {
rd[i][j]->last_collected_time.tv_sec =
st[i]->last_collected_time.tv_sec = st[i]->last_updated.tv_sec = time_now;
rd[i][j]->last_collected_time.tv_usec =
- st[i]->last_collected_time.tv_usec = st[i]->last_updated.tv_usec = 0;
+ st[i]->last_collected_time.tv_usec = st[i]->last_updated.tv_usec = 0;
}
}
- for(c = 0; c < POINTS ; ++c) {
- ++time_now; // time_now = c + 2
+ for (c = 0; c < REGION_POINTS[current_region] ; ++c) {
+ time_now += update_every; // time_now = start + (c + 2) * update_every
for (i = 0 ; i < CHARTS ; ++i) {
- st[i]->usec_since_last_update = USEC_PER_SEC;
+ st[i]->usec_since_last_update = USEC_PER_SEC * update_every;
for (j = 0; j < DIMS; ++j) {
- last = i * DIMS * POINTS + j * POINTS + c;
- rrddim_set_by_pointer_fake_time(rd[i][j], last, time_now);
+ next = i * DIMS * REGION_POINTS[current_region] + j * REGION_POINTS[current_region] + c;
+ rrddim_set_by_pointer_fake_time(rd[i][j], next, time_now);
}
rrdset_done(st[i]);
}
}
+ return time_now; //time_end
+}
- // check the result
+// Checks the metric data for the given region, returns number of errors
+static int test_dbengine_check_metrics(RRDSET *st[CHARTS], RRDDIM *rd[CHARTS][DIMS],
+ int current_region, time_t time_start)
+{
+ uint8_t same;
+ time_t time_now, time_retrieved;
+ int i, j, k, c, errors, update_every;
+ collected_number last;
+ calculated_number value, expected;
+ storage_number n;
+ struct rrddim_query_handle handle;
+
+ update_every = REGION_UPDATE_EVERY[current_region];
errors = 0;
- for(c = 0; c < POINTS ; c += QUERY_BATCH) {
- time_now = c + 2;
+ // check the result
+ for (c = 0; c < REGION_POINTS[current_region] ; c += QUERY_BATCH) {
+ time_now = time_start + (c + 2) * update_every;
for (i = 0 ; i < CHARTS ; ++i) {
for (j = 0; j < DIMS; ++j) {
- rd[i][j]->state->query_ops.init(rd[i][j], &handle, time_now, time_now + QUERY_BATCH);
+ rd[i][j]->state->query_ops.init(rd[i][j], &handle, time_now, time_now + QUERY_BATCH * update_every);
for (k = 0; k < QUERY_BATCH; ++k) {
- last = i * DIMS * POINTS + j * POINTS + c + k;
+ last = i * DIMS * REGION_POINTS[current_region] + j * REGION_POINTS[current_region] + c + k;
expected = unpack_storage_number(pack_storage_number((calculated_number)last, SN_EXISTS));
- n = rd[i][j]->state->query_ops.next_metric(&handle);
+ n = rd[i][j]->state->query_ops.next_metric(&handle, &time_retrieved);
value = unpack_storage_number(n);
same = (calculated_number_round(value * 10000000.0) == calculated_number_round(expected * 10000000.0)) ? 1 : 0;
if(!same) {
fprintf(stderr, " DB-engine unittest %s/%s: at %lu secs, expecting value "
CALCULATED_NUMBER_FORMAT ", found " CALCULATED_NUMBER_FORMAT ", ### E R R O R ###\n",
- st[i]->name, rd[i][j]->name, (unsigned long)time_now + k, expected, value);
+ st[i]->name, rd[i][j]->name, (unsigned long)time_now + k * update_every, expected, value);
+ errors++;
+ }
+ if(time_retrieved != time_now + k * update_every) {
+ fprintf(stderr, " DB-engine unittest %s/%s: at %lu secs, found timestamp %lu ### E R R O R ###\n",
+ st[i]->name, rd[i][j]->name, (unsigned long)time_now + k * update_every, (unsigned long)time_retrieved);
errors++;
}
}
@@ -1693,7 +1742,184 @@ int test_dbengine(void)
}
}
}
+ return errors;
+}
+
+// Check rrdr transformations
+static int test_dbengine_check_rrdr(RRDSET *st[CHARTS], RRDDIM *rd[CHARTS][DIMS],
+ int current_region, time_t time_start, time_t time_end)
+{
+ uint8_t same;
+ time_t time_now, time_retrieved;
+ int i, j, errors, update_every;
+ long c;
+ collected_number last;
+ calculated_number value, expected;
+
+ errors = 0;
+ update_every = REGION_UPDATE_EVERY[current_region];
+ long points = (time_end - time_start) / update_every - 1;
+ for (i = 0 ; i < CHARTS ; ++i) {
+ RRDR *r = rrd2rrdr(st[i], points, time_start + update_every, time_end, RRDR_GROUPING_AVERAGE, 0, 0, NULL);
+ if (!r) {
+ fprintf(stderr, " DB-engine unittest %s: empty RRDR ### E R R O R ###\n", st[i]->name);
+ return ++errors;
+ } else {
+ assert(r->st == st[i]);
+ for (c = 0; c != rrdr_rows(r) ; ++c) {
+ RRDDIM *d;
+ time_now = time_start + (c + 2) * update_every;
+ time_retrieved = r->t[c];
+
+ // for each dimension
+ for (j = 0, d = r->st->dimensions ; d && j < r->d ; ++j, d = d->next) {
+ calculated_number *cn = &r->v[ c * r->d ];
+ value = cn[j];
+ assert(rd[i][j] == d);
+
+ last = i * DIMS * REGION_POINTS[current_region] + j * REGION_POINTS[current_region] + c;
+ expected = unpack_storage_number(pack_storage_number((calculated_number)last, SN_EXISTS));
+
+ same = (calculated_number_round(value * 10000000.0) == calculated_number_round(expected * 10000000.0)) ? 1 : 0;
+ if(!same) {
+ fprintf(stderr, " DB-engine unittest %s/%s: at %lu secs, expecting value "
+ CALCULATED_NUMBER_FORMAT ", RRDR found " CALCULATED_NUMBER_FORMAT ", ### E R R O R ###\n",
+ st[i]->name, rd[i][j]->name, (unsigned long)time_now, expected, value);
+ errors++;
+ }
+ if(time_retrieved != time_now) {
+ fprintf(stderr, " DB-engine unittest %s/%s: at %lu secs, found RRDR timestamp %lu ### E R R O R ###\n",
+ st[i]->name, rd[i][j]->name, (unsigned long)time_now, (unsigned long)time_retrieved);
+ errors++;
+ }
+ }
+ }
+ rrdr_free(r);
+ }
+ }
+ return errors;
+}
+
+int test_dbengine(void)
+{
+ int i, j, errors, update_every, current_region;
+ RRDHOST *host = NULL;
+ RRDSET *st[CHARTS];
+ RRDDIM *rd[CHARTS][DIMS];
+ time_t time_start[REGIONS], time_end[REGIONS];
+
+ error_log_limit_unlimited();
+ fprintf(stderr, "\nRunning DB-engine test\n");
+
+ default_rrd_memory_mode = RRD_MEMORY_MODE_DBENGINE;
+
+ debug(D_RRDHOST, "Initializing localhost with hostname 'unittest-dbengine'");
+ host = dbengine_rrdhost_find_or_create("unittest-dbengine");
+ if (NULL == host)
+ return 1;
+
+ current_region = 0; // this is the first region of data
+ update_every = REGION_UPDATE_EVERY[current_region]; // set data collection frequency to 2 seconds
+ test_dbengine_create_charts(host, st, rd, update_every);
+
+ time_start[current_region] = 2 * API_RELATIVE_TIME_MAX;
+ time_end[current_region] = test_dbengine_create_metrics(st,rd, current_region, time_start[current_region]);
+
+ errors = test_dbengine_check_metrics(st, rd, current_region, time_start[current_region]);
+ if (errors)
+ goto error_out;
+
+ current_region = 1; //this is the second region of data
+ update_every = REGION_UPDATE_EVERY[current_region]; // set data collection frequency to 3 seconds
+ // Align pages for frequency change
+ for (i = 0 ; i < CHARTS ; ++i) {
+ st[i]->update_every = update_every;
+ for (j = 0; j < DIMS; ++j) {
+ rrdeng_store_metric_flush_current_page(rd[i][j]);
+ }
+ }
+
+ time_start[current_region] = time_end[current_region - 1] + update_every;
+ if (0 != time_start[current_region] % update_every) // align to update_every
+ time_start[current_region] += update_every - time_start[current_region] % update_every;
+ time_end[current_region] = test_dbengine_create_metrics(st,rd, current_region, time_start[current_region]);
+
+ errors = test_dbengine_check_metrics(st, rd, current_region, time_start[current_region]);
+ if (errors)
+ goto error_out;
+
+ current_region = 2; //this is the third region of data
+ update_every = REGION_UPDATE_EVERY[current_region]; // set data collection frequency to 1 second
+ // Align pages for frequency change
+ for (i = 0 ; i < CHARTS ; ++i) {
+ st[i]->update_every = update_every;
+ for (j = 0; j < DIMS; ++j) {
+ rrdeng_store_metric_flush_current_page(rd[i][j]);
+ }
+ }
+
+ time_start[current_region] = time_end[current_region - 1] + update_every;
+ if (0 != time_start[current_region] % update_every) // align to update_every
+ time_start[current_region] += update_every - time_start[current_region] % update_every;
+ time_end[current_region] = test_dbengine_create_metrics(st, rd, current_region, time_start[current_region]);
+ errors = test_dbengine_check_metrics(st, rd, current_region, time_start[current_region]);
+ if (errors)
+ goto error_out;
+
+ for (current_region = 0 ; current_region < REGIONS ; ++current_region) {
+ errors = test_dbengine_check_rrdr(st, rd, current_region, time_start[current_region], time_end[current_region]);
+ if (errors)
+ goto error_out;
+ }
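The start-time alignment performed before each region change above is plain modular arithmetic; a standalone sketch with illustrative numbers (a fragment, not part of the test itself):

```c
// Align a region's start time to a new update_every (illustrative values).
time_t start = 101;       // e.g. previous region's end + new update_every
int update_every = 3;     // the new data collection frequency
if (0 != start % update_every)                    // 101 % 3 == 2 -> misaligned
    start += update_every - start % update_every; // 101 + (3 - 2) == 102
```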
+
+ current_region = 1;
+ update_every = REGION_UPDATE_EVERY[current_region]; // use the maximum update_every = 3
+ errors = 0;
+ long points = (time_end[REGIONS - 1] - time_start[0]) / update_every - 1; // cover all time regions with RRDR
+ long point_offset = (time_start[current_region] - time_start[0]) / update_every;
+ for (i = 0 ; i < CHARTS ; ++i) {
+ RRDR *r = rrd2rrdr(st[i], points, time_start[0] + update_every, time_end[REGIONS - 1], RRDR_GROUPING_AVERAGE, 0, 0, NULL);
+ if (!r) {
+ fprintf(stderr, " DB-engine unittest %s: empty RRDR ### E R R O R ###\n", st[i]->name);
+ ++errors;
+ } else {
+ long c;
+
+ assert(r->st == st[i]);
+ // test current region values only, since they must be left unchanged
+ for (c = point_offset ; c < point_offset + rrdr_rows(r) / REGIONS / 2 ; ++c) {
+ RRDDIM *d;
+ time_t time_now = time_start[current_region] + (c - point_offset + 2) * update_every;
+ time_t time_retrieved = r->t[c];
+
+ // for each dimension
+ for(j = 0, d = r->st->dimensions ; d && j < r->d ; ++j, d = d->next) {
+ calculated_number *cn = &r->v[ c * r->d ];
+ calculated_number value = cn[j];
+ assert(rd[i][j] == d);
+
+ collected_number last = i * DIMS * REGION_POINTS[current_region] + j * REGION_POINTS[current_region] + c - point_offset;
+ calculated_number expected = unpack_storage_number(pack_storage_number((calculated_number)last, SN_EXISTS));
+
+ uint8_t same = (calculated_number_round(value * 10000000.0) == calculated_number_round(expected * 10000000.0)) ? 1 : 0;
+ if(!same) {
+ fprintf(stderr, " DB-engine unittest %s/%s: at %lu secs, expecting value "
+ CALCULATED_NUMBER_FORMAT ", RRDR found " CALCULATED_NUMBER_FORMAT ", ### E R R O R ###\n",
+ st[i]->name, rd[i][j]->name, (unsigned long)time_now, expected, value);
+ errors++;
+ }
+ if(time_retrieved != time_now) {
+ fprintf(stderr, " DB-engine unittest %s/%s: at %lu secs, found RRDR timestamp %lu ### E R R O R ###\n",
+ st[i]->name, rd[i][j]->name, (unsigned long)time_now, (unsigned long)time_retrieved);
+ errors++;
+ }
+ }
+ }
+ rrdr_free(r);
+ }
+ }
+error_out:
rrdeng_exit(host->rrdeng_ctx);
rrd_wrlock();
rrdhost_delete_charts(host);
@@ -1704,43 +1930,25 @@ int test_dbengine(void)
void generate_dbengine_dataset(unsigned history_seconds)
{
- const int DIMS = 128;
+ const int DSET_DIMS = 128;
const uint64_t EXPECTED_COMPRESSION_RATIO = 94;
- int j;
+ int j, update_every = 1;
RRDHOST *host = NULL;
RRDSET *st;
- RRDDIM *rd[DIMS];
+ RRDDIM *rd[DSET_DIMS];
char name[101];
time_t time_current, time_present;
default_rrd_memory_mode = RRD_MEMORY_MODE_DBENGINE;
default_rrdeng_page_cache_mb = 128;
- /* Worst case for uncompressible data */
- default_rrdeng_disk_quota_mb = (((uint64_t)DIMS) * sizeof(storage_number) * history_seconds) / (1024 * 1024);
+ // Worst case for uncompressible data
+ default_rrdeng_disk_quota_mb = (((uint64_t)DSET_DIMS) * sizeof(storage_number) * history_seconds) / (1024 * 1024);
default_rrdeng_disk_quota_mb -= default_rrdeng_disk_quota_mb * EXPECTED_COMPRESSION_RATIO / 100;
error_log_limit_unlimited();
debug(D_RRDHOST, "Initializing localhost with hostname 'dbengine-dataset'");
- host = rrdhost_find_or_create(
- "dbengine-dataset"
- , "dbengine-dataset"
- , "dbengine-dataset"
- , os_type
- , netdata_configured_timezone
- , config_get(CONFIG_SECTION_BACKEND, "host tags", "")
- , program_name
- , program_version
- , default_rrd_update_every
- , default_rrd_history_entries
- , RRD_MEMORY_MODE_DBENGINE
- , default_health_enabled
- , default_rrdpush_enabled
- , default_rrdpush_destination
- , default_rrdpush_api_key
- , default_rrdpush_send_charts_matching
- , NULL
- );
+ host = dbengine_rrdhost_find_or_create("dbengine-dataset");
if (NULL == host)
return;
@@ -1748,8 +1956,8 @@ void generate_dbengine_dataset(unsigned history_seconds)
// create the chart
st = rrdset_create(host, "example", "random", "random", "example", NULL, "random", "random", "random",
- NULL, 1, 1, RRDSET_TYPE_LINE);
- for (j = 0 ; j < DIMS ; ++j) {
+ NULL, 1, update_every, RRDSET_TYPE_LINE);
+ for (j = 0 ; j < DSET_DIMS ; ++j) {
snprintfz(name, 100, "random%d", j);
rd[j] = rrddim_add(st, name, NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
@@ -1758,7 +1966,7 @@ void generate_dbengine_dataset(unsigned history_seconds)
time_present = now_realtime_sec();
// feed it with the test data
time_current = time_present - history_seconds;
- for (j = 0 ; j < DIMS ; ++j) {
+ for (j = 0 ; j < DSET_DIMS ; ++j) {
rd[j]->last_collected_time.tv_sec =
st->last_collected_time.tv_sec = st->last_updated.tv_sec = time_current;
rd[j]->last_collected_time.tv_usec =
@@ -1767,7 +1975,7 @@ void generate_dbengine_dataset(unsigned history_seconds)
for( ; time_current < time_present; ++time_current) {
st->usec_since_last_update = USEC_PER_SEC;
- for (j = 0; j < DIMS; ++j) {
+ for (j = 0; j < DSET_DIMS; ++j) {
rrddim_set_by_pointer_fake_time(rd[j], (time_current + j) % 128, time_current);
}
rrdset_done(st);
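As a worked example of the disk-quota computation above (illustrative numbers, not from the source): with `DSET_DIMS = 128` dimensions, 4-byte `storage_number` values, and `history_seconds = 86400` (one day), the uncompressed worst case is `128 * 4 * 86400 / (1024 * 1024) ≈ 42 MiB`; subtracting the expected 94% compression leaves a disk quota of roughly 2.5 MiB.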
diff --git a/database/Makefile.in b/database/Makefile.in
new file mode 100644
index 00000000..e2328021
--- /dev/null
+++ b/database/Makefile.in
@@ -0,0 +1,698 @@
+# Makefile.in generated by automake 1.15.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2017 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+VPATH = @srcdir@
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = database
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(dist_noinst_DATA) \
+ $(am__DIST_COMMON)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \
+ ctags-recursive dvi-recursive html-recursive info-recursive \
+ install-data-recursive install-dvi-recursive \
+ install-exec-recursive install-html-recursive \
+ install-info-recursive install-pdf-recursive \
+ install-ps-recursive install-recursive installcheck-recursive \
+ installdirs-recursive pdf-recursive ps-recursive \
+ tags-recursive uninstall-recursive
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_noinst_DATA)
+RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \
+ distclean-recursive maintainer-clean-recursive
+am__recursive_targets = \
+ $(RECURSIVE_TARGETS) \
+ $(RECURSIVE_CLEAN_TARGETS) \
+ $(am__extra_recursive_targets)
+AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \
+ distdir
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+# Read a list of newline-separated strings from the standard input,
+# and print each of them once, without duplicates. Input order is
+# *not* preserved.
+am__uniquify_input = $(AWK) '\
+ BEGIN { nonempty = 0; } \
+ { items[$$0] = 1; nonempty = 1; } \
+ END { if (nonempty) { for (i in items) print i; }; } \
+'
+# Make sure the list of sources is unique. This is necessary because,
+# e.g., the same source file might be shared among _SOURCES variables
+# for different programs/libraries.
+am__define_uniq_tagged_files = \
+ list='$(am__tagged_files)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | $(am__uniquify_input)`
+ETAGS = etags
+CTAGS = ctags
+DIST_SUBDIRS = $(SUBDIRS)
+am__DIST_COMMON = $(srcdir)/Makefile.in
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+am__relativize = \
+ dir0=`pwd`; \
+ sed_first='s,^\([^/]*\)/.*$$,\1,'; \
+ sed_rest='s,^[^/]*/*,,'; \
+ sed_last='s,^.*/\([^/]*\)$$,\1,'; \
+ sed_butlast='s,/*[^/]*$$,,'; \
+ while test -n "$$dir1"; do \
+ first=`echo "$$dir1" | sed -e "$$sed_first"`; \
+ if test "$$first" != "."; then \
+ if test "$$first" = ".."; then \
+ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \
+ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \
+ else \
+ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \
+ if test "$$first2" = "$$first"; then \
+ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \
+ else \
+ dir2="../$$dir2"; \
+ fi; \
+ dir0="$$dir0"/"$$first"; \
+ fi; \
+ fi; \
+ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \
+ done; \
+ reldir="$$dir2"
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CUPSCONFIG = @CUPSCONFIG@
+CXX = @CXX@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CXX_BINARY = @CXX_BINARY@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+JSON_CFLAGS = @JSON_CFLAGS@
+JSON_LIBS = @JSON_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
+LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
+LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
+LIBCURL_LIBS = @LIBCURL_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
+LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
+LIBSSL_LIBS = @LIBSSL_LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
+OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
+OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
+OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
+OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
+OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
+OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
+OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
+OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
+OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
+OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
+OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
+OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
+PROTOBUF_LIBS = @PROTOBUF_LIBS@
+PROTOC = @PROTOC@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
+XENLIGHT_LIBS = @XENLIGHT_LIBS@
+YAJL_CFLAGS = @YAJL_CFLAGS@
+YAJL_LIBS = @YAJL_LIBS@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+SUBDIRS = \
+ engine \
+ $(NULL)
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
+
+all: all-recursive
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu database/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu database/Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+
+# This directory's subdirectories are mostly independent; you can cd
+# into them and run 'make' without going through this Makefile.
+# To change the values of 'make' variables: instead of editing Makefiles,
+# (1) if the variable is set in 'config.status', edit 'config.status'
+# (which will cause the Makefiles to be regenerated when you run 'make');
+# (2) otherwise, pass the desired values on the 'make' command line.
+$(am__recursive_targets):
+ @fail=; \
+ if $(am__make_keepgoing); then \
+ failcom='fail=yes'; \
+ else \
+ failcom='exit 1'; \
+ fi; \
+ dot_seen=no; \
+ target=`echo $@ | sed s/-recursive//`; \
+ case "$@" in \
+ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \
+ *) list='$(SUBDIRS)' ;; \
+ esac; \
+ for subdir in $$list; do \
+ echo "Making $$target in $$subdir"; \
+ if test "$$subdir" = "."; then \
+ dot_seen=yes; \
+ local_target="$$target-am"; \
+ else \
+ local_target="$$target"; \
+ fi; \
+ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \
+ || eval $$failcom; \
+ done; \
+ if test "$$dot_seen" = "no"; then \
+ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \
+ fi; test -z "$$fail"
+
+ID: $(am__tagged_files)
+ $(am__define_uniq_tagged_files); mkid -fID $$unique
+tags: tags-recursive
+TAGS: tags
+
+tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+ set x; \
+ here=`pwd`; \
+ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \
+ include_option=--etags-include; \
+ empty_fix=.; \
+ else \
+ include_option=--include; \
+ empty_fix=; \
+ fi; \
+ list='$(SUBDIRS)'; for subdir in $$list; do \
+ if test "$$subdir" = .; then :; else \
+ test ! -f $$subdir/TAGS || \
+ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \
+ fi; \
+ done; \
+ $(am__define_uniq_tagged_files); \
+ shift; \
+ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
+ test -n "$$unique" || unique=$$empty_fix; \
+ if test $$# -gt 0; then \
+ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+ "$$@" $$unique; \
+ else \
+ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+ $$unique; \
+ fi; \
+ fi
+ctags: ctags-recursive
+
+CTAGS: ctags
+ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+ $(am__define_uniq_tagged_files); \
+ test -z "$(CTAGS_ARGS)$$unique" \
+ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
+ $$unique
+
+GTAGS:
+ here=`$(am__cd) $(top_builddir) && pwd` \
+ && $(am__cd) $(top_srcdir) \
+ && gtags -i $(GTAGS_ARGS) "$$here"
+cscopelist: cscopelist-recursive
+
+cscopelist-am: $(am__tagged_files)
+ list='$(am__tagged_files)'; \
+ case "$(srcdir)" in \
+ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \
+ *) sdir=$(subdir)/$(srcdir) ;; \
+ esac; \
+ for i in $$list; do \
+ if test -f "$$i"; then \
+ echo "$(subdir)/$$i"; \
+ else \
+ echo "$$sdir/$$i"; \
+ fi; \
+ done >> $(top_builddir)/cscope.files
+
+distclean-tags:
+ -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+ @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \
+ if test "$$subdir" = .; then :; else \
+ $(am__make_dryrun) \
+ || test -d "$(distdir)/$$subdir" \
+ || $(MKDIR_P) "$(distdir)/$$subdir" \
+ || exit 1; \
+ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \
+ $(am__relativize); \
+ new_distdir=$$reldir; \
+ dir1=$$subdir; dir2="$(top_distdir)"; \
+ $(am__relativize); \
+ new_top_distdir=$$reldir; \
+ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \
+ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \
+ ($(am__cd) $$subdir && \
+ $(MAKE) $(AM_MAKEFLAGS) \
+ top_distdir="$$new_top_distdir" \
+ distdir="$$new_distdir" \
+ am__remove_distdir=: \
+ am__skip_length_check=: \
+ am__skip_mode_fix=: \
+ distdir) \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-recursive
+all-am: Makefile $(DATA)
+installdirs: installdirs-recursive
+installdirs-am:
+install: install-recursive
+install-exec: install-exec-recursive
+install-data: install-data-recursive
+uninstall: uninstall-recursive
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-recursive
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-recursive
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-recursive
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic distclean-tags
+
+dvi: dvi-recursive
+
+dvi-am:
+
+html: html-recursive
+
+html-am:
+
+info: info-recursive
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-recursive
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-recursive
+
+install-html-am:
+
+install-info: install-info-recursive
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-recursive
+
+install-pdf-am:
+
+install-ps: install-ps-recursive
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-recursive
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-recursive
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-recursive
+
+pdf-am:
+
+ps: ps-recursive
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: $(am__recursive_targets) install-am install-strip
+
+.PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am check \
+ check-am clean clean-generic cscopelist-am ctags ctags-am \
+ distclean distclean-generic distclean-tags distdir dvi dvi-am \
+ html html-am info info-am install install-am install-data \
+ install-data-am install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs installdirs-am maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags tags-am uninstall uninstall-am
+
+.PRECIOUS: Makefile
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/database/README.md b/database/README.md
index de0aa9b5..2fcb69b6 100644
--- a/database/README.md
+++ b/database/README.md
@@ -3,7 +3,7 @@
Although `netdata` does all its calculations using `long double`, it stores all values using
a [custom-made 32-bit number](../libnetdata/storage_number/).
-So, for each dimension of a chart, netdata will need: `4 bytes for the value * the entries
+So, for each dimension of a chart, Netdata will need: `4 bytes for the value * the entries
of its history`. It will not store any other data for each value in the time series database.
Since all its values are stored in a time series with a fixed step, the time to which each value
corresponds can be calculated at run time from the position of the value in the round robin database.
@@ -23,33 +23,35 @@ use the **[Database Engine](engine/)**.
## Memory modes
-Currently netdata supports 6 memory modes:
+Currently Netdata supports 6 memory modes:
-1. `ram`, data are purely in memory. Data are never saved on disk. This mode uses `mmap()` and
- supports [KSM](#ksm).
+1. `ram`, data are purely in memory. Data are never saved on disk. This mode uses `mmap()` and
+ supports [KSM](#ksm).
-2. `save`, (the default) data are only in RAM while netdata runs and are saved to / loaded from
- disk on netdata restart. It also uses `mmap()` and supports [KSM](#ksm).
+2. `save`, (the default) data are only in RAM while Netdata runs and are saved to / loaded from
+ disk on Netdata restart. It also uses `mmap()` and supports [KSM](#ksm).
-3. `map`, data are in memory mapped files. This works like the swap. Keep in mind though, this
- will have a constant write on your disk. When netdata writes data on its memory, the Linux kernel
- marks the related memory pages as dirty and automatically starts updating them on disk.
- Unfortunately we cannot control how frequently this works. The Linux kernel uses exactly the
- same algorithm it uses for its swap memory. Check below for additional information on running a
- dedicated central netdata server. This mode uses `mmap()` but does not support [KSM](#ksm).
+3. `map`, data are in memory mapped files. This works like the swap. Keep in mind though, this
+ will have a constant write on your disk. When Netdata writes data on its memory, the Linux kernel
+ marks the related memory pages as dirty and automatically starts updating them on disk.
+ Unfortunately we cannot control how frequently this works. The Linux kernel uses exactly the
+ same algorithm it uses for its swap memory. Check below for additional information on running a
+ dedicated central Netdata server. This mode uses `mmap()` but does not support [KSM](#ksm).
-4. `none`, without a database (collected metrics can only be streamed to another netdata).
+4. `none`, without a database (collected metrics can only be streamed to another Netdata).
-5. `alloc`, like `ram` but it uses `calloc()` and does not support [KSM](#ksm). This mode is the
- fallback for all others except `none`.
+5. `alloc`, like `ram` but it uses `calloc()` and does not support [KSM](#ksm). This mode is the
+ fallback for all others except `none`.
-6. `dbengine`, data are in database files. The [Database Engine](engine/) works like a traditional
- database. There is some amount of RAM dedicated to data caching and indexing and the rest of
- the data reside compressed on disk. The number of history entries is not fixed in this case,
- but depends on the configured disk space and the effective compression ratio of the data stored.
- For more details see [here](engine/).
+6. `dbengine`, data are in database files. The [Database Engine](engine/) works like a traditional
+ database. There is some amount of RAM dedicated to data caching and indexing and the rest of
+ the data reside compressed on disk. The number of history entries is not fixed in this case,
+ but depends on the configured disk space and the effective compression ratio of the data stored.
+ This is the **only mode** that supports changing the data collection update frequency
+ (`update_every`) **without losing** the previously stored metrics.
+ For more details see [here](engine/).
-You can select the memory mode by editing netdata.conf and setting:
+You can select the memory mode by editing `netdata.conf` and setting:
```
[global]
@@ -60,63 +62,62 @@ You can select the memory mode by editing netdata.conf and setting:
cache directory = /var/cache/netdata
```
-## Running netdata in embedded devices
+## Running Netdata in embedded devices
Embedded devices usually have very limited RAM resources available.
There are 2 settings for you to tweak:
-1. `update every`, which controls the data collection frequency
-2. `history`, which controls the size of the database in RAM
+1. `update every`, which controls the data collection frequency
+2. `history`, which controls the size of the database in RAM
By default `update every = 1` and `history = 3600`. This gives you an hour of data with per
second updates.
If you set `update every = 2` and `history = 1800`, you will still have an hour of data, but
collected once every 2 seconds. This will **cut in half** both CPU and RAM resources consumed
-by netdata. Of course experiment a bit. On very weak devices you might have to use
+by Netdata. Of course experiment a bit. On very weak devices you might have to use
`update every = 5` and `history = 720` (still 1 hour of data, but 1/5 of the CPU and RAM resources).
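For example, a minimal `netdata.conf` for such a device, using the halved-resources numbers above (illustrative):

```
[global]
    update every = 2
    history = 1800
```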
You can also disable [data collection plugins](../collectors) you don't need.
Disabling such plugins will also free both CPU and RAM resources.
-## Running a dedicated central netdata server
+## Running a dedicated central Netdata server
-Netdata allows streaming data between netdata nodes. This allows us to have a central netdata
+Netdata allows streaming data between Netdata nodes. This allows us to have a central Netdata
server that will maintain the entire database for all nodes, and will also run health checks/alarms
for all nodes.
-For this central netdata, memory size can be a problem. Fortunately, netdata supports several
+For this central Netdata, memory size can be a problem. Fortunately, Netdata supports several
memory modes. **One interesting option** for this setup is `memory mode = map`.
### map
-In this mode, the database of netdata is stored in memory mapped files. netdata continues to read
+In this mode, the database of Netdata is stored in memory mapped files. Netdata continues to read
and write the database in memory, but the kernel automatically loads and saves memory pages from/to
disk.
**We suggest _not_ to use this mode on nodes that run other applications.** There will always be
dirty memory to be synced and this syncing process may influence the way other applications work.
-This mode however is useful when we need a central netdata server that would normally need huge
+This mode however is useful when we need a central Netdata server that would normally need huge
amounts of memory. Using memory mode `map` we can overcome all memory restrictions.
There are a few kernel options that provide finer control on the way this syncing works. But before
-explaining them, a brief introduction of how netdata database works is needed.
+explaining them, a brief introduction to how the Netdata database works is needed.
-For each chart, netdata maps the following files:
+For each chart, Netdata maps the following files:
-1. `chart/main.db`, this is the file that maintains chart information. Every time data are collected
- for a chart, this is updated.
-
-2. `chart/dimension_name.db`, this is the file for each dimension. At its beginning there is a
- header, followed by the round robin database where metrics are stored.
+1. `chart/main.db`, this is the file that maintains chart information. Every time data are collected
+ for a chart, this is updated.
+2. `chart/dimension_name.db`, this is the file for each dimension. At its beginning there is a
+ header, followed by the round robin database where metrics are stored.
-So, every time netdata collects data, the following pages will become dirty:
+So, every time Netdata collects data, the following pages will become dirty:
-1. the chart file
-2. the header part of all dimension files
-3. if the collected metrics are stored far enough in the dimension file, another page will
- become dirty, for each dimension
+1. the chart file
+2. the header part of all dimension files
+3. if the collected metrics are stored far enough in the dimension file, another page will
+ become dirty, for each dimension
Each page in Linux is 4KB. So, with 200 charts and 1000 dimensions, there will be 1200 to 2200
dirty 4KB pages every second. Of course 1200 of them will always be dirty (the chart header and
@@ -143,12 +144,12 @@ get an abnormal shutdown.
There are 2 more options to tweak:
-1. `dirty_background_ratio`, by default `10`.
-2. `dirty_ratio`, by default `20`.
+1. `dirty_background_ratio`, by default `10`.
+2. `dirty_ratio`, by default `20`.
These control the amount of memory that should be dirty for disk syncing to be triggered.
-On dedicated netdata servers, you can use: `80` and `90` respectively, so that all RAM is given
-to netdata.
+On dedicated Netdata servers, you can use: `80` and `90` respectively, so that all RAM is given
+to Netdata.
With these settings, you can expect a little `iowait` spike once every 10 minutes and in case
of system crash, data on disk will be up to 10 minutes old.
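One way to apply these values at runtime on a dedicated Netdata server (illustrative; persist them in `/etc/sysctl.conf` if they work for you):

```
sysctl vm.dirty_background_ratio=80
sysctl vm.dirty_ratio=90
```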
@@ -169,7 +170,7 @@ for this setup** is `memory mode = dbengine`.
### dbengine
-In this mode, the database of netdata is stored in database files. The [Database Engine](engine/)
+In this mode, the database of Netdata is stored in database files. The [Database Engine](engine/)
works like a traditional database. There is some amount of RAM dedicated to data caching and
indexing and the rest of the data reside compressed on disk. The number of history entries is not
fixed in this case, but depends on the configured disk space and the effective compression ratio
@@ -187,10 +188,10 @@ Netdata offers all its round robin database to kernel for deduplication
In the past KSM has been criticized for consuming a lot of CPU resources.
Although this is true when KSM is used for deduplicating certain applications, it is not true with
-netdata, since the netdata memory is written very infrequently (if you have 24 hours of metrics in
+Netdata, since the Netdata memory is written very infrequently (if you have 24 hours of metrics in
Netdata, each byte of the in-memory database will be updated just once per day).
-KSM is a solution that will provide 60+% memory savings to netdata.
+KSM is a solution that will provide 60+% memory savings to Netdata.
### Enable KSM in kernel
@@ -206,9 +207,9 @@ So, if you build a kernel with `CONFIG_KSM=y` you will just get a few files in `
The files that `CONFIG_KSM=y` offers include:
-- `/sys/kernel/mm/ksm/run` by default `0`. You have to set this to `1` for the kernel to spawn `ksmd`.
-- `/sys/kernel/mm/ksm/sleep_millisecs`, by default `20`. The frequency ksmd should evaluate memory for deduplication.
-- `/sys/kernel/mm/ksm/pages_to_scan`, by default `100`. The amount of pages ksmd will evaluate on each run.
+- `/sys/kernel/mm/ksm/run` by default `0`. You have to set this to `1` for the kernel to spawn `ksmd`.
+- `/sys/kernel/mm/ksm/sleep_millisecs`, by default `20`. The frequency ksmd should evaluate memory for deduplication.
+- `/sys/kernel/mm/ksm/pages_to_scan`, by default `100`. The amount of pages ksmd will evaluate on each run.
So, by default `ksmd` is just disabled. It will not harm performance, and the user/admin can control the CPU resources they are willing to let `ksmd` use.
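To spawn `ksmd` as described above, run (as root):

```
echo 1 > /sys/kernel/mm/ksm/run
```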
@@ -231,4 +232,4 @@ Netdata will create charts for kernel memory de-duplication performance, like th
![image](https://cloud.githubusercontent.com/assets/2662304/11998786/eb23ae54-aab6-11e5-94d4-e848e8a5c56a.png)
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdatabase%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdatabase%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/database/engine/Makefile.in b/database/engine/Makefile.in
new file mode 100644
index 00000000..862693d9
--- /dev/null
+++ b/database/engine/Makefile.in
@@ -0,0 +1,514 @@
+# Makefile.in generated by automake 1.15.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2017 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+VPATH = @srcdir@
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = database/engine
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(dist_noinst_DATA) \
+ $(am__DIST_COMMON)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_noinst_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+am__DIST_COMMON = $(srcdir)/Makefile.in
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CUPSCONFIG = @CUPSCONFIG@
+CXX = @CXX@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CXX_BINARY = @CXX_BINARY@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+JSON_CFLAGS = @JSON_CFLAGS@
+JSON_LIBS = @JSON_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
+LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
+LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
+LIBCURL_LIBS = @LIBCURL_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
+LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
+LIBSSL_LIBS = @LIBSSL_LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
+OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
+OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
+OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
+OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
+OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
+OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
+OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
+OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
+OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
+OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
+OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
+OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
+PROTOBUF_LIBS = @PROTOBUF_LIBS@
+PROTOC = @PROTOC@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
+XENLIGHT_LIBS = @XENLIGHT_LIBS@
+YAJL_CFLAGS = @YAJL_CFLAGS@
+YAJL_LIBS = @YAJL_LIBS@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
+
+all: all-am
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu database/engine/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu database/engine/Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(DATA)
+installdirs:
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags-am uninstall uninstall-am
+
+.PRECIOUS: Makefile
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/database/engine/README.md b/database/engine/README.md
index adc69ffd..7791a549 100644
--- a/database/engine/README.md
+++ b/database/engine/README.md
@@ -4,6 +4,8 @@ The Database Engine works like a traditional
database. There is some amount of RAM dedicated to data caching and indexing and the rest of
the data reside compressed on disk. The number of history entries is not fixed in this case,
but depends on the configured disk space and the effective compression ratio of the data stored.
+This is the **only mode** that supports changing the data collection update frequency
+(`update_every`) **without losing** the previously stored metrics.
## Files
@@ -18,18 +20,18 @@ journalfile-1-0000000002.njf
datafile-1-0000000003.ndf
journalfile-1-0000000003.njf
...
-```
+```
They are located under their host's cache directory in the directory `./dbengine`
(e.g. for localhost the default location is `/var/cache/netdata/dbengine/*`). The higher
numbered filenames contain more recent metric data. The user can safely delete some pairs
-of files when netdata is stopped to manually free up some space.
+of files when Netdata is stopped to manually free up some space.
-*Users should* **back up** *their `./dbengine` folders if they consider this data to be important.*
+_Users should_ **back up** _their `./dbengine` folders if they consider this data to be important._
## Configuration
-There is one DB engine instance per netdata host/node. That is, there is one `./dbengine` folder
+There is one DB engine instance per Netdata host/node. That is, there is one `./dbengine` folder
per node, and all charts of `dbengine` memory mode in such a host share the same storage space
and DB engine instance memory state. You can select the memory mode for localhost by editing
netdata.conf and setting:
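The referenced `netdata.conf` snippet falls outside this hunk's context; for reference, a minimal sketch of the setting being described (the exact section layout may vary between versions):

```
[global]
    memory mode = dbengine
```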
@@ -59,10 +61,10 @@ quota. Both numbers are in **MiB**. All DB engine instances will allocate the co
separately.
The `page cache size` option determines the amount of RAM in **MiB** that is dedicated to caching
-netdata metric values themselves.
+Netdata metric values themselves.
The `dbengine disk space` option determines the amount of disk space in **MiB** that is dedicated
-to storing netdata metric values and all related metadata describing them.
+to storing Netdata metric values and all related metadata describing them.
## Operation
@@ -72,7 +74,7 @@ the **Page Cache**.
When those pages fill up they are slowly compressed and flushed to disk.
It can take `4096 / 4 = 1024 seconds = 17 minutes`, for a chart dimension that is being collected
-every 1 second, to fill a page. Pages can be cut short when we stop netdata or the DB engine
+every 1 second, to fill a page. Pages can be cut short when we stop Netdata or the DB engine
instance so as to not lose the data. When we query the DB engine for data we trigger disk read
I/O requests that fill the Page Cache with the requested pages and potentially evict cold
(not recently used) pages.
@@ -91,17 +93,17 @@ applications.
Using memory mode `dbengine` we can overcome most memory restrictions and store a dataset that
is much larger than the available memory.
-There are explicit memory requirements **per** DB engine **instance**, meaning **per** netdata
+There are explicit memory requirements **per** DB engine **instance**, meaning **per** Netdata
**node** (e.g. localhost and streaming recipient nodes):
-- `page cache size` must be at least `#dimensions-being-collected x 4096 x 2` bytes.
+- `page cache size` must be at least `#dimensions-being-collected x 4096 x 2` bytes.
-- an additional `#pages-on-disk x 4096 x 0.03` bytes of RAM are allocated for metadata.
+- an additional `#pages-on-disk x 4096 x 0.03` bytes of RAM are allocated for metadata.
- - roughly speaking this is 3% of the uncompressed disk space taken by the DB files.
+ - roughly speaking this is 3% of the uncompressed disk space taken by the DB files.
- - for very highly compressible data (compression ratio > 90%) this RAM overhead
- is comparable to the disk space footprint.
+ - for very highly compressible data (compression ratio > 90%) this RAM overhead
+ is comparable to the disk space footprint.
An important observation is that RAM usage depends on both the `page cache size` and the
`dbengine disk space` options.
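To make these requirements concrete, here is a small illustrative calculation that plugs made-up numbers into the formulas above (the dimension and page counts are assumptions, not measurements):

```
/* Illustrative sketch only: applies the RAM formulas quoted above
 * to hypothetical numbers. */
#include <stdio.h>

int main(void) {
    unsigned long long dimensions = 2000;        /* assumed dimensions being collected */
    unsigned long long pages_on_disk = 1000000;  /* assumed pages stored on disk */

    unsigned long long page_cache_min = dimensions * 4096 * 2;  /* bytes */
    double metadata_ram = (double)pages_on_disk * 4096 * 0.03;  /* bytes */

    printf("page cache size >= %llu MiB\n", page_cache_min / (1024 * 1024));
    printf("metadata overhead ~= %.0f MiB\n", metadata_ram / (1024.0 * 1024.0));
    return 0;
}
```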
@@ -115,11 +117,11 @@ file descriptors available per `dbengine` instance.
Netdata allocates 25% of the available file descriptors to its Database Engine instances. This means that only 25%
of the file descriptors that are available to the Netdata service are accessible by dbengine instances.
You should take that into account when configuring your service
-or system-wide file descriptor limits. You can roughly estimate that the netdata service needs 2048 file
+or system-wide file descriptor limits. You can roughly estimate that the Netdata service needs 2048 file
descriptors for every 10 streaming slave hosts when streaming is configured to use `memory mode = dbengine`.
-If for example one wants to allocate 65536 file descriptors to the netdata service on a systemd system
-one needs to override the netdata service by running `sudo systemctl edit netdata` and creating a
+If for example one wants to allocate 65536 file descriptors to the Netdata service on a systemd system
+one needs to override the Netdata service by running `sudo systemctl edit netdata` and creating a
file with contents:
```
@@ -128,18 +130,24 @@ LimitNOFILE=65536
```
For other types of services one can add the line:
+
```
ulimit -n 65536
```
+
+at the beginning of the service file. Alternatively, you can change the system-wide limits of the kernel by editing `/etc/sysctl.conf`. On Linux that would be:
+
```
fs.file-max = 65536
```
+
+On FreeBSD and OS X you would change the lines like this:
+
```
kern.maxfilesperproc=65536
kern.maxfiles=65536
```
+
You can apply the settings by running `sysctl -p` or by rebooting.
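As a back-of-the-envelope aid, the sketch below applies the estimates above (2048 descriptors per 10 streaming slaves, of which dbengine instances may use 25%); the slave count is a made-up number:

```
#include <stdio.h>

int main(void) {
    unsigned slaves = 50;  /* assumed number of streaming slave hosts */
    /* 2048 file descriptors for every (started) group of 10 slaves */
    unsigned service_fds = (slaves + 9) / 10 * 2048;

    /* Netdata hands 25% of its available descriptors to dbengine instances */
    printf("LimitNOFILE >= %u (dbengine share: %u)\n", service_fds, service_fds / 4);
    return 0;
}
```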
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdatabase%2Fengine%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdatabase%2Fengine%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/database/engine/journalfile.c b/database/engine/journalfile.c
index d6e4f317..fac680aa 100644
--- a/database/engine/journalfile.c
+++ b/database/engine/journalfile.c
@@ -269,7 +269,6 @@ static void restore_extent_metadata(struct rrdengine_instance *ctx, struct rrden
extent = mallocz(sizeof(*extent) + count * sizeof(extent->pages[0]));
extent->offset = jf_metric_data->extent_offset;
extent->size = jf_metric_data->extent_size;
- extent->number_of_pages = count;
extent->datafile = journalfile->datafile;
extent->next = NULL;
@@ -282,7 +281,6 @@ static void restore_extent_metadata(struct rrdengine_instance *ctx, struct rrden
error("Unknown page type encountered.");
continue;
}
- ++valid_pages;
temp_id = (uuid_t *)jf_metric_data->descr[i].uuid;
uv_rwlock_rdlock(&pg_cache->metrics_index.lock);
@@ -308,11 +306,16 @@ static void restore_extent_metadata(struct rrdengine_instance *ctx, struct rrden
descr->end_time = jf_metric_data->descr[i].end_time;
descr->id = &page_index->id;
descr->extent = extent;
- extent->pages[i] = descr;
+ extent->pages[valid_pages++] = descr;
pg_cache_insert(ctx, page_index, descr);
}
+
+ extent->number_of_pages = valid_pages;
+
if (likely(valid_pages))
df_extent_insert(extent);
+ else
+ freez(extent);
}
/*
diff --git a/database/engine/pagecache.c b/database/engine/pagecache.c
index 124f2448..1bd4c943 100644
--- a/database/engine/pagecache.c
+++ b/database/engine/pagecache.c
@@ -419,6 +419,35 @@ static inline int is_point_in_time_in_page(struct rrdeng_page_descr *descr, usec
return (point_in_time >= descr->start_time && point_in_time <= descr->end_time);
}
+/* The caller must hold the page index lock */
+static inline struct rrdeng_page_descr *
+ find_first_page_in_time_range(struct pg_cache_page_index *page_index, usec_t start_time, usec_t end_time)
+{
+ struct rrdeng_page_descr *descr = NULL;
+ Pvoid_t *PValue;
+ Word_t Index;
+
+ Index = (Word_t)(start_time / USEC_PER_SEC);
+ PValue = JudyLLast(page_index->JudyL_array, &Index, PJE0);
+ if (likely(NULL != PValue)) {
+ descr = *PValue;
+ if (is_page_in_time_range(descr, start_time, end_time)) {
+ return descr;
+ }
+ }
+
+ Index = (Word_t)(start_time / USEC_PER_SEC);
+ PValue = JudyLFirst(page_index->JudyL_array, &Index, PJE0);
+ if (likely(NULL != PValue)) {
+ descr = *PValue;
+ if (is_page_in_time_range(descr, start_time, end_time)) {
+ return descr;
+ }
+ }
+
+ return NULL;
+}
+
/* Update metric oldest and latest timestamps efficiently when adding new values */
void pg_cache_add_new_metric_time(struct pg_cache_page_index *page_index, struct rrdeng_page_descr *descr)
{
@@ -510,70 +539,144 @@ void pg_cache_insert(struct rrdengine_instance *ctx, struct pg_cache_page_index
uv_rwlock_wrunlock(&pg_cache->pg_cache_rwlock);
}
-/*
- * Searches for a page and triggers disk I/O if necessary and possible.
+usec_t pg_cache_oldest_time_in_range(struct rrdengine_instance *ctx, uuid_t *id, usec_t start_time, usec_t end_time)
+{
+ struct page_cache *pg_cache = &ctx->pg_cache;
+ struct rrdeng_page_descr *descr = NULL;
+ Pvoid_t *PValue;
+ struct pg_cache_page_index *page_index;
+
+ uv_rwlock_rdlock(&pg_cache->metrics_index.lock);
+ PValue = JudyHSGet(pg_cache->metrics_index.JudyHS_array, id, sizeof(uuid_t));
+ if (likely(NULL != PValue)) {
+ page_index = *PValue;
+ }
+ uv_rwlock_rdunlock(&pg_cache->metrics_index.lock);
+ if (NULL == PValue) {
+ return INVALID_TIME;
+ }
+
+ uv_rwlock_rdlock(&page_index->lock);
+ descr = find_first_page_in_time_range(page_index, start_time, end_time);
+ if (NULL == descr) {
+ uv_rwlock_rdunlock(&page_index->lock);
+ return INVALID_TIME;
+ }
+ uv_rwlock_rdunlock(&page_index->lock);
+ return descr->start_time;
+}
+
+/**
+ * Return page information for the first page before point_in_time that satisfies the filter.
+ * @param ctx DB context
+ * @param page_index page index of a metric
+ * @param point_in_time the pages that are searched must be older than this timestamp
+ * @param filter decides if the page satisfies the caller's criteria
+ * @param page_info the result of the search is set in this pointer
+ */
+void pg_cache_get_filtered_info_prev(struct rrdengine_instance *ctx, struct pg_cache_page_index *page_index,
+ usec_t point_in_time, pg_cache_page_info_filter_t *filter,
+ struct rrdeng_page_info *page_info)
+{
+ struct page_cache *pg_cache = &ctx->pg_cache;
+ struct rrdeng_page_descr *descr = NULL;
+ Pvoid_t *PValue;
+ Word_t Index;
+
+ (void)pg_cache;
+ assert(NULL != page_index);
+
+ Index = (Word_t)(point_in_time / USEC_PER_SEC);
+ uv_rwlock_rdlock(&page_index->lock);
+ do {
+ PValue = JudyLPrev(page_index->JudyL_array, &Index, PJE0);
+ descr = unlikely(NULL == PValue) ? NULL : *PValue;
+ } while (descr != NULL && !filter(descr));
+ if (unlikely(NULL == descr)) {
+ page_info->page_length = 0;
+ page_info->start_time = INVALID_TIME;
+ page_info->end_time = INVALID_TIME;
+ } else {
+ page_info->page_length = descr->page_length;
+ page_info->start_time = descr->start_time;
+ page_info->end_time = descr->end_time;
+ }
+ uv_rwlock_rdunlock(&page_index->lock);
+}
+/**
+ * Searches for pages in a time range and triggers disk I/O if necessary and possible.
* Does not get a reference.
- * Returns page index pointer for given metric UUID.
+ * @param ctx DB context
+ * @param id UUID
+ * @param start_time inclusive starting time in usec
+ * @param end_time inclusive ending time in usec
+ * @param page_info_arrayp It allocates (*page_info_arrayp) and populates it with information of pages that overlap
+ * with the time range [start_time,end_time]. The caller must free (*page_info_arrayp) with freez().
+ * If page_info_arrayp is set to NULL nothing was allocated.
+ * @param ret_page_indexp Sets the page index pointer (*ret_page_indexp) for the given UUID.
+ * @return the number of pages that overlap with the time range [start_time,end_time].
*/
-struct pg_cache_page_index *
- pg_cache_preload(struct rrdengine_instance *ctx, uuid_t *id, usec_t start_time, usec_t end_time)
+unsigned pg_cache_preload(struct rrdengine_instance *ctx, uuid_t *id, usec_t start_time, usec_t end_time,
+ struct rrdeng_page_info **page_info_arrayp, struct pg_cache_page_index **ret_page_indexp)
{
struct page_cache *pg_cache = &ctx->pg_cache;
struct rrdeng_page_descr *descr = NULL, *preload_array[PAGE_CACHE_MAX_PRELOAD_PAGES];
struct page_cache_descr *pg_cache_descr = NULL;
- int i, j, k, count, found;
+ unsigned i, j, k, preload_count, count, page_info_array_max_size;
unsigned long flags;
Pvoid_t *PValue;
struct pg_cache_page_index *page_index;
Word_t Index;
uint8_t failed_to_reserve;
+ assert(NULL != ret_page_indexp);
+
uv_rwlock_rdlock(&pg_cache->metrics_index.lock);
PValue = JudyHSGet(pg_cache->metrics_index.JudyHS_array, id, sizeof(uuid_t));
if (likely(NULL != PValue)) {
- page_index = *PValue;
+ *ret_page_indexp = page_index = *PValue;
}
uv_rwlock_rdunlock(&pg_cache->metrics_index.lock);
if (NULL == PValue) {
debug(D_RRDENGINE, "%s: No page was found to attempt preload.", __func__);
- return NULL;
+ *ret_page_indexp = NULL;
+ return 0;
}
uv_rwlock_rdlock(&page_index->lock);
- /* Find first page in range */
- found = 0;
- Index = (Word_t)(start_time / USEC_PER_SEC);
- PValue = JudyLLast(page_index->JudyL_array, &Index, PJE0);
- if (likely(NULL != PValue)) {
- descr = *PValue;
- if (is_page_in_time_range(descr, start_time, end_time)) {
- found = 1;
- }
- }
- if (!found) {
- Index = (Word_t)(start_time / USEC_PER_SEC);
- PValue = JudyLFirst(page_index->JudyL_array, &Index, PJE0);
- if (likely(NULL != PValue)) {
- descr = *PValue;
- if (is_page_in_time_range(descr, start_time, end_time)) {
- found = 1;
- }
- }
- }
- if (!found) {
+ descr = find_first_page_in_time_range(page_index, start_time, end_time);
+ if (NULL == descr) {
uv_rwlock_rdunlock(&page_index->lock);
debug(D_RRDENGINE, "%s: No page was found to attempt preload.", __func__);
- return page_index;
+ *ret_page_indexp = NULL;
+ return 0;
+ } else {
+ Index = (Word_t)(descr->start_time / USEC_PER_SEC);
+ }
+ if (page_info_arrayp) {
+ page_info_array_max_size = PAGE_CACHE_MAX_PRELOAD_PAGES * sizeof(struct rrdeng_page_info);
+ *page_info_arrayp = mallocz(page_info_array_max_size);
}
- for (count = 0 ;
- descr != NULL && is_page_in_time_range(descr, start_time, end_time);
+ for (count = 0, preload_count = 0 ;
+ descr != NULL && is_page_in_time_range(descr, start_time, end_time) ;
PValue = JudyLNext(page_index->JudyL_array, &Index, PJE0),
descr = unlikely(NULL == PValue) ? NULL : *PValue) {
/* Iterate all pages in range */
if (unlikely(0 == descr->page_length))
continue;
+ if (page_info_arrayp) {
+ if (unlikely(count >= page_info_array_max_size / sizeof(struct rrdeng_page_info))) {
+ page_info_array_max_size += PAGE_CACHE_MAX_PRELOAD_PAGES * sizeof(struct rrdeng_page_info);
+ *page_info_arrayp = reallocz(*page_info_arrayp, page_info_array_max_size);
+ }
+ (*page_info_arrayp)[count].start_time = descr->start_time;
+ (*page_info_arrayp)[count].end_time = descr->end_time;
+ (*page_info_arrayp)[count].page_length = descr->page_length;
+ }
+ ++count;
+
rrdeng_page_descr_mutex_lock(ctx, descr);
pg_cache_descr = descr->pg_cache_descr;
flags = pg_cache_descr->flags;
@@ -586,8 +689,8 @@ struct pg_cache_page_index *
}
}
if (!(flags & RRD_PAGE_POPULATED) && pg_cache_try_get_unsafe(descr, 1)) {
- preload_array[count++] = descr;
- if (PAGE_CACHE_MAX_PRELOAD_PAGES == count) {
+ preload_array[preload_count++] = descr;
+ if (PAGE_CACHE_MAX_PRELOAD_PAGES == preload_count) {
rrdeng_page_descr_mutex_unlock(ctx, descr);
break;
}
@@ -598,7 +701,7 @@ struct pg_cache_page_index *
uv_rwlock_rdunlock(&page_index->lock);
failed_to_reserve = 0;
- for (i = 0 ; i < count && !failed_to_reserve ; ++i) {
+ for (i = 0 ; i < preload_count && !failed_to_reserve ; ++i) {
struct rrdeng_cmd cmd;
struct rrdeng_page_descr *next;
@@ -614,7 +717,7 @@ struct pg_cache_page_index *
cmd.read_extent.page_cache_descr[0] = descr;
/* don't use this page again */
preload_array[i] = NULL;
- for (j = 0, k = 1 ; j < count ; ++j) {
+ for (j = 0, k = 1 ; j < preload_count ; ++j) {
next = preload_array[j];
if (NULL == next) {
continue;
@@ -635,7 +738,7 @@ struct pg_cache_page_index *
}
if (failed_to_reserve) {
debug(D_RRDENGINE, "%s: Failed to reserve enough memory, canceling I/O.", __func__);
- for (i = 0 ; i < count ; ++i) {
+ for (i = 0 ; i < preload_count ; ++i) {
descr = preload_array[i];
if (NULL == descr) {
continue;
@@ -643,11 +746,15 @@ struct pg_cache_page_index *
pg_cache_put(ctx, descr);
}
}
- if (!count) {
+ if (!preload_count) {
/* no such page */
debug(D_RRDENGINE, "%s: No page was eligible to attempt preload.", __func__);
}
- return page_index;
+ if (unlikely(0 == count && page_info_arrayp)) {
+ freez(*page_info_arrayp);
+ *page_info_arrayp = NULL;
+ }
+ return count;
}
/*
@@ -757,6 +864,105 @@ struct rrdeng_page_descr *
return descr;
}
+/*
+ * Searches for the first page between start_time and end_time and gets a reference.
+ * start_time and end_time are inclusive.
+ * If index is NULL, look up the page index by UUID (id).
+ */
+struct rrdeng_page_descr *
+pg_cache_lookup_next(struct rrdengine_instance *ctx, struct pg_cache_page_index *index, uuid_t *id,
+ usec_t start_time, usec_t end_time)
+{
+ struct page_cache *pg_cache = &ctx->pg_cache;
+ struct rrdeng_page_descr *descr = NULL;
+ struct page_cache_descr *pg_cache_descr = NULL;
+ unsigned long flags;
+ Pvoid_t *PValue;
+ struct pg_cache_page_index *page_index;
+ uint8_t page_not_in_cache;
+
+ if (unlikely(NULL == index)) {
+ uv_rwlock_rdlock(&pg_cache->metrics_index.lock);
+ PValue = JudyHSGet(pg_cache->metrics_index.JudyHS_array, id, sizeof(uuid_t));
+ if (likely(NULL != PValue)) {
+ page_index = *PValue;
+ }
+ uv_rwlock_rdunlock(&pg_cache->metrics_index.lock);
+ if (NULL == PValue) {
+ return NULL;
+ }
+ } else {
+ page_index = index;
+ }
+ pg_cache_reserve_pages(ctx, 1);
+
+ page_not_in_cache = 0;
+ uv_rwlock_rdlock(&page_index->lock);
+ while (1) {
+ descr = find_first_page_in_time_range(page_index, start_time, end_time);
+ if (NULL == descr || 0 == descr->page_length) {
+ /* non-empty page not found */
+ uv_rwlock_rdunlock(&page_index->lock);
+
+ pg_cache_release_pages(ctx, 1);
+ return NULL;
+ }
+ rrdeng_page_descr_mutex_lock(ctx, descr);
+ pg_cache_descr = descr->pg_cache_descr;
+ flags = pg_cache_descr->flags;
+ if ((flags & RRD_PAGE_POPULATED) && pg_cache_try_get_unsafe(descr, 0)) {
+ /* success */
+ rrdeng_page_descr_mutex_unlock(ctx, descr);
+ debug(D_RRDENGINE, "%s: Page was found in memory.", __func__);
+ break;
+ }
+ if (!(flags & RRD_PAGE_POPULATED) && pg_cache_try_get_unsafe(descr, 1)) {
+ struct rrdeng_cmd cmd;
+
+ uv_rwlock_rdunlock(&page_index->lock);
+
+ cmd.opcode = RRDENG_READ_PAGE;
+ cmd.read_page.page_cache_descr = descr;
+ rrdeng_enq_cmd(&ctx->worker_config, &cmd);
+
+ debug(D_RRDENGINE, "%s: Waiting for page to be asynchronously read from disk:", __func__);
+ if(unlikely(debug_flags & D_RRDENGINE))
+ print_page_cache_descr(descr);
+ while (!(pg_cache_descr->flags & RRD_PAGE_POPULATED)) {
+ pg_cache_wait_event_unsafe(descr);
+ }
+ /* success */
+ /* Downgrade exclusive reference to allow other readers */
+ pg_cache_descr->flags &= ~RRD_PAGE_LOCKED;
+ pg_cache_wake_up_waiters_unsafe(descr);
+ rrdeng_page_descr_mutex_unlock(ctx, descr);
+ rrd_stat_atomic_add(&ctx->stats.pg_cache_misses, 1);
+ return descr;
+ }
+ uv_rwlock_rdunlock(&page_index->lock);
+ debug(D_RRDENGINE, "%s: Waiting for page to be unlocked:", __func__);
+ if(unlikely(debug_flags & D_RRDENGINE))
+ print_page_cache_descr(descr);
+ if (!(flags & RRD_PAGE_POPULATED))
+ page_not_in_cache = 1;
+ pg_cache_wait_event_unsafe(descr);
+ rrdeng_page_descr_mutex_unlock(ctx, descr);
+
+ /* reset scan to find again */
+ uv_rwlock_rdlock(&page_index->lock);
+ }
+ uv_rwlock_rdunlock(&page_index->lock);
+
+ if (!(flags & RRD_PAGE_DIRTY))
+ pg_cache_replaceQ_set_hot(ctx, descr);
+ pg_cache_release_pages(ctx, 1);
+ if (page_not_in_cache)
+ rrd_stat_atomic_add(&ctx->stats.pg_cache_misses, 1);
+ else
+ rrd_stat_atomic_add(&ctx->stats.pg_cache_hits, 1);
+ return descr;
+}
+
struct pg_cache_page_index *create_page_index(uuid_t *id)
{
struct pg_cache_page_index *page_index;
diff --git a/database/engine/pagecache.h b/database/engine/pagecache.h
index b5670f82..d464211e 100644
--- a/database/engine/pagecache.h
+++ b/database/engine/pagecache.h
@@ -48,9 +48,6 @@ struct page_cache_descr {
* number of descriptor users | DESTROY | LOCKED | ALLOCATED |
*/
struct rrdeng_page_descr {
- uint32_t page_length;
- usec_t start_time;
- usec_t end_time;
uuid_t *id; /* never changes */
struct extent_info *extent;
@@ -59,8 +56,25 @@ struct rrdeng_page_descr {
/* Compare-And-Swap target for page cache descriptor allocation algorithm */
volatile unsigned long pg_cache_descr_state;
+
+ /* page information */
+ usec_t start_time;
+ usec_t end_time;
+ uint32_t page_length;
};
+#define PAGE_INFO_SCRATCH_SZ (8)
+struct rrdeng_page_info {
+ uint8_t scratch[PAGE_INFO_SCRATCH_SZ]; /* scratch area to be used by page-cache users */
+
+ usec_t start_time;
+ usec_t end_time;
+ uint32_t page_length;
+};
+
+/* returns 1 for success, 0 for failure */
+typedef int pg_cache_page_info_filter_t(struct rrdeng_page_descr *);
+
#define PAGE_CACHE_MAX_PRELOAD_PAGES (256)
/* maps time ranges to pages */
@@ -149,11 +163,20 @@ extern void pg_cache_put(struct rrdengine_instance *ctx, struct rrdeng_page_desc
extern void pg_cache_insert(struct rrdengine_instance *ctx, struct pg_cache_page_index *index,
struct rrdeng_page_descr *descr);
extern void pg_cache_punch_hole(struct rrdengine_instance *ctx, struct rrdeng_page_descr *descr, uint8_t remove_dirty);
-extern struct pg_cache_page_index *
- pg_cache_preload(struct rrdengine_instance *ctx, uuid_t *id, usec_t start_time, usec_t end_time);
+extern usec_t pg_cache_oldest_time_in_range(struct rrdengine_instance *ctx, uuid_t *id,
+ usec_t start_time, usec_t end_time);
+extern void pg_cache_get_filtered_info_prev(struct rrdengine_instance *ctx, struct pg_cache_page_index *page_index,
+ usec_t point_in_time, pg_cache_page_info_filter_t *filter,
+ struct rrdeng_page_info *page_info);
+extern unsigned
+ pg_cache_preload(struct rrdengine_instance *ctx, uuid_t *id, usec_t start_time, usec_t end_time,
+ struct rrdeng_page_info **page_info_arrayp, struct pg_cache_page_index **ret_page_indexp);
extern struct rrdeng_page_descr *
pg_cache_lookup(struct rrdengine_instance *ctx, struct pg_cache_page_index *index, uuid_t *id,
usec_t point_in_time);
+extern struct rrdeng_page_descr *
+ pg_cache_lookup_next(struct rrdengine_instance *ctx, struct pg_cache_page_index *index, uuid_t *id,
+ usec_t start_time, usec_t end_time);
extern struct pg_cache_page_index *create_page_index(uuid_t *id);
extern void init_page_cache(struct rrdengine_instance *ctx);
extern void free_page_cache(struct rrdengine_instance *ctx);
diff --git a/database/engine/rrdengine.c b/database/engine/rrdengine.c
index 221216bb..39d14857 100644
--- a/database/engine/rrdengine.c
+++ b/database/engine/rrdengine.c
@@ -24,6 +24,9 @@ void sanity_check(void)
/* page count must fit in 8 bits */
BUILD_BUG_ON(MAX_PAGES_PER_EXTENT > 255);
+
+ /* page info scratch space must be able to hold 2 32-bit integers */
+ BUILD_BUG_ON(sizeof(((struct rrdeng_page_info *)0)->scratch) < 2 * sizeof(uint32_t));
}
void read_extent_cb(uv_fs_t* req)
diff --git a/database/engine/rrdengineapi.c b/database/engine/rrdengineapi.c
index a87ce6d6..bf373f31 100644
--- a/database/engine/rrdengineapi.c
+++ b/database/engine/rrdengineapi.c
@@ -218,6 +218,208 @@ void rrdeng_store_metric_finalize(RRDDIM *rd)
}
}
+/* Returns 1 if the data collection interval is well defined, 0 otherwise */
+static int metrics_with_known_interval(struct rrdeng_page_descr *descr)
+{
+ unsigned page_entries;
+
+ if (unlikely(INVALID_TIME == descr->start_time || INVALID_TIME == descr->end_time))
+ return 0;
+ page_entries = descr->page_length / sizeof(storage_number);
+ if (likely(page_entries > 1)) {
+ return 1;
+ }
+ return 0;
+}
+
+static inline uint32_t *pginfo_to_dt(struct rrdeng_page_info *page_info)
+{
+ return (uint32_t *)&page_info->scratch[0];
+}
+
+static inline uint32_t *pginfo_to_points(struct rrdeng_page_info *page_info)
+{
+ return (uint32_t *)&page_info->scratch[sizeof(uint32_t)];
+}
+
+/**
+ * Calculates the regions of different data collection intervals in a netdata chart in the time range
+ * [start_time,end_time]. This call takes the netdata chart read lock.
+ * @param st the netdata chart whose data collection interval boundaries are calculated.
+ * @param start_time inclusive starting time in usec
+ * @param end_time inclusive ending time in usec
+ * @param region_info_arrayp It allocates (*region_info_arrayp) and populates it with information of regions of a
+ * reference dimension that have different data collection intervals and overlap with the time range
+ * [start_time,end_time]. The caller must free (*region_info_arrayp) with freez(). If region_info_arrayp is set
+ * to NULL nothing was allocated.
+ * @param max_intervalp is dereferenced and set to the largest data collection interval of all regions.
+ * @return number of regions with different data collection intervals.
+ */
+unsigned rrdeng_variable_step_boundaries(RRDSET *st, time_t start_time, time_t end_time,
+ struct rrdeng_region_info **region_info_arrayp, unsigned *max_intervalp)
+{
+ struct pg_cache_page_index *page_index;
+ struct rrdengine_instance *ctx;
+ unsigned pages_nr;
+ RRDDIM *rd_iter, *rd;
+ struct rrdeng_page_info *page_info_array, *curr, *prev, *old_prev;
+ unsigned i, j, page_entries, region_points, page_points, regions, max_interval;
+ time_t now;
+ usec_t dt, current_position_time, max_time = 0, min_time, curr_time, first_valid_time_in_page;
+ struct rrdeng_region_info *region_info_array;
+ uint8_t is_first_region_initialized;
+
+ ctx = st->rrdhost->rrdeng_ctx;
+ regions = 1;
+ *max_intervalp = max_interval = 0;
+ region_info_array = NULL;
+ *region_info_arrayp = NULL;
+ page_info_array = NULL;
+
+ rrdset_rdlock(st);
+ for(rd_iter = st->dimensions, rd = NULL, min_time = (usec_t)-1 ; rd_iter ; rd_iter = rd_iter->next) {
+ /*
+ * Choose oldest dimension as reference. This is not equivalent to the union of all dimensions
+ * but it is a best effort approximation with a bias towards older metrics in a chart. It
+ * matches netdata behaviour in the sense that dimensions are generally aligned in a chart
+ * and older dimensions contain more information about the time range. It does not work well
+ * for metrics that have recently stopped being collected.
+ */
+ curr_time = pg_cache_oldest_time_in_range(ctx, rd_iter->state->rrdeng_uuid,
+ start_time * USEC_PER_SEC, end_time * USEC_PER_SEC);
+ if (INVALID_TIME != curr_time && curr_time < min_time) {
+ rd = rd_iter;
+ min_time = curr_time;
+ }
+ }
+ rrdset_unlock(st);
+ if (NULL == rd) {
+ return 1;
+ }
+ pages_nr = pg_cache_preload(ctx, rd->state->rrdeng_uuid, start_time * USEC_PER_SEC, end_time * USEC_PER_SEC,
+ &page_info_array, &page_index);
+ if (pages_nr) {
+ /* conservative allocation, will reduce the size later if necessary */
+ region_info_array = mallocz(sizeof(*region_info_array) * pages_nr);
+ }
+ is_first_region_initialized = 0;
+ region_points = 0;
+
+ /* pages loop */
+ for (i = 0, curr = NULL, prev = NULL ; i < pages_nr ; ++i) {
+ old_prev = prev;
+ prev = curr;
+ curr = &page_info_array[i];
+ *pginfo_to_points(curr) = 0; /* initialize to invalid page */
+ *pginfo_to_dt(curr) = 0; /* no known data collection interval yet */
+ if (unlikely(INVALID_TIME == curr->start_time || INVALID_TIME == curr->end_time)) {
+ info("Ignoring page with invalid timestamp.");
+ prev = old_prev;
+ continue;
+ }
+ page_entries = curr->page_length / sizeof(storage_number);
+ assert(0 != page_entries);
+ if (likely(1 != page_entries)) {
+ dt = (curr->end_time - curr->start_time) / (page_entries - 1);
+ *pginfo_to_dt(curr) = ROUND_USEC_TO_SEC(dt);
+ if (unlikely(0 == *pginfo_to_dt(curr)))
+ *pginfo_to_dt(curr) = 1;
+ } else {
+ dt = 0;
+ }
+ for (j = 0, page_points = 0 ; j < page_entries ; ++j) {
+ uint8_t is_metric_out_of_order, is_metric_earlier_than_range;
+
+ is_metric_earlier_than_range = 0;
+ is_metric_out_of_order = 0;
+
+ current_position_time = curr->start_time + j * dt;
+ now = current_position_time / USEC_PER_SEC;
+ if (now > end_time) { /* there will be no more pages in the time range */
+ break;
+ }
+ if (now < start_time)
+ is_metric_earlier_than_range = 1;
+ if (unlikely(current_position_time < max_time)) /* just went back in time */
+ is_metric_out_of_order = 1;
+ if (is_metric_earlier_than_range || unlikely(is_metric_out_of_order)) {
+ if (unlikely(is_metric_out_of_order))
+ info("Ignoring metric with out of order timestamp.");
+ continue; /* next entry */
+ }
+ /* here is a valid metric */
+ ++page_points;
+ region_info_array[regions - 1].points = ++region_points;
+ max_time = current_position_time;
+ if (1 == page_points)
+ first_valid_time_in_page = current_position_time;
+ if (unlikely(!is_first_region_initialized)) {
+ assert(1 == regions);
+ /* this is the first region */
+ region_info_array[0].start_time = current_position_time;
+ is_first_region_initialized = 1;
+ }
+ }
+ *pginfo_to_points(curr) = page_points;
+ if (0 == page_points) {
+ prev = old_prev;
+ continue;
+ }
+
+ if (unlikely(0 == dt)) { /* unknown data collection interval */
+ assert(1 == page_points);
+
+ if (likely(NULL != prev)) { /* get interval from previous page */
+ *pginfo_to_dt(curr) = *pginfo_to_dt(prev);
+ } else { /* there is no previous page in the query */
+ struct rrdeng_page_info db_page_info;
+
+ /* go to database */
+ pg_cache_get_filtered_info_prev(ctx, page_index, curr->start_time,
+ metrics_with_known_interval, &db_page_info);
+ if (unlikely(db_page_info.start_time == INVALID_TIME || db_page_info.end_time == INVALID_TIME ||
+ 0 == db_page_info.page_length)) { /* nothing in the database, default to update_every */
+ *pginfo_to_dt(curr) = rd->update_every;
+ } else {
+ unsigned db_entries;
+ usec_t db_dt;
+
+ db_entries = db_page_info.page_length / sizeof(storage_number);
+ db_dt = (db_page_info.end_time - db_page_info.start_time) / (db_entries - 1);
+ *pginfo_to_dt(curr) = ROUND_USEC_TO_SEC(db_dt);
+ if (unlikely(0 == *pginfo_to_dt(curr)))
+ *pginfo_to_dt(curr) = 1;
+
+ }
+ }
+ }
+ if (likely(prev) && unlikely(*pginfo_to_dt(curr) != *pginfo_to_dt(prev))) {
+ info("Data collection interval change detected in query: %"PRIu32" -> %"PRIu32,
+ *pginfo_to_dt(prev), *pginfo_to_dt(curr));
+ region_info_array[regions++ - 1].points -= page_points;
+ region_info_array[regions - 1].points = region_points = page_points;
+ region_info_array[regions - 1].start_time = first_valid_time_in_page;
+ }
+ if (*pginfo_to_dt(curr) > max_interval)
+ max_interval = *pginfo_to_dt(curr);
+ region_info_array[regions - 1].update_every = *pginfo_to_dt(curr);
+ }
+ if (page_info_array)
+ freez(page_info_array);
+ if (region_info_array) {
+ if (likely(is_first_region_initialized)) {
+ /* free unnecessary memory */
+ region_info_array = reallocz(region_info_array, sizeof(*region_info_array) * regions);
+ *region_info_arrayp = region_info_array;
+ *max_intervalp = max_interval;
+ } else {
+ /* empty result */
+ freez(region_info_array);
+ }
+ }
+ return regions;
+}
+
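A hypothetical consumer of this API, following the documented contract (`st`, `start_time` and `end_time` are placeholders):

```
/* Sketch, not netdata code: enumerate the variable-step regions. */
struct rrdeng_region_info *regions = NULL;
unsigned max_interval;

unsigned nr = rrdeng_variable_step_boundaries(st, start_time, end_time,
                                              &regions, &max_interval);
if (regions) {
    for (unsigned i = 0; i < nr; ++i) {
        /* regions[i].start_time, .update_every and .points describe one
         * region of constant data collection interval */
    }
    freez(regions);  /* the caller owns the array, per the comment above */
}
```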
/*
* Gets a handle for loading metrics from the database.
* The handle must be released with rrdeng_load_metric_final().
@@ -226,80 +428,108 @@ void rrdeng_load_metric_init(RRDDIM *rd, struct rrddim_query_handle *rrdimm_hand
{
struct rrdeng_query_handle *handle;
struct rrdengine_instance *ctx;
+ unsigned pages_nr;
ctx = rd->rrdset->rrdhost->rrdeng_ctx;
rrdimm_handle->start_time = start_time;
rrdimm_handle->end_time = end_time;
handle = &rrdimm_handle->rrdeng;
+ handle->next_page_time = start_time;
handle->now = start_time;
- handle->dt = rd->rrdset->update_every;
+ handle->position = 0;
handle->ctx = ctx;
handle->descr = NULL;
- handle->page_index = pg_cache_preload(ctx, rd->state->rrdeng_uuid,
- start_time * USEC_PER_SEC, end_time * USEC_PER_SEC);
+ pages_nr = pg_cache_preload(ctx, rd->state->rrdeng_uuid, start_time * USEC_PER_SEC, end_time * USEC_PER_SEC,
+ NULL, &handle->page_index);
+ if (unlikely(NULL == handle->page_index || 0 == pages_nr))
+ /* there are no metrics to load */
+ handle->next_page_time = INVALID_TIME;
}
-storage_number rrdeng_load_metric_next(struct rrddim_query_handle *rrdimm_handle)
+/* Returns the metric and stores its timestamp in *current_time */
+storage_number rrdeng_load_metric_next(struct rrddim_query_handle *rrdimm_handle, time_t *current_time)
{
struct rrdeng_query_handle *handle;
struct rrdengine_instance *ctx;
struct rrdeng_page_descr *descr;
storage_number *page, ret;
- unsigned position;
- usec_t point_in_time;
+ unsigned position, entries;
+ usec_t next_page_time, current_position_time;
handle = &rrdimm_handle->rrdeng;
- if (unlikely(INVALID_TIME == handle->now)) {
+ if (unlikely(INVALID_TIME == handle->next_page_time)) {
return SN_EMPTY_SLOT;
}
ctx = handle->ctx;
- point_in_time = handle->now * USEC_PER_SEC;
- descr = handle->descr;
-
- if (unlikely(NULL == handle->page_index)) {
- ret = SN_EMPTY_SLOT;
- goto out;
+ if (unlikely(NULL == (descr = handle->descr))) {
+ /* it's the first call */
+ next_page_time = handle->next_page_time * USEC_PER_SEC;
}
+ position = handle->position + 1;
+
if (unlikely(NULL == descr ||
- point_in_time < descr->start_time ||
- point_in_time > descr->end_time)) {
+ position >= (descr->page_length / sizeof(storage_number)))) {
+ /* We need to get a new page */
if (descr) {
+ /* Drop old page's reference */
+ handle->next_page_time = (descr->end_time / USEC_PER_SEC) + 1;
+ if (unlikely(handle->next_page_time > rrdimm_handle->end_time)) {
+ goto no_more_metrics;
+ }
+ next_page_time = handle->next_page_time * USEC_PER_SEC;
#ifdef NETDATA_INTERNAL_CHECKS
rrd_stat_atomic_add(&ctx->stats.metric_API_consumers, -1);
#endif
pg_cache_put(ctx, descr);
handle->descr = NULL;
}
- descr = pg_cache_lookup(ctx, handle->page_index, &handle->page_index->id, point_in_time);
+ descr = pg_cache_lookup_next(ctx, handle->page_index, &handle->page_index->id,
+ next_page_time, rrdimm_handle->end_time * USEC_PER_SEC);
if (NULL == descr) {
- ret = SN_EMPTY_SLOT;
- goto out;
+ goto no_more_metrics;
}
#ifdef NETDATA_INTERNAL_CHECKS
rrd_stat_atomic_add(&ctx->stats.metric_API_consumers, 1);
#endif
handle->descr = descr;
- }
- if (unlikely(INVALID_TIME == descr->start_time ||
- INVALID_TIME == descr->end_time)) {
- ret = SN_EMPTY_SLOT;
- goto out;
+ if (unlikely(INVALID_TIME == descr->start_time ||
+ INVALID_TIME == descr->end_time)) {
+ goto no_more_metrics;
+ }
+ if (unlikely(descr->start_time != descr->end_time && next_page_time > descr->start_time)) {
+ /* we're in the middle of the page somewhere */
+ entries = descr->page_length / sizeof(storage_number);
+ position = ((uint64_t)(next_page_time - descr->start_time)) * entries /
+ (descr->end_time - descr->start_time + 1);
+ } else {
+ position = 0;
+ }
}
page = descr->pg_cache_descr->page;
- if (unlikely(descr->start_time == descr->end_time)) {
- ret = page[0];
- goto out;
- }
- position = ((uint64_t)(point_in_time - descr->start_time)) * (descr->page_length / sizeof(storage_number)) /
- (descr->end_time - descr->start_time + 1);
ret = page[position];
+ entries = descr->page_length / sizeof(storage_number);
+ if (entries > 1) {
+ usec_t dt;
-out:
- handle->now += handle->dt;
- if (unlikely(handle->now > rrdimm_handle->end_time)) {
- handle->now = INVALID_TIME;
+ dt = (descr->end_time - descr->start_time) / (entries - 1);
+ current_position_time = descr->start_time + position * dt;
+ } else {
+ current_position_time = descr->start_time;
}
+ handle->position = position;
+ handle->now = current_position_time / USEC_PER_SEC;
+/* assert(handle->now >= rrdimm_handle->start_time && handle->now <= rrdimm_handle->end_time);
+ The above assertion is an approximation and needs to take update_every into account */
+ if (unlikely(handle->now >= rrdimm_handle->end_time)) {
+ /* next calls will not load any more metrics */
+ handle->next_page_time = INVALID_TIME;
+ }
+ *current_time = handle->now;
return ret;
+
+no_more_metrics:
+ handle->next_page_time = INVALID_TIME;
+ return SN_EMPTY_SLOT;
}
int rrdeng_load_metric_is_finished(struct rrddim_query_handle *rrdimm_handle)
@@ -307,7 +537,7 @@ int rrdeng_load_metric_is_finished(struct rrddim_query_handle *rrdimm_handle)
struct rrdeng_query_handle *handle;
handle = &rrdimm_handle->rrdeng;
- return (INVALID_TIME == handle->now);
+ return (INVALID_TIME == handle->next_page_time);
}
/*
diff --git a/database/engine/rrdengineapi.h b/database/engine/rrdengineapi.h
index e52aabcb..9b1ab187 100644
--- a/database/engine/rrdengineapi.h
+++ b/database/engine/rrdengineapi.h
@@ -15,6 +15,12 @@
extern int default_rrdeng_page_cache_mb;
extern int default_rrdeng_disk_quota_mb;
+struct rrdeng_region_info {
+ time_t start_time;
+ int update_every;
+ unsigned points;
+};
+
extern void *rrdeng_create_page(struct rrdengine_instance *ctx, uuid_t *id, struct rrdeng_page_descr **ret_descr);
extern void rrdeng_commit_page(struct rrdengine_instance *ctx, struct rrdeng_page_descr *descr,
Word_t page_correlation_id);
@@ -25,9 +31,12 @@ extern void rrdeng_store_metric_init(RRDDIM *rd);
extern void rrdeng_store_metric_flush_current_page(RRDDIM *rd);
extern void rrdeng_store_metric_next(RRDDIM *rd, usec_t point_in_time, storage_number number);
extern void rrdeng_store_metric_finalize(RRDDIM *rd);
+extern unsigned
+ rrdeng_variable_step_boundaries(RRDSET *st, time_t start_time, time_t end_time,
+ struct rrdeng_region_info **region_info_arrayp, unsigned *max_intervalp);
extern void rrdeng_load_metric_init(RRDDIM *rd, struct rrddim_query_handle *rrdimm_handle,
time_t start_time, time_t end_time);
-extern storage_number rrdeng_load_metric_next(struct rrddim_query_handle *rrdimm_handle);
+extern storage_number rrdeng_load_metric_next(struct rrddim_query_handle *rrdimm_handle, time_t *current_time);
extern int rrdeng_load_metric_is_finished(struct rrddim_query_handle *rrdimm_handle);
extern void rrdeng_load_metric_finalize(struct rrddim_query_handle *rrdimm_handle);
extern time_t rrdeng_metric_latest_time(RRDDIM *rd);
diff --git a/database/engine/rrdenginelib.h b/database/engine/rrdenginelib.h
index 36d414e8..5685b65a 100644
--- a/database/engine/rrdenginelib.h
+++ b/database/engine/rrdenginelib.h
@@ -23,6 +23,8 @@ struct rrdeng_page_descr;
#define ALIGN_BYTES_FLOOR(x) (((x) / RRDENG_BLOCK_SIZE) * RRDENG_BLOCK_SIZE)
#define ALIGN_BYTES_CEILING(x) ((((x) + RRDENG_BLOCK_SIZE - 1) / RRDENG_BLOCK_SIZE) * RRDENG_BLOCK_SIZE)
+#define ROUND_USEC_TO_SEC(x) (((x) + USEC_PER_SEC / 2 - 1) / USEC_PER_SEC)
+
typedef uintptr_t rrdeng_stats_t;
#ifdef __ATOMIC_RELAXED
diff --git a/database/rrd.h b/database/rrd.h
index 5b09c2dd..39e88125 100644
--- a/database/rrd.h
+++ b/database/rrd.h
@@ -273,8 +273,9 @@ struct rrddim_query_handle {
struct rrdeng_page_descr *descr;
struct rrdengine_instance *ctx;
struct pg_cache_page_index *page_index;
- time_t now; //TODO: remove now to implement next point iteration
- time_t dt; //TODO: remove dt to implement next point iteration
+ time_t next_page_time;
+ time_t now;
+ unsigned position;
} rrdeng; // state the database engine uses
#endif
};
@@ -307,7 +308,7 @@ struct rrddim_volatile {
void (*init)(RRDDIM *rd, struct rrddim_query_handle *handle, time_t start_time, time_t end_time);
// run this to load each metric number from the database
- storage_number (*next_metric)(struct rrddim_query_handle *handle);
+ storage_number (*next_metric)(struct rrddim_query_handle *handle, time_t *current_time);
// run this to test if the series of next_metric() database queries is finished
int (*is_finished)(struct rrddim_query_handle *handle);
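Taken together, the query interface now iterates point by point and reports each point's timestamp. A hypothetical consumer loop under that contract (the `rd->state->query_ops` access path and the finalizer are assumptions mirroring this struct):

```
/* Sketch, not netdata code: walk a dimension's stored values. */
struct rrddim_query_handle handle;
time_t current_time;

rd->state->query_ops.init(rd, &handle, start_time, end_time);
while (!rd->state->query_ops.is_finished(&handle)) {
    storage_number n = rd->state->query_ops.next_metric(&handle, &current_time);
    /* n is the value at current_time, or SN_EMPTY_SLOT when nothing is stored */
}
rd->state->query_ops.finalize(&handle);  /* assumed finalizer, mirroring init */
```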
diff --git a/database/rrddim.c b/database/rrddim.c
index 09f364b0..019ca34a 100644
--- a/database/rrddim.c
+++ b/database/rrddim.c
@@ -118,11 +118,12 @@ static void rrddim_query_init(RRDDIM *rd, struct rrddim_query_handle *handle, ti
handle->slotted.finished = 0;
}
-static storage_number rrddim_query_next_metric(struct rrddim_query_handle *handle) {
+static storage_number rrddim_query_next_metric(struct rrddim_query_handle *handle, time_t *current_time) {
RRDDIM *rd = handle->rd;
long entries = rd->rrdset->entries;
long slot = handle->slotted.slot;
+ (void)current_time;
if (unlikely(handle->slotted.slot == handle->slotted.last_slot))
handle->slotted.finished = 1;
storage_number n = rd->values[slot++];
diff --git a/database/rrdvar.c b/database/rrdvar.c
index 95ab6859..0858d0bd 100644
--- a/database/rrdvar.c
+++ b/database/rrdvar.c
@@ -248,6 +248,7 @@ int health_variable_lookup(const char *variable, uint32_t hash, RRDCALC *rc, cal
struct variable2json_helper {
BUFFER *buf;
size_t counter;
+ RRDVAR_OPTIONS options;
};
static int single_variable2json(void *entry, void *data) {
@@ -255,22 +256,37 @@ static int single_variable2json(void *entry, void *data) {
RRDVAR *rv = (RRDVAR *)entry;
calculated_number value = rrdvar2number(rv);
- if(unlikely(isnan(value) || isinf(value)))
- buffer_sprintf(helper->buf, "%s\n\t\t\"%s\": null", helper->counter?",":"", rv->name);
- else
- buffer_sprintf(helper->buf, "%s\n\t\t\"%s\": %0.5" LONG_DOUBLE_MODIFIER, helper->counter?",":"", rv->name, (LONG_DOUBLE)value);
+ if (helper->options == RRDVAR_OPTION_DEFAULT || rv->options & helper->options) {
+ if(unlikely(isnan(value) || isinf(value)))
+ buffer_sprintf(helper->buf, "%s\n\t\t\"%s\": null", helper->counter?",":"", rv->name);
+ else
+ buffer_sprintf(helper->buf, "%s\n\t\t\"%s\": %0.5" LONG_DOUBLE_MODIFIER, helper->counter?",":"", rv->name, (LONG_DOUBLE)value);
- helper->counter++;
+ helper->counter++;
+ }
return 0;
}
+void health_api_v1_chart_custom_variables2json(RRDSET *st, BUFFER *buf) {
+ struct variable2json_helper helper = {
+ .buf = buf,
+ .counter = 0,
+ .options = RRDVAR_OPTION_CUSTOM_CHART_VAR
+ };
+
+ buffer_sprintf(buf, "{");
+ avl_traverse_lock(&st->rrdvar_root_index, single_variable2json, (void *)&helper);
+ buffer_strcat(buf, "\n\t\t\t}");
+}
+
void health_api_v1_chart_variables2json(RRDSET *st, BUFFER *buf) {
RRDHOST *host = st->rrdhost;
struct variable2json_helper helper = {
.buf = buf,
- .counter = 0
+ .counter = 0,
+ .options = RRDVAR_OPTION_DEFAULT
};
buffer_sprintf(buf, "{\n\t\"chart\": \"%s\",\n\t\"chart_name\": \"%s\",\n\t\"chart_context\": \"%s\",\n\t\"chart_variables\": {", st->id, st->name, st->context);
diff --git a/depcomp b/depcomp
new file mode 100755
index 00000000..b39f98f9
--- /dev/null
+++ b/depcomp
@@ -0,0 +1,791 @@
+#! /bin/sh
+# depcomp - compile a program generating dependencies as side-effects
+
+scriptversion=2016-01-11.22; # UTC
+
+# Copyright (C) 1999-2017 Free Software Foundation, Inc.
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2, or (at your option)
+# any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+# As a special exception to the GNU General Public License, if you
+# distribute this file as part of a program that contains a
+# configuration script generated by Autoconf, you may include it under
+# the same distribution terms that you use for the rest of that program.
+
+# Originally written by Alexandre Oliva <oliva@dcc.unicamp.br>.
+
+case $1 in
+ '')
+ echo "$0: No command. Try '$0 --help' for more information." 1>&2
+ exit 1;
+ ;;
+ -h | --h*)
+ cat <<\EOF
+Usage: depcomp [--help] [--version] PROGRAM [ARGS]
+
+Run PROGRAM ARGS to compile a file, generating dependencies
+as side-effects.
+
+Environment variables:
+ depmode Dependency tracking mode.
+ source Source file read by 'PROGRAM ARGS'.
+ object Object file output by 'PROGRAM ARGS'.
+ DEPDIR directory where to store dependencies.
+ depfile Dependency file to output.
+ tmpdepfile Temporary file to use when outputting dependencies.
+ libtool Whether libtool is used (yes/no).
+
+Report bugs to <bug-automake@gnu.org>.
+EOF
+ exit $?
+ ;;
+ -v | --v*)
+ echo "depcomp $scriptversion"
+ exit $?
+ ;;
+esac
+
+# Get the directory component of the given path, and save it in the
+# global variable '$dir'. Note that this directory component will
+# be either empty or ending with a '/' character. This is deliberate.
+set_dir_from ()
+{
+ case $1 in
+ */*) dir=`echo "$1" | sed -e 's|/[^/]*$|/|'`;;
+ *) dir=;;
+ esac
+}
+
+# Get the suffix-stripped basename of the given path, and save it in the
+# global variable '$base'.
+set_base_from ()
+{
+ base=`echo "$1" | sed -e 's|^.*/||' -e 's/\.[^.]*$//'`
+}
+
+# If no dependency file was actually created by the compiler invocation,
+# we still have to create a dummy depfile, to avoid errors with the
+# Makefile "include basename.Plo" scheme.
+make_dummy_depfile ()
+{
+ echo "#dummy" > "$depfile"
+}
+
+# Factor out some common post-processing of the generated depfile.
+# Requires the auxiliary global variable '$tmpdepfile' to be set.
+aix_post_process_depfile ()
+{
+ # If the compiler actually managed to produce a dependency file,
+ # post-process it.
+ if test -f "$tmpdepfile"; then
+ # Each line is of the form 'foo.o: dependency.h'.
+ # Do two passes, one to just change these to
+ # $object: dependency.h
+ # and one to simply output
+ # dependency.h:
+ # which is needed to avoid the deleted-header problem.
+ { sed -e "s,^.*\.[$lower]*:,$object:," < "$tmpdepfile"
+ sed -e "s,^.*\.[$lower]*:[$tab ]*,," -e 's,$,:,' < "$tmpdepfile"
+ } > "$depfile"
+ rm -f "$tmpdepfile"
+ else
+ make_dummy_depfile
+ fi
+}
+
+# A tabulation character.
+tab=' '
+# A newline character.
+nl='
+'
+# Character ranges might be problematic outside the C locale.
+# These definitions help.
+upper=ABCDEFGHIJKLMNOPQRSTUVWXYZ
+lower=abcdefghijklmnopqrstuvwxyz
+digits=0123456789
+alpha=${upper}${lower}
+
+if test -z "$depmode" || test -z "$source" || test -z "$object"; then
+ echo "depcomp: Variables source, object and depmode must be set" 1>&2
+ exit 1
+fi
+
+# Dependencies for sub/bar.o or sub/bar.obj go into sub/.deps/bar.Po.
+depfile=${depfile-`echo "$object" |
+ sed 's|[^\\/]*$|'${DEPDIR-.deps}'/&|;s|\.\([^.]*\)$|.P\1|;s|Pobj$|Po|'`}
+tmpdepfile=${tmpdepfile-`echo "$depfile" | sed 's/\.\([^.]*\)$/.T\1/'`}
+
+rm -f "$tmpdepfile"
+
+# Avoid interferences from the environment.
+gccflag= dashmflag=
+
+# Some modes work just like other modes, but use different flags. We
+# parameterize here, but still list the modes in the big case below,
+# to make depend.m4 easier to write. Note that we *cannot* use a case
+# here, because this file can only contain one case statement.
+if test "$depmode" = hp; then
+ # HP compiler uses -M and no extra arg.
+ gccflag=-M
+ depmode=gcc
+fi
+
+if test "$depmode" = dashXmstdout; then
+ # This is just like dashmstdout with a different argument.
+ dashmflag=-xM
+ depmode=dashmstdout
+fi
+
+cygpath_u="cygpath -u -f -"
+if test "$depmode" = msvcmsys; then
+ # This is just like msvisualcpp but w/o cygpath translation.
+ # Just convert the backslash-escaped backslashes to single forward
+ # slashes to satisfy depend.m4
+ cygpath_u='sed s,\\\\,/,g'
+ depmode=msvisualcpp
+fi
+
+if test "$depmode" = msvc7msys; then
+ # This is just like msvc7 but w/o cygpath translation.
+ # Just convert the backslash-escaped backslashes to single forward
+ # slashes to satisfy depend.m4
+ cygpath_u='sed s,\\\\,/,g'
+ depmode=msvc7
+fi
+
+if test "$depmode" = xlc; then
+ # IBM C/C++ Compilers xlc/xlC can output gcc-like dependency information.
+ gccflag=-qmakedep=gcc,-MF
+ depmode=gcc
+fi
+
+case "$depmode" in
+gcc3)
+## gcc 3 implements dependency tracking that does exactly what
+## we want. Yay! Note: for some reason libtool 1.4 doesn't like
+## it if -MD -MP comes after the -MF stuff. Hmm.
+## Unfortunately, FreeBSD c89 acceptance of flags depends upon
+## the command line argument order; so add the flags where they
+## appear in depend2.am. Note that the slowdown incurred here
+## affects only configure: in makefiles, %FASTDEP% shortcuts this.
+ for arg
+ do
+ case $arg in
+ -c) set fnord "$@" -MT "$object" -MD -MP -MF "$tmpdepfile" "$arg" ;;
+ *) set fnord "$@" "$arg" ;;
+ esac
+ shift # fnord
+ shift # $arg
+ done
+ "$@"
+ stat=$?
+ if test $stat -ne 0; then
+ rm -f "$tmpdepfile"
+ exit $stat
+ fi
+ mv "$tmpdepfile" "$depfile"
+ ;;
+
+gcc)
+## Note that this doesn't just cater to obsolete pre-3.x GCC compilers,
+## but also to in-use compilers like IBM xlc/xlC and the HP C compiler.
+## (see the conditional assignment to $gccflag above).
+## There are various ways to get dependency output from gcc. Here's
+## why we pick this rather obscure method:
+## - Don't want to use -MD because we'd like the dependencies to end
+## up in a subdir. Having to rename by hand is ugly.
+## (We might end up doing this anyway to support other compilers.)
+## - The DEPENDENCIES_OUTPUT environment variable makes gcc act like
+## -MM, not -M (despite what the docs say). Also, it might not be
+## supported by the other compilers which use the 'gcc' depmode.
+## - Using -M directly means running the compiler twice (even worse
+## than renaming).
+ if test -z "$gccflag"; then
+ gccflag=-MD,
+ fi
+ "$@" -Wp,"$gccflag$tmpdepfile"
+ stat=$?
+ if test $stat -ne 0; then
+ rm -f "$tmpdepfile"
+ exit $stat
+ fi
+ rm -f "$depfile"
+ echo "$object : \\" > "$depfile"
+ # The second -e expression handles DOS-style file names with drive
+ # letters.
+ sed -e 's/^[^:]*: / /' \
+ -e 's/^['$alpha']:\/[^:]*: / /' < "$tmpdepfile" >> "$depfile"
+## This next piece of magic avoids the "deleted header file" problem.
+## The problem is that when a header file which appears in a .P file
+## is deleted, the dependency causes make to die (because there is
+## typically no way to rebuild the header). We avoid this by adding
+## dummy dependencies for each header file. Too bad gcc doesn't do
+## this for us directly.
+## Some versions of gcc put a space before the ':'. On the theory
+## that the space means something, we add a space to the output as
+## well. hp depmode also adds that space, but also prefixes the VPATH
+## to the object. Take care to not repeat it in the output.
+## Some versions of the HPUX 10.20 sed can't process this invocation
+## correctly. Breaking it into two sed invocations is a workaround.
+ tr ' ' "$nl" < "$tmpdepfile" \
+ | sed -e 's/^\\$//' -e '/^$/d' -e "s|.*$object$||" -e '/:$/d' \
+ | sed -e 's/$/ :/' >> "$depfile"
+ rm -f "$tmpdepfile"
+ ;;
+
+hp)
+ # This case exists only to let depend.m4 do its work. It works by
+ # looking at the text of this script. This case will never be run,
+ # since it is checked for above.
+ exit 1
+ ;;
+
+sgi)
+ if test "$libtool" = yes; then
+ "$@" "-Wp,-MDupdate,$tmpdepfile"
+ else
+ "$@" -MDupdate "$tmpdepfile"
+ fi
+ stat=$?
+ if test $stat -ne 0; then
+ rm -f "$tmpdepfile"
+ exit $stat
+ fi
+ rm -f "$depfile"
+
+ if test -f "$tmpdepfile"; then # yes, the source file depends on other files
+ echo "$object : \\" > "$depfile"
+ # Clip off the initial element (the dependent). Don't try to be
+ # clever and replace this with sed code, as IRIX sed won't handle
+ # lines with more than a fixed number of characters (4096 in
+ # IRIX 6.2 sed, 8192 in IRIX 6.5). We also remove comment lines;
+ # the IRIX cc adds comments like '#:fec' to the end of the
+ # dependency line.
+ tr ' ' "$nl" < "$tmpdepfile" \
+ | sed -e 's/^.*\.o://' -e 's/#.*$//' -e '/^$/ d' \
+ | tr "$nl" ' ' >> "$depfile"
+ echo >> "$depfile"
+ # The second pass generates a dummy entry for each header file.
+ tr ' ' "$nl" < "$tmpdepfile" \
+ | sed -e 's/^.*\.o://' -e 's/#.*$//' -e '/^$/ d' -e 's/$/:/' \
+ >> "$depfile"
+ else
+ make_dummy_depfile
+ fi
+ rm -f "$tmpdepfile"
+ ;;
+
+xlc)
+ # This case exists only to let depend.m4 do its work. It works by
+ # looking at the text of this script. This case will never be run,
+ # since it is checked for above.
+ exit 1
+ ;;
+
+aix)
+ # The C for AIX Compiler uses -M and outputs the dependencies
+ # in a .u file. In older versions, this file always lives in the
+ # current directory. Also, the AIX compiler puts '$object:' at the
+ # start of each line; $object doesn't have directory information.
+ # Version 6 uses the directory in both cases.
+ set_dir_from "$object"
+ set_base_from "$object"
+ if test "$libtool" = yes; then
+ tmpdepfile1=$dir$base.u
+ tmpdepfile2=$base.u
+ tmpdepfile3=$dir.libs/$base.u
+ "$@" -Wc,-M
+ else
+ tmpdepfile1=$dir$base.u
+ tmpdepfile2=$dir$base.u
+ tmpdepfile3=$dir$base.u
+ "$@" -M
+ fi
+ stat=$?
+ if test $stat -ne 0; then
+ rm -f "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3"
+ exit $stat
+ fi
+
+ for tmpdepfile in "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3"
+ do
+ test -f "$tmpdepfile" && break
+ done
+ aix_post_process_depfile
+ ;;
+
+tcc)
+ # tcc (Tiny C Compiler) understands '-MD -MF file' since version 0.9.26.
+ # FIXME: That version was still under development at the moment of writing.
+ # Make sure that this statement remains true also for stable, released
+ # versions.
+ # It will wrap lines (doesn't matter whether long or short) with a
+ # trailing '\', as in:
+ #
+ # foo.o : \
+ # foo.c \
+ # foo.h \
+ #
+ # It will put a trailing '\' even on the last line, and will use leading
+ # spaces rather than leading tabs (at least since its commit 0394caf7
+ # "Emit spaces for -MD").
+ "$@" -MD -MF "$tmpdepfile"
+ stat=$?
+ if test $stat -ne 0; then
+ rm -f "$tmpdepfile"
+ exit $stat
+ fi
+ rm -f "$depfile"
+ # Each non-empty line is of the form 'foo.o : \' or ' dep.h \'.
+ # We have to change lines of the first kind to '$object: \'.
+ sed -e "s|.*:|$object :|" < "$tmpdepfile" > "$depfile"
+ # And for each line of the second kind, we have to emit a 'dep.h:'
+ # dummy dependency, to avoid the deleted-header problem.
+ sed -n -e 's|^ *\(.*\) *\\$|\1:|p' < "$tmpdepfile" >> "$depfile"
+ rm -f "$tmpdepfile"
+ ;;
+
+## The order of this option in the case statement is important, since the
+## shell code in configure will try each of these formats in the order
+## listed in this file. A plain '-MD' option would be understood by many
+## compilers, so we must ensure this comes after the gcc and icc options.
+pgcc)
+ # Portland's C compiler understands '-MD'.
+ # Will always output deps to 'file.d' where file is the root name of the
+ # source file under compilation, even if file resides in a subdirectory.
+ # The object file name does not affect the name of the '.d' file.
+ # pgcc 10.2 will output
+ # foo.o: sub/foo.c sub/foo.h
+ # and will wrap long lines using '\' :
+ # foo.o: sub/foo.c ... \
+ # sub/foo.h ... \
+ # ...
+ set_dir_from "$object"
+ # Use the source, not the object, to determine the base name, since
+ # that's sadly what pgcc will do too.
+ set_base_from "$source"
+ tmpdepfile=$base.d
+
+ # For projects that build the same source file twice into different object
+ # files, the pgcc approach of using the *source* file root name can cause
+ # problems in parallel builds. Use a locking strategy to avoid stomping on
+ # the same $tmpdepfile.
+ lockdir=$base.d-lock
+ trap "
+ echo '$0: caught signal, cleaning up...' >&2
+ rmdir '$lockdir'
+ exit 1
+ " 1 2 13 15
+ numtries=100
+ i=$numtries
+ while test $i -gt 0; do
+ # mkdir is a portable test-and-set.
+ if mkdir "$lockdir" 2>/dev/null; then
+ # This process acquired the lock.
+ "$@" -MD
+ stat=$?
+ # Release the lock.
+ rmdir "$lockdir"
+ break
+ else
+ # If the lock is being held by a different process, wait
+ # until the winning process is done or we timeout.
+ while test -d "$lockdir" && test $i -gt 0; do
+ sleep 1
+ i=`expr $i - 1`
+ done
+ fi
+ i=`expr $i - 1`
+ done
+ trap - 1 2 13 15
+ if test $i -le 0; then
+ echo "$0: failed to acquire lock after $numtries attempts" >&2
+ echo "$0: check lockdir '$lockdir'" >&2
+ exit 1
+ fi
+
+ if test $stat -ne 0; then
+ rm -f "$tmpdepfile"
+ exit $stat
+ fi
+ rm -f "$depfile"
+ # Each line is of the form `foo.o: dependent.h',
+ # or `foo.o: dep1.h dep2.h \', or ` dep3.h dep4.h \'.
+ # Do two passes, one to just change these to
+ # `$object: dependent.h' and one to simply `dependent.h:'.
+ sed "s,^[^:]*:,$object :," < "$tmpdepfile" > "$depfile"
+ # Some versions of the HPUX 10.20 sed can't process this invocation
+ # correctly. Breaking it into two sed invocations is a workaround.
+ sed 's,^[^:]*: \(.*\)$,\1,;s/^\\$//;/^$/d;/:$/d' < "$tmpdepfile" \
+ | sed -e 's/$/ :/' >> "$depfile"
+ rm -f "$tmpdepfile"
+ ;;
+
+hp2)
+ # The "hp" stanza above does not work with aCC (C++) and HP's ia64
+ # compilers, which have integrated preprocessors. The correct option
+ # to use with these is +Maked; it writes dependencies to a file named
+ # 'foo.d', which lands next to the object file, wherever that
+ # happens to be.
+ # Much of this is similar to the tru64 case; see comments there.
+ set_dir_from "$object"
+ set_base_from "$object"
+ if test "$libtool" = yes; then
+ tmpdepfile1=$dir$base.d
+ tmpdepfile2=$dir.libs/$base.d
+ "$@" -Wc,+Maked
+ else
+ tmpdepfile1=$dir$base.d
+ tmpdepfile2=$dir$base.d
+ "$@" +Maked
+ fi
+ stat=$?
+ if test $stat -ne 0; then
+ rm -f "$tmpdepfile1" "$tmpdepfile2"
+ exit $stat
+ fi
+
+ for tmpdepfile in "$tmpdepfile1" "$tmpdepfile2"
+ do
+ test -f "$tmpdepfile" && break
+ done
+ if test -f "$tmpdepfile"; then
+ sed -e "s,^.*\.[$lower]*:,$object:," "$tmpdepfile" > "$depfile"
+ # Add 'dependent.h:' lines.
+ sed -ne '2,${
+ s/^ *//
+ s/ \\*$//
+ s/$/:/
+ p
+ }' "$tmpdepfile" >> "$depfile"
+ else
+ make_dummy_depfile
+ fi
+ rm -f "$tmpdepfile" "$tmpdepfile2"
+ ;;
+
+tru64)
+ # The Tru64 compiler uses -MD to generate dependencies as a side
+ # effect. 'cc -MD -o foo.o ...' puts the dependencies into 'foo.o.d'.
+ # At least on Alpha/Redhat 6.1, Compaq CCC V6.2-504 seems to put
+ # dependencies in 'foo.d' instead, so we check for that too.
+ # Subdirectories are respected.
+ set_dir_from "$object"
+ set_base_from "$object"
+
+ if test "$libtool" = yes; then
+ # Libtool generates 2 separate objects for the 2 libraries. These
+ # two compilations output dependencies in $dir.libs/$base.o.d and
+ # in $dir$base.o.d. We have to check for both files, because
+ # one of the two compilations can be disabled. We should prefer
+ # $dir$base.o.d over $dir.libs/$base.o.d because the latter is
+ # automatically cleaned when .libs/ is deleted, while ignoring
+ # the former would cause a distcleancheck panic.
+ tmpdepfile1=$dir$base.o.d # libtool 1.5
+ tmpdepfile2=$dir.libs/$base.o.d # Likewise.
+ tmpdepfile3=$dir.libs/$base.d # Compaq CCC V6.2-504
+ "$@" -Wc,-MD
+ else
+ tmpdepfile1=$dir$base.d
+ tmpdepfile2=$dir$base.d
+ tmpdepfile3=$dir$base.d
+ "$@" -MD
+ fi
+
+ stat=$?
+ if test $stat -ne 0; then
+ rm -f "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3"
+ exit $stat
+ fi
+
+ for tmpdepfile in "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3"
+ do
+ test -f "$tmpdepfile" && break
+ done
+ # Same post-processing that is required for AIX mode.
+ aix_post_process_depfile
+ ;;
+
+msvc7)
+ if test "$libtool" = yes; then
+ showIncludes=-Wc,-showIncludes
+ else
+ showIncludes=-showIncludes
+ fi
+ "$@" $showIncludes > "$tmpdepfile"
+ stat=$?
+ grep -v '^Note: including file: ' "$tmpdepfile"
+ if test $stat -ne 0; then
+ rm -f "$tmpdepfile"
+ exit $stat
+ fi
+ rm -f "$depfile"
+ echo "$object : \\" > "$depfile"
+ # The first sed program below extracts the file names and escapes
+ # backslashes for cygpath. The second sed program outputs the file
+ # name when reading, but also accumulates all include files in the
+ # hold buffer in order to output them again at the end. This only
+ # works with sed implementations that can handle large buffers.
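+ # (Sketch of the flow, with a hypothetical header path: a compiler line
+ #   Note: including file: c:\foo\bar.h
+ # is reduced to the bare path, backslash-escaped for cygpath, converted
+ # to a Unix-style path, and finally written both as a '<tab>path \'
+ # dependency line and as a trailing 'path:' dummy rule.)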
+ sed < "$tmpdepfile" -n '
+/^Note: including file: *\(.*\)/ {
+ s//\1/
+ s/\\/\\\\/g
+ p
+}' | $cygpath_u | sort -u | sed -n '
+s/ /\\ /g
+s/\(.*\)/'"$tab"'\1 \\/p
+s/.\(.*\) \\/\1:/
+H
+$ {
+ s/.*/'"$tab"'/
+ G
+ p
+}' >> "$depfile"
+ echo >> "$depfile" # make sure the fragment doesn't end with a backslash
+ rm -f "$tmpdepfile"
+ ;;
+
+msvc7msys)
+ # This case exists only to let depend.m4 do its work. It works by
+ # looking at the text of this script. This case will never be run,
+ # since it is checked for above.
+ exit 1
+ ;;
+
+#nosideeffect)
+ # This comment above is used by automake to tell side-effect
+ # dependency tracking mechanisms from slower ones.
+
+dashmstdout)
+ # Important note: in order to support this mode, a compiler *must*
+ # always write the preprocessed file to stdout, regardless of -o.
+ "$@" || exit $?
+
+ # Remove the call to Libtool.
+ if test "$libtool" = yes; then
+ while test "X$1" != 'X--mode=compile'; do
+ shift
+ done
+ shift
+ fi
+
+ # Remove '-o $object'.
+ IFS=" "
+ for arg
+ do
+ case $arg in
+ -o)
+ shift
+ ;;
+ $object)
+ shift
+ ;;
+ *)
+ set fnord "$@" "$arg"
+ shift # fnord
+ shift # $arg
+ ;;
+ esac
+ done
+
+ test -z "$dashmflag" && dashmflag=-M
+ # Require at least two characters before searching for ':'
+ # in the target name. This is to cope with DOS-style filenames:
+ # a dependency such as 'c:/foo/bar' could be seen as target 'c' otherwise.
+ "$@" $dashmflag |
+ sed "s|^[$tab ]*[^:$tab ][^:][^:]*:[$tab ]*|$object: |" > "$tmpdepfile"
+ rm -f "$depfile"
+ cat < "$tmpdepfile" > "$depfile"
+ # Some versions of the HPUX 10.20 sed can't process this sed invocation
+ # correctly. Breaking it into two sed invocations is a workaround.
+ tr ' ' "$nl" < "$tmpdepfile" \
+ | sed -e 's/^\\$//' -e '/^$/d' -e '/:$/d' \
+ | sed -e 's/$/ :/' >> "$depfile"
+ rm -f "$tmpdepfile"
+ ;;
+
+dashXmstdout)
+ # This case only exists to satisfy depend.m4. It is never actually
+ # run, as this mode is specially recognized in the preamble.
+ exit 1
+ ;;
+
+makedepend)
+ "$@" || exit $?
+ # Remove any Libtool call
+ if test "$libtool" = yes; then
+ while test "X$1" != 'X--mode=compile'; do
+ shift
+ done
+ shift
+ fi
+ # X makedepend
+ shift
+ cleared=no eat=no
+ for arg
+ do
+ case $cleared in
+ no)
+ set ""; shift
+ cleared=yes ;;
+ esac
+ if test $eat = yes; then
+ eat=no
+ continue
+ fi
+ case "$arg" in
+ -D*|-I*)
+ set fnord "$@" "$arg"; shift ;;
+ # Strip any option that makedepend may not understand. Remove
+ # the object too, otherwise makedepend will parse it as a source file.
+ -arch)
+ eat=yes ;;
+ -*|$object)
+ ;;
+ *)
+ set fnord "$@" "$arg"; shift ;;
+ esac
+ done
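+ # Derive the object suffix: a hypothetical 'sub/foo.obj', for example,
+ # yields '.obj', so makedepend emits rules for the right object name.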
+ obj_suffix=`echo "$object" | sed 's/^.*\././'`
+ touch "$tmpdepfile"
+ ${MAKEDEPEND-makedepend} -o"$obj_suffix" -f"$tmpdepfile" "$@"
+ rm -f "$depfile"
+ # makedepend may prepend the VPATH from the source file name to the object.
+ # No need to regex-escape $object; excess matching of '.' is harmless.
+ sed "s|^.*\($object *:\)|\1|" "$tmpdepfile" > "$depfile"
+ # Some versions of the HPUX 10.20 sed can't process the last invocation
+ # correctly. Breaking it into two sed invocations is a workaround.
+ sed '1,2d' "$tmpdepfile" \
+ | tr ' ' "$nl" \
+ | sed -e 's/^\\$//' -e '/^$/d' -e '/:$/d' \
+ | sed -e 's/$/ :/' >> "$depfile"
+ rm -f "$tmpdepfile" "$tmpdepfile".bak
+ ;;
+
+cpp)
+ # Important note: in order to support this mode, a compiler *must*
+ # always write the preprocessed file to stdout.
+ "$@" || exit $?
+
+ # Remove the call to Libtool.
+ if test "$libtool" = yes; then
+ while test "X$1" != 'X--mode=compile'; do
+ shift
+ done
+ shift
+ fi
+
+ # Remove '-o $object'.
+ IFS=" "
+ for arg
+ do
+ case $arg in
+ -o)
+ shift
+ ;;
+ $object)
+ shift
+ ;;
+ *)
+ set fnord "$@" "$arg"
+ shift # fnord
+ shift # $arg
+ ;;
+ esac
+ done
+
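+ # The preprocessor emits line markers such as '# 1 "foo.h"' (or
+ # '#line 1 "foo.h"'); the seds below keep just the quoted file names,
+ # one per line, as the dependency list.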
+ "$@" -E \
+ | sed -n -e '/^# [0-9][0-9]* "\([^"]*\)".*/ s:: \1 \\:p' \
+ -e '/^#line [0-9][0-9]* "\([^"]*\)".*/ s:: \1 \\:p' \
+ | sed '$ s: \\$::' > "$tmpdepfile"
+ rm -f "$depfile"
+ echo "$object : \\" > "$depfile"
+ cat < "$tmpdepfile" >> "$depfile"
+ sed < "$tmpdepfile" '/^$/d;s/^ //;s/ \\$//;s/$/ :/' >> "$depfile"
+ rm -f "$tmpdepfile"
+ ;;
+
+msvisualcpp)
+ # Important note: in order to support this mode, a compiler *must*
+ # always write the preprocessed file to stdout.
+ "$@" || exit $?
+
+ # Remove the call to Libtool.
+ if test "$libtool" = yes; then
+ while test "X$1" != 'X--mode=compile'; do
+ shift
+ done
+ shift
+ fi
+
+ IFS=" "
+ for arg
+ do
+ case "$arg" in
+ -o)
+ shift
+ ;;
+ $object)
+ shift
+ ;;
+ "-Gm"|"/Gm"|"-Gi"|"/Gi"|"-ZI"|"/ZI")
+ set fnord "$@"
+ shift
+ shift
+ ;;
+ *)
+ set fnord "$@" "$arg"
+ shift
+ shift
+ ;;
+ esac
+ done
+ "$@" -E 2>/dev/null |
+ sed -n '/^#line [0-9][0-9]* "\([^"]*\)"/ s::\1:p' | $cygpath_u | sort -u > "$tmpdepfile"
+ rm -f "$depfile"
+ echo "$object : \\" > "$depfile"
+ sed < "$tmpdepfile" -n -e 's% %\\ %g' -e '/^\(.*\)$/ s::'"$tab"'\1 \\:p' >> "$depfile"
+ echo "$tab" >> "$depfile"
+ sed < "$tmpdepfile" -n -e 's% %\\ %g' -e '/^\(.*\)$/ s::\1\::p' >> "$depfile"
+ rm -f "$tmpdepfile"
+ ;;
+
+msvcmsys)
+ # This case exists only to let depend.m4 do its work. It works by
+ # looking at the text of this script. This case will never be run,
+ # since it is checked for above.
+ exit 1
+ ;;
+
+none)
+ exec "$@"
+ ;;
+
+*)
+ echo "Unknown depmode $depmode" 1>&2
+ exit 1
+ ;;
+esac
+
+exit 0
+
+# Local Variables:
+# mode: shell-script
+# sh-indentation: 2
+# eval: (add-hook 'write-file-hooks 'time-stamp)
+# time-stamp-start: "scriptversion="
+# time-stamp-format: "%:y-%02m-%02d.%02H"
+# time-stamp-time-zone: "UTC0"
+# time-stamp-end: "; # UTC"
+# End:
diff --git a/diagrams/Makefile.in b/diagrams/Makefile.in
new file mode 100644
index 00000000..cd19e014
--- /dev/null
+++ b/diagrams/Makefile.in
@@ -0,0 +1,532 @@
+# Makefile.in generated by automake 1.15.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2017 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+
+VPATH = @srcdir@
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = diagrams
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(dist_noinst_SCRIPTS) \
+ $(dist_noinst_DATA) $(am__DIST_COMMON)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+SCRIPTS = $(dist_noinst_SCRIPTS)
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_noinst_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+am__DIST_COMMON = $(srcdir)/Makefile.in
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CUPSCONFIG = @CUPSCONFIG@
+CXX = @CXX@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CXX_BINARY = @CXX_BINARY@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+JSON_CFLAGS = @JSON_CFLAGS@
+JSON_LIBS = @JSON_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
+LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
+LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
+LIBCURL_LIBS = @LIBCURL_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
+LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
+LIBSSL_LIBS = @LIBSSL_LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
+OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
+OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
+OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
+OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
+OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
+OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
+OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
+OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
+OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
+OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
+OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
+OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
+PROTOBUF_LIBS = @PROTOBUF_LIBS@
+PROTOC = @PROTOC@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
+XENLIGHT_LIBS = @XENLIGHT_LIBS@
+YAJL_CFLAGS = @YAJL_CFLAGS@
+YAJL_LIBS = @YAJL_LIBS@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+dist_noinst_DATA = \
+ config.puml \
+ registry.puml \
+ netdata-for-ephemeral-nodes.xml \
+ netdata-proxies-example.xml \
+ netdata-overview.xml \
+ data_structures/netdata_config.svg \
+ data_structures/README.md \
+ data_structures/registry.svg \
+ data_structures/rrd.svg \
+ data_structures/web.svg \
+ data_structures/src/netdata_config.xml \
+ data_structures/src/registry.xml \
+ data_structures/src/rrd.xml \
+ data_structures/src/web.xml \
+ $(NULL)
+
+dist_noinst_SCRIPTS = \
+ build.sh \
+ $(NULL)
+
+all: all-am
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu diagrams/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu diagrams/Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(SCRIPTS) $(DATA)
+installdirs:
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags-am uninstall uninstall-am
+
+.PRECIOUS: Makefile
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/diagrams/data_structures/README.md b/diagrams/data_structures/README.md
index bb56ca16..378271d2 100644
--- a/diagrams/data_structures/README.md
+++ b/diagrams/data_structures/README.md
@@ -1,13 +1,13 @@
-# Data structures
-
-These are the main internal data structures of `netdata`. Created with `draw.io`.
-
-![Config](https://raw.githubusercontent.com/netdata/netdata/master/diagrams/data_structures/netdata_config.svg?sanitize=true)
-
-![Registry](https://raw.githubusercontent.com/netdata/netdata/master/diagrams/data_structures/registry.svg?sanitize=true)
-
-![RRD](https://raw.githubusercontent.com/netdata/netdata/master/diagrams/data_structures/rrd.svg?sanitize=true)
-
-![Web](https://raw.githubusercontent.com/netdata/netdata/master/diagrams/data_structures/web.svg?sanitize=true)
-
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdiagrams%2Fdata_structures%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+# Data structures
+
+These are the main internal data structures of `netdata`. Created with `draw.io`.
+
+![Config](https://raw.githubusercontent.com/netdata/netdata/master/diagrams/data_structures/netdata_config.svg?sanitize=true)
+
+![Registry](https://raw.githubusercontent.com/netdata/netdata/master/diagrams/data_structures/registry.svg?sanitize=true)
+
+![RRD](https://raw.githubusercontent.com/netdata/netdata/master/diagrams/data_structures/rrd.svg?sanitize=true)
+
+![Web](https://raw.githubusercontent.com/netdata/netdata/master/diagrams/data_structures/web.svg?sanitize=true)
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdiagrams%2Fdata_structures%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/Add-more-charts-to-netdata.md b/docs/Add-more-charts-to-netdata.md
index 285713b0..3b1bb962 100644
--- a/docs/Add-more-charts-to-netdata.md
+++ b/docs/Add-more-charts-to-netdata.md
@@ -4,36 +4,36 @@ Netdata collects system metrics by itself. It has many [internal plugins](../col
To collect non-system metrics, Netdata supports a plugin architecture. The following are the currently available external plugins:
-- **[Web Servers](#web-servers)**, such as apache, nginx, nginx_plus, tomcat, litespeed
-- **[Web Logs](#web-log-parsers)**, such as apache, nginx, lighttpd, gunicorn, squid access logs, apache cache.log
-- **[Load Balancers](#load-balancers)**, like haproxy
-- **[Message Brokers](#message-brokers)**, like rabbitmq, beanstalkd
-- **[Database Servers](#database-servers)**, such as mysql, mariadb, postgres, couchdb, mongodb, rethinkdb
-- **[Social Sharing Servers](#social-sharing-servers)**, like retroshare
-- **[Proxy Servers](#proxy-servers)**, like squid
-- **[HTTP accelerators](#http-accelerators)**, like varnish cache
-- **[Search engines](#search-engines)**, like elasticsearch
-- **[Name Servers](#name-servers)** (DNS), like bind, nsd, powerdns, dnsdist
-- **[DHCP Servers](#dhcp-servers)**, like ISC DHCP
-- **[UPS](#ups)**, such as APC UPS, NUT
-- **[RAID](#raid)**, such as MegaRAID
-- **[Mail Servers](#mail-servers)**, like postfix, exim, dovecot
-- **[File Servers](#file-servers)**, like samba, NFS, ftp, sftp, WebDAV
-- **[Print Servers](#print-servers)**, like CUPS
-- **[Hypervisors](#hypervisors)**, like XenServer, XCP-ng
-- **[System](#system)**, for processes and other system metrics
-- **[Sensors](#sensors)**, like temperature, fans speed, voltage, humidity, HDD/SSD S.M.A.R.T attributes
-- **[Network](#network)**, such as SNMP devices, `fping`, access points, dns_query_time, nfacct
-- **[Time Servers](#time-servers)**, like chrony
-- **[Security](#security)**, like FreeRADIUS, OpenVPN, Fail2ban
-- **[Telephony Servers](#telephony-servers)**, like openSIPS
-- **[Go applications](#go-applications)**
-- **[Household appliances](#household-appliances)**, like SMA WebBox (solar power), Fronius Symo solar power, Stiebel Eltron heating
-- **[Java Processes](#java-processes)**, via JMX or Spring Boot Actuator
-- **[Provisioning Systems](#provisioning-systems)**, like Puppet
-- **[Game Servers](#game-servers)**, like SpigotMC
-- **[Distributed Computing Clients](#distributed-computing-clients)**, like BOINC
-- **[Skeleton Plugins](#skeleton-plugins)**, for writing your own data collectors
+- **[Web Servers](#web-servers)**, such as apache, nginx, nginx_plus, tomcat, litespeed
+- **[Web Logs](#web-log-parsers)**, such as apache, nginx, lighttpd, gunicorn, squid access logs, apache cache.log
+- **[Load Balancers](#load-balancers)**, like haproxy
+- **[Message Brokers](#message-brokers)**, like rabbitmq, beanstalkd
+- **[Database Servers](#database-servers)**, such as mysql, mariadb, postgres, couchdb, mongodb, rethinkdb
+- **[Social Sharing Servers](#social-sharing-servers)**, like retroshare
+- **[Proxy Servers](#proxy-servers)**, like squid
+- **[HTTP accelerators](#http-accelerators)**, like varnish cache
+- **[Search engines](#search-engines)**, like elasticsearch
+- **[Name Servers](#name-servers)** (DNS), like bind, nsd, powerdns, dnsdist
+- **[DHCP Servers](#dhcp-servers)**, like ISC DHCP
+- **[UPS](#ups)**, such as APC UPS, NUT
+- **[RAID](#raid)**, such as MegaRAID
+- **[Mail Servers](#mail-servers)**, like postfix, exim, dovecot
+- **[File Servers](#file-servers)**, like samba, NFS, ftp, sftp, WebDAV
+- **[Print Servers](#print-servers)**, like CUPS
+- **[Hypervisors](#hypervisors)**, like XenServer, XCP-ng
+- **[System](#system)**, for processes and other system metrics
+- **[Sensors](#sensors)**, like temperature, fans speed, voltage, humidity, HDD/SSD S.M.A.R.T attributes
+- **[Network](#network)**, such as SNMP devices, `fping`, access points, dns_query_time, nfacct
+- **[Time Servers](#time-servers)**, like chrony
+- **[Security](#security)**, like FreeRADIUS, OpenVPN, Fail2ban
+- **[Telephony Servers](#telephony-servers)**, like openSIPS
+- **[Go applications](#go-applications)**
+- **[Household appliances](#household-appliances)**, like SMA WebBox (solar power), Fronius Symo solar power, Stiebel Eltron heating
+- **[Java Processes](#java-processes)**, via JMX or Spring Boot Actuator
+- **[Provisioning Systems](#provisioning-systems)**, like Puppet
+- **[Game Servers](#game-servers)**, like SpigotMC
+- **[Distributed Computing Clients](#distributed-computing-clients)**, like BOINC
+- **[Skeleton Plugins](#skeleton-plugins)**, for writing your own data collectors
Check also [Third Party Plugins](Third-Party-Plugins.md) for a list of plugins distributed by third parties.
@@ -41,8 +41,8 @@ Check also [Third Party Plugins](Third-Party-Plugins.md) for a list of plugins d
Netdata comes with **internal** and **external** plugins:
-1. The **internal** ones are written in `C` and run as threads within the Netdata daemon.
-2. The **external** ones can be written in any computer language. The Netdata daemon spawns these as processes (shown with `ps fax`) and reads their metrics using pipes (so the `stdout` of external plugins is connected to Netdata for metrics collection and the `stderr` of external plugins is connected to `/var/log/netdata/error.log`).
+1. The **internal** ones are written in `C` and run as threads within the Netdata daemon.
+2. The **external** ones can be written in any computer language. The Netdata daemon spawns these as processes (shown with `ps fax`) and reads their metrics using pipes (so the `stdout` of external plugins is connected to Netdata for metrics collection and the `stderr` of external plugins is connected to `/var/log/netdata/error.log`).
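+
+For illustration, a minimal external plugin can be a small script that speaks the plugin protocol on its `stdout` (the chart and dimension names below are made up for this sketch; it is not one of the shipped plugins):
+
+```sh
+#!/usr/bin/env bash
+# Define one chart with one dimension, once at startup.
+echo "CHART example.random '' 'A Random Number' 'value' random random line 1000 1"
+echo "DIMENSION number '' absolute 1 1"
+# Then send one value per update interval.
+while true; do
+    echo "BEGIN example.random"
+    echo "SET number = $RANDOM"
+    echo "END"
+    sleep 1
+done
+```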
To make it easier to develop plugins, and to minimize the number of threads and processes running, Netdata supports **plugin orchestrators**, each supporting one or more data collection **modules**. We currently ship plugin orchestrators for 4 languages (`C`, `python`, `node.js` and `bash`), with 2 more (`go` and `java`) under development.
@@ -107,27 +107,26 @@ This is a map of the all supported configuration options:
#### map of configuration files
-plugin | language | plugin<br/>configuration | modules<br/>configuration |
----:|:---:|:---:|:---|
-`apps.plugin`<br/>(external plugin for monitoring the process tree on Linux and FreeBSD)|`C`|`netdata.conf` section `[plugin:apps]`|Custom configuration for the processes to be monitored at `apps_groups.conf`
-`freebsd.plugin`<br/>(internal plugin for monitoring FreeBSD system resources)|`C`|`netdata.conf` section `[plugin:freebsd]`|one section for each module `[plugin:freebsd:MODULE]`. Each module may provide additional sections in the form of `[plugin:freebsd:MODULE:SUBSECTION]`.
-`cgroups.plugin`<br/>(internal plugin for monitoring Linux containers, VMs and systemd services)|`C`|`netdata.conf` section `[plugin:cgroups]`|N/A
-`charts.d.plugin`<br/>(external plugin orchestrator for BASH modules)|`BASH`|`charts.d.conf`|a file for each module in `/etc/netdata/charts.d/`
-`diskspace.plugin`<br/>(internal plugin for collecting Linux mount points usage)|`C`|`netdata.conf` section `[plugin:diskspace]`|N/A
-`fping.plugin`<br/>(external plugin for collecting network latencies)|`C`|`fping.conf`|This plugin is a wrapper for the `fping` command.
-`ioping.plugin`<br/>(external plugin for collecting disk latencies)|`C`|`ioping.conf`|This plugin is a wrapper for the `ioping` command.
-`freeipmi.plugin`<br/>(external plugin for collecting IPMI h/w sensors)|`C`|`netdata.conf` section `[plugin:freeipmi]`
-`nfacct.plugin`<br/>(external plugin for monitoring netfilter firewall and connection tracker)|`C`|`netdata.conf` section `[plugin:nfacct]`|N/A
-`xenstat.plugin`<br/>(external plugin for monitoring XCP-ng and XenServer)|`C`|`netdata.conf` section `[plugin:xenstat]`|N/A
-`perf.plugin`<br/>(external plugin for monitoring CPU performance on Linux)|`C`|`netdata.conf` section `[plugin:perf]`|N/A
-`idlejitter.plugin`<br/>(internal plugin for monitoring CPU jitter)|`C`|N/A|N/A
-`macos.plugin`<br/>(internal plugin for monitoring MacOS system resources)|`C`|`netdata.conf` section `[plugin:macos]`|one section for each module `[plugin:macos:MODULE]`. Each module may provide additional sections in the form of `[plugin:macos:MODULE:SUBSECTION]`.
-`node.d.plugin`<br/>(external plugin orchestrator of node.js modules)|`node.js`|`node.d.conf`|a file for each module in `/etc/netdata/node.d/`.
-`proc.plugin`<br/>(internal plugin for monitoring Linux system resources)|`C`|`netdata.conf` section `[plugin:proc]`|one section for each module `[plugin:proc:MODULE]`. Each module may provide additional sections in the form of `[plugin:proc:MODULE:SUBSECTION]`.
-`python.d.plugin`<br/>(external plugin orchestrator for running python modules)|`python`<br/>v2 or v3<br/>both are supported|`python.d.conf`|a file for each module in `/etc/netdata/python.d/`.
-`statsd.plugin`<br/>(internal plugin for collecting statsd metrics)|`C`|`netdata.conf` section `[statsd]`|Synthetic statsd charts can be configured with files in `/etc/netdata/statsd.d/`.
-`tc.plugin`<br/>(internal plugin for collecting Linux traffic QoS)|`C`|`netdata.conf` section `[plugin:tc]`|The plugin runs an external helper called `tc-qos-helper.sh` to interface with the `tc` command. This helper supports a few additional options using `tc-qos-helper.conf`.
-
+| plugin | language | plugin<br/>configuration | modules<br/>configuration |
+|-----:|:------:|:----------------------:|:------------------------|
+| `apps.plugin`<br/>(external plugin for monitoring the process tree on Linux and FreeBSD)|`C`|`netdata.conf` section `[plugin:apps]`|Custom configuration for the processes to be monitored at `apps_groups.conf`|
+| `freebsd.plugin`<br/>(internal plugin for monitoring FreeBSD system resources)|`C`|`netdata.conf` section `[plugin:freebsd]`|one section for each module `[plugin:freebsd:MODULE]`. Each module may provide additional sections in the form of `[plugin:freebsd:MODULE:SUBSECTION]`.|
+| `cgroups.plugin`<br/>(internal plugin for monitoring Linux containers, VMs and systemd services)|`C`|`netdata.conf` section `[plugin:cgroups]`|N/A|
+| `charts.d.plugin`<br/>(external plugin orchestrator for BASH modules)|`BASH`|`charts.d.conf`|a file for each module in `/etc/netdata/charts.d/`|
+| `diskspace.plugin`<br/>(internal plugin for collecting Linux mount points usage)|`C`|`netdata.conf` section `[plugin:diskspace]`|N/A|
+| `fping.plugin`<br/>(external plugin for collecting network latencies)|`C`|`fping.conf`|This plugin is a wrapper for the `fping` command.|
+| `ioping.plugin`<br/>(external plugin for collecting disk latencies)|`C`|`ioping.conf`|This plugin is a wrapper for the `ioping` command.|
+| `freeipmi.plugin`<br/>(external plugin for collecting IPMI h/w sensors)|`C`|`netdata.conf` section `[plugin:freeipmi]`||
+| `nfacct.plugin`<br/>(external plugin for monitoring netfilter firewall and connection tracker)|`C`|`netdata.conf` section `[plugin:nfacct]`|N/A|
+| `xenstat.plugin`<br/>(external plugin for monitoring XCP-ng and XenServer)|`C`|`netdata.conf` section `[plugin:xenstat]`|N/A|
+| `perf.plugin`<br/>(external plugin for monitoring CPU performance on Linux)|`C`|`netdata.conf` section `[plugin:perf]`|N/A|
+| `idlejitter.plugin`<br/>(internal plugin for monitoring CPU jitter)|`C`|N/A|N/A|
+| `macos.plugin`<br/>(internal plugin for monitoring MacOS system resources)|`C`|`netdata.conf` section `[plugin:macos]`|one section for each module `[plugin:macos:MODULE]`. Each module may provide additional sections in the form of `[plugin:macos:MODULE:SUBSECTION]`.|
+| `node.d.plugin`<br/>(external plugin orchestrator of node.js modules)|`node.js`|`node.d.conf`|a file for each module in `/etc/netdata/node.d/`.|
+| `proc.plugin`<br/>(internal plugin for monitoring Linux system resources)|`C`|`netdata.conf` section `[plugin:proc]`|one section for each module `[plugin:proc:MODULE]`. Each module may provide additional sections in the form of `[plugin:proc:MODULE:SUBSECTION]`.|
+| `python.d.plugin`<br/>(external plugin orchestrator for running python modules)|`python`<br/>v2 or v3<br/>both are supported|`python.d.conf`|a file for each module in `/etc/netdata/python.d/`.|
+| `statsd.plugin`<br/>(internal plugin for collecting statsd metrics)|`C`|`netdata.conf` section `[statsd]`|Synthetic statsd charts can be configured with files in `/etc/netdata/statsd.d/`.|
+| `tc.plugin`<br/>(internal plugin for collecting Linux traffic QoS)|`C`|`netdata.conf` section `[plugin:tc]`|The plugin runs an external helper called `tc-qos-helper.sh` to interface with the `tc` command. This helper supports a few additional options using `tc-qos-helper.conf`.|
## writing data collection modules
@@ -141,317 +140,296 @@ These are all the data collection plugins currently available.
### Web Servers
-application|language|notes|
-:---------:|:------:|:----|
-apache|python<br/>v2 or v3|Connects to multiple apache servers (local or remote) to collect real-time performance metrics.<br/>&nbsp;<br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [apache.chart.py](../collectors/python.d.plugin/apache)<br/>configuration file: [python.d/apache.conf](../collectors/python.d.plugin/apache)|
-apache|BASH<br/>Shell Script|Connects to an apache server (local or remote) to collect real-time performance metrics.<br/><br/>DEPRECATED IN FAVOR OF THE PYTHON ONE. It is still supplied only as an example module to shell scripting plugins.<br/>&nbsp;<br/>Netdata plugin: [charts.d.plugin](../collectors/charts.d.plugin#chartsdplugin)<br/>plugin module: [apache.chart.sh](../collectors/charts.d.plugin/apache)<br/>configuration file: [charts.d/apache.conf](../collectors/charts.d.plugin/apache)|
-ipfs|python<br/>v2 or v3|Connects to multiple ipfs servers (local or remote) to collect real-time performance metrics.<br/>&nbsp;<br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [ipfs.chart.py](../collectors/python.d.plugin/ipfs)<br/>configuration file: [python.d/ipfs.conf](../collectors/python.d.plugin/ipfs)|
-litespeed|python<br/>v2 or v3|reads the litespeed `rtreport` files to collect metrics.<br/>&nbsp;<br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [litespeed.chart.py](../collectors/python.d.plugin/litespeed)<br/>configuration file: [python.d/litespeed.conf](../collectors/python.d.plugin/litespeed)
-nginx|python<br/>v2 or v3|Connects to multiple nginx servers (local or remote) to collect real-time performance metrics.<br/>&nbsp;<br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [nginx.chart.py](../collectors/python.d.plugin/nginx)<br/>configuration file: [python.d/nginx.conf](../collectors/python.d.plugin/nginx)|
-nginx_plus|python<br/>v2 or v3|Connects to multiple nginx_plus servers (local or remote) to collect real-time performance metrics.<br/>&nbsp;<br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [nginx_plus.chart.py](../collectors/python.d.plugin/nginx_plus)<br/>configuration file: [python.d/nginx_plus.conf](../collectors/python.d.plugin/nginx_plus)|
-nginx|BASH<br/>Shell Script|Connects to an nginx server (local or remote) to collect real-time performance metrics.<br/><br/>DEPRECATED IN FAVOR OF THE PYTHON ONE. It is still supplied only as an example module to shell scripting plugins.<br/>&nbsp;<br/>Netdata plugin: [charts.d.plugin](../collectors/charts.d.plugin#chartsdplugin)<br/>plugin module: [nginx.chart.sh](../collectors/charts.d.plugin/nginx)<br/>configuration file: [charts.d/nginx.conf](../collectors/charts.d.plugin/nginx)|
-phpfpm|python<br/>v2 or v3|Connects to multiple phpfpm servers (local or remote) to collect real-time performance metrics.<br/>&nbsp;<br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [phpfpm.chart.py](../collectors/python.d.plugin/phpfpm)<br/>configuration file: [python.d/phpfpm.conf](../collectors/python.d.plugin/phpfpm)|
-phpfpm|BASH<br/>Shell Script|Connects to one or more phpfpm servers (local or remote) to collect real-time performance metrics.<br/><br/>DEPRECATED IN FAVOR OF THE PYTHON ONE. It is still supplied only as an example module to shell scripting plugins.<br/>&nbsp;<br/>Netdata plugin: [charts.d.plugin](../collectors/charts.d.plugin#chartsdplugin)<br/>plugin module: [phpfpm.chart.sh](../collectors/charts.d.plugin/phpfpm)<br/>configuration file: [charts.d/phpfpm.conf](../collectors/charts.d.plugin/phpfpm)|
-tomcat|python<br/>v2 or v3|Connects to multiple tomcat servers (local or remote) to collect real-time performance metrics.<br/>&nbsp;<br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [tomcat.chart.py](../collectors/python.d.plugin/tomcat)<br/>configuration file: [python.d/tomcat.conf](../collectors/python.d.plugin/tomcat)|
-tomcat|BASH<br/>Shell Script|Connects to a tomcat server (local or remote) to collect real-time performance metrics.<br/><br/>DEPRECATED IN FAVOR OF THE PYTHON ONE. It is still supplied only as an example module to shell scripting plugins.<br/>&nbsp;<br/>Netdata plugin: [charts.d.plugin](../collectors/charts.d.plugin#chartsdplugin)<br/>plugin module: [tomcat.chart.sh](../collectors/charts.d.plugin/tomcat)<br/>configuration file: [charts.d/tomcat.conf](../collectors/charts.d.plugin/tomcat)|
-
+| application | language | notes |
+|:---------:|:------:|:----|
+| apache|python<br/>v2 or v3|Connects to multiple apache servers (local or remote) to collect real-time performance metrics.<br/> <br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [apache.chart.py](../collectors/python.d.plugin/apache)<br/>configuration file: [python.d/apache.conf](../collectors/python.d.plugin/apache)|
+| apache|BASH<br/>Shell Script|Connects to an apache server (local or remote) to collect real-time performance metrics.<br/><br/>DEPRECATED IN FAVOR OF THE PYTHON ONE. It is still supplied only as an example module to shell scripting plugins.<br/> <br/>Netdata plugin: [charts.d.plugin](../collectors/charts.d.plugin#chartsdplugin)<br/>plugin module: [apache.chart.sh](../collectors/charts.d.plugin/apache)<br/>configuration file: [charts.d/apache.conf](../collectors/charts.d.plugin/apache)|
+| ipfs|python<br/>v2 or v3|Connects to multiple ipfs servers (local or remote) to collect real-time performance metrics.<br/> <br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [ipfs.chart.py](../collectors/python.d.plugin/ipfs)<br/>configuration file: [python.d/ipfs.conf](../collectors/python.d.plugin/ipfs)|
+| litespeed|python<br/>v2 or v3|reads the litespeed `rtreport` files to collect metrics.<br/> <br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [litespeed.chart.py](../collectors/python.d.plugin/litespeed)<br/>configuration file: [python.d/litespeed.conf](../collectors/python.d.plugin/litespeed)|
+| nginx|python<br/>v2 or v3|Connects to multiple nginx servers (local or remote) to collect real-time performance metrics.<br/> <br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [nginx.chart.py](../collectors/python.d.plugin/nginx)<br/>configuration file: [python.d/nginx.conf](../collectors/python.d.plugin/nginx)|
+| nginx_plus|python<br/>v2 or v3|Connects to multiple nginx_plus servers (local or remote) to collect real-time performance metrics.<br/> <br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [nginx_plus.chart.py](../collectors/python.d.plugin/nginx_plus)<br/>configuration file: [python.d/nginx_plus.conf](../collectors/python.d.plugin/nginx_plus)|
+| nginx|BASH<br/>Shell Script|Connects to an nginx server (local or remote) to collect real-time performance metrics.<br/><br/>DEPRECATED IN FAVOR OF THE PYTHON ONE. It is still supplied only as an example module to shell scripting plugins.<br/> <br/>Netdata plugin: [charts.d.plugin](../collectors/charts.d.plugin#chartsdplugin)<br/>plugin module: [nginx.chart.sh](../collectors/charts.d.plugin/nginx)<br/>configuration file: [charts.d/nginx.conf](../collectors/charts.d.plugin/nginx)|
+| phpfpm|python<br/>v2 or v3|Connects to multiple phpfpm servers (local or remote) to collect real-time performance metrics.<br/> <br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [phpfpm.chart.py](../collectors/python.d.plugin/phpfpm)<br/>configuration file: [python.d/phpfpm.conf](../collectors/python.d.plugin/phpfpm)|
+| phpfpm|BASH<br/>Shell Script|Connects to one or more phpfpm servers (local or remote) to collect real-time performance metrics.<br/><br/>DEPRECATED IN FAVOR OF THE PYTHON ONE. It is still supplied only as an example module to shell scripting plugins.<br/> <br/>Netdata plugin: [charts.d.plugin](../collectors/charts.d.plugin#chartsdplugin)<br/>plugin module: [phpfpm.chart.sh](../collectors/charts.d.plugin/phpfpm)<br/>configuration file: [charts.d/phpfpm.conf](../collectors/charts.d.plugin/phpfpm)|
+| tomcat|python<br/>v2 or v3|Connects to multiple tomcat servers (local or remote) to collect real-time performance metrics.<br/> <br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [tomcat.chart.py](../collectors/python.d.plugin/tomcat)<br/>configuration file: [python.d/tomcat.conf](../collectors/python.d.plugin/tomcat)|
+| tomcat|BASH<br/>Shell Script|Connects to a tomcat server (local or remote) to collect real-time performance metrics.<br/><br/>DEPRECATED IN FAVOR OF THE PYTHON ONE. It is still supplied only as an example module to shell scripting plugins.<br/> <br/>Netdata plugin: [charts.d.plugin](../collectors/charts.d.plugin#chartsdplugin)<br/>plugin module: [tomcat.chart.sh](../collectors/charts.d.plugin/tomcat)<br/>configuration file: [charts.d/tomcat.conf](../collectors/charts.d.plugin/tomcat)|
---
### Web Log Parsers
-application|language|notes|
-:---------:|:------:|:----|
-web_log|python<br/>v2 or v3|powerful plugin, capable of incrementally parsing any number of web server log files <br/>&nbsp;<br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [web_log.chart.py](../collectors/python.d.plugin/web_log)<br/>configuration file: [python.d/web_log.conf](../collectors/python.d.plugin/web_log)|
-
+| application|language|notes|
+|:---------:|:------:|:----|
+| web_log|python<br/>v2 or v3|powerful plugin, capable of incrementally parsing any number of web server log files <br/> <br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [web_log.chart.py](../collectors/python.d.plugin/web_log)<br/>configuration file: [python.d/web_log.conf](../collectors/python.d.plugin/web_log)|
---
### Database Servers
-application|language|notes|
-:---------:|:------:|:----|
-couchdb|python<br/>v2 or v3|Connects to multiple couchdb servers (local or remote) to collect real-time performance metrics.<br/>&nbsp;<br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [couchdb.chart.py](../collectors/python.d.plugin/couchdb)<br/>configuration file: [python.d/couchdb.conf](../collectors/python.d.plugin/couchdb)|
-memcached|python<br/>v2 or v3|Connects to multiple memcached servers (local or remote) to collect real-time performance metrics.<br/>&nbsp;<br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [memcached.chart.py](../collectors/python.d.plugin/memcached)<br/>configuration file: [python.d/memcached.conf](../collectors/python.d.plugin/memcached)|
-mongodb|python<br/>v2 or v3|Connects to multiple `mongodb` servers (local or remote) to collect real-time performance metrics.<br/>&nbsp;<br/>Requires package `python-pymongo`.<br/>&nbsp;<br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [mongodb.chart.py](../collectors/python.d.plugin/mongodb)<br/>configuration file: [python.d/mongodb.conf](../collectors/python.d.plugin/mongodb)|
-mysql<br/>mariadb|python<br/>v2 or v3|Connects to multiple mysql or mariadb servers (local or remote) to collect real-time performance metrics.<br/>&nbsp;<br/>Requires package `python-mysqldb` (faster and preferred), or `python-pymysql`. <br/>&nbsp;<br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [mysql.chart.py](../collectors/python.d.plugin/mysql)<br/>configuration file: [python.d/mysql.conf](../collectors/python.d.plugin/mysql)|
-mysql<br/>mariadb|BASH<br/>Shell Script|Connects to multiple mysql or mariadb servers (local or remote) to collect real-time performance metrics.<br/><br/>DEPRECATED IN FAVOR OF THE PYTHON ONE. It is still supplied only as an example module to shell scripting plugins.<br/>&nbsp;<br/>Netdata plugin: [charts.d.plugin](../collectors/charts.d.plugin#chartsdplugin)<br/>plugin module: [mysql.chart.sh](../collectors/charts.d.plugin/mysql)<br/>configuration file: [charts.d/mysql.conf](../collectors/charts.d.plugin/mysql)|
-postgres|python<br/>v2 or v3|Connects to multiple postgres servers (local or remote) to collect real-time performance metrics.<br/>&nbsp;<br/>Requires package `python-psycopg2`.<br/>&nbsp;<br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [postgres.chart.py](../collectors/python.d.plugin/postgres)<br/>configuration file: [python.d/postgres.conf](../collectors/python.d.plugin/postgres)|
-redis|python<br/>v2 or v3|Connects to multiple redis servers (local or remote) to collect real-time performance metrics.<br/>&nbsp;<br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [redis.chart.py](../collectors/python.d.plugin/redis)<br/>configuration file: [python.d/redis.conf](../collectors/python.d.plugin/redis)|
-rethinkdb|python<br/>v2 or v3|Connects to multiple rethinkdb servers (local or remote) to collect real-time metrics.<br/>&nbsp;<br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [rethinkdb.chart.py](../collectors/python.d.plugin/rethinkdbs)<br/>configuration file: [python.d/rethinkdb.conf](../collectors/python.d.plugin/rethinkdbs)|
-
+| application|language|notes|
+|:---------:|:------:|:----|
+| couchdb|python<br/>v2 or v3|Connects to multiple couchdb servers (local or remote) to collect real-time performance metrics.<br/> <br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [couchdb.chart.py](../collectors/python.d.plugin/couchdb)<br/>configuration file: [python.d/couchdb.conf](../collectors/python.d.plugin/couchdb)|
+| memcached|python<br/>v2 or v3|Connects to multiple memcached servers (local or remote) to collect real-time performance metrics.<br/> <br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [memcached.chart.py](../collectors/python.d.plugin/memcached)<br/>configuration file: [python.d/memcached.conf](../collectors/python.d.plugin/memcached)|
+| mongodb|python<br/>v2 or v3|Connects to multiple `mongodb` servers (local or remote) to collect real-time performance metrics.<br/> <br/>Requires package `python-pymongo`.<br/> <br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [mongodb.chart.py](../collectors/python.d.plugin/mongodb)<br/>configuration file: [python.d/mongodb.conf](../collectors/python.d.plugin/mongodb)|
+| mysql<br/>mariadb|python<br/>v2 or v3|Connects to multiple mysql or mariadb servers (local or remote) to collect real-time performance metrics.<br/> <br/>Requires package `python-mysqldb` (faster and preferred), or `python-pymysql`. <br/> <br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [mysql.chart.py](../collectors/python.d.plugin/mysql)<br/>configuration file: [python.d/mysql.conf](../collectors/python.d.plugin/mysql)|
+| mysql<br/>mariadb|BASH<br/>Shell Script|Connects to multiple mysql or mariadb servers (local or remote) to collect real-time performance metrics.<br/><br/>DEPRECATED IN FAVOR OF THE PYTHON ONE. It is still supplied only as an example module to shell scripting plugins.<br/> <br/>Netdata plugin: [charts.d.plugin](../collectors/charts.d.plugin#chartsdplugin)<br/>plugin module: [mysql.chart.sh](../collectors/charts.d.plugin/mysql)<br/>configuration file: [charts.d/mysql.conf](../collectors/charts.d.plugin/mysql)|
+| postgres|python<br/>v2 or v3|Connects to multiple postgres servers (local or remote) to collect real-time performance metrics.<br/> <br/>Requires package `python-psycopg2`.<br/> <br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [postgres.chart.py](../collectors/python.d.plugin/postgres)<br/>configuration file: [python.d/postgres.conf](../collectors/python.d.plugin/postgres)|
+| redis|python<br/>v2 or v3|Connects to multiple redis servers (local or remote) to collect real-time performance metrics.<br/> <br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [redis.chart.py](../collectors/python.d.plugin/redis)<br/>configuration file: [python.d/redis.conf](../collectors/python.d.plugin/redis)|
+| rethinkdb|python<br/>v2 or v3|Connects to multiple rethinkdb servers (local or remote) to collect real-time metrics.<br/> <br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [rethinkdb.chart.py](../collectors/python.d.plugin/rethinkdbs)<br/>configuration file: [python.d/rethinkdb.conf](../collectors/python.d.plugin/rethinkdbs)|
---
### Social Sharing Servers
-application|language|notes|
-:---------:|:------:|:----|
-retroshare|python<br/>v2 or v3|Connects to multiple retroshare servers (local or remote) to collect real-time performance metrics.<br/>&nbsp;<br/>netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [retroshare.chart.py](../collectors/python.d.plugin/retroshare)<br/>configuration file: [python.d/retroshare.conf](../collectors/python.d.plugin/retroshare)|
-
+| application | language | notes |
+|:---------:|:------:|:----|
+| retroshare | python<br/>v2 or v3|Connects to multiple retroshare servers (local or remote) to collect real-time performance metrics.<br/> <br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [retroshare.chart.py](../collectors/python.d.plugin/retroshare)<br/>configuration file: [python.d/retroshare.conf](../collectors/python.d.plugin/retroshare)|
---
### Proxy Servers
-application|language|notes|
-:---------:|:------:|:----|
-squid|python<br/>v2 or v3|Connects to multiple squid servers (local or remote) to collect real-time performance metrics.<br/>&nbsp;<br/>netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [squid.chart.py](../collectors/python.d.plugin/squid)<br/>configuration file: [python.d/squid.conf](../collectors/python.d.plugin/squid)|
-squid|BASH<br/>Shell Script|Connects to a squid server (local or remote) to collect real-time performance metrics.<br/><br/>DEPRECATED IN FAVOR OF THE PYTHON ONE. It is still supplied only as an example module to shell scripting plugins.<br/>&nbsp;<br/>Netdata plugin: [charts.d.plugin](../collectors/charts.d.plugin#chartsdplugin)<br/>plugin module: [squid.chart.sh](../collectors/charts.d.plugin/squid)<br/>configuration file: [charts.d/squid.conf](../collectors/charts.d.plugin/squid)|
-
+|application|language|notes|
+|:---------:|:------:|:----|
+|squid|python<br/>v2 or v3|Connects to multiple squid servers (local or remote) to collect real-time performance metrics.<br/> <br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [squid.chart.py](../collectors/python.d.plugin/squid)<br/>configuration file: [python.d/squid.conf](../collectors/python.d.plugin/squid)<br/>(a sample job configuration is sketched below this table)|
+|squid|BASH<br/>Shell Script|Connects to a squid server (local or remote) to collect real-time performance metrics.<br/><br/>DEPRECATED IN FAVOR OF THE PYTHON ONE. It is still supplied only as an example module for shell scripting plugins.<br/> <br/>Netdata plugin: [charts.d.plugin](../collectors/charts.d.plugin#chartsdplugin)<br/>plugin module: [squid.chart.sh](../collectors/charts.d.plugin/squid)<br/>configuration file: [charts.d/squid.conf](../collectors/charts.d.plugin/squid)|
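+
+For the squid module above, a minimal job in `python.d/squid.conf` might look like this sketch (the host, port and cache-manager request string are placeholders to adapt to your proxy):
+
+```yaml
+# hypothetical job: a local squid instance
+local:
+  host: 'localhost'
+  port: 3128
+  request: 'cache_object://localhost:3128/counters'   # squid cache-manager counters
+```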
---
### HTTP Accelerators
-application|language|notes|
-:---------:|:------:|:----|
-varnish|python<br/>v2 or v3|Uses the varnishstat command to provide varnish cache statistics (client metrics, cache perfomance, thread-related metrics, backend health, memory usage etc.).<br/>&nbsp;<br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [varnish.chart.py](../collectors/python.d.plugin/varnish)<br/>configuration file: [python.d/varnish.conf](../collectors/python.d.plugin/varnish)|
-
+| application|language|notes|
+|:---------:|:------:|:----|
+| varnish|python<br/>v2 or v3|Uses the varnishstat command to provide varnish cache statistics (client metrics, cache performance, thread-related metrics, backend health, memory usage etc.).<br/> <br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [varnish.chart.py](../collectors/python.d.plugin/varnish)<br/>configuration file: [python.d/varnish.conf](../collectors/python.d.plugin/varnish)|
---
### Search Engines
-application|language|notes|
-:---------:|:------:|:----|
-elasticsearch|python<br/>v2 or v3|Monitor elasticsearch performance and health metrics.<br/>&nbsp;<br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [elasticsearch.chart.py](../collectors/python.d.plugin/elasticsearch)<br/>configuration file: [python.d/elasticsearch.conf](../collectors/python.d.plugin/elasticsearch)|
-
+| application|language|notes|
+|:---------:|:------:|:----|
+| elasticsearch|python<br/>v2 or v3|Monitors elasticsearch performance and health metrics.<br/> <br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [elasticsearch.chart.py](../collectors/python.d.plugin/elasticsearch)<br/>configuration file: [python.d/elasticsearch.conf](../collectors/python.d.plugin/elasticsearch)|
---
### Name Servers
-application|language|notes|
-:---------:|:------:|:----|
-named|node.js|Connects to multiple named (ISC-Bind) servers (local or remote) to collect real-time performance metrics. All versions of bind after 9.9.10 are supported.<br/>&nbsp;<br/>Netdata plugin: [node.d.plugin](../collectors/node.d.plugin#nodedplugin)<br/>plugin module: [named.node.js](../collectors/node.d.plugin/named)<br/>configuration file: [node.d/named.conf](../collectors/node.d.plugin/named)|
-bind_rndc|python<br/>v2 or v3|Parses named.stats dump file to collect real-time performance metrics. All versions of bind after 9.6 are supported.<br/>&nbsp;<br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [bind_rndc.chart.py](../collectors/python.d.plugin/bind_rndc)<br/>configuration file: [python.d/bind_rndc.conf](../collectors/python.d.plugin/bind_rndc)|
-nsd|python<br/>v2 or v3|Charts the nsd received queries and zones.<br/>&nbsp;<br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [nsd.chart.py](../collectors/python.d.plugin/nsd)<br/>configuration file: [python.d/nsd.conf](../collectors/python.d.plugin/nsd)
-powerdns|python<br/>v2 or v3|Monitors powerdns performance and health metrics <br/>&nbsp;<br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [powerdns.chart.py](../collectors/python.d.plugin/powerdns)<br/>configuration file: [python.d/powerdns.conf](../collectors/python.d.plugin/powerdns)|
-dnsdist|python<br/>v2 or v3|Monitors dnsdist performance and health metrics <br/>&nbsp;<br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [dnsdist.chart.py](../collectors/python.d.plugin/dnsdist)<br/>configuration file: [python.d/dnsdist.conf](../collectors/python.d.plugin/dnsdist)|
-unbound|python<br/>v2 or v3|Monitors Unbound performance and resource usage metrics <br/>&nbsp;<br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [unbound.chart.py](../collectors/python.d.plugin/unbound)<br/>configuration file: [python.d/unbound.conf](../collectors/python.d.plugin/unbound)|
-
+| application|language|notes|
+|:---------:|:------:|:----|
+| named|node.js|Connects to multiple named (ISC-Bind) servers (local or remote) to collect real-time performance metrics. All versions of bind after 9.9.10 are supported.<br/> <br/>Netdata plugin: [node.d.plugin](../collectors/node.d.plugin#nodedplugin)<br/>plugin module: [named.node.js](../collectors/node.d.plugin/named)<br/>configuration file: [node.d/named.conf](../collectors/node.d.plugin/named)|
+| bind_rndc|python<br/>v2 or v3|Parses the `named.stats` dump file to collect real-time performance metrics. All versions of bind after 9.6 are supported.<br/> <br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [bind_rndc.chart.py](../collectors/python.d.plugin/bind_rndc)<br/>configuration file: [python.d/bind_rndc.conf](../collectors/python.d.plugin/bind_rndc)|
+| nsd|python<br/>v2 or v3|Charts the nsd received queries and zones.<br/> <br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [nsd.chart.py](../collectors/python.d.plugin/nsd)<br/>configuration file: [python.d/nsd.conf](../collectors/python.d.plugin/nsd)|
+| powerdns|python<br/>v2 or v3|Monitors powerdns performance and health metrics.<br/> <br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [powerdns.chart.py](../collectors/python.d.plugin/powerdns)<br/>configuration file: [python.d/powerdns.conf](../collectors/python.d.plugin/powerdns)|
+| dnsdist|python<br/>v2 or v3|Monitors dnsdist performance and health metrics.<br/> <br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [dnsdist.chart.py](../collectors/python.d.plugin/dnsdist)<br/>configuration file: [python.d/dnsdist.conf](../collectors/python.d.plugin/dnsdist)|
+| unbound|python<br/>v2 or v3|Monitors Unbound performance and resource usage metrics.<br/> <br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [unbound.chart.py](../collectors/python.d.plugin/unbound)<br/>configuration file: [python.d/unbound.conf](../collectors/python.d.plugin/unbound)|
---
### DHCP Servers
-application|language|notes|
-:---------:|:------:|:----|
-isc dhcp|python<br/>v2 or v3|Monitor lease database to show all active leases.<br/>&nbsp;<br/>Python v2 requires package `python-ipaddress`.<br/>&nbsp;<br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [isc-dhcpd.chart.py](../collectors/python.d.plugin/isc_dhcpd)<br/>configuration file: [python.d/isc-dhcpd.conf](../collectors/python.d.plugin/isc_dhcpd)|
-
+| application|language|notes|
+|:---------:|:------:|:----|
+| isc dhcp|python<br/>v2 or v3|Monitors the lease database to show all active leases.<br/> <br/>Python v2 requires package `python-ipaddress`.<br/> <br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [isc-dhcpd.chart.py](../collectors/python.d.plugin/isc_dhcpd)<br/>configuration file: [python.d/isc-dhcpd.conf](../collectors/python.d.plugin/isc_dhcpd)<br/>(a sample job configuration is sketched below this table)|
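+
+For the isc dhcp module above, a minimal job in `python.d/isc-dhcpd.conf` might look like the following sketch (the leases path and the pool definition are placeholders for your own setup):
+
+```yaml
+# hypothetical job: a local dhcpd server
+local:
+  leases_path: '/var/lib/dhcp/dhcpd.leases'   # path to the dhcpd leases database
+  pools:
+    office: '192.168.2.0/24'                  # label and address range of a pool to chart
+```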
---
### Load Balancers
-application|language|notes|
-:---------:|:------:|:----|
-haproxy|python<br/>v2 or v3|Monitor frontend, backend and health metrics.<br/>&nbsp;<br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [haproxy.chart.py](../collectors/python.d.plugin/haproxy)<br/>configuration file: [python.d/haproxy.conf](../collectors/python.d.plugin/haproxy)|
-traefik|python<br/>v2 or v3|Connects to multiple traefik instances (local or remote) to collect API metrics (response status code, response time, average response time and server uptime).<br/>&nbsp;<br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [traefik.chart.py](../collectors/python.d.plugin/traefik)<br/>configuration file: [python.d/traefik.conf](../collectors/python.d.plugin/traefik)|
+| application|language|notes|
+|:---------:|:------:|:----|
+| haproxy|python<br/>v2 or v3|Monitors frontend, backend and health metrics.<br/> <br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [haproxy.chart.py](../collectors/python.d.plugin/haproxy)<br/>configuration file: [python.d/haproxy.conf](../collectors/python.d.plugin/haproxy)<br/>(a sample job configuration is sketched below this table)|
+| traefik|python<br/>v2 or v3|Connects to multiple traefik instances (local or remote) to collect API metrics (response status code, response time, average response time and server uptime).<br/> <br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [traefik.chart.py](../collectors/python.d.plugin/traefik)<br/>configuration file: [python.d/traefik.conf](../collectors/python.d.plugin/traefik)|
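+
+For the haproxy module above, jobs in `python.d/haproxy.conf` can collect either over HTTP or over the admin socket; both snippets below are illustrative sketches with placeholder values:
+
+```yaml
+# hypothetical job: read stats from the haproxy HTTP stats endpoint
+via_url:
+  url: 'http://127.0.0.1:7000/haproxy_stats;csv;norefresh'
+
+# hypothetical job: read stats over the haproxy admin socket
+via_socket:
+  socket: '/var/run/haproxy/admin.sock'
+```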
---
### Message Brokers
-application|language|notes|
-:---------:|:------:|:----|
-rabbitmq|python<br/>v2 or v3|Monitor rabbitmq performance and health metrics.<br/>&nbsp;<br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [rabbitmq.chart.py](../collectors/python.d.plugin/rabbitmq)<br/>configuration file: [python.d/rabbitmq.conf](../collectors/python.d.plugin/rabbitmq)|
-beanstalkd|python<br/>v2 or v3|Provides server and tube level statistics.<br/>&nbsp;<br/>Requires beanstalkc python package (`pip install beanstalkc` or install package `python-beanstalkc`, which also installs `python-yaml`).<br/>&nbsp;<br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [beanstalk.chart.py](../collectors/python.d.plugin/beanstalk)<br/>configuration file: [python.d/beanstalk.conf](../collectors/python.d.plugin/beanstalk)|
-
+| application | language|notes|
+|:---------:|:------:|:----|
+| rabbitmq | python<br/>v2 or v3|Monitors rabbitmq performance and health metrics.<br/> <br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [rabbitmq.chart.py](../collectors/python.d.plugin/rabbitmq)<br/>configuration file: [python.d/rabbitmq.conf](../collectors/python.d.plugin/rabbitmq)<br/>(a sample job configuration is sketched below this table)|
+| beanstalkd | python<br/>v2 or v3|Provides server and tube level statistics.<br/> <br/>Requires beanstalkc python package (`pip install beanstalkc` or install package `python-beanstalkc`, which also installs `python-yaml`).<br/> <br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [beanstalk.chart.py](../collectors/python.d.plugin/beanstalk)<br/>configuration file: [python.d/beanstalk.conf](../collectors/python.d.plugin/beanstalk)|
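+
+For the rabbitmq module above, a minimal job in `python.d/rabbitmq.conf` might look like this sketch (it assumes the rabbitmq management plugin is enabled on its default port; the credentials are placeholders):
+
+```yaml
+# hypothetical job: a local rabbitmq server with the management API enabled
+local:
+  host: '127.0.0.1'
+  port: 15672     # management API port, not the AMQP port
+  user: 'guest'
+  pass: 'guest'
+```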
---
### UPS
-application|language|notes|
-:---------:|:------:|:----|
-apcupsd|BASH<br/>Shell Script|Connects to an apcupsd server to collect real-time statistics of an APC UPS.<br/>&nbsp;<br/>Netdata plugin: [charts.d.plugin](../collectors/charts.d.plugin#chartsdplugin)<br/>plugin module: [apcupsd.chart.sh](../collectors/charts.d.plugin/apcupsd)<br/>configuration file: [charts.d/apcupsd.conf](../collectors/charts.d.plugin/apcupsd)|
-nut|BASH<br/>Shell Script|Connects to a nut server (upsd) to collect real-time UPS statistics.<br/>&nbsp;<br/>Netdata plugin: [charts.d.plugin](../collectors/charts.d.plugin#chartsdplugin)<br/>plugin module: [nut.chart.sh](../collectors/charts.d.plugin/nut)<br/>configuration file: [charts.d/nut.conf](../collectors/charts.d.plugin/nut)|
-
+| application|language|notes|
+|:---------:|:------:|:----|
+| apcupsd|BASH<br/>Shell Script|Connects to an apcupsd server to collect real-time statistics of an APC UPS.<br/> <br/>Netdata plugin: [charts.d.plugin](../collectors/charts.d.plugin#chartsdplugin)<br/>plugin module: [apcupsd.chart.sh](../collectors/charts.d.plugin/apcupsd)<br/>configuration file: [charts.d/apcupsd.conf](../collectors/charts.d.plugin/apcupsd)|
+| nut|BASH<br/>Shell Script|Connects to a nut server (upsd) to collect real-time UPS statistics.<br/> <br/>Netdata plugin: [charts.d.plugin](../collectors/charts.d.plugin#chartsdplugin)<br/>plugin module: [nut.chart.sh](../collectors/charts.d.plugin/nut)<br/>configuration file: [charts.d/nut.conf](../collectors/charts.d.plugin/nut)|
---
### RAID
-application|language|notes|
-:---------:|:------:|:----|
-megacli|python<br/>v2 or v3|Collects adapter, physical drives and battery stats..<br/>&nbsp;<br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [megacli.chart.py](../collectors/python.d.plugin/megacli)<br/>configuration file: [python.d/megacli.conf](../collectors/python.d.plugin/megacli)|
+|application|language|notes|
+|:---------:|:------:|:----|
+|megacli|python<br/>v2 or v3|Collects adapter, physical drives and battery stats.<br/> <br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [megacli.chart.py](../collectors/python.d.plugin/megacli)<br/>configuration file: [python.d/megacli.conf](../collectors/python.d.plugin/megacli)|
---
### Mail Servers
-application|language|notes|
-:---------:|:------:|:----|
-dovecot|python<br/>v2 or v3|Connects to multiple dovecot servers (local or remote) to collect real-time performance metrics.<br/>&nbsp;<br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [dovecot.chart.py](../collectors/python.d.plugin/dovecot)<br/>configuration file: [python.d/dovecot.conf](../collectors/python.d.plugin/dovecot)|
-exim|python<br/>v2 or v3|Charts the exim queue size.<br/>&nbsp;<br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [exim.chart.py](../collectors/python.d.plugin/exim)<br/>configuration file: [python.d/exim.conf](../collectors/python.d.plugin/exim)|
-exim|BASH<br/>Shell Script|Charts the exim queue size.<br/><br/>DEPRECATED IN FAVOR OF THE PYTHON ONE. It is still supplied only as an example module to shell scripting plugins.<br/>&nbsp;<br/>Netdata plugin: [charts.d.plugin](../collectors/charts.d.plugin#chartsdplugin)<br/>plugin module: [exim.chart.sh](../collectors/charts.d.plugin/exim)<br/>configuration file: [charts.d/exim.conf](../collectors/charts.d.plugin/exim)|
-postfix|python<br/>v2 or v3|Charts the postfix queue size (supports multiple queues).<br/>&nbsp;<br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [postfix.chart.py](../collectors/python.d.plugin/postfix)<br/>configuration file: [python.d/postfix.conf](../collectors/python.d.plugin/postfix)|
-postfix|BASH<br/>Shell Script|Charts the postfix queue size.<br/><br/>DEPRECATED IN FAVOR OF THE PYTHON ONE. It is still supplied only as an example module to shell scripting plugins.<br/>&nbsp;<br/>Netdata plugin: [charts.d.plugin](../collectors/charts.d.plugin#chartsdplugin)<br/>plugin module: [postfix.chart.sh](../collectors/charts.d.plugin/postfix)<br/>configuration file: [charts.d/postfix.conf](../collectors/charts.d.plugin/postfix)|
-
+| application|language|notes|
+|:---------:|:------:|:----|
+| dovecot|python<br/>v2 or v3|Connects to multiple dovecot servers (local or remote) to collect real-time performance metrics.<br/> <br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [dovecot.chart.py](../collectors/python.d.plugin/dovecot)<br/>configuration file: [python.d/dovecot.conf](../collectors/python.d.plugin/dovecot)|
+| exim|python<br/>v2 or v3|Charts the exim queue size.<br/> <br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [exim.chart.py](../collectors/python.d.plugin/exim)<br/>configuration file: [python.d/exim.conf](../collectors/python.d.plugin/exim)|
+| exim|BASH<br/>Shell Script|Charts the exim queue size.<br/><br/>DEPRECATED IN FAVOR OF THE PYTHON ONE. It is still supplied only as an example module for shell scripting plugins.<br/> <br/>Netdata plugin: [charts.d.plugin](../collectors/charts.d.plugin#chartsdplugin)<br/>plugin module: [exim.chart.sh](../collectors/charts.d.plugin/exim)<br/>configuration file: [charts.d/exim.conf](../collectors/charts.d.plugin/exim)|
+| postfix|python<br/>v2 or v3|Charts the postfix queue size (supports multiple queues).<br/> <br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [postfix.chart.py](../collectors/python.d.plugin/postfix)<br/>configuration file: [python.d/postfix.conf](../collectors/python.d.plugin/postfix)|
+| postfix|BASH<br/>Shell Script|Charts the postfix queue size.<br/><br/>DEPRECATED IN FAVOR OF THE PYTHON ONE. It is still supplied only as an example module for shell scripting plugins.<br/> <br/>Netdata plugin: [charts.d.plugin](../collectors/charts.d.plugin#chartsdplugin)<br/>plugin module: [postfix.chart.sh](../collectors/charts.d.plugin/postfix)<br/>configuration file: [charts.d/postfix.conf](../collectors/charts.d.plugin/postfix)|
---
### File Servers
-application|language|notes|
-:---------:|:------:|:----|
-NFS Client|`C`|This is handled entirely by the Netdata daemon.<br/>&nbsp;<br/>Configuration: `netdata.conf`, section `[plugin:proc:/proc/net/rpc/nfs]`.
-NFS Server|`C`|This is handled entirely by the netdata daemon.<br/>&nbsp;<br/>Configuration: `netdata.conf`, section `[plugin:proc:/proc/net/rpc/nfsd]`.
-samba|python<br/>v2 or v3|Performance metrics of Samba SMB2 file sharing.<br/>&nbsp;<br/>documentation page: [python.d.plugin module samba](../collectors/python.d.plugin/samba)<br/>netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [samba.chart.py](../collectors/python.d.plugin/samba)<br/>configuration file: [python.d/samba.conf](../collectors/python.d.plugin/samba)|
+| application|language|notes|
+|:---------:|:------:|:----|
+| NFS Client|`C`|This is handled entirely by the Netdata daemon.<br/> <br/>Configuration: `netdata.conf`, section `[plugin:proc:/proc/net/rpc/nfs]`.|
+| NFS Server|`C`|This is handled entirely by the Netdata daemon.<br/> <br/>Configuration: `netdata.conf`, section `[plugin:proc:/proc/net/rpc/nfsd]`.|
+| samba|python<br/>v2 or v3|Performance metrics of Samba SMB2 file sharing.<br/> <br/>documentation page: [python.d.plugin module samba](../collectors/python.d.plugin/samba)<br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [samba.chart.py](../collectors/python.d.plugin/samba)<br/>configuration file: [python.d/samba.conf](../collectors/python.d.plugin/samba)|
---
### Print Servers
-application|language|notes|
-:---------:|:------:|:----|
-CUPS|C|Charts metrics of printers, jobs and other cups destinations.<br/>&nbsp;<br/>netdata plugin: [cups.plugin](../collectors/cups.plugin)
+| application|language|notes|
+|:---------:|:------:|:----|
+| CUPS|C|Charts metrics of printers, jobs and other cups destinations.<br/> <br/>Netdata plugin: [cups.plugin](../collectors/cups.plugin)|
---
### Hypervisors
-application|language|notes|
-:---------:|:------:|:----|
-xenstat|C|Collects host and domain statistics for XenServer or XCP-ng hypervisors.<br/>&nbsp;<br/>netdata plugin: [xenstat.plugin](../collectors/xenstat.plugin)
+| application|language|notes|
+|:---------:|:------:|:----|
+| xenstat|C|Collects host and domain statistics for XenServer or XCP-ng hypervisors.<br/> <br/>Netdata plugin: [xenstat.plugin](../collectors/xenstat.plugin)|
---
### System
-application|language|notes|
-:---------:|:------:|:----|
-apps|C|`apps.plugin` collects resource usage statistics for all processes running in the system. It groups the entire process tree and reports dozens of metrics for CPU utilization, memory footprint, disk I/O, swap memory, network connections, open files and sockets, etc. It reports metrics for application groups, users and user groups.<br/>&nbsp;<br/>[Documentation of `apps.plugin`](../collectors/apps.plugin/).<br/>&nbsp;<br/>Netdata plugin: [`apps_plugin.c`](../collectors/apps.plugin)<br/>configuration file: [`apps_groups.conf`](../collectors/apps.plugin)|
-ioping|C|Charts disk latency statistics for a directory/file/device, using the `ioping` command. A recent (probably unreleased) version of ioping is required. The plugin supplied can install it in `/usr/local`.<br/>&nbsp;<br/>Netdata plugin: [ioping.plugin](../collectors/ioping.plugin) (this is a shell wrapper to start ioping - once ioping is started, Netdata and ioping communicate directly - it can also install the right version of ioping)<br/>configuration file: [ioping.conf](../collectors/ioping.plugin)|
-perf|C|`perf.plugin` collects CPU performance metrics using hardware performance monitoring units (PMU).<br/>&nbsp;<br/>[Documentation of `perf.plugin`](../collectors/perf.plugin/).<br/>&nbsp;<br/>Netdata plugin: [`perf_plugin.c`](../collectors/perf.plugin)|
-cpu_apps|BASH<br/>Shell Script|Collects the CPU utilization of select apps.<br/><br/>DEPRECATED IN FAVOR OF `apps.plugin`. It is still supplied only as an example module to shell scripting plugins.<br/>&nbsp;<br/>Netdata plugin: [charts.d.plugin](../collectors/charts.d.plugin#chartsdplugin)<br/>plugin module: [cpu_apps.chart.sh](../collectors/charts.d.plugin/cpu_apps)<br/>configuration file: [charts.d/cpu_apps.conf](../collectors/charts.d.plugin/cpu_apps)|
-load_average|BASH<br/>Shell Script|Collects the current system load average.<br/><br/>DEPRECATED IN FAVOR OF THE NETDATA INTERNAL ONE. It is still supplied only as an example module to shell scripting plugins.<br/>&nbsp;<br/>Netdata plugin: [charts.d.plugin](../collectors/charts.d.plugin#chartsdplugin)<br/>plugin module: [load_average.chart.sh](../collectors/charts.d.plugin/load_average)<br/>configuration file: [charts.d/load_average.conf](../collectors/charts.d.plugin/load_average)|
-mem_apps|BASH<br/>Shell Script|Collects the memory footprint of select applications.<br/><br/>DEPRECATED IN FAVOR OF `apps.plugin`. It is still supplied only as an example module to shell scripting plugins.<br/>&nbsp;<br/>Netdata plugin: [charts.d.plugin](../collectors/charts.d.plugin#chartsdplugin)<br/>plugin module: [mem_apps.chart.sh](../collectors/charts.d.plugin/mem_apps)<br/>configuration file: [charts.d/mem_apps.conf](../collectors/charts.d.plugin/mem_apps)|
-
+| application|language|notes|
+|:---------:|:------:|:----|
+| apps|C|`apps.plugin` collects resource usage statistics for all processes running in the system. It groups the entire process tree and reports dozens of metrics for CPU utilization, memory footprint, disk I/O, swap memory, network connections, open files and sockets, etc. It reports metrics for application groups, users and user groups.<br/> <br/>[Documentation of `apps.plugin`](../collectors/apps.plugin/).<br/> <br/>Netdata plugin: [`apps_plugin.c`](../collectors/apps.plugin)<br/>configuration file: [`apps_groups.conf`](../collectors/apps.plugin)|
+| ioping|C|Charts disk latency statistics for a directory/file/device, using the `ioping` command. A recent (probably unreleased) version of ioping is required. The supplied plugin can install it in `/usr/local`.<br/> <br/>Netdata plugin: [ioping.plugin](../collectors/ioping.plugin) (a shell wrapper that starts ioping; once ioping is started, Netdata and ioping communicate directly; it can also install the right version of ioping)<br/>configuration file: [ioping.conf](../collectors/ioping.plugin)|
+| perf|C|`perf.plugin` collects CPU performance metrics using hardware performance monitoring units (PMU).<br/> <br/>[Documentation of `perf.plugin`](../collectors/perf.plugin/).<br/> <br/>Netdata plugin: [`perf_plugin.c`](../collectors/perf.plugin)|
+| cpu_apps|BASH<br/>Shell Script|Collects the CPU utilization of select apps.<br/><br/>DEPRECATED IN FAVOR OF `apps.plugin`. It is still supplied only as an example module for shell scripting plugins.<br/> <br/>Netdata plugin: [charts.d.plugin](../collectors/charts.d.plugin#chartsdplugin)<br/>plugin module: [cpu_apps.chart.sh](../collectors/charts.d.plugin/cpu_apps)<br/>configuration file: [charts.d/cpu_apps.conf](../collectors/charts.d.plugin/cpu_apps)|
+| load_average|BASH<br/>Shell Script|Collects the current system load average.<br/><br/>DEPRECATED IN FAVOR OF THE NETDATA INTERNAL ONE. It is still supplied only as an example module for shell scripting plugins.<br/> <br/>Netdata plugin: [charts.d.plugin](../collectors/charts.d.plugin#chartsdplugin)<br/>plugin module: [load_average.chart.sh](../collectors/charts.d.plugin/load_average)<br/>configuration file: [charts.d/load_average.conf](../collectors/charts.d.plugin/load_average)|
+| mem_apps|BASH<br/>Shell Script|Collects the memory footprint of select applications.<br/><br/>DEPRECATED IN FAVOR OF `apps.plugin`. It is still supplied only as an example module for shell scripting plugins.<br/> <br/>Netdata plugin: [charts.d.plugin](../collectors/charts.d.plugin#chartsdplugin)<br/>plugin module: [mem_apps.chart.sh](../collectors/charts.d.plugin/mem_apps)<br/>configuration file: [charts.d/mem_apps.conf](../collectors/charts.d.plugin/mem_apps)|
---
### Sensors
-application|language|notes|
-:---------:|:------:|:----|
-cpufreq|BASH<br/>Shell Script|Collects current CPU frequency from `/sys/devices`.<br/><br/>DEPRECATED IN FAVOR OF THE PYTHON ONE. It is still supplied only as an example module to shell scripting plugins.<br/>&nbsp;<br/>Netdata plugin: [charts.d.plugin](../collectors/charts.d.plugin#chartsdplugin)<br/>plugin module: [cpufreq.chart.sh](../collectors/charts.d.plugin/cpufreq)<br/>configuration file: [charts.d/cpufreq.conf](../collectors/charts.d.plugin/cpufreq)|
-IPMI|C|Collects temperatures, voltages, currents, power, fans and `SEL` events from IPMI using `libipmimonitoring`.<br/>Check [Monitoring IPMI](../collectors/freeipmi.plugin/) for more information<br/>&nbsp;<br/>Netdata plugin: [freeipmi.plugin](../collectors/freeipmi.plugin)<br/>configuration file: none required - to enable it, compile/install Netdata with `--enable-plugin-freeipmi`|
-hddtemp|python<br/>v2 or v3|Connects to multiple hddtemp servers (local or remote) to collect real-time performance metrics.<br/>&nbsp;<br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [hddtemp.chart.py](../collectors/python.d.plugin/hddtemp)<br/>configuration file: [python.d/hddtemp.conf](../collectors/python.d.plugin/hddtemp)|
-hddtemp|BASH<br/>Shell Script|Connects to a hddtemp server (local or remote) to collect real-time performance metrics.<br/><br/>DEPRECATED IN FAVOR OF THE PYTHON ONE. It is still supplied only as an example module to shell scripting plugins.<br/>&nbsp;<br/>Netdata plugin: [charts.d.plugin](../collectors/charts.d.plugin#chartsdplugin)<br/>plugin module: [hddtemp.chart.sh](../collectors/charts.d.plugin/hddtemp)<br/>configuration file: [charts.d/hddtemp.conf](../collectors/charts.d.plugin/hddtemp)|
-sensors|BASH<br/>Shell Script|Collects sensors values from files in `/sys`.<br/><br/>DEPRECATED IN FAVOR OF THE PYTHON ONE. It is still supplied only as an example module to shell scripting plugins.<br/>&nbsp;<br/>Netdata plugin: [charts.d.plugin](../collectors/charts.d.plugin#chartsdplugin)<br/>plugin module: [sensors.chart.sh](../collectors/charts.d.plugin/sensors)<br/>configuration file: [charts.d/sensors.conf](../collectors/charts.d.plugin/sensors)|
-sensors|python<br/>v2 or v3|Uses `lm-sensors` to collect sensor data.<br/>&nbsp;<br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [sensors.chart.py](../collectors/python.d.plugin/sensors)<br/>configuration file: [python.d/sensors.conf](../collectors/python.d.plugin/sensors)|
-smartd_log|python<br/>v2 or v3|Collects the S.M.A.R.T attributes from `smartd` log files.<br/>&nbsp;<br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [smartd_log.chart.py](../collectors/python.d.plugin/smartd_log)<br/>configuration file: [python.d/smartd_log.conf](../collectors/python.d.plugin/smartd_log)|
-w1sensor|python<br/>v2 or v3|Collects data from connected 1-Wire sensors.<br/>&nbsp;<br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [w1sensor.chart.py](../collectors/python.d.plugin/w1sensor)<br/>configuration file: [python.d/w1sensor.conf](../collectors/python.d.plugin/w1sensor)|
-
+| application|language|notes|
+|:---------:|:------:|:----|
+| cpufreq|BASH<br/>Shell Script|Collects the current CPU frequency from `/sys/devices`.<br/><br/>DEPRECATED IN FAVOR OF THE PYTHON ONE. It is still supplied only as an example module for shell scripting plugins.<br/> <br/>Netdata plugin: [charts.d.plugin](../collectors/charts.d.plugin#chartsdplugin)<br/>plugin module: [cpufreq.chart.sh](../collectors/charts.d.plugin/cpufreq)<br/>configuration file: [charts.d/cpufreq.conf](../collectors/charts.d.plugin/cpufreq)|
+| IPMI|C|Collects temperatures, voltages, currents, power, fans and `SEL` events from IPMI using `libipmimonitoring`.<br/>Check [Monitoring IPMI](../collectors/freeipmi.plugin/) for more information.<br/> <br/>Netdata plugin: [freeipmi.plugin](../collectors/freeipmi.plugin)<br/>configuration file: none required; to enable it, compile/install Netdata with `--enable-plugin-freeipmi`|
+| hddtemp|python<br/>v2 or v3|Connects to multiple hddtemp servers (local or remote) to collect real-time performance metrics.<br/> <br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [hddtemp.chart.py](../collectors/python.d.plugin/hddtemp)<br/>configuration file: [python.d/hddtemp.conf](../collectors/python.d.plugin/hddtemp)|
+| hddtemp|BASH<br/>Shell Script|Connects to an hddtemp server (local or remote) to collect real-time performance metrics.<br/><br/>DEPRECATED IN FAVOR OF THE PYTHON ONE. It is still supplied only as an example module for shell scripting plugins.<br/> <br/>Netdata plugin: [charts.d.plugin](../collectors/charts.d.plugin#chartsdplugin)<br/>plugin module: [hddtemp.chart.sh](../collectors/charts.d.plugin/hddtemp)<br/>configuration file: [charts.d/hddtemp.conf](../collectors/charts.d.plugin/hddtemp)|
+| sensors|BASH<br/>Shell Script|Collects sensor values from files in `/sys`.<br/><br/>DEPRECATED IN FAVOR OF THE PYTHON ONE. It is still supplied only as an example module for shell scripting plugins.<br/> <br/>Netdata plugin: [charts.d.plugin](../collectors/charts.d.plugin#chartsdplugin)<br/>plugin module: [sensors.chart.sh](../collectors/charts.d.plugin/sensors)<br/>configuration file: [charts.d/sensors.conf](../collectors/charts.d.plugin/sensors)|
+| sensors|python<br/>v2 or v3|Uses `lm-sensors` to collect sensor data.<br/> <br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [sensors.chart.py](../collectors/python.d.plugin/sensors)<br/>configuration file: [python.d/sensors.conf](../collectors/python.d.plugin/sensors)|
+| smartd_log|python<br/>v2 or v3|Collects the S.M.A.R.T. attributes from `smartd` log files.<br/> <br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [smartd_log.chart.py](../collectors/python.d.plugin/smartd_log)<br/>configuration file: [python.d/smartd_log.conf](../collectors/python.d.plugin/smartd_log)<br/>(a sample job configuration is sketched below this table)|
+| w1sensor|python<br/>v2 or v3|Collects data from connected 1-Wire sensors.<br/> <br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [w1sensor.chart.py](../collectors/python.d.plugin/w1sensor)<br/>configuration file: [python.d/w1sensor.conf](../collectors/python.d.plugin/w1sensor)|
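+
+For the smartd_log module above, a minimal job in `python.d/smartd_log.conf` might look like this sketch (the directory is a placeholder and must match where your `smartd` writes its attribute logs):
+
+```yaml
+# hypothetical job: read smartd attribute logs from their default directory
+local:
+  log_path: '/var/log/smartd'
+```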
---
### Network
-application|language|notes|
-:---------:|:------:|:----|
-ap|BASH<br/>Shell Script|Uses the `iw` command to provide statistics of wireless clients connected to a wireless access point running on this host (works well with `hostapd`).<br/>&nbsp;<br/>Netdata plugin: [charts.d.plugin](../collectors/charts.d.plugin#chartsdplugin)<br/>plugin module: [ap.chart.sh](../collectors/charts.d.plugin/ap)<br/>configuration file: [charts.d/ap.conf](../collectors/charts.d.plugin/ap)|
-fping|C|Charts network latency statistics for any number of nodes, using the `fping` command. A recent (probably unreleased) version of fping is required. The plugin supplied can install it in `/usr/local`.<br/>&nbsp;<br/>Netdata plugin: [fping.plugin](../collectors/fping.plugin) (this is a shell wrapper to start fping - once fping is started, Netdata and fping communicate directly - it can also install the right version of fping)<br/>configuration file: [fping.conf](../collectors/fping.plugin)|
-snmp|node.js|Connects to multiple snmp servers to collect real-time performance metrics.<br/>&nbsp;<br/>Netdata plugin: [node.d.plugin](../collectors/node.d.plugin#nodedplugin)<br/>plugin module: [snmp.node.js](../collectors/node.d.plugin/snmp)<br/>configuration file: [node.d/snmp.conf](../collectors/node.d.plugin/snmp)|
-nfacct|C|collects netfilter firewall, connection tracker and accounting metrics using `libmnl` and `libnetfilter_acct`|
-dns_query_time|python<br/>v2 or v3|Provides DNS query time statistics.<br/>&nbsp;<br/>Requires package `dnspython` (`pip install dnspython` or install package `python-dnspython`).<br/>&nbsp;<br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [dns_query_time.chart.py](../collectors/python.d.plugin/dns_query_time)<br/>configuration file: [python.d/dns_query_time.conf](../collectors/python.d.plugin/dns_query_time)|
-http|python<br />v2 or v3|Monitors a generic web page for status code and returned content in HTML
-port|ptyhon<br />v2 or v3|Checks if a generic TCP port for its availability and response time
-
+| application|language|notes|
+|:---------:|:------:|:----|
+| ap|BASH<br/>Shell Script|Uses the `iw` command to provide statistics of wireless clients connected to a wireless access point running on this host (works well with `hostapd`).<br/> <br/>Netdata plugin: [charts.d.plugin](../collectors/charts.d.plugin#chartsdplugin)<br/>plugin module: [ap.chart.sh](../collectors/charts.d.plugin/ap)<br/>configuration file: [charts.d/ap.conf](../collectors/charts.d.plugin/ap)|
+| fping|C|Charts network latency statistics for any number of nodes, using the `fping` command. A recent (probably unreleased) version of fping is required. The supplied plugin can install it in `/usr/local`.<br/> <br/>Netdata plugin: [fping.plugin](../collectors/fping.plugin) (a shell wrapper that starts fping; once fping is started, Netdata and fping communicate directly; it can also install the right version of fping)<br/>configuration file: [fping.conf](../collectors/fping.plugin)|
+| snmp|node.js|Connects to multiple snmp servers to collect real-time performance metrics.<br/> <br/>Netdata plugin: [node.d.plugin](../collectors/node.d.plugin#nodedplugin)<br/>plugin module: [snmp.node.js](../collectors/node.d.plugin/snmp)<br/>configuration file: [node.d/snmp.conf](../collectors/node.d.plugin/snmp)|
+| nfacct|C|Collects netfilter firewall, connection tracker and accounting metrics using `libmnl` and `libnetfilter_acct`.|
+| dns_query_time|python<br/>v2 or v3|Provides DNS query time statistics.<br/> <br/>Requires package `dnspython` (`pip install dnspython` or install package `python-dnspython`).<br/> <br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [dns_query_time.chart.py](../collectors/python.d.plugin/dns_query_time)<br/>configuration file: [python.d/dns_query_time.conf](../collectors/python.d.plugin/dns_query_time)|
+| http|python<br />v2 or v3|Monitors a generic web page for its HTTP status code and returned HTML content.<br/>(sample job configurations for the http and port checks are sketched below this table)|
+| port|python<br />v2 or v3|Checks a generic TCP port for its availability and response time.|
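+
+The http and port rows above correspond to the `httpcheck` and `portcheck` python.d modules; minimal jobs might look like the sketches below (the URLs, hosts, ports and the regex are placeholders):
+
+```yaml
+# hypothetical job in python.d/httpcheck.conf: expect HTTP 200 and a string in the body
+my_site:
+  url: 'http://127.0.0.1:8080/health'
+  status_accepted: [200]
+  regex: 'ok'
+
+# hypothetical job in python.d/portcheck.conf: watch a single TCP port
+my_port:
+  host: '127.0.0.1'
+  port: 22
+```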
---
### Time Servers
-application|language|notes|
-:---------:|:------:|:----|
-chrony|python<br/>v2 or v3|Uses the chronyc command to provide chrony statistics (Frequency, Last offset, RMS offset, Residual freq, Root delay, Root dispersion, Skew, System time).<br/>&nbsp;<br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [chrony.chart.py](../collectors/python.d.plugin/chrony)<br/>configuration file: [python.d/chrony.conf](../collectors/python.d.plugin/chrony)|
-ntpd|python<br/>v2 or v3|Connects to multiple ntpd servers (local or remote) to provide statistics of system variables and optional also peer variables (if enabled in the configuration).<br/>&nbsp;<br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [ntpd.chart.py](../collectors/python.d.plugin/ntpd)<br/>configuration file: [python.d/ntpd.conf](../collectors/python.d.plugin/ntpd)|
-
+| application|language|notes|
+|:---------:|:------:|:----|
+| chrony|python<br/>v2 or v3|Uses the chronyc command to provide chrony statistics (Frequency, Last offset, RMS offset, Residual freq, Root delay, Root dispersion, Skew, System time).<br/> <br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [chrony.chart.py](../collectors/python.d.plugin/chrony)<br/>configuration file: [python.d/chrony.conf](../collectors/python.d.plugin/chrony)|
+| ntpd|python<br/>v2 or v3|Connects to multiple ntpd servers (local or remote) to provide statistics of system variables and optionally also peer variables (if enabled in the configuration).<br/> <br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [ntpd.chart.py](../collectors/python.d.plugin/ntpd)<br/>configuration file: [python.d/ntpd.conf](../collectors/python.d.plugin/ntpd)<br/>(a sample job configuration is sketched below this table)|
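+
+For the ntpd module above, a minimal job in `python.d/ntpd.conf` might look like this sketch (the host and port are placeholders; `show_peers` is assumed to be the per-job switch for the optional peer variables mentioned above):
+
+```yaml
+# hypothetical job: a local ntpd instance with peer charts enabled
+local:
+  host: '127.0.0.1'
+  port: '123'
+  show_peers: yes   # assumed option name for enabling peer-variable charts
+```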
---
### Security
-application|language|notes|
-:---------:|:------:|:----|
-freeradius|python<br/>v2 or v3|Uses the radclient command to provide freeradius statistics (authentication, accounting, proxy-authentication, proxy-accounting).<br/>&nbsp;<br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [freeradius.chart.py](../collectors/python.d.plugin/freeradius)<br/>configuration file: [python.d/freeradius.conf](../collectors/python.d.plugin/freeradius)|
-openvpn|python<br/>v2 or v3|All data from openvpn-status.log in your dashboard! <br/>&nbsp;<br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [ovpn_status_log.chart.py](../collectors/python.d.plugin/ovpn_status_log)<br/>configuration file: [python.d/ovpn_status_log.conf](../collectors/python.d.plugin/ovpn_status_log)|
-fail2ban|python<br/>v2 or v3|Monitor fail2ban log file to show all bans for all active jails <br/>&nbsp;<br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [fail2ban.chart.py](../collectors/python.d.plugin/fail2ban)<br/>configuration file: [python.d/fail2ban.conf](../collectors/python.d.plugin/fail2ban)|
-
+| application|language|notes|
+|:---------:|:------:|:----|
+| freeradius|python<br/>v2 or v3|Uses the radclient command to provide freeradius statistics (authentication, accounting, proxy-authentication, proxy-accounting).<br/> <br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [freeradius.chart.py](../collectors/python.d.plugin/freeradius)<br/>configuration file: [python.d/freeradius.conf](../collectors/python.d.plugin/freeradius)|
+| openvpn|python<br/>v2 or v3|All data from `openvpn-status.log` in your dashboard!<br/> <br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [ovpn_status_log.chart.py](../collectors/python.d.plugin/ovpn_status_log)<br/>configuration file: [python.d/ovpn_status_log.conf](../collectors/python.d.plugin/ovpn_status_log)|
+| fail2ban|python<br/>v2 or v3|Monitors the fail2ban log file to show all bans for all active jails.<br/> <br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [fail2ban.chart.py](../collectors/python.d.plugin/fail2ban)<br/>configuration file: [python.d/fail2ban.conf](../collectors/python.d.plugin/fail2ban)<br/>(a sample job configuration is sketched below this table)|
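+
+For the fail2ban module above, a minimal job in `python.d/fail2ban.conf` might look like this sketch (both paths are placeholders; point them at your fail2ban log and jail configuration):
+
+```yaml
+# hypothetical job: a local fail2ban installation
+local:
+  log_path: '/var/log/fail2ban.log'
+  conf_path: '/etc/fail2ban/jail.local'
+```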
---
### Telephony Servers
-application|language|notes|
-:---------:|:------:|:----|
-opensips|BASH<br/>Shell Script|Connects to an opensips server (local only) to collect real-time performance metrics.<br/>&nbsp;<br/>Netdata plugin: [charts.d.plugin](../collectors/charts.d.plugin#chartsdplugin)<br/>plugin module: [opensips.chart.sh](../collectors/charts.d.plugin/opensips)<br/>configuration file: [charts.d/opensips.conf](../collectors/charts.d.plugin/opensips)|
-
+| application|language|notes|
+|:---------:|:------:|:----|
+| opensips|BASH<br/>Shell Script|Connects to an opensips server (local only) to collect real-time performance metrics.<br/> <br/>Netdata plugin: [charts.d.plugin](../collectors/charts.d.plugin#chartsdplugin)<br/>plugin module: [opensips.chart.sh](../collectors/charts.d.plugin/opensips)<br/>configuration file: [charts.d/opensips.conf](../collectors/charts.d.plugin/opensips)|
---
### Go applications
-application|language|notes|
-:---------:|:------:|:----|
-go_expvar|python<br/>v2 or v3|Parses metrics exposed by applications written in the Go programming language using the [expvar package](https://golang.org/pkg/expvar/).<br/>&nbsp;<br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [go_expvar.chart.py](../collectors/python.d.plugin/go_expvar)<br/>configuration file: [python.d/go_expvar.conf](../collectors/python.d.plugin/go_expvar)<br/>documentation: [Monitoring Go Applications](../collectors/python.d.plugin/go_expvar/)|
-
+| application|language|notes|
+|:---------:|:------:|:----|
+| go_expvar|python<br/>v2 or v3|Parses metrics exposed by applications written in the Go programming language using the [expvar package](https://golang.org/pkg/expvar/).<br/> <br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [go_expvar.chart.py](../collectors/python.d.plugin/go_expvar)<br/>configuration file: [python.d/go_expvar.conf](../collectors/python.d.plugin/go_expvar)<br/>documentation: [Monitoring Go Applications](../collectors/python.d.plugin/go_expvar/)<br/>(a sample job configuration is sketched below this table)|
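+
+For the go_expvar module above, a minimal job in `python.d/go_expvar.conf` might look like this sketch (the URL is a placeholder for the expvar endpoint your Go application exposes):
+
+```yaml
+# hypothetical job: scrape a Go application's /debug/vars endpoint
+my_go_app:
+  url: 'http://127.0.0.1:8080/debug/vars'
+  collect_memstats: true   # chart the Go runtime memory statistics expvar exposes by default
+```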
---
### Household Appliances
-application|language|notes|
-:---------:|:------:|:----|
-sma_webbox|node.js|Connects to multiple remote SMA webboxes to collect real-time performance metrics of the photovoltaic (solar) power generation.<br/>&nbsp;<br/>Netdata plugin: [node.d.plugin](../collectors/node.d.plugin#nodedplugin)<br/>plugin module: [sma_webbox.node.js](../collectors/node.d.plugin/sma_webbox)<br/>configuration file: [node.d/sma_webbox.conf](../collectors/node.d.plugin/sma_webbox)|
-fronius|node.js|Connects to multiple remote Fronius Symo servers to collect real-time performance metrics of the photovoltaic (solar) power generation.<br/>&nbsp;<br/>Netdata plugin: [node.d.plugin](../collectors/node.d.plugin#nodedplugin)<br/>plugin module: [fronius.node.js](../collectors/node.d.plugin/fronius)<br/>configuration file: [node.d/fronius.conf](../collectors/node.d.plugin/fronius)|
-stiebeleltron|node.js|Collects the temperatures and other metrics from your Stiebel Eltron heating system using their Internet Service Gateway (ISG web).<br/>&nbsp;<br/>Netdata plugin: [node.d.plugin](../collectors/node.d.plugin#nodedplugin)<br/>plugin module: [stiebeleltron.node.js](../collectors/node.d.plugin/stiebeleltron)<br/>configuration file: [node.d/stiebeleltron.conf](../collectors/node.d.plugin/stiebeleltron)|
-
+| application|language|notes|
+|:---------:|:------:|:----|
+| sma_webbox|node.js|Connects to multiple remote SMA webboxes to collect real-time performance metrics of the photovoltaic (solar) power generation.<br/> <br/>Netdata plugin: [node.d.plugin](../collectors/node.d.plugin#nodedplugin)<br/>plugin module: [sma_webbox.node.js](../collectors/node.d.plugin/sma_webbox)<br/>configuration file: [node.d/sma_webbox.conf](../collectors/node.d.plugin/sma_webbox)|
+| fronius|node.js|Connects to multiple remote Fronius Symo servers to collect real-time performance metrics of the photovoltaic (solar) power generation.<br/> <br/>Netdata plugin: [node.d.plugin](../collectors/node.d.plugin#nodedplugin)<br/>plugin module: [fronius.node.js](../collectors/node.d.plugin/fronius)<br/>configuration file: [node.d/fronius.conf](../collectors/node.d.plugin/fronius)|
+| stiebeleltron|node.js|Collects the temperatures and other metrics from your Stiebel Eltron heating system using their Internet Service Gateway (ISG web).<br/> <br/>Netdata plugin: [node.d.plugin](../collectors/node.d.plugin#nodedplugin)<br/>plugin module: [stiebeleltron.node.js](../collectors/node.d.plugin/stiebeleltron)<br/>configuration file: [node.d/stiebeleltron.conf](../collectors/node.d.plugin/stiebeleltron)|
---
### Java Processes
-application|language|notes|
-:---------:|:------:|:----|
-Spring Boot Application|java|Monitors running Java [Spring Boot](https://spring.io/) applications that expose their metrics with the use of the **Spring Boot Actuator** included in Spring Boot library.<br/>&nbsp;<br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [springboot](../collectors/python.d.plugin/springboot)<br/>configuration file: [python.d/springboot.conf](../collectors/python.d.plugin/springboot)
-
+| application|language|notes|
+|:---------:|:------:|:----|
+| Spring Boot Application|java|Monitors running Java [Spring Boot](https://spring.io/) applications that expose their metrics using the **Spring Boot Actuator** included in the Spring Boot library.<br/> <br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [springboot](../collectors/python.d.plugin/springboot)<br/>configuration file: [python.d/springboot.conf](../collectors/python.d.plugin/springboot)|
---
### Provisioning Systems
-application|language|notes|
-:---------:|:------:|:----|
-puppet|python<br/>v2 or v3|Connects to multiple Puppet Server and Puppet DB instances (local or remote) to collect real-time status metrics.<br/>&nbsp;<br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [puppet.chart.py](../collectors/python.d.plugin/puppet)<br/>configuration file: [python.d/puppet.conf](../collectors/python.d.plugin/puppet)|
+| application|language|notes|
+|:---------:|:------:|:----|
+| puppet|python<br/>v2 or v3|Connects to multiple Puppet Server and Puppet DB instances (local or remote) to collect real-time status metrics.<br/> <br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [puppet.chart.py](../collectors/python.d.plugin/puppet)<br/>configuration file: [python.d/puppet.conf](../collectors/python.d.plugin/puppet)|
---
### Game Servers
-application|language|notes|
-:---------:|:------:|:----|
-SpigotMC|Python<br/>v2 or v3|Monitors Spigot Minecraft server ticks per second and number of online players using the Minecraft remote console.<br/>&nbsp;<br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [spigotmc.chart.py](../collectors/python.d.plugin/spigotmc)<br/>configuration file: [python.d/spigotmc.conf](../collectors/python.d.plugin/spigotmc)|
+| application|language|notes|
+|:---------:|:------:|:----|
+| SpigotMC|Python<br/>v2 or v3|Monitors Spigot Minecraft server ticks per second and the number of online players using the Minecraft remote console.<br/> <br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [spigotmc.chart.py](../collectors/python.d.plugin/spigotmc)<br/>configuration file: [python.d/spigotmc.conf](../collectors/python.d.plugin/spigotmc)|
---
### Distributed Computing Clients
-application|language|notes|
-:---------:|:------:|:----|
-BOINC|Python<br/>v2 or v3|Monitors task states for local and remote BOINC client software using the remote GUI RPC interface. Also provides alarms for a handful of error conditions. Requires manual configuration<br/>&nbsp;<br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [boinc.chart.py](../collectors/python.d.plugin/boinc)<br/>configuration file: [python.d/boinc.conf](../collectors/python.d.plugin/boinc)|
+| application|language|notes|
+|:---------:|:------:|:----|
+| BOINC|Python<br/>v2 or v3|Monitors task states for local and remote BOINC client software using the remote GUI RPC interface. Also provides alarms for a handful of error conditions. Requires manual configuration.<br/> <br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [boinc.chart.py](../collectors/python.d.plugin/boinc)<br/>configuration file: [python.d/boinc.conf](../collectors/python.d.plugin/boinc)|
---
### Skeleton Plugins
-application|language|notes|
-:---------:|:------:|:----|
-example|BASH<br/>Shell Script|Skeleton plugin in BASH.<br/><br/>DEPRECATED IN FAVOR OF THE PYTHON ONE. It is still supplied only as an example module to shell scripting plugins.<br/>&nbsp;<br/>Netdata plugin: [charts.d.plugin](../collectors/charts.d.plugin#chartsdplugin)<br/>plugin module: [example.chart.sh](../collectors/charts.d.plugin/example)<br/>configuration file: [charts.d/example.conf](../collectors/charts.d.plugin/example)|
-example|python<br/>v2 or v3|Skeleton plugin in Python.<br/>&nbsp;<br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [example.chart.py](../collectors/python.d.plugin/example)<br/>configuration file: [python.d/example.conf](../collectors/python.d.plugin/example)|
+| application|language|notes|
+|:---------:|:------:|:----|
+| example|BASH<br/>Shell Script|Skeleton plugin in BASH.<br/><br/>DEPRECATED IN FAVOR OF THE PYTHON ONE. It is still supplied only as an example module for shell scripting plugins.<br/> <br/>Netdata plugin: [charts.d.plugin](../collectors/charts.d.plugin#chartsdplugin)<br/>plugin module: [example.chart.sh](../collectors/charts.d.plugin/example)<br/>configuration file: [charts.d/example.conf](../collectors/charts.d.plugin/example)|
+| example|python<br/>v2 or v3|Skeleton plugin in Python.<br/> <br/>Netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [example.chart.py](../collectors/python.d.plugin/example)<br/>configuration file: [python.d/example.conf](../collectors/python.d.plugin/example)|
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2FAdd-more-charts-to-netdata&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2FAdd-more-charts-to-netdata&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/Charts.md b/docs/Charts.md
index 42ac4453..70d360a8 100644
--- a/docs/Charts.md
+++ b/docs/Charts.md
@@ -24,4 +24,4 @@ A context is a grouping of identical charts, for each instance of the hardware o
For example, let's take the `net.packets` context. You will see on the dashboard as many charts with context net.packets as you have network interfaces (families). These charts will be named `net_packets.[family]`. For the example of the two interfaces `eth0` and `eth1`, you will see charts named `net_packets.eth0` and `net_packets.eth1`. Both of these charts show the exact same dimensions, but for different instances of a network interface.
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2FCharts&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2FCharts&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/Demo-Sites.md b/docs/Demo-Sites.md
index 5a2ae534..6bb501de 100644
--- a/docs/Demo-Sites.md
+++ b/docs/Demo-Sites.md
@@ -2,19 +2,19 @@
Live demo installations of Netdata are available at **[https://www.netdata.cloud](https://www.netdata.cloud/#live-demo)**:
-Location | Netdata demo URL | 60&nbsp;mins&nbsp;reqs | VM Donated by
-:-------:|:-----------------:|:----------:|:-------------
-London (UK)|**[london.my-netdata.io](https://london.my-netdata.io)**<br/>(this is the global Netdata **registry** and has **named** and **mysql** charts)|[![Requests Per Second](https://london.my-netdata.io/api/v1/badge.svg?chart=netdata.requests&dimensions=requests&after=-3600&options=unaligned&group=sum&label=reqs&units=empty&value_color=blue&precision=0&v42)](https://london.my-netdata.io)|[DigitalOcean.com](https://m.do.co/c/83dc9f941745)
-Atlanta (USA)|**[cdn77.my-netdata.io](https://cdn77.my-netdata.io)**<br/>(with **named** and **mysql** charts)|[![Requests Per Second](https://cdn77.my-netdata.io/api/v1/badge.svg?chart=netdata.requests&dimensions=requests&after=-3600&options=unaligned&group=sum&label=reqs&units=empty&value_color=blue&precision=0&v42)](https://cdn77.my-netdata.io)|[CDN77.com](https://www.cdn77.com/)
-Israel|**[octopuscs.my-netdata.io](https://octopuscs.my-netdata.io)**|[![Requests Per Second](https://octopuscs.my-netdata.io/api/v1/badge.svg?chart=netdata.requests&dimensions=requests&after=-3600&options=unaligned&group=sum&label=reqs&units=empty&value_color=blue&precision=0&v42)](https://octopuscs.my-netdata.io)|[OctopusCS.com](https://www.octopuscs.com)
-Madrid (Spain)|**[stackscale.my-netdata.io](https://stackscale.my-netdata.io)**|[![Requests Per Second](https://stackscale.my-netdata.io/api/v1/badge.svg?chart=netdata.requests&dimensions=requests&after=-3600&options=unaligned&group=sum&label=reqs&units=empty&value_color=blue&precision=0&v42)](https://stackscale.my-netdata.io)|[StackScale Spain](https://www.stackscale.es/)
-Bangalore (India)|**[bangalore.my-netdata.io](https://bangalore.my-netdata.io)**|[![Requests Per Second](https://bangalore.my-netdata.io/api/v1/badge.svg?chart=netdata.requests&dimensions=requests&after=-3600&options=unaligned&group=sum&label=reqs&units=empty&value_color=blue&precision=0&v42)](https://bangalore.my-netdata.io)|[DigitalOcean.com](https://m.do.co/c/83dc9f941745)
-Frankfurt (Germany)|**[frankfurt.my-netdata.io](https://frankfurt.my-netdata.io)**|[![Requests Per Second](https://frankfurt.my-netdata.io/api/v1/badge.svg?chart=netdata.requests&dimensions=requests&after=-3600&options=unaligned&group=sum&label=reqs&units=empty&value_color=blue&precision=0&v42)](https://frankfurt.my-netdata.io)|[DigitalOcean.com](https://m.do.co/c/83dc9f941745)
-New York (USA)|**[newyork.my-netdata.io](https://newyork.my-netdata.io)**|[![Requests Per Second](https://newyork.my-netdata.io/api/v1/badge.svg?chart=netdata.requests&dimensions=requests&after=-3600&options=unaligned&group=sum&label=reqs&units=empty&value_color=blue&precision=0&v42)](https://newyork.my-netdata.io)|[DigitalOcean.com](https://m.do.co/c/83dc9f941745)
-San Francisco (USA)|**[sanfrancisco.my-netdata.io](https://sanfrancisco.my-netdata.io)**|[![Requests Per Second](https://sanfrancisco.my-netdata.io/api/v1/badge.svg?chart=netdata.requests&dimensions=requests&after=-3600&options=unaligned&group=sum&label=reqs&units=empty&value_color=blue&precision=0&v42)](https://sanfrancisco.my-netdata.io)|[DigitalOcean.com](https://m.do.co/c/83dc9f941745)
-Singapore|**[singapore.my-netdata.io](https://singapore.my-netdata.io)**|[![Requests Per Second](https://singapore.my-netdata.io/api/v1/badge.svg?chart=netdata.requests&dimensions=requests&after=-3600&options=unaligned&group=sum&label=reqs&units=empty&value_color=blue&precision=0&v42)](https://singapore.my-netdata.io)|[DigitalOcean.com](https://m.do.co/c/83dc9f941745)
-Toronto (Canada)|**[toronto.my-netdata.io](https://toronto.my-netdata.io)**|[![Requests Per Second](https://toronto.my-netdata.io/api/v1/badge.svg?chart=netdata.requests&dimensions=requests&after=-3600&options=unaligned&group=sum&label=reqs&units=empty&value_color=blue&precision=0&v42)](https://toronto.my-netdata.io)|[DigitalOcean.com](https://m.do.co/c/83dc9f941745)
+| Location|Netdata demo URL|Requests (last 60 mins)|VM donated by|
+|:------:|:--------------:|:----------:|:------------|
+| London (UK)|**[london.my-netdata.io](https://london.my-netdata.io)**<br/>(this is the global Netdata **registry** and has **named** and **mysql** charts)|[![Requests Per Second](https://london.my-netdata.io/api/v1/badge.svg?chart=netdata.requests&dimensions=requests&after=-3600&options=unaligned&group=sum&label=reqs&units=empty&value_color=blue&precision=0&v42)](https://london.my-netdata.io)|[DigitalOcean.com](https://m.do.co/c/83dc9f941745)|
+| Atlanta (USA)|**[cdn77.my-netdata.io](https://cdn77.my-netdata.io)**<br/>(with **named** and **mysql** charts)|[![Requests Per Second](https://cdn77.my-netdata.io/api/v1/badge.svg?chart=netdata.requests&dimensions=requests&after=-3600&options=unaligned&group=sum&label=reqs&units=empty&value_color=blue&precision=0&v42)](https://cdn77.my-netdata.io)|[CDN77.com](https://www.cdn77.com/)|
+| Israel|**[octopuscs.my-netdata.io](https://octopuscs.my-netdata.io)**|[![Requests Per Second](https://octopuscs.my-netdata.io/api/v1/badge.svg?chart=netdata.requests&dimensions=requests&after=-3600&options=unaligned&group=sum&label=reqs&units=empty&value_color=blue&precision=0&v42)](https://octopuscs.my-netdata.io)|[OctopusCS.com](https://www.octopuscs.com)|
+| Madrid (Spain)|**[stackscale.my-netdata.io](https://stackscale.my-netdata.io)**|[![Requests Per Second](https://stackscale.my-netdata.io/api/v1/badge.svg?chart=netdata.requests&dimensions=requests&after=-3600&options=unaligned&group=sum&label=reqs&units=empty&value_color=blue&precision=0&v42)](https://stackscale.my-netdata.io)|[StackScale Spain](https://www.stackscale.es/)|
+| Bangalore (India)|**[bangalore.my-netdata.io](https://bangalore.my-netdata.io)**|[![Requests Per Second](https://bangalore.my-netdata.io/api/v1/badge.svg?chart=netdata.requests&dimensions=requests&after=-3600&options=unaligned&group=sum&label=reqs&units=empty&value_color=blue&precision=0&v42)](https://bangalore.my-netdata.io)|[DigitalOcean.com](https://m.do.co/c/83dc9f941745)|
+| Frankfurt (Germany)|**[frankfurt.my-netdata.io](https://frankfurt.my-netdata.io)**|[![Requests Per Second](https://frankfurt.my-netdata.io/api/v1/badge.svg?chart=netdata.requests&dimensions=requests&after=-3600&options=unaligned&group=sum&label=reqs&units=empty&value_color=blue&precision=0&v42)](https://frankfurt.my-netdata.io)|[DigitalOcean.com](https://m.do.co/c/83dc9f941745)|
+| New York (USA)|**[newyork.my-netdata.io](https://newyork.my-netdata.io)**|[![Requests Per Second](https://newyork.my-netdata.io/api/v1/badge.svg?chart=netdata.requests&dimensions=requests&after=-3600&options=unaligned&group=sum&label=reqs&units=empty&value_color=blue&precision=0&v42)](https://newyork.my-netdata.io)|[DigitalOcean.com](https://m.do.co/c/83dc9f941745)|
+| San Francisco (USA)|**[sanfrancisco.my-netdata.io](https://sanfrancisco.my-netdata.io)**|[![Requests Per Second](https://sanfrancisco.my-netdata.io/api/v1/badge.svg?chart=netdata.requests&dimensions=requests&after=-3600&options=unaligned&group=sum&label=reqs&units=empty&value_color=blue&precision=0&v42)](https://sanfrancisco.my-netdata.io)|[DigitalOcean.com](https://m.do.co/c/83dc9f941745)|
+| Singapore|**[singapore.my-netdata.io](https://singapore.my-netdata.io)**|[![Requests Per Second](https://singapore.my-netdata.io/api/v1/badge.svg?chart=netdata.requests&dimensions=requests&after=-3600&options=unaligned&group=sum&label=reqs&units=empty&value_color=blue&precision=0&v42)](https://singapore.my-netdata.io)|[DigitalOcean.com](https://m.do.co/c/83dc9f941745)|
+| Toronto (Canada)|**[toronto.my-netdata.io](https://toronto.my-netdata.io)**|[![Requests Per Second](https://toronto.my-netdata.io/api/v1/badge.svg?chart=netdata.requests&dimensions=requests&after=-3600&options=unaligned&group=sum&label=reqs&units=empty&value_color=blue&precision=0&v42)](https://toronto.my-netdata.io)|[DigitalOcean.com](https://m.do.co/c/83dc9f941745)|
-*Netdata dashboards are mobile and touch friendly.*
+_Netdata dashboards are mobile and touch friendly._
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2FDemo-Sites&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2FDemo-Sites&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/Donations-netdata-has-received.md b/docs/Donations-netdata-has-received.md
index 062cb02b..8b46980a 100644
--- a/docs/Donations-netdata-has-received.md
+++ b/docs/Donations-netdata-has-received.md
@@ -2,17 +2,17 @@
This is a list of the donations we have received for Netdata (sorted alphabetically by name):
-what donated|related links|who donated|description of the donation
-----:|:-----:|:---:|:-----------
-Packages Distribution|-|**[PackageCloud.io](https://packagecloud.io/)**|**PackageCloud.io** donated to a free open-source subscription to their awesome Package Distribution services.
-Cross Browser Testing|-|**[BrowserStack.com](https://www.browserstack.com/)**|**BrowserStack.com** donated a free subscription to their awesome Browser Testing services (all three of them: Live, Screenshots, Responsive).
-Cloud VM|[cdn77.my-netdata.io](http://cdn77.my-netdata.io)|**[CDN77.com](https://www.cdn77.com/)**|**CDN77.com** donated a VM with 2 CPU cores, 4GB RAM and 20GB HD, on their excellent CDN network.
-Localization Management|[Netdata localization project](https://crowdin.com/project/netdata) (check issue [#279](https://github.com/netdata/netdata/issues/279))|**[Crowdin.com](https://crowdin.com/)**|**Crowdin.com** donated an open source license to their Localization Management Platform.
-Cloud VMs|[london.my-netdata.io](https://london.my-netdata.io) (Several VMs)|**[DigitalOcean.com](https://www.digitalocean.com/)**|**DigitalOcean.com** donated 1000 USD to be used in their excellent Cloud Computing services. Many thanks to [Justin Paine](https://github.com/xxdesmus) for making this happen.
-Development IDE|-|**[JetBrains.com](https://www.jetbrains.com/)**|**JetBrains.com** donated an open source license for 4 developers for 1 year, to their excellent IDEs.
-Cloud VM|[octopuscs.my-netdata.io](https://octopuscs.my-netdata.io)|**[OctopusCS.com](https://octopuscs.com/)**|**OctopusCS.com** donated a VM with 4 CPU cores, 16GB RAM and 50GB HD in their excellent Cloud Computing services.
-Cloud VM|[ventureer.my-netdata.io](https://ventureer.my-netdata.io)|**[Ventureer.com](https://ventureer.com/)**|**Ventureer.com** donated a VM with 4 CPU cores, 8GB RAM and 50GB HD in their excellent Cloud Computing services.
-Cloud VM|[stackscale.my-netdata.io](https://stackscale.my-netdata.io)|**[stackscale.com](https://www.stackscale.com/)**|**StackScale.com** donated a VM with 4 CPU cores, 16GB RAM and 100GB HD in their excellent Cloud Computing services.
+| what donated|related links|who donated|description of the donation|
+|-----------:|:-----------:|:---------:|:--------------------------|
+| Packages Distribution|-|**[PackageCloud.io](https://packagecloud.io/)**|**PackageCloud.io** donated a free open-source subscription to their awesome Package Distribution services.|
+| Cross Browser Testing|-|**[BrowserStack.com](https://www.browserstack.com/)**|**BrowserStack.com** donated a free subscription to their awesome Browser Testing services (all three of them: Live, Screenshots, Responsive).|
+| Cloud VM|[cdn77.my-netdata.io](http://cdn77.my-netdata.io)|**[CDN77.com](https://www.cdn77.com/)**|**CDN77.com** donated a VM with 2 CPU cores, 4GB RAM and 20GB HD, on their excellent CDN network.|
+| Localization Management|[Netdata localization project](https://crowdin.com/project/netdata) (check issue [#279](https://github.com/netdata/netdata/issues/279))|**[Crowdin.com](https://crowdin.com/)**|**Crowdin.com** donated an open source license to their Localization Management Platform.|
+| Cloud VMs|[london.my-netdata.io](https://london.my-netdata.io) (Several VMs)|**[DigitalOcean.com](https://www.digitalocean.com/)**|**DigitalOcean.com** donated 1000 USD to be used in their excellent Cloud Computing services. Many thanks to [Justin Paine](https://github.com/xxdesmus) for making this happen.|
+| Development IDE|-|**[JetBrains.com](https://www.jetbrains.com/)**|**JetBrains.com** donated a 1-year open source license for 4 developers to their excellent IDEs.|
+| Cloud VM|[octopuscs.my-netdata.io](https://octopuscs.my-netdata.io)|**[OctopusCS.com](https://octopuscs.com/)**|**OctopusCS.com** donated a VM with 4 CPU cores, 16GB RAM and 50GB HD in their excellent Cloud Computing services.|
+| Cloud VM|[ventureer.my-netdata.io](https://ventureer.my-netdata.io)|**[Ventureer.com](https://ventureer.com/)**|**Ventureer.com** donated a VM with 4 CPU cores, 8GB RAM and 50GB HD in their excellent Cloud Computing services.|
+| Cloud VM|[stackscale.my-netdata.io](https://stackscale.my-netdata.io)|**[stackscale.com](https://www.stackscale.com/)**|**StackScale.com** donated a VM with 4 CPU cores, 16GB RAM and 100GB HD in their excellent Cloud Computing services.|
Thank you!
@@ -22,4 +22,4 @@ Thank you!
Please contact me at costa@tsaousis.gr.
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2FDonations-netdata-has-received&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2FDonations-netdata-has-received&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/GettingStarted.md b/docs/GettingStarted.md
index 3ddf4c38..7fd7a538 100644
--- a/docs/GettingStarted.md
+++ b/docs/GettingStarted.md
@@ -32,16 +32,16 @@ If still Netdata does not receive the requests, something is blocking them. A fi
</details>&nbsp;<br/>
-When you install multiple Netdata servers, all your servers will appear at the node menu at the top left of the dashboard. For this to work, you have to manually access just once, the dashboard of each of your netdata servers.
+When you install multiple Netdata servers, all of them will appear in the node menu at the top left of the dashboard. For this to work, you have to access the dashboard of each of your Netdata servers manually, just once.
-The node menu is more than just browser bookmarks. When switching Netdata servers from that menu, any settings of the current view are propagated to the other netdata server:
+The node menu is more than just browser bookmarks. When switching Netdata servers from that menu, any settings of the current view are propagated to the other Netdata server:
-- the current charts panning (drag the charts left or right),
-- the current charts zooming (`SHIFT` + mouse wheel over a chart),
-- the highlighted time-frame (`ALT` + select an area on a chart),
-- the scrolling position of the dashboard,
-- the theme you use,
-- etc.
+- the current charts panning (drag the charts left or right),
+- the current charts zooming (`SHIFT` + mouse wheel over a chart),
+- the highlighted time-frame (`ALT` + select an area on a chart),
+- the scrolling position of the dashboard,
+- the theme you use,
+- etc.
are all sent over to the other Netdata server, to allow you to troubleshoot cross-server performance issues easily.
@@ -51,9 +51,9 @@ Netdata installer integrates Netdata to your init / systemd environment.
To start/stop Netdata, depending on your environment, you should use:
-- `systemctl start netdata` and `systemctl stop netdata`
-- `service netdata start` and `service netdata stop`
-- `/etc/init.d/netdata start` and `/etc/init.d/netdata stop`
+- `systemctl start netdata` and `systemctl stop netdata`
+- `service netdata start` and `service netdata stop`
+- `/etc/init.d/netdata start` and `/etc/init.d/netdata stop`
Once Netdata is installed, the installer configures it to start at boot and stop at shutdown.
@@ -89,10 +89,10 @@ Netdata supports auto-detection of data collection sources. It auto-detects almo
This auto-detection process happens **only once**, when Netdata starts. To have Netdata re-discover data sources, you need to restart it. There are a few exceptions to this:
-- containers and VMs are auto-detected forever (when Netdata is running at the host).
-- many data sources are collected but are silenced by default, until there is useful information to collect (for example network interface dropped packet, will appear after a packet has been dropped).
-- services that are not optimal to collect on all systems, are disabled by default.
-- services we received feedback from users that caused issues when monitored, are also disabled by default (for example, `chrony` is disabled by default, because CentOS ships a version of it that uses 100% CPU when queried for statistics).
+- containers and VMs are auto-detected forever (when Netdata is running on the host).
+- many data sources are collected but are silenced by default, until there is useful information to collect (for example, network interface dropped packets will appear only after a packet has been dropped).
+- services that are not optimal to collect on all systems are disabled by default.
+- services that users reported caused issues when monitored are also disabled by default (for example, `chrony` is disabled by default, because CentOS ships a version of it that uses 100% CPU when queried for statistics).
Once a data collection source is detected, Netdata will never quit trying to collect data from it, until Netdata is restarted. So, if you stop your web server, Netdata will pick it up automatically when it is started again.
@@ -100,15 +100,15 @@ Since Netdata is installed on all your systems (even inside containers), auto-de
A few well known data collection sources that commonly need to be configured are:
-- [systemd services utilization](../collectors/cgroups.plugin/#monitoring-systemd-services) are not exposed by default on most systems, so `systemd` has to be configured to expose those metrics.
+- [systemd services utilization](../collectors/cgroups.plugin/#monitoring-systemd-services) is not exposed by default on most systems, so `systemd` has to be configured to expose those metrics.
## Configuration quick start
In Netdata we have:
-- **internal** data collection plugins (running inside the Netdata daemon)
-- **external** data collection plugins (independent processes, sending data to Netdata over pipes)
-- modular plugin **orchestrators** (external plugins that have multiple data collection modules)
+- **internal** data collection plugins (running inside the Netdata daemon)
+- **external** data collection plugins (independent processes, sending data to Netdata over pipes)
+- modular plugin **orchestrators** (external plugins that have multiple data collection modules)
You can enable and disable plugins (internal and external) via `netdata.conf` at the section `[plugins]`.
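+
+For example, a minimal sketch of that section (the plugin names and values here are illustrative; your own `netdata.conf` lists the full set):
+
+```conf
+[plugins]
+    # internal plugin, kept enabled
+    proc = yes
+    # external plugin orchestrator, disabled
+    charts.d = no
+```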
@@ -174,9 +174,9 @@ and set `SEND_EMAIL="NO"`.
## What is next?
-- Check [Data Collection](../collectors) for configuring data collection plugins.
-- Check [Health Monitoring](../health) for configuring your own alarms, or setting up alarm notifications.
-- Check [Streaming](../streaming) for centralizing Netdata metrics.
-- Check [Backends](../backends) for long term archiving of Netdata metrics to time-series databases.
+- Check [Data Collection](../collectors) for configuring data collection plugins.
+- Check [Health Monitoring](../health) for configuring your own alarms, or setting up alarm notifications.
+- Check [Streaming](../streaming) for centralizing Netdata metrics.
+- Check [Backends](../backends) for long term archiving of Netdata metrics to time-series databases.
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2FGettingStarted&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2FGettingStarted&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/Performance.md b/docs/Performance.md
index fbc6d576..8205c70e 100644
--- a/docs/Performance.md
+++ b/docs/Performance.md
@@ -3,19 +3,21 @@
Netdata performance is affected by:
**Data collection**
-- the number of charts for which data are collected
-- the number of plugins running
-- the technology of the plugins (i.e. BASH plugins are slower than binary plugins)
-- the frequency of data collection
+
+- the number of charts for which data are collected
+- the number of plugins running
+- the technology of the plugins (e.g. BASH plugins are slower than binary plugins)
+- the frequency of data collection
You can control all the above.
**Web clients accessing the data**
-- the duration of the charts in the dashboard
-- the number of charts refreshes requested
-- the compression level of the web responses
----
+- the duration of the charts in the dashboard
+- the number of charts refreshes requested
+- the compression level of the web responses
+
+- - -
## Netdata Daemon
@@ -24,9 +26,10 @@ For most server systems, with a few hundred charts and a few thousand dimensions
To prove Netdata's scalability, check issue [#1323](https://github.com/netdata/netdata/issues/1323#issuecomment-265501668) where Netdata collects 95,000 metrics per second, with 12% CPU utilization of a single core!
In embedded systems, if the Netdata daemon is using a lot of CPU without any web clients accessing it, you should lower the data collection frequency. To do so, edit `/etc/netdata/netdata.conf` and set `update every` to a higher number (this is the data collection frequency in seconds for all charts; a higher number means less frequent collection, with a default of 1 for per-second collection). You can also set this frequency per module or chart. Check the [daemon configuration](../daemon/config) for plugins and charts. For specific modules, the configuration needs to be changed in one of the files below (see the sketch after this list):
-- `python.d.conf` for [python](../collectors/python.d.plugin/#pythondplugin)
-- `node.d.conf` for [nodejs](../collectors/node.d.plugin/#nodedplugin)
-- `charts.d.conf` for [bash](../collectors/charts.d.plugin/#chartsdplugin)
+
+- `python.d.conf` for [python](../collectors/python.d.plugin/#pythondplugin)
+- `node.d.conf` for [nodejs](../collectors/node.d.plugin/#nodedplugin)
+- `charts.d.conf` for [bash](../collectors/charts.d.plugin/#chartsdplugin)
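+
+A minimal sketch of lowering the global data collection frequency in `netdata.conf` (the value `10` is just an example):
+
+```conf
+[global]
+    # collect data every 10 seconds instead of every second
+    update every = 10
+```
+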
## Plugins
@@ -42,7 +45,6 @@ Netdata runs with the lowest possible process priority, so even if 1000 users ar
To lower the CPU utilization of Netdata when clients are accessing the dashboard, set `web compression level = 1`, or disable web compression completely by setting `enable web responses gzip compression = no`. Both settings are in the `[web]` section.
-
## Monitoring a heavy loaded system
Netdata, while running, does not depend on disk I/O (apart from its log files; `access.log` is written with buffering enabled and this can be disabled). Some plugins that need disk may stop and show gaps during heavy system load, but the Netdata daemon itself should be able to work and collect values from `/proc` and `/sys` and serve web clients accessing it.
@@ -119,17 +121,17 @@ Edit `/etc/netdata/netdata.conf`, find the `[plugins]` section:
In detail:
-plugin|description
-:---:|:---------
-`proc`|the internal plugin used to monitor the system. Normally, you don't want to disable this. You can disable individual functions of it at the next section.
-`tc`|monitoring network interfaces QoS (tc classes)
-`idlejitter`|internal plugin (written in C) that attempts show if the systems starved for CPU. Disabling it will eliminate a thread.
-`cgroups`|monitoring linux containers. Most probably you are not going to need it. This will also eliminate another thread.
-`checks`|a debugging plugin, which is disabled by default.
-`apps`|a plugin that monitors system processes. It is very complex and heavy (consumes twice the CPU resources of the Netdata daemon), so if you don't need to monitor the process tree, you can disable it.
-`charts.d`|BASH plugins (squid, nginx, mysql, etc). This is a heavy plugin, that consumes twice the CPU resources of the Netdata daemon.
-`node.d`|node.js plugin, currently used for SNMP data collection and monitoring named (the name server).
-`python.d`|has many modules and can use over 20MB of memory.
+| plugin|description|
+|:----:|:----------|
+| `proc`|the internal plugin used to monitor the system. Normally, you don't want to disable this. You can disable individual functions of it at the next section.|
+| `tc`|monitoring network interfaces QoS (tc classes)|
+| `idlejitter`|internal plugin (written in C) that attempts to show if the system is starved for CPU. Disabling it will eliminate a thread.|
+| `cgroups`|monitoring Linux containers. Most probably you are not going to need it. This will also eliminate another thread.|
+| `checks`|a debugging plugin, which is disabled by default.|
+| `apps`|a plugin that monitors system processes. It is very complex and heavy (consumes twice the CPU resources of the Netdata daemon), so if you don't need to monitor the process tree, you can disable it.|
+| `charts.d`|BASH plugins (squid, nginx, mysql, etc). This is a heavy plugin, that consumes twice the CPU resources of the Netdata daemon.|
+| `node.d`|node.js plugin, currently used for SNMP data collection and monitoring named (the name server).|
+| `python.d`|has many modules and can use over 20MB of memory.|
For most IoT devices, you can disable all plugins except `proc`. For `proc` there is another section that controls which functions of it you need. Check the next section.
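+
+For instance, a sketch of a `[plugins]` section for such an IoT setup, using the plugin names from the table above (the values are illustrative):
+
+```conf
+[plugins]
+    proc       = yes
+    tc         = no
+    idlejitter = no
+    cgroups    = no
+    checks     = no
+    apps       = no
+    charts.d   = no
+    node.d     = no
+    python.d   = no
+```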
@@ -170,6 +172,7 @@ Normally, you will not need them. To disable them, set:
error log = none
access log = none
```
+
### 5. Set memory mode to RAM
Setting the memory mode to `ram` will disable loading and saving the round robin database. This will not affect anything while running Netdata, but it might be required if you have very limited storage available.
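+
+A minimal sketch, assuming the stock `netdata.conf` layout:
+
+```conf
+[global]
+    # keep the round robin database in RAM only; nothing is loaded from or saved to disk
+    memory mode = ram
+```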
@@ -192,7 +195,6 @@ The units for history is `[global].update every` seconds. So if `[global].update
Check also [Database](../database) for directions on calculating the size of the round robin database.
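+
+For example, a sketch that keeps one hour of data, assuming the default `update every = 1`:
+
+```conf
+[global]
+    # 3600 entries per dimension x 1 second each = 1 hour of history
+    history = 3600
+```
+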
-
### 7. Disable gzip compression of responses
Gzip compression of the web responses uses more CPU than the rest of Netdata. You can lower the compression level or disable gzip compression completely. You can disable it, like this:
@@ -217,4 +219,4 @@ Finally, if no web server is installed on your device, you can use port tcp/80 f
port = 80
```
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2FPerformance&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2FPerformance&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/README.md b/docs/README.md
index 8dd0c7a6..752802f6 100644
--- a/docs/README.md
+++ b/docs/README.md
@@ -1,7 +1,7 @@
-# Read documentation on https://docs.netdata.cloud
+# Read documentation on <https://docs.netdata.cloud>
Welcome to the Netdata documentation! While you can read Netdata documentation here, or throughout the Netdata repository, our intention is that these pages are read on [docs.netdata.cloud](https://docs.netdata.cloud).
Links between documentation pages will work fine here, but the formatting may not be perfect, as our documentation site uses a few extra Markdown features that GitHub doesn't support natively. Other things might be missing or look less than perfect.
-Now get out there and build an exceptional infrastructure. \ No newline at end of file
+Now get out there and build an exceptional infrastructure.
diff --git a/docs/Running-behind-apache.md b/docs/Running-behind-apache.md
index c4def5f6..6c5ab677 100644
--- a/docs/Running-behind-apache.md
+++ b/docs/Running-behind-apache.md
@@ -2,11 +2,10 @@
Below you can find instructions for configuring an apache server to:
-1. proxy a single Netdata via an HTTP and HTTPS virtual host
-2. dynamically proxy any number of Netdata servers
-3. add user authentication
-4. adjust Netdata settings to get optimal results
-
+1. proxy a single Netdata via an HTTP and HTTPS virtual host
+2. dynamically proxy any number of Netdata servers
+3. add user authentication
+4. adjust Netdata settings to get optimal results
## Requirements
@@ -20,14 +19,14 @@ sudo apt-get install apache2-bin
Also make sure they are enabled:
-```
+```sh
sudo a2enmod proxy
sudo a2enmod proxy_http
```
Ensure your rewrite module is enabled:
-```
+```sh
sudo a2enmod rewrite
```
@@ -41,7 +40,7 @@ On any **existing** and already **working** apache virtual host, you can redirec
Add the following on top of any existing virtual host. It will allow you to access Netdata as `http://virtual.host/netdata/`.
-```
+```conf
<VirtualHost *:80>
RewriteEngine On
@@ -71,7 +70,7 @@ Add the following on top of any existing virtual host. It will allow you to acce
Add the following on top of any existing virtual host. It will allow you to access multiple Netdata as `http://virtual.host/netdata/HOSTNAME/`, where `HOSTNAME` is the hostname of any other Netdata server you have (to access the `localhost` Netdata, use `http://virtual.host/netdata/localhost/`).
-```
+```conf
<VirtualHost *:80>
RewriteEngine On
@@ -117,7 +116,7 @@ nano /etc/apache2/sites-available/netdata.conf
with this content:
-```
+```conf
<VirtualHost *:80>
RewriteEngine On
ProxyRequests Off
@@ -144,19 +143,20 @@ sudo a2ensite netdata.conf && service apache2 reload
```
## Netdata proxy in Plesk
+
_Assuming the main goal is to make Netdata run over HTTPS._
-1. Make a subdomain for Netdata on which you enable and force HTTPS - You can use a free Let's Encrypt certificate
-2. Go to "Apache & nginx Settings", and in the following section, add:
+1. Make a subdomain for Netdata on which you enable and force HTTPS. You can use a free Let's Encrypt certificate.
+2. Go to "Apache & nginx Settings", and in the following section, add:
-```
+```conf
RewriteEngine on
RewriteRule (.*) http://localhost:19999/$1 [P,L]
```
-3. Optional: If your server is remote, then just replace "localhost" with your actual hostname or IP, it just works.
-Repeat the operation for as many servers as you need.
+3. Optional: If your server is remote, replace "localhost" with your actual hostname or IP address; it just works.
+Repeat the operation for as many servers as you need.
## Enable Basic Auth
@@ -166,10 +166,10 @@ Install the package `apache2-utils`. On debian / ubuntu run `sudo apt-get instal
Then, generate a password for user `netdata`, using `htpasswd -c /etc/apache2/.htpasswd netdata`
-**Apache 2.2 Example:**
+**Apache 2.2 Example:**\
Modify the virtual host with these:
-```
+```conf
# replace the <Proxy *> section
<Proxy *>
Order deny,allow
@@ -189,11 +189,9 @@ Modify the virtual host with these:
Specify `Location /` if Netdata is running on dedicated virtual host.
-
-
**Apache 2.4 (dedicated virtual host) Example:**
-```
+```conf
<VirtualHost *:80>
RewriteEngine On
ProxyRequests Off
@@ -219,6 +217,16 @@ Specify `Location /` if Netdata is running on dedicated virtual host.
Note: Changes are applied by reloading or restarting Apache.
+## Configuration of Content Security Policy
+
+If you want to enable a Content Security Policy (CSP) in Apache, you should consider some special requirements of the headers. Modify your configuration as follows:
+
+```conf
+ Header always set Content-Security-Policy "default-src http: 'unsafe-inline' 'self' 'unsafe-eval'; script-src http: 'unsafe-inline' 'self' 'unsafe-eval'; style-src http: 'self' 'unsafe-inline'"
+```
+
+Note: Changes are applied by reloading or restarting Apache.
+
# Netdata configuration
You might edit `/etc/netdata/netdata.conf` to optimize your setup a bit. For applying these changes you need to restart Netdata.
@@ -242,12 +250,16 @@ You would also need to instruct Netdata to listen only on `localhost`, `127.0.0.
[web]
bind to = localhost
```
+
or
+
```
[web]
bind to = 127.0.0.1
```
+
or
+
```
[web]
bind to = ::1
@@ -286,7 +298,8 @@ If your apache server is not on localhost, you can set:
bind to = *
allow connections from = IP_OF_APACHE_SERVER
```
-_note: Netdata v1.9+ support `allow connections from`_
+
+*note: Netdata v1.9+ supports `allow connections from`*
`allow connections from` accepts [Netdata simple patterns](../libnetdata/simple_pattern/) to match against the connection IP address.
@@ -303,7 +316,7 @@ apache logs accesses and Netdata logs them too. You can prevent Netdata from gen
Make sure the requests reach Netdata, by examining `/var/log/netdata/access.log`.
-1. if the requests do not reach Netdata, your apache does not forward them.
-2. if the requests reach Netdata but the URLs are wrong, you have not re-written them properly.
+1. if the requests do not reach Netdata, your apache does not forward them.
+2. if the requests reach Netdata but the URLs are wrong, you have not re-written them properly.
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2FRunning-behind-apache&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2FRunning-behind-apache&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/Running-behind-caddy.md b/docs/Running-behind-caddy.md
index 4e530e94..866d488d 100644
--- a/docs/Running-behind-caddy.md
+++ b/docs/Running-behind-caddy.md
@@ -2,7 +2,7 @@
To run Netdata via [Caddy's proxying,](https://caddyserver.com/docs/proxy) set your Caddyfile up like this:
-```
+```caddyfile
netdata.domain.tld {
proxy / localhost:19999
}
@@ -12,7 +12,7 @@ Other directives can be added between the curly brackets as needed.
To run Netdata in a subfolder:
-```
+```caddyfile
netdata.domain.tld {
proxy /netdata/ localhost:19999 {
without /netdata
@@ -26,4 +26,4 @@ You would also need to instruct Netdata to listen only to `127.0.0.1` or `::1`.
To limit access to Netdata only from localhost, set `bind socket to IP = 127.0.0.1` or `bind socket to IP = ::1` in `/etc/netdata/netdata.conf`.
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2FRunning-behind-caddy&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2FRunning-behind-caddy&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/Running-behind-haproxy.md b/docs/Running-behind-haproxy.md
index 2c1835f5..cf95a491 100644
--- a/docs/Running-behind-haproxy.md
+++ b/docs/Running-behind-haproxy.md
@@ -2,7 +2,7 @@
> HAProxy is a free, very fast and reliable solution offering high availability, load balancing, and proxying for TCP and HTTP-based applications. It is particularly suited for very high traffic web sites and powers quite a number of the world's most visited ones.
-If Netdata is running on a host running HAProxy, rather than connecting to Netdata from a port number, a domain name can be pointed at HAProxy, and HAProxy can redirect connections to the Netdata port. This can make it possible to connect to Netdata at https://example.com or https://example.com/netdata/, which is a much nicer experience then http://example.com:19999.
+If Netdata is running on a host running HAProxy, rather than connecting to Netdata from a port number, a domain name can be pointed at HAProxy, and HAProxy can redirect connections to the Netdata port. This can make it possible to connect to Netdata at <https://example.com> or <https://example.com/netdata/>, which is a much nicer experience than <http://example.com:19999>.
To proxy requests from [HAProxy](https://github.com/haproxy/haproxy) to Netdata, the following configuration can be used:
@@ -17,7 +17,7 @@ defaults
## Simple Configuration
-A simple example where the base URL, say http://example.com, is used with no subpath:
+A simple example where the base URL, say <http://example.com>, is used with no subpath:
### Frontend
@@ -277,4 +277,4 @@ backend netdata_backend
http-request set-header Connection "keep-alive"
```
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2FRunning-behind-haproxy&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2FRunning-behind-haproxy&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/Running-behind-lighttpd.md b/docs/Running-behind-lighttpd.md
index 8e43a038..8f05973b 100644
--- a/docs/Running-behind-lighttpd.md
+++ b/docs/Running-behind-lighttpd.md
@@ -26,11 +26,14 @@ $SERVER["socket"] == ":19998" {
If the only thing the server is exposing via the web is Netdata (and thus no suburl rewriting required),
then you can get away with just
+
```
proxy.server = ( "" => ( ( "host" => "127.0.0.1", "port" => 19999 )))
```
+
Though if it's public facing you might then want to put some authentication on it. htdigest support
looks like:
+
```
auth.backend = "htdigest"
auth.backend.htdigest.userfile = "/etc/lighttpd/lighttpd.htdigest"
@@ -40,6 +43,7 @@ auth.require = ( "" => ( "method" => "digest",
)
)
```
+
other auth methods, and more info on htdigest, can be found in lighttpd's [mod_auth docs](http://redmine.lighttpd.net/projects/lighttpd/wiki/Docs_ModAuth).
---
@@ -47,7 +51,7 @@ other auth methods, and more info on htdigest, can be found in lighttpd's [mod_a
It seems that lighttpd (or some versions of it) fails to proxy compressed web responses.
To solve this issue, disable web response compression in Netdata.
-Open /etc/netdata/netdata.conf and set in [global]:
+Open `/etc/netdata/netdata.conf` and set in the `[global]` section:
```
enable web responses gzip compression = no
@@ -59,4 +63,4 @@ You would also need to instruct Netdata to listen only to `127.0.0.1` or `::1`.
To limit access to Netdata only from localhost, set `bind socket to IP = 127.0.0.1` or `bind socket to IP = ::1` in `/etc/netdata/netdata.conf`.
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2FRunning-behind-lighttpd&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2FRunning-behind-lighttpd&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/Running-behind-nginx.md b/docs/Running-behind-nginx.md
index 81ebc1a7..cad41626 100644
--- a/docs/Running-behind-nginx.md
+++ b/docs/Running-behind-nginx.md
@@ -8,13 +8,13 @@ The software is known for its low impact on memory resources, high scalability,
## Why Nginx
-- By default, Nginx is fast and lightweight out of the box.
+- By default, Nginx is fast and lightweight out of the box.
-- Nginx is used and useful in cases when you want to access different instances of Netdata from a single server.
+- Nginx is used and useful in cases when you want to access different instances of Netdata from a single server.
-- Password-protect access to Netdata, until distributed authentication is implemented via the Netdata cloud Sign In mechanism.
+- Password-protect access to Netdata, until distributed authentication is implemented via the Netdata cloud Sign In mechanism.
-- A proxy was necessary to encrypt the communication to netdata, until v1.16.0, which provided TLS (HTTPS) support.
+- A proxy was necessary to encrypt the communication to Netdata until v1.16.0, which provided TLS (HTTPS) support.
## Nginx configuration file
@@ -28,9 +28,9 @@ You can edit the Nginx configuration file with Nano, Vim or any other text edito
After making changes to the configuration files:
-- Test Nginx configuration with `nginx -t`.
+- Test Nginx configuration with `nginx -t`.
-- Restart Nginx to effect the change with `/etc/init.d/nginx restart` or `service nginx restart`.
+- Restart Nginx to effect the change with `/etc/init.d/nginx restart` or `service nginx restart`.
## Ways to access Netdata via Nginx
@@ -38,7 +38,7 @@ After making changes to the configuration files:
With this method instead of `SERVER_IP_ADDRESS:19999`, the Netdata dashboard can be accessed via a human-readable URL such as `netdata.example.com` used in the configuration below.
-```
+```conf
upstream backend {
# the Netdata server
server 127.0.0.1:19999;
@@ -64,12 +64,13 @@ server {
}
}
```
+
### As a subfolder to an existing virtual host
This method is recommended when Netdata is to be served from a subfolder (or directory).
In this case, the virtual host `netdata.example.com` already exists and Netdata has to be accessed via `netdata.example.com/netdata/`.
-```
+```conf
upstream netdata {
server 127.0.0.1:19999;
keepalive 64;
@@ -109,7 +110,7 @@ server {
This is the recommended configuration when one Nginx will be used to manage multiple Netdata servers via subfolders.
-```
+```conf
upstream backend-server1 {
server 10.1.1.103:19999;
keepalive 64;
@@ -152,14 +153,14 @@ Of course you can add as many backend servers as you like.
Using the above, you access Netdata on the backend servers, like this:
-- `http://netdata.example.com/netdata/server1/` to reach `backend-server1`
-- `http://netdata.example.com/netdata/server2/` to reach `backend-server2`
+- `http://netdata.example.com/netdata/server1/` to reach `backend-server1`
+- `http://netdata.example.com/netdata/server2/` to reach `backend-server2`
### Encrypt the communication between Nginx and Netdata
In case Netdata's web server has been [configured to use TLS](../web/server/#enabling-tls-support), it is necessary to specify inside the Nginx configuration that the final destination is using TLS. To do this, please, append the following parameters in your `nginx.conf`
-```
+```conf
proxy_set_header X-Forwarded-Proto https;
proxy_pass https://localhost:19999;
```
@@ -174,13 +175,13 @@ Create an authentication file to enable basic authentication via Nginx, this sec
If you don't have an authentication file, you can use the following command:
-```
+```sh
printf "yourusername:$(openssl passwd -apr1)" > /etc/nginx/passwords
```
And then enable the authentication inside your server directive:
-```
+```conf
server {
# ...
auth_basic "Protected";
@@ -206,11 +207,12 @@ You can also use a unix domain socket. This will also provide a faster route bet
[web]
bind to = unix:/tmp/netdata.sock
```
-_note: Netdata v1.8+ support unix domain sockets_
+
+*note: Netdata v1.8+ supports unix domain sockets*
At the Nginx side, use something like this to use the same unix domain socket:
-```
+```conf
upstream backend {
server unix:/tmp/netdata.sock;
keepalive 64;
@@ -227,7 +229,7 @@ If your Nginx server is not on localhost, you can set:
allow connections from = IP_OF_NGINX_SERVER
```
-_note: Netdata v1.9+ support `allow connections from`_
+*note: Netdata v1.9+ supports `allow connections from`*
`allow connections from` accepts [Netdata simple patterns](../libnetdata/simple_pattern/) to match against the connection IP address.
@@ -251,5 +253,4 @@ If you get an 502 Bad Gateway error you might check your Nginx error log:
If you see something like the above, chances are high that SELinux prevents nginx from connecting to the backend server. To fix that, just use this policy: `setsebool -P httpd_can_network_connect true`.
-
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2FRunning-behind-nginx&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() \ No newline at end of file
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2FRunning-behind-nginx&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/Third-Party-Plugins.md b/docs/Third-Party-Plugins.md
index 8d227203..1b7344b1 100644
--- a/docs/Third-Party-Plugins.md
+++ b/docs/Third-Party-Plugins.md
@@ -28,4 +28,4 @@ Collect [number of currently logged-on users](https://github.com/veksh/netdata-n
There is an unofficial [nim plugin helper](https://github.com/FedericoCeratto/nim-netdata-plugin)
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2FThird-Party-Plugins&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2FThird-Party-Plugins&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/a-github-star-is-important.md b/docs/a-github-star-is-important.md
index cac01f3e..6bac3ace 100644
--- a/docs/a-github-star-is-important.md
+++ b/docs/a-github-star-is-important.md
@@ -12,4 +12,4 @@ Thank you!
Costa Tsaousis
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fa-github-star-is-important&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fa-github-star-is-important&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/anonymous-statistics.md b/docs/anonymous-statistics.md
index 376a2c4a..7f175a1c 100644
--- a/docs/anonymous-statistics.md
+++ b/docs/anonymous-statistics.md
@@ -3,13 +3,14 @@
From Netdata v1.12 and above, anonymous usage information is collected by default and sent to Google Analytics.
The statistics calculated from this information will be used for:
-1. **Quality assurance**, to help us understand if Netdata behaves as expected and help us identify repeating issues for certain distributions or environment.
+1. **Quality assurance**, to help us understand if Netdata behaves as expected and to help us identify repeating issues for certain distributions or environments.
-2. **Usage statistics**, to help us focus on the parts of Netdata that are used the most, or help us identify the extend our development decisions influence the community.
+2. **Usage statistics**, to help us focus on the parts of Netdata that are used the most, and to help us identify the extent to which our development decisions influence the community.
Information is sent to Netdata via two different channels:
-- Google Tag Manager is used when an agent's dashboard is accessed.
-- The script `anonymous-statistics.sh` is executed by the Netdata daemon, when Netdata starts, stops cleanly, or fails.
+
+- Google Tag Manager is used when an agent's dashboard is accessed.
+- The script `anonymous-statistics.sh` is executed by the Netdata daemon, when Netdata starts, stops cleanly, or fails.
Both methods are controlled via the same [opt-out mechanism](#opt-out).
@@ -21,21 +22,21 @@ We have configured GTM to trigger the tag only when the variable `anonymous_stat
To ensure anonymity of the stored information, we have configured GTM's GA variable "Fields to set" as follows:
-|Field Name|Value
-|---|---
-|page|netdata-dashboard
-|hostname|dashboard.my-netdata.io
-|anonymizeIp|true
-|title|netdata dashboard
-|campaignSource|{{machine_guid}}
-|campaignMedium|web
-|referrer|http://dashboard.my-netdata.io
-|Page URL|http://dashboard.my-netdata.io/netdata-dashboard
-|Page Hostname|http://dashboard.my-netdata.io
-|Page Path|/netdata-dashboard
-|location|http://dashboard.my-netdata.io
-
-In addition, the netdata-generated unique machine guid is sent to GA via a custom dimension.
+| Field Name|Value|
+|----------|-----|
+| page|netdata-dashboard|
+| hostname|dashboard.my-netdata.io|
+| anonymizeIp|true|
+| title|Netdata dashboard|
+| campaignSource|{{machine_guid}}|
+| campaignMedium|web|
+| referrer|<http://dashboard.my-netdata.io>|
+| Page URL|<http://dashboard.my-netdata.io/netdata-dashboard>|
+| Page Hostname|<http://dashboard.my-netdata.io>|
+| Page Path|/netdata-dashboard|
+| location|<http://dashboard.my-netdata.io>|
+
+In addition, the Netdata-generated unique machine GUID is sent to GA via a custom dimension.
You can verify the effect of these settings by examining the GA `collect` request parameters.
The only thing that's impossible for us to prevent from being **sent** is the URL in the "Referrer" Header of the browser request to GA. However, the settings above ensure that all **stored** URLs and host names are anonymized.
@@ -43,21 +44,23 @@ The only thing that's impossible for us to prevent from being **sent** is the UR
## Anonymous Statistics Script
Every time the daemon is started or stopped and every time a fatal condition is encountered, Netdata uses the anonymous statistics script to collect system information and send it to GA via an HTTP call. The information collected for all events is:
- - Netdata version
- - OS name, version, id, id_like
- - Kernel name, version, architecture
- - Virtualization technology
- - Containerization technology
+
+- Netdata version
+- OS name, version, id, id_like
+- Kernel name, version, architecture
+- Virtualization technology
+- Containerization technology
Furthermore, the FATAL event sends the Netdata process & thread name, along with the source code function, source code filename and source code line number of the fatal error.
-
+
To see exactly what is collected and how, you can review the script template `daemon/anonymous-statistics.sh.in`. The template is converted to a bash script called `anonymous-statistics.sh`, installed under the Netdata `plugins directory`, which is usually `/usr/libexec/netdata/plugins.d`.
## Opt-Out
To opt-out from sending anonymous statistics, you can create a file called `.opt-out-from-anonymous-statistics` under the user configuration directory (usually `/etc/netdata`). The effect of creating the file is the following:
- - The daemon will never execute the anonymous statistics script
- - The anonymous statistics script will exit immediately if called via any other way (e.g. shell)
- - The Google Tag Manager Javascript snippet will remain in the page, but the linked tag will not be fired. The effect is that no data will ever be sent to GA.
+
+- The daemon will never execute the anonymous statistics script
+- The anonymous statistics script will exit immediately if called via any other way (e.g. shell)
+- The Google Tag Manager Javascript snippet will remain in the page, but the linked tag will not be fired. The effect is that no data will ever be sent to GA.
You can also disable telemetry by passing the option `--disable-telemetry` to any of the installers.
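+
+For example, assuming the default `/etc/netdata` configuration directory, the two opt-out methods described above look like this (the installer filename depends on your installation method):
+
+```sh
+# create the opt-out file so the daemon never runs the statistics script
+sudo touch /etc/netdata/.opt-out-from-anonymous-statistics
+
+# or disable telemetry at install time
+./netdata-installer.sh --disable-telemetry
+```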
diff --git a/docs/configuration-guide.md b/docs/configuration-guide.md
index 1c79e027..c1334774 100644
--- a/docs/configuration-guide.md
+++ b/docs/configuration-guide.md
@@ -7,27 +7,26 @@ Depending on your installation method, Netdata will have been installed either d
Under that directory you will see the following:
-- `netdata.conf` is [the main configuration file](../daemon/config/#daemon-configuration)
-- `edit-config` is an sh script that you can use to easily and safely edit the configuration. Just run it to see its usage.
-- Other directories, initially empty, where your custom configurations for alarms and collector plugins/modules will be copied from the stock configuration, if and when you customize them using `edit-config`.
-- `orig` is a symbolic link to the directory `/usr/lib/netdata/conf.d`, which contains the stock configurations for everything not included in `netdata.conf`:
- - `health_alarm_notify.conf` is where you configure how and to who Netdata will send [alarm notifications](../health/notifications/#netdata-alarm-notifications).
- - `health.d` is the directory that contains the alarm triggers for [health monitoring](../health/#health-monitoring). It contains one .conf file per collector.
- - The [modular plugin orchestrators](../collectors/plugins.d/#external-plugins-overview) have:
- - One config file each, mainly to turn their modules on and off: `python.d.conf` for [python](../collectors/python.d.plugin/#pythondplugin), `node.d.conf` for [nodejs](../collectors/node.d.plugin/#nodedplugin) and `charts.d.conf` for [bash](../collectors/charts.d.plugin/#chartsdplugin) modules.
- - One directory each, where the module-specific configuration files can be found.
- - `stream.conf` is where you configure [streaming and replication](../streaming/#streaming-and-replication)
- - `stats.d` is a directory under which you can add .conf files to add [synthetic charts](../collectors/statsd.plugin/#synthetic-statsd-charts).
- - Individual collector plugin config files, such as `fping.conf` for the [fping plugin](../collectors/fping.plugin/) and `apps_groups.conf` for the [apps plugin](../collectors/apps.plugin/)
+- `netdata.conf` is [the main configuration file](../daemon/config/#daemon-configuration)
+- `edit-config` is an sh script that you can use to easily and safely edit the configuration. Just run it to see its usage.
+- Other directories, initially empty, where your custom configurations for alarms and collector plugins/modules will be copied from the stock configuration, if and when you customize them using `edit-config`.
+- `orig` is a symbolic link to the directory `/usr/lib/netdata/conf.d`, which contains the stock configurations for everything not included in `netdata.conf`:
+  - `health_alarm_notify.conf` is where you configure how and to whom Netdata will send [alarm notifications](../health/notifications/#netdata-alarm-notifications).
+ - `health.d` is the directory that contains the alarm triggers for [health monitoring](../health/#health-monitoring). It contains one .conf file per collector.
+ - The [modular plugin orchestrators](../collectors/plugins.d/#external-plugins-overview) have:
+ - One config file each, mainly to turn their modules on and off: `python.d.conf` for [python](../collectors/python.d.plugin/#pythondplugin), `node.d.conf` for [nodejs](../collectors/node.d.plugin/#nodedplugin) and `charts.d.conf` for [bash](../collectors/charts.d.plugin/#chartsdplugin) modules.
+ - One directory each, where the module-specific configuration files can be found.
+ - `stream.conf` is where you configure [streaming and replication](../streaming/#streaming-and-replication)
+ - `stats.d` is a directory under which you can add .conf files to add [synthetic charts](../collectors/statsd.plugin/#synthetic-statsd-charts).
+ - Individual collector plugin config files, such as `fping.conf` for the [fping plugin](../collectors/fping.plugin/) and `apps_groups.conf` for the [apps plugin](../collectors/apps.plugin/)
So there are many configuration files to control every aspect of Netdata's behavior. It can be overwhelming at first, but you won't have to deal with any of them, unless you have specific things you need to change. The following HOWTO will guide you on how to customize your Netdata, based on what you want to do.
-
## How to
### Persist my configuration
-In http://localhost:19999/netdata.conf, you will see the following two parameters:
+In <http://localhost:19999/netdata.conf>, you will see the following two parameters:
```bash
# config directory = /etc/netdata
@@ -40,26 +39,27 @@ To persist your configurations, don't edit the files under the `stock config dir
##### Increase the metrics retention period
-Increase `history` in [netdata.conf [global]](../daemon/config/#global-section-options). Just ensure you understand [how much memory will be required](../database/)
+Increase `history` in [netdata.conf \[global\]](../daemon/config/#global-section-options). Just ensure you understand [how much memory will be required](../database/)
##### Reduce the data collection frequency
-Increase `update every` in [netdata.conf [global]](../daemon/config/#global-section-options). This is another way to increase your metrics retention period, but at a lower resolution than the default 1s.
+Increase `update every` in [netdata.conf \[global\]](../daemon/config/#global-section-options). This is another way to increase your metrics retention period, but at a lower resolution than the default 1s.
##### Modify how a chart is displayed
-In `netdata.conf` under `# Per chart configuration` you will find several [[CHART_NAME] sections](../daemon/config/#per-chart-configuration), where you can control all aspects of a specific chart.
+In `netdata.conf` under `# Per chart configuration` you will find several [\[CHART_NAME\] sections](../daemon/config/#per-chart-configuration), where you can control all aspects of a specific chart.
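+
+For instance, a hypothetical `[CHART_NAME]` section (the chart id `system.cpu` and the options shown are illustrative; the commented defaults in `netdata.conf` list the options each chart supports):
+
+```conf
+[system.cpu]
+    # illustrative per-chart overrides
+    title = Total CPU utilization
+    enabled = yes
+```
+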
##### Disable a collector
-Entire plugins can be turned off from the [netdata.conf [plugins]](../daemon/config/#plugins-section-options) section. To disable specific modules of a plugin orchestrator, you need to edit one of the following:
-- `python.d.conf` for [python](../collectors/python.d.plugin/#pythondplugin)
-- `node.d.conf` for [nodejs](../collectors/node.d.plugin/#nodedplugin)
-- `charts.d.conf` for [bash](../collectors/charts.d.plugin/#chartsdplugin)
+Entire plugins can be turned off from the [netdata.conf \[plugins\]](../daemon/config/#plugins-section-options) section. To disable specific modules of a plugin orchestrator, you need to edit one of the following (see the sketch after this list):
+
+- `python.d.conf` for [python](../collectors/python.d.plugin/#pythondplugin)
+- `node.d.conf` for [nodejs](../collectors/node.d.plugin/#nodedplugin)
+- `charts.d.conf` for [bash](../collectors/charts.d.plugin/#chartsdplugin)
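+
+For instance, a sketch of disabling a single module of the python.d orchestrator via `python.d.conf` (the module name `nginx` is just an example):
+
+```yaml
+# python.d.conf uses a yes/no flag per module
+nginx: no
+```
+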
##### Show charts with zero metrics
-By default, Netdata will enable monitoring metrics for disks, memory, and network only when they are not zero. If they are constantly zero they are ignored. Metrics that will start having values, after netdata is started, will be detected and charts will be automatically added to the dashboard (a refresh of the dashboard is needed for them to appear though). Use `yes` instead of `auto` in plugin configuration sections to enable these charts permanently. You can also set the `enable zero metrics` option to `yes` in the `[global]` section which enables charts with zero metrics for all internal Netdata plugins.
+By default, Netdata will enable monitoring metrics for disks, memory, and network only when they are not zero. If they are constantly zero they are ignored. Metrics that start having values after Netdata is started will be detected, and charts will be added to the dashboard automatically (though a refresh of the dashboard is needed for them to appear). Use `yes` instead of `auto` in plugin configuration sections to enable these charts permanently. You can also set the `enable zero metrics` option to `yes` in the `[global]` section, which enables charts with zero metrics for all internal Netdata plugins.
### Modify alarms and notifications
@@ -69,7 +69,7 @@ You can add a new alarm definition either by editing an existing stock alarm con
##### Turn off all alarms and notifications
-Just set `enabled = no` in the [netdata.conf [health]](../daemon/config/#health-section-options) section
+Just set `enabled = no` in the [netdata.conf \[health\]](../daemon/config/#health-section-options) section
##### Modify or disable a specific alarm
@@ -88,15 +88,15 @@ You only need to configure `health_alarm_notify.conf`. To learn how to do it, re
##### Change the Netdata web server access lists
-You have several options under the [netdata.conf [web]](../web/server/#access-lists) section.
+You have several options under the [netdata.conf \[web\]](../web/server/#access-lists) section.
##### Stop sending info to registry.my-netdata.io
-You will need to configure the [registry] section in netdata.conf. First read the [registry documentation](../registry/). In it, are instructions on how to [run your own registry](../registry/#run-your-own-registry).
+You will need to configure the \[registry\] section in `netdata.conf`. First, read the [registry documentation](../registry/). In it are instructions on how to [run your own registry](../registry/#run-your-own-registry).
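+A minimal sketch of the relevant `netdata.conf` section, assuming you run your own registry (the URL is illustrative):
+
+```conf
+[registry]
+    # point this node at your own registry instead of registry.my-netdata.io
+    registry to announce = http://registry.example.com:19999
+```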
##### Change the IP address/port Netdata listens to
-The settings are under netdata.conf [web]. Look at the [web server documentation](../web/server/#binding-netdata-to-multiple-ports) for more info.
+The settings are under the `netdata.conf` \[web\] section. Look at the [web server documentation](../web/server/#binding-netdata-to-multiple-ports) for more info.
### System resource usage
@@ -106,18 +106,17 @@ The page on [Netdata performance](Performance.md) has an excellent guide on how
##### Change when Netdata saves metrics to disk
-[netdata.conf [global]](../daemon/config/#global-section-options) : `memory mode`</details>
+[netdata.conf \[global\]](../daemon/config/#global-section-options) : `memory mode`</details>
##### Prevent Netdata from getting immediately killed when my server runs out of memory
-You can change the Netdata [OOM score](../daemon/#oom-score) in netdata.conf [global].
+You can change the Netdata [OOM score](../daemon/#oom-score) in `netdata.conf` \[global\].
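+A minimal sketch, assuming you want a less aggressive score than the default `1000` (the value shown is illustrative):
+
+```conf
+[global]
+    # lower values make the kernel's OOM killer less likely to pick netdata
+    OOM score = 0
+```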
### Other
##### Move Netdata directories
-The various directory paths are in [netdata.conf [global]](../daemon/config/#global-section-options).
-
+The various directory paths are in [netdata.conf \[global\]](../daemon/config/#global-section-options).
## How Netdata configuration works
@@ -135,4 +134,4 @@ Unix prefers regular expressions. But they are just too hard, too cryptic to use
So, Netdata supports [simple patterns](../libnetdata/simple_pattern/).
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fconfiguration-guide&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fconfiguration-guide&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/contributing/contributing-documentation.md b/docs/contributing/contributing-documentation.md
new file mode 100644
index 00000000..7cf0d820
--- /dev/null
+++ b/docs/contributing/contributing-documentation.md
@@ -0,0 +1,137 @@
+# Contributing to documentation
+
+We welcome contributions to Netdata's already extensive documentation, which we host at [docs.netdata.cloud](https://docs.netdata.cloud/) and store inside the [main repository](https://github.com/netdata/netdata) on GitHub.
+
+As with contributions to all other aspects of Netdata, we ask that anyone who wants to help with documentation read and abide by the [Contributor Covenant Code of Conduct](https://docs.netdata.cloud/code_of_conduct/) and follow the instructions outlined in our [Contributing document](../../CONTRIBUTING.md).
+
+We also ask you to read our [documentation style guide](style-guide.md), which, while not complete, will give you some guidance on how we write and organize our documentation.
+
+All our documentation uses the Markdown syntax. If you're not familiar with how it works, please read the [Markdown introduction post](https://daringfireball.net/projects/markdown/) by its creator, followed by the [Mastering Markdown](https://guides.github.com/features/mastering-markdown/) guide from GitHub.
+
+## How contributing to the documentation works
+
+There are two ways to contribute to Netdata's documentation:
+
+1. Edit documentation [directly in GitHub](#edit-documentation-directly-on-github).
+2. Download the repository and [edit documentation locally](#edit-documentation-locally).
+
+Editing in GitHub is a simpler process and is perfect for quick edits to a single document, such as fixing a typo or clarifying a confusing sentence.
+
+Editing locally is more complex, as you need to download the Netdata repository and build the documentation using `mkdocs`, but allows you to better organize complex projects. By building documentation locally, you can preview your work using a local web server before you submit your PR.
+
+In both cases, you'll finish by submitting a pull request (PR). Once you submit your PR, GitHub will initiate a number of jobs, including a Netlify preview. You can use this preview to view the documentation site with your changes applied, which might help you catch any lingering issues.
+
+To continue, follow one of the paths below:
+
+- [Edit documentation directly in GitHub](#edit-documentation-directly-on-github)
+- [Edit documentation locally](#edit-documentation-locally)
+
+## Edit documentation directly on GitHub
+
+Start editing documentation on GitHub by clicking the small pencil icon on any page on Netdata's [documentation site](https://docs.netdata.cloud/). You can find one at the top of every page.
+
+Clicking on this icon will take you to the associated page in the `netdata/netdata` repository. Then click the small pencil icon on any documentation file (files ending in the `.md` Markdown extension) in the `netdata/netdata` repository.
+
+![A screenshot of editing a Markdown file directly in the Netdata repository](https://user-images.githubusercontent.com/1153921/59637188-10426d00-910a-11e9-99f2-ec564d6fb7d5.png)
+
+If you know where a file resides in the Netdata repository already, you can skip the step of beginning on the documentation site and go directly to GitHub.
+
+Once you've clicked the pencil icon on GitHub, you'll see a full Markdown version of the file. Make changes as you see fit. You can use the `Preview changes` button to ensure your Markdown syntax is working properly.
+
+Under the `Propose file change` header, write a descriptive title for your requested change. Beneath that, add a concise description of what you've changed and why you think it's important. Then, click the `Propose file change` button.
+
+After you've hit that button, jump down to our instructions on [pull requests and cleanup](#pull-requests-and-final-steps) for your next steps.
+
+!!! note
+ This process will create a branch directly on the `netdata/netdata` repository, which then requires manual cleanup. If you're going to make significant documentation contributions, or contribute often, we recommend the local editing process just below.
+
+## Edit documentation locally
+
+Editing documentation locally is the preferred method for complex changes, PRs that span multiple documents, or those that change the styling or underlying functionality of the documentation.
+
+Here is the workflow for editing documentation locally. First, create a fork of the Netdata repository, if you don't have one already. Visit the [Netdata repository](https://github.com/netdata/netdata) and click on the `Fork` button in the upper-right corner of the window.
+
+![Screenshot of forking the Netdata repository](https://user-images.githubusercontent.com/1153921/59873572-25f5a380-9351-11e9-92a4-a681fe4a2ed9.png)
+
+GitHub will ask you where you want to create the fork, and once finished you'll end up at the index of your forked Netdata repository. Clone your fork to your local machine:
+
+```bash
+$ git clone https://github.com/YOUR-GITHUB-USERNAME/netdata.git
+```
+
+You can now jump into the directory and explore Netdata's structure for yourself.
+
+### Understanding the structure of Netdata's documentation
+
+All of Netdata's documentation is stored within the repository itself, as close as possible to the code it corresponds to. Many sub-folders contain a `README.md` file, which is then used to populate the documentation about that feature/component of Netdata.
+
+For example, the file at `packaging/installer/README.md` becomes `https://docs.netdata.cloud/packaging/installer/` and is our installation documentation. By co-locating it with quick-start installation code, we ensure documentation is always tightly knit with the functions it describes.
+
+You might find other `.md` files within these directories. The `packaging/installer/` folder also contains `UPDATE.md` and `UNINSTALL.md`, which become `https://docs.netdata.cloud/packaging/installer/update/` and `https://docs.netdata.cloud/packaging/installer/uninstall/`, respectively.
+
+If the documentation you're working on has a direct correlation to some component of Netdata, place it into the correct folder and either name it `README.md` for generic documentation, or with another name for very specific instructions.
+
+#### The `docs` folder
+
+At the root of the Netdata repository is a `docs/` folder. Inside this folder we place documentation that does not have a direct relationship to a specific component of Netdata. It's where we house our [getting started guide](../GettingStarted.md), guides on [running Netdata behind Nginx](../Running-behind-nginx.md), and more.
+
+If the documentation you're working on doesn't have a direct relationship to a component of Netdata, it can be placed in this `docs/` folder.
+
+### Make your edits
+
+Now that you're set up and understand where to find or create your `.md` file, you can begin to make your edits. Just use your favorite editor and keep in mind our [style guide](style-guide.md) as you work.
+
+If you add a new file to the documentation, you may need to modify the `buildyaml.sh` file to ensure it's added to the site's navigation (see the sketch below). This is true for any file added to the `docs/` folder.
+
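+For example, to add a hypothetical new file at `docs/my-new-guide.md` to the navigation, you might extend the `echo -ne` block for the relevant section of `buildyaml.sh` (the path and indentation are illustrative):
+
+```bash
+echo -ne "    - 'docs/my-new-guide.md'
+"
+```
+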
+Be sure to periodically add/commit your edits so that you don't lose your work! We use version control software for a reason.
+
+### Build the documentation
+
+Building the documentation periodically gives you a glimpse into the final product, and is generally required if you're making changes to the table of contents.
+
+!!! attention ""
+ We have only tested the build process on Linux. Initial tests on OS X have been unsuccessful. Windows is fully untested at this point, but we would love to know if it works there as well!
+
+To build the documentation, you need `python`/`pip`, `mkdocs`, and `mkdocs-material` installed on your machine.
+
+Follow the [Python installation instructions](https://www.python.org/downloads/) for your machine.
+
+Use `pip`, which was installed alongside Python, to install `mkdocs` and `mkdocs-material`. Your operating system might force you to use `pip2` or `pip3` instead, depending on which version of Python you have installed.
+
+```bash
+$ pip install mkdocs mkdocs-material
+```
+
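+If your operating system maps `pip` to Python 2, the Python 3 variant of the same command might look like this:
+
+```bash
+$ pip3 install mkdocs mkdocs-material
+```
+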
+??? note "Troubleshooting"
+ If you're having trouble with the installation of Python, `mkdocs`, or `mkdocs-material`, try looking into the `mkdocs` [installation instructions](https://squidfunk.github.io/mkdocs-material/getting-started/#installation).
+
+When `pip` is finished installing, navigate to the root directory of the Netdata repository and run the documentation generator script.
+
+```bash
+$ sh docs/generator/buildhtml.sh
+```
+
+This process will take some time. Once finished, the built documentation site will be located at `docs/generator/build/`.
+
+### Run a local web server to test documentation
+
+The best way to view the documentation site you just built is to run a simple web server from the `docs/generator/build/` directory. So, navigate there and run a Python-based web server:
+
+```bash
+$ cd docs/generator/build/
+$ python3 -m http.server 20000
+```
+
+Feel free to change the port this web server listens on (port `20000` in this case, just one higher than the agent's default of `19999`!).
+
+Open your web browser and navigate to `http://localhost:20000`. If you replaced the port earlier, change it here as well. You can now navigate through the documentation as you would on the live site!
+
+## Pull requests and final steps
+
+When you're finished with your changes, add and commit them to your fork of the Netdata repository. Head over to GitHub to create your pull request (PR).
+
+Once we receive your pull request (PR), we'll take time to read through it and assess it for correctness, conciseness, and overall quality. We may point to specific sections and ask for additional information or other fixes.
+
+## What's next
+
+- Read up on the Netdata documentation [style guide](style-guide.md).
diff --git a/docs/contributing/style-guide.md b/docs/contributing/style-guide.md
new file mode 100644
index 00000000..42c98101
--- /dev/null
+++ b/docs/contributing/style-guide.md
@@ -0,0 +1,317 @@
+# Netdata style guide
+
+This in-progress style guide establishes editorial guidelines for anyone who wants to write documentation for Netdata products.
+
+## Table of contents
+
+- [Welcome!](#welcome)
+- [Goals of the Netdata style guide](#goals-of-the-netdata-style-guide)
+- [General principles](#general-principles)
+- [Tone and content](#tone-and-content)
+- [Language and grammar](#language-and-grammar)
+- [Markdown syntax](#markdown-syntax)
+- [Accessibility](#accessibility)
+
+## Welcome
+
+Proper documentation is essential to the success of any open-source project. Netdata is no different. The health of our monitoring agent, and the community it's created, depends on this effort.
+
+We’re here to make developers, sysadmins, and DevOps engineers better at their jobs, after all!
+
+We welcome contributions to Netdata's documentation. Begin with the [contributing to documentation guide](contributing-documentation.md), followed by this style guide.
+
+## Goals of the Netdata style guide
+
+An editorial style guide establishes standards for writing and maintaining documentation. At Netdata, we focus on the following principles:
+
+- Consistency
+- High-quality writing
+- Conciseness
+- Accessibility
+
+These principles will make documentation better for everyone who wants to use Netdata, whether they're a beginner or an expert.
+
+### Breaking the rules
+
+None of the rules described in this style guide are absolute. **We welcome rule-breaking if it creates better, more accessible documentation.**
+
+But be aware that Netdata staff or community members may ask you to justify your rule-breaking during the PR review process.
+
+## General principles
+
+Yes, this style guide is pretty overwhelming! Establishing standards for a global community is never easy.
+
+Here are a few key points to start with. Where relevant, they link to more in-depth information about a given rule.
+
+**[Tone and content](#tone-and-content)**:
+
+- Be [conversational and friendly](#conversational-and-friendly-tone).
+- Write [concisely](#write-concisely).
+- Don't use words like **here** when [creating hyperlinks](#use-informational-hyperlinks).
+- Don't mention [future releases or features](#mentioning-future-releases-or-features) in documentation.
+
+**[Language and grammar](#language-and-grammar)**:
+
+- [Capitalize words](#capitalization) at the beginning of sentences, for proper nouns, and at the beginning of document titles and section headers.
+- Use [second person](#second-person)—"you" rather than "we"—when giving instructions.
+- Use [active voice](#active-voice) to make clear who or what is performing an action.
+- Always employ an [Oxford comma](#oxford-comma) on lists.
+
+**[Markdown syntax](#markdown-syntax)**:
+
+- [Reference UI elements](#references-to-ui-elements) with bold text.
+- Use our [built-in syntax highlighter](#language-specific-syntax-highlighting-in-code-blocks) to improve the readability and usefulness of code blocks.
+
+**[Accessibility](#accessibility)**:
+
+- Include [alt tags on images](#images).
+
+---
+
+## Tone and content
+
+Netdata's documentation should be conversational, concise, and informational, without feeling formal. This isn't a textbook. It's a repository of information that should (on occasion!) encourage and excite its readers.
+
+By following a few principles on tone and content we'll ensure more readers from every background and skill level will learn as much as possible about Netdata's capabilities.
+
+### Conversational and friendly tone
+
+Netdata's documentation should be conversational and friendly. To borrow from Google's fantastic [developer style guide](https://developers.google.com/style/tone):
+
+> Try to sound like a knowledgeable friend who understands what the developer wants to do.
+
+Feel free to let some of your personality show! Documentation can be highly professional without being dry, formal, or overly instructive.
+
+### Write concisely
+
+You should always try to use as few words as possible to explain a particular feature, configuration, or process. Conciseness leads to more accurate and understandable writing.
+
+### Use informational hyperlinks
+
+A hyperlink should clearly state its destination. Don't use words like "here" to describe where a link will take your reader.
+
+```
+# Not recommended
+To install Netdata, click [here](https://docs.netdata.cloud/packaging/installer/).
+
+# Recommended
+To install Netdata, read our [installation instructions](https://docs.netdata.cloud/packaging/installer/).
+```
+
+In general, guides should include fewer hyperlinks to keep the reader focused on the task at hand. Documentation should include as many hyperlinks as necessary to provide meaningful context.
+
+### Avoid words like "easy" or "simple"
+
+Never assume readers of Netdata documentation are experts in Netdata's inner workings or health monitoring/performance troubleshooting in general.
+
+If you claim that a task is easy and the reader struggles to complete it, they'll get discouraged.
+
+If you perceive one option to be easier than another, be specific about how and why. For example, don't write, "Netdata's one-line installer is the easiest way to install Netdata." Instead, you might want to say, "Netdata's one-line installer requires fewer steps than manually installing from source."
+
+### Avoid slang, metaphors, and jargon
+
+A particular word, phrase, or metaphor you're familiar with might not translate well to the other cultures featured among Netdata's global community. It's recommended you avoid slang or colloquialisms in your writing.
+
+If you must use industry jargon, such as "white-box monitoring," in a document, be sure to define the term as clearly and concisely as you can.
+
+> White-box monitoring: Monitoring of a system or application based on the metrics it directly exposes, such as logs.
+
+Avoid emojis whenever possible for the same reasons—they can be difficult to understand immediately and don't translate well.
+
+### Mentioning future releases or features
+
+Documentation is meant to describe the product as-is, not as it will be or could be in the future. Netdata documentation generally avoids talking about future features or products, even if we know they are inevitable.
+
+An exception can be made for documenting beta features that are subject to change with further development.
+
+## Language and grammar
+
+Netdata's documentation should be consistent in the way it uses certain words, phrases, and grammar. The following sections will outline the preferred usage for capitalization, point of view, active voice, and more.
+
+### Capitalization
+
+In text, follow the general [English standards](https://owl.purdue.edu/owl/general_writing/mechanics/help_with_capitals.html) for capitalization. In summary:
+
+- Capitalize the first word of every new sentence.
+- Don't use uppercase for emphasis. (Netdata is the BEST!)
+- Capitalize the names of brands, software, products, and companies according to their official guidelines. (Netdata, Docker, Apache, Nginx)
+- Avoid camel case (NetData) or all caps (NETDATA).
+
+#### Capitalization of 'Netdata' and 'netdata'
+
+Whenever you refer to the company Netdata, Inc., or the open-source monitoring agent the company develops, capitalize **Netdata**.
+
+However, if you are referring to a process, user, or group on a Linux system, you should not capitalize, as by default those are typically lowercased. In this case, you should also fence these terms in an inline code block: `` `netdata` ``.
+
+```
+# Not recommended
+The netdata agent, which spawns the netdata process, is actively maintained by netdata, inc.
+
+# Recommended
+The Netdata agent, which spawns the `netdata` process, is actively maintained by Netdata, Inc.
+```
+
+#### Capitalization of document titles and page headings
+
+Document titles and page headings should use sentence case. That means you should only capitalize the first word.
+
+If you need to use the name of a brand, software, product, or company, capitalize it according to its official guidelines.
+
+Also, don't put a period (`.`) or colon (`:`) at the end of a title or header.
+
+**Document titles**:
+
+| Capitalization | Not recommended | Recommended
+| --- | --- | ---
+| Document titles | Getting Started Guide | Getting started guide
+| Page headings | Service Discovery and Auto-Detection: | Service discovery and auto-detection
+| Proper nouns | Install netdata with docker | Install Netdata with Docker
+
+### Second person
+
+When writing documentation, you should use the second person ("you") to give instructions. When using the second person, you give the impression that you're personally leading your reader through the steps or tips in question.
+
+See how that works? It's a core part of making Netdata's documentation feel welcoming to all.
+
+Avoid using "we," "I," "let's," and "us" in documentation whenever possible.
+
+The "you" pronoun can also be implied, depending on your sentence structure.
+
+```
+# Not recommended
+To install Netdata, we should try the one-line installer...
+
+# Recommended
+To install Netdata, you should try the one-line installer...
+
+# Recommended, implied "you"
+To install Netdata, try the one-line installer...
+```
+
+### Active voice
+
+Use active voice instead of passive voice, because active voice is more concise and easier to understand.
+
+When using active voice, the subject of the sentence is performing the action. In passive voice, the subject is being acted upon. A famous example of passive voice is the phrase "mistakes were made."
+
+```
+# Not recommended (passive)
+When an alarm is triggered by a metric, a notification is sent by Netdata...
+
+# Recommended (active)
+When a metric triggers an alarm, Netdata sends a notification...
+```
+
+### Standard American spelling
+
+While the Netdata team is mostly *not* American, we still aspire to use American spelling whenever possible, as it is more commonly used within the monitoring industry.
+
+### Clause order
+
+If you want to instruct your reader to take some action in a particular circumstance, such as optional steps, the beginning of the sentence should indicate that circumstance.
+
+```
+# Not recommended
+Read the reference guide if you'd like to learn more about custom dashboards.
+
+# Recommended
+If you'd like to learn more about custom dashboards, read the reference guide.
+```
+
+By placing the circumstance at the beginning of the sentence, those who don't want to follow can stop reading and move on. Those who *do* want to read it are less likely to skip over the sentence.
+
+### Oxford comma
+
+The Oxford comma is the comma used after the second-to-last item in a list of three or more items. It appears just before "and" or "or."
+
+```
+# Not recommended
+Netdata can monitor RAM, disk I/O, MySQL queries per second and lm-sensors.
+
+# Recommended
+Netdata can monitor RAM, disk I/O, MySQL queries per second, and lm-sensors.
+```
+
+## Markdown syntax
+
+The Netdata documentation uses the Markdown syntax for styling and formatting. If you're not familiar with how it works, please read the [Markdown introduction post](https://daringfireball.net/projects/markdown/) by its creator, followed by the [Mastering Markdown](https://guides.github.com/features/mastering-markdown/) guide from GitHub.
+
+We also leverage the power of the [Material theme for MkDocs](https://squidfunk.github.io/mkdocs-material/), which features several [extensions](https://squidfunk.github.io/mkdocs-material/extensions/admonition/), such as the ability to create notes, warnings, and collapsible blocks.
+
+You can follow the syntax specified in the above resources for the majority of documents, but the following sections specify a few particular use cases.
+
+### References to UI elements
+
+If you need to instruct your reader to click a user interface (UI) element inside a Netdata interface, reference the label text of the link/button with Markdown's bold tag (`**bold text**`).
+
+```markdown
+Click on the **Sign in** button.
+```
+
+!!! note
+    Whenever possible, avoid using directional language to orient readers, because not every reader can use instructions like "look at the top-left corner" to find their way around an interface.
+
+    If you feel that you must use directional language, perhaps use an [image](#images) (with proper alt text) instead.
+
+    We're also working to establish standards for how we refer to certain elements of Netdata's web interface. We'll include those in this style guide as soon as they're complete.
+
+### Language-specific syntax highlighting in code blocks
+
+Our documentation uses the [Highlight extension](https://facelessuser.github.io/pymdown-extensions/extensions/highlight/) for syntax highlighting. Highlight is fully compatible with [Pygments](http://pygments.org/), allowing you to highlight the syntax within code blocks in a number of interesting ways.
+
+For a full list of languages, see [Pygments' supported languages](http://pygments.org/languages/). Netdata documentation will use the following for the most part: `c`, `python`, `js`, `shell`, `markdown`, `bash`, `css`, `html`, and `go`. If no language is specified, the Highlight extension doesn't apply syntax highlighting.
+
+Include the language directly after the three backticks (```` ``` ````) that start the code block. For highlighting C code, for example:
+
+````
+```c
+inline char *health_stock_config_dir(void) {
+ char buffer[FILENAME_MAX + 1];
+ snprintfz(buffer, FILENAME_MAX, "%s/health.d", netdata_configured_stock_config_dir);
+ return config_get(CONFIG_SECTION_HEALTH, "stock health configuration directory", buffer);
+}
+```
+````
+
+And the prettified result:
+
+```c
+inline char *health_stock_config_dir(void) {
+ char buffer[FILENAME_MAX + 1];
+ snprintfz(buffer, FILENAME_MAX, "%s/health.d", netdata_configured_stock_config_dir);
+ return config_get(CONFIG_SECTION_HEALTH, "stock health configuration directory", buffer);
+}
+```
+
+You can also use the Highlight and [SuperFences](https://facelessuser.github.io/pymdown-extensions/extensions/superfences/) extensions together to show line numbers or highlight specific lines.
+
+Display line numbers by appending `linenums="1"` after the language declaration, replacing `1` with the starting line number of your choice. Highlight lines by appending `hl_lines="2"`, replacing `2` with the line you'd like to highlight. Or, highlight multiple lines: `hl_lines="1 2 4 12"`. A short example follows.
+
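+For instance, a block that numbers its lines starting at `1` and highlights line `2` might be declared like this (the C snippet inside is illustrative, reused from above):
+
+````
+```c linenums="1" hl_lines="2"
+char buffer[FILENAME_MAX + 1];
+snprintfz(buffer, FILENAME_MAX, "%s/health.d", netdata_configured_stock_config_dir);
+```
+````
+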
+!!! note
+ Line numbers and highlights are not compatible with GitHub's Markdown parser, and thus will only be viewable on our [documentation site](https://docs.netdata.cloud/). They should be used sparingly and only when necessary.
+
+## Accessibility
+
+Netdata's documentation should be as accessible as possible to as many people as possible. While the rules about [tone and content](#tone-and-content) and [language and grammar](#language-and-grammar) are helpful to an extent, we also need some additional rules to improve the reading experience for all readers.
+
+### Images
+
+Images are an important component of documentation, which is why we have a few rules around their usage.
+
+Perhaps most importantly, don't use only images to convey instructions. Each image should be accompanied by alt text and text-based instructions to ensure that every reader can access the information in the best way for them.
+
+#### Alt text
+
+Provide alt text for every image you include in Netdata's documentation. It should summarize the intent and content of the image.
+
+In Markdown, use the standard image syntax, `![]()`, and place the alt text between the brackets `[]`. Here's an example using our logo:
+
+```
+![The Netdata logo](../../web/gui/images/netdata-logomark.svg)
+```
+
+#### Images of text
+
+Don't use images of text, code samples, or terminal output. Instead, put that text content in a code block so that all devices can render it clearly and screen readers can parse it.
diff --git a/docs/generator/buildyaml.sh b/docs/generator/buildyaml.sh
index f887c695..95e11c5a 100755
--- a/docs/generator/buildyaml.sh
+++ b/docs/generator/buildyaml.sh
@@ -104,7 +104,8 @@ markdown_extensions:
- pymdownx.details
- pymdownx.highlight:
pygments_style: manni
- noclasses: true
+ css_class: "highlight codehilite"
+ linenums_style: pymdownx-inline
- pymdownx.inlinehilite
- pymdownx.magiclink
- pymdownx.mark
@@ -135,7 +136,6 @@ echo -ne " - 'docs/what-is-netdata.md'
- 'docs/a-github-star-is-important.md'
- REDISTRIBUTED.md
- CHANGELOG.md
- - CONTRIBUTING.md
- SECURITY.md
- Why Netdata:
- 'docs/why-netdata/README.md'
@@ -175,6 +175,13 @@ echo -ne " - 'docs/Performance.md'
- 'docs/high-performance-netdata.md'
"
+navpart 1 . netdata-cloud "Netdata Cloud"
+echo -ne "
+ - 'docs/netdata-cloud/README.md'
+ - 'docs/netdata-cloud/signing-in.md'
+ - 'docs/netdata-cloud/nodes-view.md'
+"
+
navpart 1 collectors "" "Data collection" 1
echo -ne " - 'docs/Add-more-charts-to-netdata.md'
- Internal plugins:
@@ -264,13 +271,19 @@ navpart 2 web/api/badges "" "" 2
navpart 2 web/api/health "" "" 2
navpart 2 web/api/queries "" "Queries" 2
-echo -ne "- Additional Info:
+echo -ne "- Contributing to Netdata:
+ - CONTRIBUTING.md
+ - 'docs/contributing/contributing-documentation.md'
+ - 'docs/contributing/style-guide.md'
- CODE_OF_CONDUCT.md
- CONTRIBUTORS.md
- packaging/maintainers/README.md
"
+
+echo -ne "- Additional information:
+"
navpart 2 packaging/makeself "" "" 4
navpart 2 libnetdata "" "libnetdata" 4
navpart 2 contrib
navpart 2 tests "" "" 2
-navpart 2 diagrams/data_structures
+navpart 2 diagrams/data_structures
\ No newline at end of file
diff --git a/docs/generator/checklinks.sh b/docs/generator/checklinks.sh
index 5012ad17..6521ca9a 100755
--- a/docs/generator/checklinks.sh
+++ b/docs/generator/checklinks.sh
@@ -227,7 +227,7 @@ checklinks () {
while read -r l ; do
for word in $l ; do
if [[ $word =~ .*\]\(([^\(\) ]*)\).* ]] ; then
- lnk="${BASH_REMATCH[1]}"
+ lnk=$(echo "${BASH_REMATCH[1]}" | tr -d '<>')
if [ -z "$lnk" ] ; then continue ; fi
dbg "-$lnk"
case "$lnk" in
diff --git a/docs/generator/custom/css/netdata.css b/docs/generator/custom/css/netdata.css
index 27f1b08c..7b1934db 100644
--- a/docs/generator/custom/css/netdata.css
+++ b/docs/generator/custom/css/netdata.css
@@ -14,7 +14,6 @@
/* Custom styling for the new documentation homepage.
In particular, the three buttons for install/getting started/configuration. */
-
.homepage-nav {
display: flex;
margin-top: 1.4rem;
@@ -64,11 +63,33 @@
margin-bottom: 6rem;
}
-/* Make sure inline code in tables doesn't break. */
+/* Make sure inline code in tables don't break. */
.md-typeset__table code {
word-break: normal;
}
+/* Give code blocks a little more line height */
+.md-typeset pre {
+ line-height: 1.6;
+}
+
+/* Show line numbers. */
+[data-linenos]:before {
+ border-right: .0625rem solid #ddd;
+ color: #999;
+ content: attr(data-linenos);
+ display: inline-block;
+ margin-left: -1.2rem;
+ margin-right: .7rem;
+ padding-left: 1.2rem;
+}
+
+.md-typeset .highlight .hll {
+ display: inline;
+ margin: 0;
+ padding: 0;
+}
+
/* Bold the first item on the docs sidebar: Netdata Documentation */
.md-nav--primary > .md-nav__list > .md-nav__item:first-of-type {
font-weight: 700;
diff --git a/docs/high-performance-netdata.md b/docs/high-performance-netdata.md
index 553ad6da..3611fee3 100644
--- a/docs/high-performance-netdata.md
+++ b/docs/high-performance-netdata.md
@@ -10,13 +10,13 @@ If you plan to have your Netdata public on the internet, this strategy wastes re
In the following nginx configuration we do the following:
-- allow nginx to maintain up to 1024 idle connections to Netdata (so Netdata will have up to 1024 threads waiting for requests)
+- allow nginx to maintain up to 1024 idle connections to Netdata (so Netdata will have up to 1024 threads waiting for requests)
-- allow nginx to compress the responses of Netdata (later we will disable gzip compression at Netdata)
+- allow nginx to compress the responses of Netdata (later we will disable gzip compression at Netdata)
-- we disable wordpress pingback attacks and allow only GET, HEAD and OPTIONS requests.
-   disable WordPress pingback attacks and allow only GET, HEAD, and OPTIONS requests.
-```
+```conf
upstream backend {
server 127.0.0.1:19999;
keepalive 1024;
@@ -65,25 +65,25 @@ Then edit `/etc/netdata/netdata.conf` and set these config options:
These options:
-- `[global].bind socket to IP = 127.0.0.1` makes Netdata listen only for requests from localhost (nginx).
-- `[global].access log = none` disables the access.log of Netdata. It is not needed since Netdata only listens for requests on 127.0.0.1 and thus only nginx can access it. nginx has its own access.log for your record.
-- `[global].disconnect idle web clients after seconds = 3600` will kill inactive web threads after an hour of inactivity.
-- `[global].enable web responses gzip compression = no` disables gzip compression at Netdata (nginx will compress the responses).
+- `[global].bind socket to IP = 127.0.0.1` makes Netdata listen only for requests from localhost (nginx).
+- `[global].access log = none` disables the access.log of Netdata. It is not needed since Netdata only listens for requests on 127.0.0.1 and thus only nginx can access it. nginx has its own access.log for your record.
+- `[global].disconnect idle web clients after seconds = 3600` will kill inactive web threads after an hour of inactivity.
+- `[global].enable web responses gzip compression = no` disables gzip compression at Netdata (nginx will compress the responses).
## 2. increase open files limit (non-systemd)
By default Linux limits open file descriptors per process to 1024. This means that less than half of this number of client connections can be accepted by both nginx and Netdata. To increase them, create 2 new files:
-1. `/etc/security/limits.d/nginx.conf`, with these contents:
+1. `/etc/security/limits.d/nginx.conf`, with these contents:
- ```
+```
nginx soft nofile 10000
nginx hard nofile 30000
```
-2. `/etc/security/limits.d/netdata.conf`, with these contents:
+2. `/etc/security/limits.d/netdata.conf`, with these contents:
- ```
+```
netdata soft nofile 10000
netdata hard nofile 30000
```
@@ -98,25 +98,25 @@ sysctl -p
Thanks to [@leleobhz](https://github.com/netdata/netdata/issues/655#issue-163932584), this is what you need to raise the limits using systemd:
-This is based on https://ma.ttias.be/increase-open-files-limit-in-mariadb-on-centos-7-with-systemd/ and here worked as following:
+This is based on <https://ma.ttias.be/increase-open-files-limit-in-mariadb-on-centos-7-with-systemd/> and worked here as follows:
-1. Create the folders in /etc:
+1. Create the folders in /etc:
- ```
+```
mkdir -p /etc/systemd/system/netdata.service.d
mkdir -p /etc/systemd/system/nginx.service.d
```
-2. Create limits.conf in each folder as following:
+2. Create limits.conf in each folder as following:
- ```
+```
[Service]
LimitNOFILE=30000
```
-3. Reload systemd daemon list and restart services:
+3. Reload systemd daemon list and restart services:
- ```sh
+```sh
systemctl daemon-reload
systemctl restart netdata.service
systemctl restart nginx.service
@@ -145,7 +145,6 @@ Max open files 30000 30000 files
# cat /proc/$(ps aux | grep "netdata" | head -n1 | grep -v grep | awk '{print $2}')/limits | grep "Max open files"
Max open files 30000 30000 files
-
```
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fhigh-performance-netdata&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fhigh-performance-netdata&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/netdata-cloud/README.md b/docs/netdata-cloud/README.md
new file mode 100644
index 00000000..92084c3c
--- /dev/null
+++ b/docs/netdata-cloud/README.md
@@ -0,0 +1,44 @@
+# Netdata Cloud
+
+Netdata Cloud is core to our ongoing mission to provide real-time, distributed health monitoring and performance troubleshooting. It's the foundation of an ecosystem of tools that will help you build more extraordinary infrastructures.
+
+Netdata Cloud is also the next iteration of our global Netdata registry. For technical information about how our registries work, what information they store, and how your web browser "talks" to both, visit our [registry documentation](../../registry).
+
+Learn more about the future of Netdata Cloud on our [announcement post](https://blog.netdata.cloud/posts/netdata-cloud-announcement/).
+
+## Registering for or signing in to Netdata Cloud
+
+**If you're ready to register for a new Netdata Cloud account, or sign in to your existing Netdata Cloud account, visit our [signing in guide](signing-in.md) for details.**
+
+!!! attention "Private registries and Netdata Cloud"
+ If you're running a private registry and are interested in trying out Netdata Cloud as a replacement for your private registry, read [our notice](signing-in.md#private-registries-and-netdata-cloud) about transitioning from a private registry to our Netdata Cloud registry.
+
+## Netdata Cloud features
+
+Netdata Cloud currently enables two features: the **My nodes** menu in the top-left corner of the Netdata dashboard, and the [**Nodes View**](nodes-view.md).
+
+We have an aggressive roadmap of new features, such as Workspaces for different parts of your infrastructure, Rooms to collaborate with colleagues, and the ability to receive alarms from any number of distributed Netdata agents in a single place. Read more about our [proposed features](https://blog.netdata.cloud/posts/netdata-cloud-announcement/#what-features-will-netdata-cloud-offer).
+
+### Planned enterprise features (paid)
+
+Large enterprises have unique real-time monitoring needs. They have thousands of servers and applications running concurrently, and are willing to pay for the complex features that help them make smarter, faster decisions about their infrastructure. We expect to create a paid tier of Netdata Cloud with a recurring, per-user pricing model that will unlock enterprise-focused features.
+
+A few of these planned features include:
+
+- Long-term storage of Netdata UI snapshots
+- Active Directory integration for single sign-on
+- Private service status pages
+- Extended retention of alarms timelines
+- Incident response toolkits
+- Additional enterprise plugins and integrations
+- Extended retention of chat messages
+
+Again, we expect that the vast majority of Netdata's users won't need these features. Creating these two tiers will help us further fund the company's efforts to deploy Netdata's open-source agent on a massive scale and entirely for free.
+
+## Running Netdata without Netdata Cloud
+
+Netdata Cloud is entirely optional. The application will never force you to create a Netdata Cloud account or associate nodes with the public registries. But, if you choose not to use Netdata Cloud, you will be missing out on the [Nodes View](nodes-view.md) and other upcoming features.
+
+## Running Netdata Cloud on-premises or as a hosted instance
+
+We plan on making both on-premises and hosted instances of Netdata Cloud available to enterprises. Until then, we are creating a list of people and businesses interested in either of these options. To add yourself or your organization to this list, email us at [info@netdata.cloud](mailto:info@netdata.cloud).
diff --git a/docs/netdata-cloud/nodes-view.md b/docs/netdata-cloud/nodes-view.md
new file mode 100644
index 00000000..ec09821c
--- /dev/null
+++ b/docs/netdata-cloud/nodes-view.md
@@ -0,0 +1,206 @@
+# Using the Nodes View
+
+## Introduction
+
+As of v1.15.0 of Netdata, and in conjunction with our announcement post about the [future of Netdata](https://blog.netdata.cloud/posts/netdata-cloud-announcement/), we have enabled an entirely new way to view your infrastructure using the open-source Netdata agent together with Netdata Cloud: the **Nodes View**.
+
+This view, powered by Netdata Cloud, provides an aggregated view of the Netdata agents that you have associated with your Netdata Cloud account. The main benefit of Nodes View is seeing the health of your infrastructure from a single interface, especially if you have many systems running Netdata. With Nodes View, you can monitor the health status of your nodes via active alarms and view a subset of real-time performance metrics the agent is collecting every second.
+
+!!! attention "Nodes View is beta software!"
+ The Nodes View is currently in beta, so all typical warnings about beta software apply. You may come across bugs or inconsistencies.
+
+    The current version of Nodes uses the API available on each Netdata agent to check for new alarms and the machine's overall health/availability. In the future, we will offer both polling via the API and real-time streaming of health status/metrics.
+
+## The Nodes View
+
+To access the Nodes View, you must first be signed in to Netdata Cloud. To register for an account, or sign in to an existing account, visit our [signing in guide](signing-in.md) for details.
+
+Once you're signed in to Netdata Cloud, clicking on any of the **Nodes Beta** buttons in the node's web dashboard will lead you to the Nodes View. Find one (`1`) in the dropdown menu in the upper-right corner, a second (`2`) in the top navigation bar, and a third (`3`) in the dropdown menu in the top-left corner of the Netdata dashboard.
+
+![Annotated screenshot showing where to access Nodes View](https://user-images.githubusercontent.com/1153921/60359236-4fd04b00-998d-11e9-9e4c-f35ad2551a54.png)
+
+### Nodes
+
+The primary component of the Nodes View is a list of all the nodes with Netdata agents you have associated with your Netdata Cloud account via the Netdata Cloud registry.
+
+![A screenshot of the Netdata Cloud web interface](https://user-images.githubusercontent.com/1153921/59883580-657cb980-936a-11e9-8651-a51832a5f41e.png)
+
+Depending on which [view mode](#view-modes) you're using, Nodes View will present you with information about that node, such as its hostname, operating system, warnings/critical alerts, and any [supported services](#services-available-in-the-nodes-view) that are running on that node. Here is an example of the **full** view mode:
+
+![Annotated screenshot of the icons visible in the node entries](https://user-images.githubusercontent.com/1153921/60219761-9eb0a000-9828-11e9-9f77-b492dad016f9.png)
+
+The background color of each Node entry is an indication of its health status:
+
+| Health status | Background color |
+| ------------- | ------------------------------------------------------------------------------------------------- |
+| **White** | Normal status, no alarms |
+| **Yellow** | 1 or more active warnings |
+| **Red** | 1 or more active critical alerts |
+| **Grey** | Node is unreachable (server unreachable [due to network conditions], server down, or changed URL) |
+
+### Node overview
+
+When you click on any of the Nodes, an overview sidebar will appear on the right-hand side of the Nodes View.
+
+This overview contains the following:
+
+- An icon (`1`) representing the operating system installed on that machine
+- The hostname (`2`) of the machine
+- A link (`3`) to the URL at which the web dashboard is available
+- Three tabs (`4`) for **System** metrics, **Services** metrics, and **Alarms**
+- A number of selectors (`5`) to choose which metrics/alarms are shown in the overview
+ - **System** tab: _Overview_, _Disks_, and _Network_ selectors
+ - **Services** tab: _Databases_, _Web_, and _Messaging_ selectors
+ - **Alarms** tab: _Critical_ and _Warning_ selectors
+- The visualizations and/or alarms (`6`) supported under the chosen tab and selector
+- Any other available URLs (`7`) associated with that node under the **Node URLs** header
+
+![A screenshot of the system overview area in the Netdata Cloud web interface](https://user-images.githubusercontent.com/1153921/60361418-f834de00-9992-11e9-9998-ab3da4b8b559.png)
+
+By default, clicking on a Node will display the sidebar with the **System** tab enabled. If there are warnings or alarms active for that Node, the **Alarms** tab will be displayed by default.
+
+**The visualizations in the overview sidebar are live!** As with all of Netdata's visualizations, you can scrub forward and backward in time, zoom, pause, and pinpoint anomalies down to the second.
+
+#### System tab
+
+The **System** tab has three sections: *Overview*, *Disks*, and *Network*.
+
+_Overview_ displays visualizations for `CPU`, `System Load Average`, `Disk I/O`, `System RAM`, `System Swap`, `Physical Network Interfaces Aggregated Bandwidth`, and the URL of the node.
+
+_Disks_ displays visualizations for `Disk Utilization Time` and `Disk Space Usage` for every available disk.
+
+_Network_ displays visualizations for `Bandwidth` for every available networking device.
+
+#### Services tab
+
+The **Services** tab will show visualizations for any [supported services](#services-available-in-the-nodes-view) that are running on that node. Three selectors are available: _Databases_, _Web_, and _Messaging_. If there are no services under any of these categories, the selector will not be clickable.
+
+#### Alarms tab
+
+The **Alarms** tab contains two selectors: _Critical_ and _Warning_. If there are no alarms under either of these categories, the selector will not be clickable.
+
+Both of these tabs will display alarms information when available, along with the relevant visualization with metrics from your Netdata agent. The `view` link redirects you to the web dashboard for the selected node and automatically shows the appropriate visualization and timeframe.
+
+![A screenshot of the alarms area in the Netdata Cloud web interface](https://user-images.githubusercontent.com/1153921/59883273-55180f00-9369-11e9-8895-f74f6c66e038.png)
+
+### Filtering field
+
+The search field will be useful for Netdata Cloud users with dozens or hundreds of Nodes. You can filter for the hostname of the Node you're interested in, the operating system it's running, or even for the services installed.
+
+The filtering field will offer you autocomplete suggestions. For example, the options available after typing `ng` into the filtering field:
+
+![A screenshot of the filtering field in the Netdata Cloud web interface](https://user-images.githubusercontent.com/1153921/59883296-6234fe00-9369-11e9-9950-4bd3986ce887.png)
+
+If you select multiple filters, results will display according to an `OR` operator.
+
+### View modes
+
+To the right of the filtering field are three functions that will help you organize your Visited Nodes according to your preferences.
+
+![Screenshot of the view mode, sorting, and grouping options](https://user-images.githubusercontent.com/1153921/59885999-2a7e8400-9372-11e9-8dae-022ba85e2b69.png)
+
+The view mode button lets you switch between three view modes:
+
+- **Full** mode, which displays the following information in large squares for each connected Node:
+ - Operating system
+ - Critical/warning alerts in two separate indicators
+ - Hostname
+ - Icons for [supported services](#services-available-in-the-nodes-view)
+
+![Annotated screenshot of the full view mode](https://user-images.githubusercontent.com/1153921/60219885-15e63400-9829-11e9-8654-b49f119efb9a.png)
+
+- **Compact** mode, which displays the following information in small squares for each connected Node:
+ - Operating system
+
+![Annotated screenshot of the compact view mode](https://user-images.githubusercontent.com/1153921/60220570-547cee00-982b-11e9-9caf-9dd449184f3a.png)
+
+- **Detailed** mode, which displays the following information in large horizontal rectangles for each connected Node:
+ - Operating system
+ - Critical/warning alerts in two separate indicators
+ - Hostname
+ - Icons for [supported services](#services-available-in-the-nodes-view)
+
+![Annotated screenshot of the detailed view mode](https://user-images.githubusercontent.com/1153921/60220574-56df4800-982b-11e9-8300-aa9190bbf09f.png)
+
+## Sorting and grouping
+
+The **Sort by** dropdown allows you to sort _alphabetically by hostname_, by most _recently-viewed_ nodes, or by most _frequently-viewed_ nodes.
+
+The **Group by** dropdown lets you switch between _alarm status_, _running services_, or _online status_.
+
+For example, the following screenshot represents the Nodes list with the following options: _detailed list_, _frequently visited_, and _alarm status_.
+
+![A screenshot of sorting, grouping, and view modes in the Netdata Cloud web interface](https://user-images.githubusercontent.com/1153921/59883300-68c37580-9369-11e9-8d6e-ce0a8147fc1d.png)
+
+Play around with the options until you find a setup that works for you.
+
+## Adding more agents to the Nodes View
+
+There is currently only one way to associate additional Netdata nodes with your Netdata Cloud account: visit the web dashboard for each node, click the **Sign in** button, and complete the [sign in process](signing-in.md#signing-in-to-your-netdata-cloud-account).
+
+!!! note ""
+ We are aware that the process of registering each node individually is cumbersome for those who want to implement Netdata Cloud's features across a large infrastructure.
+
+    Please view [this comment on issue #6318](https://github.com/netdata/netdata/issues/6318#issuecomment-504106329) for how we plan on improving the process for adding additional nodes to your Netdata Cloud account.
+
+## Services available in the Nodes View
+
+The following tables elaborate on which services will appear in the Nodes View. Alerts from [other collectors](../../collectors/README.md) will show up in the _Alarms_ tab when they enter an alarm status, despite their services not appearing in the _Services_ tab.
+
+### Databases
+
+These services will appear under the _Databases_ selector beneath the _Services_ tab.
+
+| Service | Collectors | Context #1 | Context #2 | Context #3 |
+|--- |--- |--- |--- |--- |
+| MySQL | `python.d.plugin:mysql`, `go.d.plugin:mysql` | `mysql.queries` | `mysql.net` | `mysql.connections` |
+| MariaDB | `python.d.plugin:mysql`, `go.d.plugin:mysql` | `mysql.queries` | `mysql.net` | `mysql.connections` |
+| Oracle Database | `python.d.plugin:oracledb` | `oracledb.session_count` | `oracledb.physical_disk_read_writes` | `oracledb.tablespace_usage_in_percent` |
+| PostgreSQL | `python.d.plugin:postgres` | `postgres.checkpointer` | `postgres.archive_wal` | `postgres.db_size` |
+| MongoDB | `python.d.plugin:mongodb` | `mongodb.active_clients` | `mongodb.read_operations` | `mongodb.write_operations` |
+| ElasticSearch | `python.d.plugin:elasticsearch` | `elastic.search_performance_total` | `elastic.index_performance_total` | `elastic.index_segments_memory` |
+| CouchDB | `python.d.plugin:couchdb` | `couchdb.activity` | `couchdb.response_codes` | |
+| Proxy SQL | `python.d.plugin:proxysql` | `proxysql.questions` | `proxysql.pool_status` | `proxysql.pool_overall_net` |
+| Redis | `python.d.plugin:redis` | `redis.operations` | `redis.net` | `redis.connections` |
+| MemCached | `python.d.plugin:memcached` | `memcached.cache` | `memcached.net` | `memcached.connections` |
+| RethinkDB | `python.d.plugin:rethinkdbs` | `rethinkdb.cluster_queries` | `rethinkdb.cluster_clients_active` | `rethinkdb.cluster_connected_servers` |
+| Solr | `go.d.plugin:solr` | `solr.search_requests` | `solr.update_requests` | |
+
+### Web services
+
+These services will appear under the _Web_ selector beneath the _Services_ tab. These also include proxies, load balancers (LB), and streaming services.
+
+| Service | Collectors | Context #1 | Context #2 | Context #3 |
+|--- |--- |--- |--- |--- |
+| Apache | `python.d.plugin:apache`, `go.d.plugin:apache` | `apache.requests` | `apache.connections` | `apache.net` |
+| nginx | `python.d.plugin:nginx`, `go.d.plugin:nginx` | `nginx.requests` | `nginx.connections` | |
+| nginx+ | `python.d.plugin:nginx_plus` | `nginx_plus.requests_total` | `nginx_plus.connections_statistics` | |
+| lighttpd | `python.d.plugin:lighttpd`, `go.d.plugin:lighttpd` | `lighttpd.requests` | `lighttpd.net` | |
+| lighttpd2 | `go.d.plugin:lighttpd2` | `lighttpd2.requests` | `lighttpd2.traffic` | |
+| LiteSpeed | `python.d.plugin:litespeed` | `litespeed.requests` | `litespeed.requests_processing` | |
+| Tomcat | `python.d.plugin:tomcat` | `tomcat.accesses` | `tomcat.processing_time` | `tomcat.bandwidth` |
+| PHP FPM | `python.d.plugin:phpfpm` | `phpfpm.performance` | `phpfpm.requests` | `phpfpm.connections` |
+| HAproxy | `python.d.plugin:haproxy` | `haproxy_f.scur` | `haproxy_f.bin` | `haproxy_f.bout` |
+| Squid | `python.d.plugin:squid` | `squid.clients_requests` | `squid.clients_net` | |
+| Traefik | `python.d.plugin:traefik` | `traefik.response_codes` | | |
+| Varnish | `python.d.plugin:varnish` | `varnish.session_connection` | `varnish.client_requests` | |
+| IPVS | `proc.plugin:/proc/net/ip_vs_stats` | `ipvs.sockets` | `ipvs.packets` | |
+| Web Log | `python.d.plugin:web_log`, `go.d.plugin:web_log` | `web_log.response_codes` | `web_log.bandwidth` | |
+| IPFS | `python.d.plugin:ipfs` | `ipfs.bandwidth` | `ipfs.peers` | |
+| IceCast Media Streaming | `python.d.plugin:icecast` | `icecast.listeners` | | |
+| RetroShare | `python.d.plugin:retroshare` | `retroshare.bandwidth` | `retroshare.peers` | |
+| HTTP Check | `python.d.plugin:httpcheck`, `go.d.plugin:httpcheck` | `httpcheck.responsetime` | `httpcheck.status` | |
+| x509 Check | `go.d.plugin:x509check` | `x509check.time_until_expiration` | | |
+
+### Messaging
+
+These services will appear under the _Messaging_ selector beneath the _Services_ tab.
+
+| Service | Collectors | Context #1 | Context #2 | Context #3 |
+| --- | --- | --- | --- | --- |
+| RabbitMQ | `python.d.plugin:rabbitmq`, `go.d.plugin:rabbitmq` | `rabbitmq.queued_messages` | `rabbitmq.erlang_run_queue` | |
+| Beanstalkd | `python.d.plugin:beanstalk` | `beanstalk.total_jobs_rate` | `beanstalk.connections_rate` | `beanstalk.current_tubes` |
diff --git a/docs/netdata-cloud/signing-in.md b/docs/netdata-cloud/signing-in.md
new file mode 100644
index 00000000..6e9e334a
--- /dev/null
+++ b/docs/netdata-cloud/signing-in.md
@@ -0,0 +1,155 @@
+# Registration and signing in
+
+To use the features of [Netdata Cloud](README.md), you must first register an account with Netdata Cloud and associate your first Netdata node with the Netdata Cloud [registry](../../registry/README.md). **Netdata Cloud is entirely free for all Netdata users**, and does not store any metrics created by your machines. You keep your data—Netdata Cloud just connects it all together.
+
+!!! attention "Opting-in to Netdata Cloud"
+ By [signing in](signing-in.md) to Netdata Cloud, you opt-in to let Netdata Cloud receive and store the information described [here](../../registry/README.md#what-data-does-the-registry-store). We never store the metrics collected by Netdata agents, just machine GUIDs, person GUID, URLs, and account information.
+
+## Registering a Netdata Cloud account
+
+There is only one prerequisite to using Netdata Cloud: a working Netdata agent. If you don't have a running Netdata agent yet, check out the [installation guides](../../packaging/installer/) for more information.
+
+To begin, visit the web dashboard of your Netdata agent by navigating your browser of choice to `http://SERVER-IP:19999`. You’ll see a dashboard much like this:
+
+![A screenshot of Netdata's web interface](https://user-images.githubusercontent.com/1153921/59644657-b7330300-9122-11e9-9dda-ea784422f3f2.png)
+
+From here, you need to register for a Netdata Cloud account. Click on the **Sign in** button in the top-right corner of the dashboard's view.
+
+![A screenshot of the Sign in button in the Netdata dashboard](https://user-images.githubusercontent.com/1153921/59782688-6252d200-9273-11e9-9975-52be0d6714bf.png)
+
+??? note "Alternative registration routes"
+    While we recommend the **Sign in** button, the Netdata dashboard offers one other direct route for registering for or signing in to a Netdata Cloud account.
+
+    The text **Please sign in to netdata.cloud to view your nodes!** contains a link to access Netdata Cloud.
+
+    ![A screenshot of the Netdata Cloud sign in link](https://user-images.githubusercontent.com/1153921/59644958-2f4df880-9124-11e9-946c-bb30c8735e0a.png)
+
+    Two other routes exist, but they are more directly related to accessing the Nodes View. Both require you to register or sign in first, so they are also valid routes into Netdata Cloud.
+
+    One route can be found in the **Nodes Beta** button on the left side of the navigation menu:
+
+    ![A screenshot of a link to the Nodes View in Netdata Cloud](https://user-images.githubusercontent.com/1153921/59644663-c1ed9800-9122-11e9-9ebc-d67e7db229a7.png)
+
+    A second route can be found in the Nodes List—the drop-down menu in the top-left corner of the Netdata dashboard:
+
+    ![A screenshot of a second link to the Nodes View in Netdata Cloud](https://user-images.githubusercontent.com/1153921/59644973-3d9c1480-9124-11e9-9a1d-33c412578a9f.png)
+
+??? note "Registration route when using a private registry"
+    If you're using a private registry, clicking the **Sign in** button will display a modal window warning you about the process of migrating away from your private registry and to Netdata Cloud's registry.
+
+    ![A screenshot of the private registry warning modal](https://user-images.githubusercontent.com/1153921/59782901-ca091d00-9273-11e9-9f9a-0cb18f78ca26.png)
+
+    If you agree to use Netdata Cloud over your private registry, and opt-in to let Netdata Cloud receive and store the information described [here](../../registry/README.md#what-data-does-the-registry-store), you should click the **Sign in** button again. If not, click the **Cancel** button to continue using your private registry.
+
+### Choosing your registration or sign in method
+
+After clicking the **Sign in** button, you'll be directed to the Netdata Cloud registration/sign in page. Choose to authorize with your Google account, GitHub account, or email.
+
+!!! attention
+    Be consistent with the sign-in method you use, whether GitHub, Google, or email. If you sign in via different methods, the system will create multiple Netdata Cloud accounts, one for each sign-in method used. We plan to offer multiple authentication methods for the same account in the future.
+
+![Screenshot of the registration/sign in view for Netdata Cloud](https://user-images.githubusercontent.com/1153921/59783226-8bc02d80-9274-11e9-8bbc-4718759b3145.png)
+
+### Registration via Google
+
+Click the **Authorize with Google** button to begin registration. You will be redirected to a Google authentication form where you confirm you will "share your name, email address, language preference, and profile picture with netdata.cloud."
+
+![Screenshot of the Google authentication screen for Netdata Cloud](https://user-images.githubusercontent.com/1153921/59786094-50752d00-927b-11e9-9411-5d7ce2b71ab0.png)
+
+Click on the account you would like to connect to Netdata Cloud to continue and then skip down to [Visiting the Nodes View for the first time](#visiting-the-nodes-view-for-the-first-time) for further instructions.
+
+### Registration via GitHub
+
+Click the **Authorize with GitHub** button to begin registration. You will be redirected to a GitHub authentication form where you agree to share your email address with Netdata Cloud to create your account.
+
+![Screenshot of the GitHub authentication screen for Netdata Cloud](https://user-images.githubusercontent.com/1153921/59786227-a2b64e00-927b-11e9-939b-6fc51ef453b0.png)
+
+Click the **Authorize Netdata** button to continue and then skip down to [Visiting the Nodes View for the first time](#visiting-the-nodes-view-for-the-first-time) for further instructions.
+
+### Registration via email
+
+Enter your preferred email into the field and click the **Authorize** button.
+
+Open your email account and check for the verification email—it should arrive in less than a minute. If it doesn't show up, check your spam folder or click the **Resend email** button in the Netdata Cloud interface.
+
+When the email arrives, open it, click the green **Sign in** button, and then skip down to [Visiting the Nodes View for the first time](#visiting-the-nodes-view-for-the-first-time) for further instructions.
+
+![Screenshot of the verification email](https://user-images.githubusercontent.com/1153921/59783969-338a2b00-9276-11e9-84b8-a4f678de1242.png)
+
+## Visiting the Nodes View for the first time
+
+Regardless of which sign in method you used, you'll now be redirected back to your Netdata agent's dashboard. This node has now been associated with your Netdata Cloud account. Netdata Cloud uses a list of nodes associated with your account to populate the Nodes List dropdown in the dashboard and the Nodes View feature of Netdata Cloud.
+
+**For more information on how to use the Nodes View, visit the [Nodes View guide](nodes-view.md).**
+
+## Signing in to your Netdata Cloud account
+
+The process of signing in to an existing Netdata Cloud account is the same as [registering for a new account](#registering-a-netdata-cloud-account). The recommended method is to use the **Sign in** button at the top-right corner of a Netdata node's dashboard. Choose the method you used to register for your Netdata Cloud account and complete the process.
+
+![A screenshot of the Sign in button in the Netdata dashboard](https://user-images.githubusercontent.com/1153921/59782688-6252d200-9273-11e9-9975-52be0d6714bf.png)
+
+## Adding additional nodes to your Netdata Cloud account
+
+There is currently only one way to associate additional Netdata nodes with your Netdata Cloud account: visit the web dashboard of each node, click the **Sign in** button, and complete the [sign in process](#signing-in-to-your-netdata-cloud-account).
+
+!!! note ""
+    We are aware that the process of registering each node individually is cumbersome for those who want to implement Netdata Cloud's features across a large infrastructure.
+
+    Please view [this comment on issue #6318](https://github.com/netdata/netdata/issues/6318#issuecomment-504106329) for how we plan on improving the process for adding additional nodes to your Netdata Cloud account.
+
+## Private registries and Netdata Cloud
+
+If you use a [private registry](../../registry/README.md#run-your-own-registry), and sign in to Netdata Cloud, you'll be using the Netdata Cloud registry in addition to your private registry.
+
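+For reference, the private registry wiring lives in the `[registry]` section of `netdata.conf`; a minimal sketch (the hostname below is a placeholder for your own registry node) looks like this:
+
+```
+[registry]
+    # on the node that acts as your private registry:
+    enabled = yes
+    # on all other nodes, point browsers at that registry:
+    registry to announce = http://registry.example.com:19999
+```
+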
+Clicking the **Sign in** button on the Netdata dashboard will display a modal window warning you about the synchronization of your private registry's entries to the Netdata Cloud's registry.
+
+![A screenshot of the private registry warning modal](https://user-images.githubusercontent.com/1153921/59807493-fd1bd280-92ac-11e9-8017-98efb2cbbed8.png)
+
+If your company's data policies don't allow storing information about your nodes on the Netdata Cloud registry, you should click the **Cancel** button and continue using your private registry. You'll be able to access the Nodes List in the top-left corner of a Netdata dashboard, but you won't be able to use the [Nodes View](nodes-view.md) feature within Netdata Cloud, or any of the [additional features](https://blog.netdata.cloud/posts/netdata-cloud-announcement/#what-features-will-netdata-cloud-offer) on our roadmap. You can also sign up for the waiting list for the [hosted and/or on-premises versions of Netdata Cloud](README.md#running-netdata-cloud-on-premises-or-as-a-hosted-instance) that we're working on.
+
+If you agree to use Netdata Cloud over your private registry, and opt-in to let Netdata Cloud receive and store the information described [here](../../registry/README.md#what-data-does-the-registry-store), you should click the **Sign in** button again to continue the registration/sign in process.
+
+### Returning to your private registry
+
+If you register for or sign in to Netdata Cloud from a node previously associated with a private registry, you can easily return to your private registry by signing out.
+
+You can sign out in two ways:
+
+1. **From a node's dashboard**: In the top-right corner you will find a dropdown menu with your email address. Click that and then click the **Sign Out** button.
+2. **From Netdata Cloud**: Click on your profile picture in the top-right corner and then click on the **Sign Out** button.
+
+Signing out from Netdata Cloud and returning to your private registry *does not remove* the [information stored](../../registry/README.md#what-data-does-the-registry-store) about your nodes or account details.
+
+However, upon signing out, your Nodes List on all dashboards will once more be populated by your private registry and not Netdata Cloud.
+
+<!-- ## The 'Synchronize with Netdata Cloud' button
+
+Once signed in to Netdata Cloud, the Nodes List dropdown will now show a button labeled `Synchronize with netdata.cloud`.
+
+The `Synchronize with Netdata Cloud` button is a migration (or import) tool for Netdata Cloud. If either the public or your private registry contains a list of nodes associated with your `person_guid`, it will import them into Netdata Cloud and associate them with the `accounts` information in the Netdata Cloud registry.
+
+When you click the `Synchronize with netdata.cloud` button, you will receive one of two popup messages based on whether you were using the public registry (at `registry.my-netdata.io`) or a private registry.
+
+**Public registry**:
+
+![Screenshot of the synchronization warning for public registries](https://user-images.githubusercontent.com/1153921/59807540-3a806000-92ad-11e9-99b7-e2254d817ed4.png)
+
+**Private registry**:
+
+![Screenshot of the synchronization warning for private registries](https://user-images.githubusercontent.com/1153921/59807459-d8bff600-92ac-11e9-997f-e84b909f266e.png)
+
+If you do not want to synchronize your registry of choice with Netdata Cloud, click `Cancel`.
+
+If you do, click `Synchronize`. This will push GUIDs, hostnames, and URLs to Netdata Cloud's registry.
+
+Now, when you visit the Nodes View, you will be able to see all the nodes that were once associated with the public/private registry you were using previously. -->
+
+## What's next?
+
+Learn how to use the [Nodes View](nodes-view.md) to monitor many nodes concurrently.
diff --git a/docs/netdata-for-IoT.md b/docs/netdata-for-IoT.md
index ca538543..7a991c26 100644
--- a/docs/netdata-for-IoT.md
+++ b/docs/netdata-for-IoT.md
@@ -2,10 +2,10 @@
![image1](https://cloud.githubusercontent.com/assets/2662304/14252446/11ae13c4-fa90-11e5-9d03-d93a3eb3317a.gif)
-> New to Netdata? Check its demo: **[https://my-netdata.io/](https://my-netdata.io/)**
+> New to Netdata? Check its demo: **<https://my-netdata.io/>**
>
> [![User Base](https://registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_entries&dimensions=persons&label=user%20base&units=null&value_color=blue&precision=0&v41)](https://registry.my-netdata.io/#netdata_registry) [![Monitored Servers](https://registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_entries&dimensions=machines&label=servers%20monitored&units=null&value_color=orange&precision=0&v41)](https://registry.my-netdata.io/#netdata_registry) [![Sessions Served](https://registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_sessions&label=sessions%20served&units=null&value_color=yellowgreen&precision=0&v41)](https://registry.my-netdata.io/#netdata_registry)
->
+>
> [![New Users Today](https://registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_entries&dimensions=persons&after=-86400&options=unaligned&group=incremental-sum&label=new%20users%20today&units=null&value_color=blue&precision=0&v40)](https://registry.my-netdata.io/#netdata_registry) [![New Machines Today](https://registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_entries&dimensions=machines&group=incremental-sum&after=-86400&options=unaligned&label=servers%20added%20today&units=null&value_color=orange&precision=0&v40)](https://registry.my-netdata.io/#netdata_registry) [![Sessions Today](https://registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_sessions&after=-86400&group=incremental-sum&options=unaligned&label=sessions%20served%20today&units=null&value_color=yellowgreen&precision=0&v40)](https://registry.my-netdata.io/#netdata_registry)
---
@@ -38,4 +38,4 @@ Then restart Netdata. You will get this:
![image](https://user-images.githubusercontent.com/2662304/29658868-23aa65ae-88c5-11e7-9dad-c159600db5cc.png)
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fnetdata-for-IoT&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fnetdata-for-IoT&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/netdata-security.md b/docs/netdata-security.md
index a905717d..e3ce6d56 100644
--- a/docs/netdata-security.md
+++ b/docs/netdata-security.md
@@ -4,16 +4,16 @@ We have given special attention to all aspects of Netdata, ensuring that everyth
**Table of Contents**
-1. [Your data are safe with Netdata](#your-data-are-safe-with-netdata)
-2. [Your systems are safe with Netdata](#your-systems-are-safe-with-netdata)
-3. [Netdata is read-only](#netdata-is-read-only)
-4. [Netdata viewers authentication](#netdata-viewers-authentication)
- - [Why Netdata should be protected](#why-netdata-should-be-protected)
- - [Protect Netdata from the internet](#protect-netdata-from-the-internet)
- - [Expose Netdata only in a private LAN](#expose-netdata-only-in-a-private-lan)
- - [Use an authenticating web server in proxy mode](#use-an-authenticating-web-server-in-proxy-mode)
- - [Other methods](#other-methods)
-5. [Registry or how to not send any information to a third party server](#registry-or-how-to-not-send-any-information-to-a-third-party-server)
+1. [Your data are safe with Netdata](#your-data-are-safe-with-netdata)
+2. [Your systems are safe with Netdata](#your-systems-are-safe-with-netdata)
+3. [Netdata is read-only](#netdata-is-read-only)
+4. [Netdata viewers authentication](#netdata-viewers-authentication)
+ - [Why Netdata should be protected](#why-netdata-should-be-protected)
+ - [Protect Netdata from the internet](#protect-netdata-from-the-internet)
+ - [Expose Netdata only in a private LAN](#expose-netdata-only-in-a-private-lan)
+ - [Use an authenticating web server in proxy mode](#use-an-authenticating-web-server-in-proxy-mode)
+ - [Other methods](#other-methods)
+5. [Registry or how to not send any information to a third party server](#registry-or-how-to-not-send-any-information-to-a-third-party-server)
## Your data are safe with Netdata
@@ -86,7 +86,6 @@ In Netdata v1.9+ there is also access list support, like this:
allow connections from = localhost 10.* 192.168.*
```
-
#### Use an authenticating web server in proxy mode
Use one web server to provide authentication in front of **all your Netdata servers**. So, you will be accessing all your Netdata with URLs like `http://{HOST}/netdata/{NETDATA_HOSTNAME}/` and authentication will be shared among all of them (you will sign in once for all your servers). Instructions are provided on how to set the proxy configuration to have Netdata run behind [nginx](Running-behind-nginx.md), [Apache](Running-behind-apache.md), [lighttpd](Running-behind-lighttpd.md#netdata-via-lighttpd-v14x) and [Caddy](Running-behind-caddy.md#netdata-via-caddy).
@@ -97,7 +96,8 @@ To use this method, you should firewall protect all your Netdata servers, so tha
PROXY_IP="1.2.3.4"
iptables -t filter -I INPUT -p tcp --dport 19999 \! -s ${PROXY_IP} -m conntrack --ctstate NEW -j DROP
```
-_commands to allow direct access to Netdata from a web server proxy_
+
+*commands to allow direct access to Netdata from a web server proxy*
The above will prevent anyone except your web server from accessing a Netdata dashboard running on the host.
@@ -132,9 +132,10 @@ iptables -t filter -A netdata -j DROP
iptables -t filter -D INPUT -p tcp --dport ${NETDATA_PORT} -m conntrack --ctstate NEW -j netdata 2>/dev/null
# add the input chain hook (again)
-# to send all new netdata connections to our filtering chain
+# to send all new Netdata connections to our filtering chain
iptables -t filter -I INPUT -p tcp --dport ${NETDATA_PORT} -m conntrack --ctstate NEW -j netdata
```
+
_script to allow access to Netdata only from a number of hosts_
You can run the above any number of times. Each time it runs it refreshes the list of allowed hosts.
@@ -143,19 +144,20 @@ You can run the above any number of times. Each time it runs it refreshes the li
Of course, there are many more methods you could use to protect Netdata:
-- bind Netdata to localhost and use `ssh -L 19998:127.0.0.1:19999 remote.netdata.ip` to forward connections of local port 19998 to remote port 19999. This way you can ssh to a Netdata server and then use `http://127.0.0.1:19998/` on your computer to access the remote Netdata dashboard.
+- bind Netdata to localhost and use `ssh -L 19998:127.0.0.1:19999 remote.netdata.ip` to forward connections of local port 19998 to remote port 19999. This way you can ssh to a Netdata server and then use `http://127.0.0.1:19998/` on your computer to access the remote Netdata dashboard (see the sketch after this list).
-- If you are always under a static IP, you can use the script given above to allow direct access to your Netdata servers without authentication, from all your static IPs.
+- If you always connect from a static IP, you can use the script given above to allow direct access to your Netdata servers from all your static IPs, without authentication.
-- install all your Netdata in **headless data collector** mode, forwarding all metrics in real-time to a master Netdata server, which will be protected with authentication using an nginx server running locally at the master Netdata server. This requires more resources (you will need a bigger master Netdata server), but does not require any firewall changes, since all the slave Netdata servers will not be listening for incoming connections.
+- install all your Netdata agents in **headless data collector** mode, forwarding all metrics in real-time to a master Netdata server, which is then protected with authentication using an nginx server running locally on the master. This requires more resources (you will need a bigger master Netdata server), but does not require any firewall changes, since the slave Netdata servers do not listen for incoming connections.
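+
+As a minimal sketch of the SSH tunnel option above: bind the agent to the loopback interface in `netdata.conf`, restart Netdata, and then tunnel to it over SSH.
+
+```
+[web]
+    # accept connections only from the local machine
+    bind to = 127.0.0.1
+```
+
+```
+# forward local port 19998 to the remote agent, then browse http://127.0.0.1:19998/
+ssh -L 19998:127.0.0.1:19999 remote.netdata.ip
+```
+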
## Anonymous Statistics
### Registry or how to not send any information to a third party server
The default configuration uses a public registry under registry.my-netdata.io (more information about the registry here: [mynetdata-menu-item](../registry/) ). Please be aware that if you use that public registry, you submit the following information to a third party server:
-- The url where you open the web-ui in the browser (via http request referer)
-- The hostnames of the Netdata servers
+
+- The URL where you open the web UI in the browser (via the HTTP `Referer` header)
+- The hostnames of the Netdata servers
If sending this information to the central Netdata registry violates your security policies, you can configure Netdata to [run your own registry](../registry/#run-your-own-registry).
@@ -163,21 +165,21 @@ If sending this information to the central Netdata registry violates your securi
Starting with v1.12 Netdata also collects [anonymous statistics](anonymous-statistics.md) on certain events for:
-1. **Quality assurance**, to help us understand if Netdata behaves as expected and help us identify repeating issues for certain distributions or environments.
+1. **Quality assurance**, to help us understand if Netdata behaves as expected and help us identify repeating issues for certain distributions or environments.
-2. **Usage statistics**, to help us focus on the parts of Netdata that are used the most, or help us identify the extent our development decisions influence the community.
+2. **Usage statistics**, to help us focus on the parts of Netdata that are used the most, or help us identify the extent to which our development decisions influence the community.
To opt-out from sending anonymous statistics, you can create a file called `.opt-out-from-anonymous-statistics` under the user configuration directory (usually `/etc/netdata`).
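+
+For example, on a typical install:
+
+```
+# the presence of this empty file is enough to disable anonymous statistics
+sudo touch /etc/netdata/.opt-out-from-anonymous-statistics
+```
+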
## Netdata directories
-path|owner|permissions| Netdata |comments|
-:---|:----|:----------|:--------|:-------|
-`/etc/netdata`|user&nbsp;`root`<br/>group&nbsp;`netdata`|dirs `0755`<br/>files `0640`|reads|**Netdata config files**<br/>may contain sensitive information, so group `netdata` is allowed to read them.
-`/usr/libexec/netdata`|user&nbsp;`root`<br/>group&nbsp;`root`|executable by anyone<br/>dirs `0755`<br/>files `0644` or `0755`|executes|**Netdata plugins**<br/>permissions depend on the file - not all of them should have the executable flag.<br/>there are a few plugins that run with escalated privileges (Linux capabilities or `setuid`) - these plugins should be executable only by group `netdata`.
-`/usr/share/netdata`|user&nbsp;`root`<br/>group&nbsp;`netdata`|readable by anyone<br/>dirs `0755`<br/>files `0644`|reads and sends over the network|**Netdata web static files**<br/>these files are sent over the network to anyone that has access to the Netdata web server. Netdata checks the ownership of these files (using settings at the `[web]` section of `netdata.conf`) and refuses to serve them if they are not properly owned. Symbolic links are not supported. Netdata also refuses to serve URLs with `..` in their name.
-`/var/cache/netdata`|user&nbsp;`netdata`<br/>group&nbsp;`netdata`|dirs `0750`<br/>files `0660`|reads, writes, creates, deletes|**Netdata ephemeral database files**<br/>Netdata stores its ephemeral real-time database here.
-`/var/lib/netdata`|user&nbsp;`netdata`<br/>group&nbsp;`netdata`|dirs `0750`<br/>files `0660`|reads, writes, creates, deletes|**Netdata permanent database files**<br/>Netdata stores here the registry data, health alarm log db, etc.
-`/var/log/netdata`|user&nbsp;`netdata`<br/>group&nbsp;`root`|dirs `0755`<br/>files `0644`|writes, creates|**Netdata log files**<br/>all the Netdata applications, logs their errors or other informational messages to files in this directory. These files should be log rotated.
+| path|owner|permissions|Netdata|comments|
+|:---|:----|:----------|:------|:-------|
+| `/etc/netdata`|user `root`<br/>group `netdata`|dirs `0755`<br/>files `0640`|reads|**Netdata config files**<br/>may contain sensitive information, so group `netdata` is allowed to read them.|
+| `/usr/libexec/netdata`|user `root`<br/>group `root`|executable by anyone<br/>dirs `0755`<br/>files `0644` or `0755`|executes|**Netdata plugins**<br/>permissions depend on the file - not all of them should have the executable flag.<br/>there are a few plugins that run with escalated privileges (Linux capabilities or `setuid`) - these plugins should be executable only by group `netdata`.|
+| `/usr/share/netdata`|user `root`<br/>group `netdata`|readable by anyone<br/>dirs `0755`<br/>files `0644`|reads and sends over the network|**Netdata web static files**<br/>these files are sent over the network to anyone that has access to the Netdata web server. Netdata checks the ownership of these files (using settings at the `[web]` section of `netdata.conf`) and refuses to serve them if they are not properly owned. Symbolic links are not supported. Netdata also refuses to serve URLs with `..` in their name.|
+| `/var/cache/netdata`|user `netdata`<br/>group `netdata`|dirs `0750`<br/>files `0660`|reads, writes, creates, deletes|**Netdata ephemeral database files**<br/>Netdata stores its ephemeral real-time database here.|
+| `/var/lib/netdata`|user `netdata`<br/>group `netdata`|dirs `0750`<br/>files `0660`|reads, writes, creates, deletes|**Netdata permanent database files**<br/>Netdata stores here the registry data, health alarm log db, etc.|
+| `/var/log/netdata`|user `netdata`<br/>group `root`|dirs `0755`<br/>files `0644`|writes, creates|**Netdata log files**<br/>all the Netdata applications log their errors and other informational messages to files in this directory. These files should be log-rotated.|
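+
+One way to audit a running install against the table above is to print the actual modes and ownership (GNU `stat` shown; adjust the paths if you installed to a custom prefix):
+
+```
+# mode, owner:group and path for each Netdata directory
+stat -c '%a %U:%G %n' /etc/netdata /usr/libexec/netdata /usr/share/netdata \
+    /var/cache/netdata /var/lib/netdata /var/log/netdata
+```
+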
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fnetdata-security&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fnetdata-security&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/privacy-policy.md b/docs/privacy-policy.md
index e46d783e..4f1c7459 100644
--- a/docs/privacy-policy.md
+++ b/docs/privacy-policy.md
@@ -6,7 +6,7 @@ This Privacy Policy explains the collection, use, processing, transferring and d
This Privacy Policy is incorporated into and made part of the Netdata Master Terms of Use (“Master Terms”) located [here](terms-of-use.md).
-Unless otherwise noted on a particular website or service hosted by Netdata, this Privacy Policy applies to your use of all websites that Netdata operates. These include https://my-netdata.io and https://netdata.cloud, together with all other subdomains thereof, (collectively, the “Websites”). This Privacy Policy also applies to all products, information, and services provided through the Websites, including without limitation the ND agent, the ND registry, the ND hub and the ND cloud website (together with the Websites, the “Services”).
+Unless otherwise noted on a particular website or service hosted by Netdata, this Privacy Policy applies to your use of all websites that Netdata operates. These include <https://my-netdata.io> and <https://netdata.cloud>, together with all other subdomains thereof, (collectively, the “Websites”). This Privacy Policy also applies to all products, information, and services provided through the Websites, including without limitation the ND agent, the ND registry, the ND hub and the ND cloud website (together with the Websites, the “Services”).
In addition, supplemental Privacy Policy terms (“Supplemental Privacy Policy Terms”) may apply to a particular Service. All such Supplemental Privacy Policy Terms will be accessible for you to read either within, or through your use of, that particular Service.
@@ -28,7 +28,7 @@ ND collects and uses personal information in the following ways:
Website and Analytics: When you visit our Websites and use our Services, ND collects some information about your activities through tools such as Google Analytics. The type of information that we collect focuses on general information such as country or city where you are located, pages visited, time spent on pages, heat-map of visitors’ activity on the site, information about the browser you are using, etc. ND collects and uses this information pursuant to our legitimate interest in enhancing the security and utility of our Services. The information we gather and process is used in the aggregate to spot trends without deliberately identifying individuals.
-Note that you can learn about Google’s practices in connection with its analytics services and how to opt out of it by downloading the Google Analytics opt-out browser add-on, available at https://tools.google.com/dlpage/gaoptout.
+Note that you can learn about Google’s practices in connection with its analytics services and how to opt out of it by downloading the Google Analytics opt-out browser add-on, available at <https://tools.google.com/dlpage/gaoptout>.
Information from Cookies: We and our service providers (for example, Google Analytics as described above) may collect information using cookies or similar technologies for the purposes described above and below. Cookies are pieces of information that are stored by your browser on the hard drive or memory of your computer or other Internet access device. Cookies may enable us to personalize your experience on the Services, maintain a persistent session, passively collect demographic information about your computer, and monitor advertisements and other activities. The Websites may use different kinds of cookies and other types of local storage (such as browser-based or plugin-based local storage).
@@ -37,22 +37,22 @@ The menu lists the Netdata servers you have visited. For example, when you jump
(like the currently viewed charts, the current zoom and pan operations on the charts, etc.) are propagated to the new server, so that the new dashboard will come with exactly the
same view. The global registry keeps track of 4 entities:
-1. **machines**: i.e. the netdata installations (a random GUID generated by each netdata the first time it starts; we call this **machine_guid**)
+1. **machines**: i.e. the Netdata installations (a random GUID generated by each Netdata the first time it starts; we call this **machine_guid**)
- For each netdata installation (each `machine_guid`) the registry keeps track of the different URLs it is accessed.
+ For each Netdata installation (each `machine_guid`) the registry keeps track of the different URLs through which it is accessed.
-2. **persons**: i.e. the web browsers accessing the netdata installations (a random GUID generated by the registry the first time it sees a new web browser; we call this **person_guid**)
+2. **persons**: i.e. the web browsers accessing the Netdata installations (a random GUID generated by the registry the first time it sees a new web browser; we call this **person_guid**)
- For each person, the registry keeps track of the netdata installations it has accessed and their URLs.
+ For each person, the registry keeps track of the Netdata installations it has accessed and their URLs.
-3. **URLs** of netdata installations (as seen by the web browsers)
+3. **URLs** of Netdata installations (as seen by the web browsers)
- For each URL, the registry keeps the URL and nothing more. Each URL is linked to *persons* and *machines*. The only way to find a URL is to know its **machine_guid** or have a **person_guid** it is linked to it.
+ For each URL, the registry keeps the URL and nothing more. Each URL is linked to _persons_ and _machines_. The only way to find a URL is to know its **machine_guid** or have a **person_guid** it is linked to.
-4. **accounts**: i.e. the information used to sign-in via one of the available sign-in methods. Depending on the method, this may include an email, an email and a profile picture.
+4. **accounts**: i.e. the information used to sign-in via one of the available sign-in methods. Depending on the method, this may include an email, or an email and a profile picture.
-For *persons*/*accounts* and *machines*, the registry keeps links to *URLs*, each link with 2 timestamps (first time seen, last time seen) and a counter (number of times it has been seen).
-*machines*, *persons*, and timestamps are stored in the netdata registry regardless of whether you sign in or not.
+For _persons/accounts_ and _machines_, the registry keeps links to _URLs_, each link with 2 timestamps (first time seen, last time seen) and a counter (number of times it has been seen).
+_machines_, _persons_, and timestamps are stored in the Netdata registry regardless of whether you sign in or not.
If sending this information is against your policies, you can [run your own registry](../registry/#run-your-own-registry).
Note that ND versions with the 'Sign in' feature of the ND Cloud do not use the global registry.
@@ -60,18 +60,19 @@ Note that ND versions with the 'Sign in' feature of the ND Cloud do not use the
ND Cloud: When you sign up to obtain a user account via the 'Sign in' link on the ND agent user interface, ND is granted access to personal information in the user profile of the authentication provider you choose (e.g. GitHub or Google). ND collects and uses this personal information pursuant to its legitimate interest in establishing and maintaining your account providing you with the features we provide Registered Users. We may use your email address to contact you regarding changes to this policy or other applicable policies. The login name or email address of your profile may be used to attribute you in connection with any content you submit to any Service.
Anonymous Usage Statistics: From Netdata v1.12 and above, anonymous usage information is collected by default on certain events of the ND daemon and sent to Google Analytics. Every time the daemon is started or stopped and every time a fatal condition is encountered, Netdata collects system information and sends it to GA via an http call. The information collected for all events is:
- - Netdata version
- - OS name, version, id, id_like
- - Kernel name, version, architecture
- - Virtualization technology
- - Containerization technology
-Furthermore, the FATAL event sends the Netdata process & thread info, along with the file, function and line of the fatal error.
+
+- Netdata version
+- OS name, version, id, id_like
+- Kernel name, version, architecture
+- Virtualization technology
+- Containerization technology
+
+Furthermore, the FATAL event sends the Netdata process & thread info, along with the file, function and line of the fatal error.
The statistics calculated from this information are used for:
-1. **Quality assurance**, to help us understand if Netdata behaves as expected and help us identify repeating issues for certain distributions or environment.
+1. **Quality assurance**, to help us understand if Netdata behaves as expected and help us identify repeating issues for certain distributions or environments.
-2. **Usage statistics**, to help us focus on the parts of Netdata that are used the most, or help us identify the extend our development decisions influence the community.
+2. **Usage statistics**, to help us focus on the parts of Netdata that are used the most, or help us identify the extent to which our development decisions influence the community.
To opt-out from sending anonymous statistics, you can create a file called `.opt-out-from-anonymous-statistics` under the user configuration directory (usually `/etc/netdata`).
@@ -125,4 +126,4 @@ We may occasionally update this Privacy Policy. When we do, we will provide you
Effective Date: 8 January 2019.
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fprivacy-policy&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fprivacy-policy&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/terms-of-use.md b/docs/terms-of-use.md
index 5565f605..bf77fabb 100644
--- a/docs/terms-of-use.md
+++ b/docs/terms-of-use.md
@@ -1,11 +1,11 @@
# Terms of Use
Netdata Master Terms of Use
-Effective as of 25 May 2018
+Effective as of 09 Aug 2019
## 1. General Information Regarding These Terms of Use
-Master terms: Welcome, and thank you for your interest in Netdata (“Netdata, Inc.” “ND,” “we,” “our,” or “us”). Unless otherwise noted on a particular site or service, these master terms of use (“Master Terms”) apply to your use of all of the websites that Netdata Corporation operates. These include https://my-netdata.io and https://netdata.cloud, together with all other subdomains thereof, (collectively, the “Websites”). The Master Terms also apply to all products, information, and services provided through the Websites, such as the NDID Login Service.
+Master terms: Welcome, and thank you for your interest in Netdata (“Netdata, Inc.” “ND,” “we,” “our,” or “us”). Unless otherwise noted on a particular site or service, these master terms of use (“Master Terms”) apply to your use of all of the websites that Netdata Corporation operates. These include <https://my-netdata.io> and <https://netdata.cloud>, together with all other subdomains thereof, (collectively, the “Websites”). The Master Terms also apply to all products, information, and services provided through the Websites, such as the NDID Login Service.
Additional terms: In addition to the Master Terms, your use of any Services may also be subject to specific terms applicable to a particular Service (“Additional Terms”). If there is any conflict between the Additional Terms and the Master Terms, then the Additional Terms apply in relation to the relevant Service.
@@ -61,7 +61,7 @@ You agree not to engage in any of the following activities:
### 1. Violating laws and rights:
-You may not (a) use any Service for any illegal purpose or in violation of any local, state, national, or international laws, (b) violate or encourage others to violate any right of or obligation to a third party, including by infringing, misappropriating, or violating intellectual property, confidentiality, or privacy rights.
+You may not (a) use any Service for any illegal purpose or in violation of any local, state, national, or international laws, including without limitation U.S. export controls and economic sanctions regulations, or (b) violate or encourage others to violate any right of or obligation to a third party, including by infringing, misappropriating, or violating intellectual property, confidentiality, or privacy rights.
### 2. Solicitation:
@@ -158,4 +158,4 @@ Integration: These Master Terms and any applicable Additional Terms constitute t
Human-readable summary of Sec 15: If there is a lawsuit arising from these terms, it should be in Delaware and governed by Delaware law. We are glad you use our sites, but this agreement does not mean we are partners.
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fterms-of-use&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fterms-of-use&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/what-is-netdata.md b/docs/what-is-netdata.md
index 6664897d..b134dc2c 100644
--- a/docs/what-is-netdata.md
+++ b/docs/what-is-netdata.md
@@ -1,8 +1,8 @@
-# What is Netdata?
+# What is Netdata?
-[![Build Status](https://travis-ci.com/netdata/netdata.svg?branch=master)](https://travis-ci.com/netdata/netdata) [![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/2231/badge)](https://bestpractices.coreinfrastructure.org/projects/2231) [![License: GPL v3+](https://img.shields.io/badge/License-GPL%20v3%2B-blue.svg)](https://www.gnu.org/licenses/gpl-3.0) [![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Freadme&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![Build Status](https://travis-ci.com/netdata/netdata.svg?branch=master)](https://travis-ci.com/netdata/netdata) [![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/2231/badge)](https://bestpractices.coreinfrastructure.org/projects/2231) [![License: GPL v3+](https://img.shields.io/badge/License-GPL%20v3%2B-blue.svg)](https://www.gnu.org/licenses/gpl-3.0) [![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Freadme&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
-[![Code Climate](https://codeclimate.com/github/netdata/netdata/badges/gpa.svg)](https://codeclimate.com/github/netdata/netdata) [![Codacy Badge](https://api.codacy.com/project/badge/Grade/a994873f30d045b9b4b83606c3eb3498)](https://www.codacy.com/app/netdata/netdata?utm_source=github.com&amp;utm_medium=referral&amp;utm_content=netdata/netdata&amp;utm_campaign=Badge_Grade) [![LGTM C](https://img.shields.io/lgtm/grade/cpp/g/netdata/netdata.svg?logo=lgtm)](https://lgtm.com/projects/g/netdata/netdata/context:cpp) [![LGTM JS](https://img.shields.io/lgtm/grade/javascript/g/netdata/netdata.svg?logo=lgtm)](https://lgtm.com/projects/g/netdata/netdata/context:javascript) [![LGTM PYTHON](https://img.shields.io/lgtm/grade/python/g/netdata/netdata.svg?logo=lgtm)](https://lgtm.com/projects/g/netdata/netdata/context:python)
+[![Code Climate](https://codeclimate.com/github/netdata/netdata/badges/gpa.svg)](https://codeclimate.com/github/netdata/netdata) [![Codacy Badge](https://api.codacy.com/project/badge/Grade/a994873f30d045b9b4b83606c3eb3498)](https://www.codacy.com/app/netdata/netdata?utm_source=github.com&utm_medium=referral&utm_content=netdata/netdata&utm_campaign=Badge_Grade) [![LGTM C](https://img.shields.io/lgtm/grade/cpp/g/netdata/netdata.svg?logo=lgtm)](https://lgtm.com/projects/g/netdata/netdata/context:cpp) [![LGTM JS](https://img.shields.io/lgtm/grade/javascript/g/netdata/netdata.svg?logo=lgtm)](https://lgtm.com/projects/g/netdata/netdata/context:javascript) [![LGTM PYTHON](https://img.shields.io/lgtm/grade/python/g/netdata/netdata.svg?logo=lgtm)](https://lgtm.com/projects/g/netdata/netdata/context:python)
---
@@ -22,9 +22,9 @@ The following animated image, shows the top part of a typical Netdata dashboard.
![peek 2018-11-11 02-40](https://user-images.githubusercontent.com/2662304/48307727-9175c800-e55b-11e8-92d8-a581d60a4889.gif)
-*A typical Netdata dashboard, in 1:1 timing. Charts can be panned by dragging them, zoomed in/out with `SHIFT` + `mouse wheel`, an area can be selected for zoom-in with `SHIFT` + `mouse selection`. Netdata is highly interactive and **real-time**, optimized to get the work done!*
+_A typical Netdata dashboard, in 1:1 timing. Charts can be panned by dragging them, zoomed in/out with `SHIFT` + `mouse wheel`, an area can be selected for zoom-in with `SHIFT` + `mouse selection`. Netdata is highly interactive and **real-time**, optimized to get the work done!_
-> *We have a few online demos to experience it live: [https://www.netdata.cloud](https://www.netdata.cloud/#live-demo)*
+> _We have a few online demos to experience it live: [https://www.netdata.cloud](https://www.netdata.cloud/#live-demo)_
## User base
@@ -36,16 +36,18 @@ You will find people working for **Amazon**, **Atos**, **Baidu**, **Cisco System
**Vimeo**, and many more!
### Docker pulls
+
We provide docker images for the most common architectures. These are statistics reported by docker hub:
[![netdata/netdata (official)](https://img.shields.io/docker/pulls/netdata/netdata.svg?label=netdata/netdata+%28official%29)](https://hub.docker.com/r/netdata/netdata/) [![firehol/netdata (deprecated)](https://img.shields.io/docker/pulls/firehol/netdata.svg?label=firehol/netdata+%28deprecated%29)](https://hub.docker.com/r/firehol/netdata/) [![titpetric/netdata (donated)](https://img.shields.io/docker/pulls/titpetric/netdata.svg?label=titpetric/netdata+%28third+party%29)](https://hub.docker.com/r/titpetric/netdata/)
### Registry
+
When you install multiple Netdata, they are integrated into **one distributed application**, via a [Netdata registry](../registry/#registry). This is a web browser feature and it allows us to count the number of unique users and unique Netdata servers installed. The following information comes from the global public Netdata registry we run:
[![User Base](https://registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_entries&dimensions=persons&label=user%20base&units=M&value_color=blue&precision=2&divide=1000000&v43)](https://registry.my-netdata.io/#menu_netdata_submenu_registry) [![Monitored Servers](https://registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_entries&dimensions=machines&label=servers%20monitored&units=k&divide=1000&value_color=orange&precision=2&v43)](https://registry.my-netdata.io/#menu_netdata_submenu_registry) [![Sessions Served](https://registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_sessions&label=sessions%20served&units=M&value_color=yellowgreen&precision=2&divide=1000000&v43)](https://registry.my-netdata.io/#menu_netdata_submenu_registry)
-*in the last 24 hours:*<br/> [![New Users Today](https://registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_entries&dimensions=persons&after=-86400&options=unaligned&group=incremental-sum&label=new%20users%20today&units=null&value_color=blue&precision=0&v42)](https://registry.my-netdata.io/#menu_netdata_submenu_registry) [![New Machines Today](https://registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_entries&dimensions=machines&group=incremental-sum&after=-86400&options=unaligned&label=servers%20added%20today&units=null&value_color=orange&precision=0&v42)](https://registry.my-netdata.io/#menu_netdata_submenu_registry) [![Sessions Today](https://registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_sessions&after=-86400&group=incremental-sum&options=unaligned&label=sessions%20served%20today&units=null&value_color=yellowgreen&precision=0&v42)](https://registry.my-netdata.io/#menu_netdata_submenu_registry)
+_in the last 24 hours:_<br/> [![New Users Today](https://registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_entries&dimensions=persons&after=-86400&options=unaligned&group=incremental-sum&label=new%20users%20today&units=null&value_color=blue&precision=0&v42)](https://registry.my-netdata.io/#menu_netdata_submenu_registry) [![New Machines Today](https://registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_entries&dimensions=machines&group=incremental-sum&after=-86400&options=unaligned&label=servers%20added%20today&units=null&value_color=orange&precision=0&v42)](https://registry.my-netdata.io/#menu_netdata_submenu_registry) [![Sessions Today](https://registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_sessions&after=-86400&group=incremental-sum&options=unaligned&label=sessions%20served%20today&units=null&value_color=yellowgreen&precision=0&v42)](https://registry.my-netdata.io/#menu_netdata_submenu_registry)
## Why Netdata
@@ -53,25 +55,25 @@ Netdata has a quite different approach to monitoring.
Netdata is a monitoring agent you install on all your systems. It is:
-- a **metrics collector** - for system and application metrics (including web servers, databases, containers, etc)
-- a **time-series database** - all stored in memory (does not touch the disks while it runs)
-- a **metrics visualizer** - super fast, interactive, modern, optimized for anomaly detection
-- an **alarms notification engine** - an advanced watchdog for detecting performance and availability issues
+- a **metrics collector** - for system and application metrics (including web servers, databases, containers, etc)
+- a **time-series database** - all stored in memory (does not touch the disks while it runs)
+- a **metrics visualizer** - super fast, interactive, modern, optimized for anomaly detection
+- an **alarms notification engine** - an advanced watchdog for detecting performance and availability issues
All the above are packaged together in a very flexible, extremely modular, distributed application.
This is how Netdata compares to other monitoring solutions:
-Netdata|others (open-source and commercial)
-:---:|:---:
-**High resolution metrics** (1s granularity)|Low resolution metrics (10s granularity at best)
-Monitors everything, **thousands of metrics per node**|Monitor just a few metrics
-UI is super fast, optimized for **anomaly detection**|UI is good for just an abstract view
-**Meaningful presentation**, to help you understand the metrics|You have to know the metrics before you start
-Install and get results **immediately**|Long preparation is required to get any useful results
-Use it for **troubleshooting** performance problems|Use them to get *statistics of past performance*
-**Kills the console** for tracing performance issues|The console is always required for troubleshooting
-Requires **zero dedicated resources**|Require large dedicated resources
+| Netdata | others (open-source and commercial)|
+|:-----:|:---------------------------------:|
+| **High resolution metrics** (1s granularity)|Low resolution metrics (10s granularity at best)|
+| Monitors everything, **thousands of metrics per node**|Monitor just a few metrics|
+| UI is super fast, optimized for **anomaly detection**|UI is good for just an abstract view|
+| **Meaningful presentation**, to help you understand the metrics|You have to know the metrics before you start|
+| Install and get results **immediately**|Long preparation is required to get any useful results|
+| Use it for **troubleshooting** performance problems|Use them to get *statistics of past performance*|
+| **Kills the console** for tracing performance issues|The console is always required for troubleshooting|
+| Requires **zero dedicated resources**|Require large dedicated resources|
Netdata is **open-source**, **free**, super **fast**, very **easy**, completely **open**, extremely **efficient**,
**flexible** and integrate-able.
@@ -87,14 +89,14 @@ Netdata is a highly efficient, highly modular, metrics management engine. Its lo
This is how it works:
-Function|Description|Documentation
-:---:|:---|:---:
-**Collect**|Multiple independent data collection workers are collecting metrics from their sources using the optimal protocol for each application and push the metrics to the database. Each data collection worker has lockless write access to the metrics it collects.|[`collectors`](../collectors/#data-collection-plugins)
-**Store**|Metrics are stored in RAM in a round robin database (ring buffer), using a custom made floating point number for minimal footprint.|[`database`](../database/#database)
-**Check**|A lockless independent watchdog is evaluating **health checks** on the collected metrics, triggers alarms, maintains a health transaction log and dispatches alarm notifications.|[`health`](../health/#health-monitoring)
-**Stream**|An lockless independent worker is streaming metrics, in full detail and in real-time, to remote Netdata servers, as soon as they are collected.|[`streaming`](../streaming/#streaming-and-replication)
-**Archive**|A lockless independent worker is down-sampling the metrics and pushes them to **backend** time-series databases.|[`backends`](../backends/)
-**Query**|Multiple independent workers are attached to the [internal web server](../web/server/#web-server), servicing API requests, including [data queries](../web/api/queries/#database-queries).|[`web/api`](../web/api/#api)
+|Function|Description|Documentation|
+|:------:|:----------|:-----------:|
+|**Collect**|Multiple independent data collection workers collect metrics from their sources, using the optimal protocol for each application, and push them to the database. Each data collection worker has lockless write access to the metrics it collects.|[`collectors`](../collectors/#data-collection-plugins)|
+|**Store**|Metrics are stored in RAM in a round robin database (ring buffer), using a custom made floating point number for minimal footprint.|[`database`](../database/#database)|
+|**Check**|A lockless independent watchdog evaluates **health checks** on the collected metrics, triggers alarms, maintains a health transaction log and dispatches alarm notifications.|[`health`](../health/#health-monitoring)|
+|**Stream**|A lockless independent worker streams metrics, in full detail and in real-time, to remote Netdata servers, as soon as they are collected.|[`streaming`](../streaming/#streaming-and-replication)|
+|**Archive**|A lockless independent worker down-samples the metrics and pushes them to **backend** time-series databases.|[`backends`](../backends/)|
+|**Query**|Multiple independent workers are attached to the [internal web server](../web/server/#web-server), servicing API requests, including [data queries](../web/api/queries/#database-queries).|[`web/api`](../web/api/#api)|
The result is a highly efficient, low latency system, supporting multiple readers and one writer on each metric.
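+
+For instance, the **Query** function in the table above is the same API the dashboard itself uses, so you can exercise it directly. A quick sketch against a local agent (`system.cpu` is one of the charts collected out of the box):
+
+```
+# fetch the last 60 seconds of CPU metrics as JSON from the internal web server
+curl 'http://127.0.0.1:19999/api/v1/data?chart=system.cpu&after=-60&format=json'
+```
+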
@@ -105,7 +107,6 @@ Click it to to interact with it (it has direct links to documentation).
[![image](https://user-images.githubusercontent.com/43294513/60951037-8ba5d180-a2f8-11e9-906e-e27356f168bc.png)](https://my-netdata.io/infographic.html)
-
## Features
![finger-video](https://user-images.githubusercontent.com/2662304/48346998-96cf3180-e685-11e8-9f4e-059d23aa3aa5.gif)
@@ -113,31 +114,34 @@ Click it to to interact with it (it has direct links to documentation).
This is what you should expect from Netdata:
### General
-- **1s granularity** - the highest possible resolution for all metrics.
-- **Unlimited metrics** - collects all the available metrics, the more the better.
-- **1% CPU utilization of a single core** - it is super fast, unbelievably optimized.
-- **A few MB of RAM** - by default it uses 25MB RAM. [You size it](../database).
-- **Zero disk I/O** - while it runs, it does not load or save anything (except `error` and `access` logs).
-- **Zero configuration** - auto-detects everything, it can collect up to 10000 metrics per server out of the box.
-- **Zero maintenance** - You just run it, it does the rest.
-- **Zero dependencies** - it is even its own web server, for its static web files and its web API (though its plugins may require additional libraries, depending on the applications monitored).
-- **Scales to infinity** - you can install it on all your servers, containers, VMs and IoTs. Metrics are not centralized by default, so there is no limit.
-- **Several operating modes** - Autonomous host monitoring (the default), headless data collector, forwarding proxy, store and forward proxy, central multi-host monitoring, in all possible configurations. Each node may have different metrics retention policy and run with or without health monitoring.
+
+- **1s granularity** - the highest possible resolution for all metrics.
+- **Unlimited metrics** - collects all the available metrics, the more the better.
+- **1% CPU utilization of a single core** - it is super fast, unbelievably optimized.
+- **A few MB of RAM** - by default it uses 25MB RAM. [You size it](../database).
+- **Zero disk I/O** - while it runs, it does not load or save anything (except `error` and `access` logs).
+- **Zero configuration** - auto-detects everything, it can collect up to 10000 metrics per server out of the box.
+- **Zero maintenance** - You just run it, it does the rest.
+- **Zero dependencies** - it is even its own web server, for its static web files and its web API (though its plugins may require additional libraries, depending on the applications monitored).
+- **Scales to infinity** - you can install it on all your servers, containers, VMs and IoTs. Metrics are not centralized by default, so there is no limit.
+- **Several operating modes** - Autonomous host monitoring (the default), headless data collector, forwarding proxy, store and forward proxy, central multi-host monitoring, in all possible configurations. Each node may have different metrics retention policy and run with or without health monitoring.
### Health Monitoring & Alarms
-- **Sophisticated alerting** - comes with hundreds of alarms, **out of the box**! Supports dynamic thresholds, hysteresis, alarm templates, multiple role-based notification methods.
-- **Notifications**: [alerta.io](../health/notifications/alerta/), [amazon sns](../health/notifications/awssns/), [discordapp.com](../health/notifications/discord/), [email](../health/notifications/email/), [flock.com](../health/notifications/flock/), [irc](../health/notifications/irc/), [kavenegar.com](../health/notifications/kavenegar/), [messagebird.com](../health/notifications/messagebird/), [pagerduty.com](../health/notifications/pagerduty/), [prowl](../health/notifications/prowl/), [pushbullet.com](../health/notifications/pushbullet/), [pushover.net](../health/notifications/pushover/), [rocket.chat](../health/notifications/rocketchat/), [slack.com](../health/notifications/slack/), [smstools3](../health/notifications/smstools3/), [syslog](../health/notifications/syslog/), [telegram.org](../health/notifications/telegram/), [twilio.com](../health/notifications/twilio/), [web](../health/notifications/web/) and [custom notifications](../health/notifications/custom/).
+
+- **Sophisticated alerting** - comes with hundreds of alarms, **out of the box**! Supports dynamic thresholds, hysteresis, alarm templates, and multiple role-based notification methods (a sketch of the alarm syntax follows this list).
+- **Notifications**: [alerta.io](../health/notifications/alerta/), [amazon sns](../health/notifications/awssns/), [discordapp.com](../health/notifications/discord/), [email](../health/notifications/email/), [flock.com](../health/notifications/flock/), [irc](../health/notifications/irc/), [kavenegar.com](../health/notifications/kavenegar/), [messagebird.com](../health/notifications/messagebird/), [pagerduty.com](../health/notifications/pagerduty/), [prowl](../health/notifications/prowl/), [pushbullet.com](../health/notifications/pushbullet/), [pushover.net](../health/notifications/pushover/), [rocket.chat](../health/notifications/rocketchat/), [slack.com](../health/notifications/slack/), [smstools3](../health/notifications/smstools3/), [syslog](../health/notifications/syslog/), [telegram.org](../health/notifications/telegram/), [twilio.com](../health/notifications/twilio/), [web](../health/notifications/web/) and [custom notifications](../health/notifications/custom/).
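+
+As a hedged sketch of that alarm syntax (the name and thresholds below are illustrative, not the shipped defaults), a `health.d/*.conf` entry looks roughly like this:
+
+```
+ alarm: ram_in_use
+    on: system.ram
+  calc: $used * 100 / ($used + $free)
+ every: 10s
+  warn: $this > 80
+  crit: $this > 95
+  info: percentage of RAM in use
+    to: sysadmin
+```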
### Integrations
-- **time-series dbs** - can archive its metrics to **Graphite**, **OpenTSDB**, **Prometheus**, **AWS Kinesis**, **JSON document DBs**, in the same or lower resolution (lower: to prevent it from congesting these servers due to the amount of data collected). Netdata also supports **Prometheus remote write API** which allows storing metrics to **Elasticsearch**, **Gnocchi**, **InfluxDB**, **Kafka**, **PostgreSQL/TimescaleDB**, **Splunk**, **VictoriaMetrics** and a lot of other [storage providers](https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage).
+
+- **Time-series DBs** - can archive its metrics to **Graphite**, **OpenTSDB**, **Prometheus**, **AWS Kinesis**, **JSON document DBs**, in the same or lower resolution (lower: to prevent it from congesting these servers due to the amount of data collected). Netdata also supports the **Prometheus remote write API**, which allows storing metrics in **Elasticsearch**, **Gnocchi**, **InfluxDB**, **Kafka**, **PostgreSQL/TimescaleDB**, **Splunk**, **VictoriaMetrics** and a lot of other [storage providers](https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage). A minimal archiving configuration sketch follows.
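+
+A hedged sketch of such an archiving setup, assuming a Graphite server on `localhost:2003` (the `[backend]` section lives in `netdata.conf`; all values are illustrative):
+
+```
+[backend]
+    enabled = yes
+    type = graphite
+    destination = localhost:2003
+    # archive averaged values every 10 seconds instead of the full 1s resolution
+    data source = average
+    update every = 10
+```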
## Visualization
-- **Stunning interactive dashboards** - mouse, touchpad and touch-screen friendly in 2 themes: `slate` (dark) and `white`.
-- **Amazingly fast visualization** - responds to all queries in less than 1 ms per metric, even on low-end hardware.
-- **Visual anomaly detection** - the dashboards are optimized for detecting anomalies visually.
-- **Embeddable** - its charts can be embedded on your web pages, wikis and blogs. You can even use [Atlassian's Confluence as a monitoring dashboard](../web/gui/confluence/).
-- **Customizable** - custom dashboards can be built using simple HTML (no javascript necessary).
+- **Stunning interactive dashboards** - mouse, touchpad and touch-screen friendly in 2 themes: `slate` (dark) and `white`.
+- **Amazingly fast visualization** - responds to all queries in less than 1 ms per metric, even on low-end hardware.
+- **Visual anomaly detection** - the dashboards are optimized for detecting anomalies visually.
+- **Embeddable** - its charts can be embedded on your web pages, wikis and blogs. You can even use [Atlassian's Confluence as a monitoring dashboard](../web/gui/confluence/).
+- **Customizable** - custom dashboards can be built using simple HTML (no JavaScript necessary), as sketched after this list.
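+
+As a hedged sketch of such a custom dashboard (the host and chart name are placeholders), a plain HTML page only needs `dashboard.js`, which every Netdata agent serves, and one `div` per chart:
+
+```html
+<!DOCTYPE html>
+<html>
+<head>
+    <!-- dashboard.js is served by the Netdata agent itself; adjust the host -->
+    <script type="text/javascript" src="http://localhost:19999/dashboard.js"></script>
+</head>
+<body>
+    <!-- any div with a data-netdata attribute becomes a live, auto-refreshing chart -->
+    <div data-netdata="system.cpu"
+         data-chart-library="dygraph"
+         data-width="100%"
+         data-height="200px"
+         data-after="-300"></div>
+</body>
+</html>
+```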
### Positive and negative values
@@ -161,7 +165,7 @@ Charts on Netdata dashboards are synchronized to each other. There is no master
![charts-are-synchronized](https://user-images.githubusercontent.com/2662304/48309003-b4fb3b80-e578-11e8-86f6-f505c7059c15.gif)
-*Charts are panned by dragging them with the mouse. Charts can be zoomed in/out with`SHIFT` + `mouse wheel` while the mouse pointer is over a chart.*
+_Charts are panned by dragging them with the mouse. Charts can be zoomed in/out with `SHIFT` + `mouse wheel` while the mouse pointer is over a chart._
> The visible time-frame (pan and zoom) is propagated from Netdata server to Netdata server, when navigating via the [node menu](../registry#registry).
@@ -171,196 +175,225 @@ To improve visual anomaly detection across charts, the user can highlight a time
![highlighted-timeframe](https://user-images.githubusercontent.com/2662304/48311876-f9093300-e5ae-11e8-9c74-e3e291741990.gif)
-*A highlighted time-frame can be given by pressing `ALT` + `mouse selection` on any chart. Netdata will highlight the same range on all charts.*
+_A highlighted time-frame can be given by pressing `ALT` + `mouse selection` on any chart. Netdata will highlight the same range on all charts._
> Highlighted ranges are propagated from Netdata server to Netdata server, when navigating via the [node menu](../registry#registry).
-
## What does it monitor
Netdata data collection is **extensible** - you can monitor anything you can get a metric for.
Its [Plugin API](../collectors/plugins.d/) supports all programming languages (anything can be a Netdata plugin: BASH, python, perl, node.js, java, Go, ruby, etc.).
-- For better performance, most system related plugins (cpu, memory, disks, filesystems, networking, etc) have been written in `C`.
-- For faster development and easier contributions, most application related plugins (databases, web servers, etc) have been written in `python`.
+- For better performance, most system-related plugins (cpu, memory, disks, filesystems, networking, etc) have been written in `C`.
+- For faster development and easier contributions, most application-related plugins (databases, web servers, etc) have been written in `python` (a minimal plugin sketch follows).
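+
+As a hedged sketch of the Plugin API (the chart and dimension names are made up for illustration), an external plugin is just a long-running program that prints plain-text commands to stdout:
+
+```python
+#!/usr/bin/env python3
+import random
+import sys
+import time
+
+# Declare the chart once: CHART type.id name title units family context charttype
+print("CHART example.random '' 'A random number' 'value' example '' line")
+print("DIMENSION random '' absolute 1 1")
+sys.stdout.flush()
+
+while True:
+    # One sample per iteration: BEGIN / SET / END
+    print("BEGIN example.random")
+    print(f"SET random = {random.randint(0, 100)}")
+    print("END")
+    sys.stdout.flush()
+    time.sleep(1)
+```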
#### APM (Application Performance Monitoring)
-- **[statsd](../collectors/statsd.plugin/)** - Netdata is a fully featured statsd server.
-- **[Go expvar](../collectors/python.d.plugin/go_expvar/)** - collects metrics exposed by applications written in the Go programming language using the expvar package.
-- **[Spring Boot](../collectors/python.d.plugin/springboot/)** - monitors running Java Spring Boot applications that expose their metrics with the use of the Spring Boot Actuator included in Spring Boot library.
-- **[uWSGI](../collectors/python.d.plugin/uwsgi/)** - collects performance metrics from uWSGI applications.
+
+- **[statsd](../collectors/statsd.plugin/)** - Netdata is a fully featured statsd server (a sending sketch follows this list).
+- **[Go expvar](../collectors/python.d.plugin/go_expvar/)** - collects metrics exposed by applications written in the Go programming language using the expvar package.
+- **[Spring Boot](../collectors/python.d.plugin/springboot/)** - monitors running Java Spring Boot applications that expose their metrics with the use of the Spring Boot Actuator included in Spring Boot library.
+- **[uWSGI](../collectors/python.d.plugin/uwsgi/)** - collects performance metrics from uWSGI applications.
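+
+As a hedged sketch of feeding that statsd server (Netdata listens on port 8125 by default; the metric name is an arbitrary example), any application can send a standard statsd line over UDP:
+
+```python
+import socket
+
+# "name:value|type" is the standard statsd wire format; |c marks a counter.
+sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+sock.sendto(b"myapp.requests:1|c", ("localhost", 8125))
+sock.close()
+```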
#### System Resources
-- **[CPU Utilization](../collectors/proc.plugin/)** - total and per core CPU usage.
-- **[Interrupts](../collectors/proc.plugin/)** - total and per core CPU interrupts.
-- **[SoftIRQs](../collectors/proc.plugin/)** - total and per core SoftIRQs.
-- **[SoftNet](../collectors/proc.plugin/)** - total and per core SoftIRQs related to network activity.
-- **[CPU Throttling](../collectors/proc.plugin/)** - collects per core CPU throttling.
-- **[CPU Frequency](../collectors/proc.plugin/)** - collects the current CPU frequency.
-- **[CPU Idle](../collectors/proc.plugin/)** - collects the time spent per processor state.
-- **[IdleJitter](../collectors/idlejitter.plugin/)** - measures CPU latency.
-- **[Entropy](../collectors/proc.plugin/)** - random numbers pool, using in cryptography.
-- **[Interprocess Communication - IPC](../collectors/proc.plugin/)** - such as semaphores and semaphores arrays.
+
+- **[CPU Utilization](../collectors/proc.plugin/)** - total and per core CPU usage.
+- **[Interrupts](../collectors/proc.plugin/)** - total and per core CPU interrupts.
+- **[SoftIRQs](../collectors/proc.plugin/)** - total and per core SoftIRQs.
+- **[SoftNet](../collectors/proc.plugin/)** - total and per core SoftIRQs related to network activity.
+- **[CPU Throttling](../collectors/proc.plugin/)** - collects per core CPU throttling.
+- **[CPU Frequency](../collectors/proc.plugin/)** - collects the current CPU frequency.
+- **[CPU Idle](../collectors/proc.plugin/)** - collects the time spent per processor state.
+- **[IdleJitter](../collectors/idlejitter.plugin/)** - measures CPU latency.
+- **[Entropy](../collectors/proc.plugin/)** - the random numbers pool, used in cryptography.
+- **[Interprocess Communication - IPC](../collectors/proc.plugin/)** - such as semaphores and semaphore arrays.
#### Memory
-- **[ram](../collectors/proc.plugin/)** - collects info about RAM usage.
-- **[swap](../collectors/proc.plugin/)** - collects info about swap memory usage.
-- **[available memory](../collectors/proc.plugin/)** - collects the amount of RAM available for userspace processes.
-- **[committed memory](../collectors/proc.plugin/)** - collects the amount of RAM committed to userspace processes.
-- **[Page Faults](../collectors/proc.plugin/)** - collects the system page faults (major and minor).
-- **[writeback memory](../collectors/proc.plugin/)** - collects the system dirty memory and writeback activity.
-- **[huge pages](../collectors/proc.plugin/)** - collects the amount of RAM used for huge pages.
-- **[KSM](../collectors/proc.plugin/)** - collects info about Kernel Same Merging (memory dedupper).
-- **[Numa](../collectors/proc.plugin/)** - collects Numa info on systems that support it.
-- **[slab](../collectors/proc.plugin/)** - collects info about the Linux kernel memory usage.
+
+- **[ram](../collectors/proc.plugin/)** - collects info about RAM usage.
+- **[swap](../collectors/proc.plugin/)** - collects info about swap memory usage.
+- **[available memory](../collectors/proc.plugin/)** - collects the amount of RAM available for userspace processes.
+- **[committed memory](../collectors/proc.plugin/)** - collects the amount of RAM committed to userspace processes.
+- **[Page Faults](../collectors/proc.plugin/)** - collects the system page faults (major and minor).
+- **[writeback memory](../collectors/proc.plugin/)** - collects the system dirty memory and writeback activity.
+- **[huge pages](../collectors/proc.plugin/)** - collects the amount of RAM used for huge pages.
+- **[KSM](../collectors/proc.plugin/)** - collects info about Kernel Samepage Merging (the kernel memory deduplicator).
+- **[NUMA](../collectors/proc.plugin/)** - collects NUMA info on systems that support it.
+- **[slab](../collectors/proc.plugin/)** - collects info about the Linux kernel memory usage.
#### Disks
-- **[block devices](../collectors/proc.plugin/)** - per disk: I/O, operations, backlog, utilization, space, etc.
-- **[BCACHE](../collectors/proc.plugin/)** - detailed performance of SSD caching devices.
-- **[DiskSpace](../collectors/proc.plugin/)** - monitors disk space usage.
-- **[mdstat](../collectors/proc.plugin/)** - software RAID.
-- **[hddtemp](../collectors/python.d.plugin/hddtemp/)** - disk temperatures.
-- **[smartd](../collectors/python.d.plugin/smartd_log/)** - disk S.M.A.R.T. values.
-- **[device mapper](../collectors/proc.plugin/)** - naming disks.
-- **[Veritas Volume Manager](../collectors/proc.plugin/)** - naming disks.
-- **[megacli](../collectors/python.d.plugin/megacli/)** - adapter, physical drives and battery stats.
-- **[adaptec_raid](../collectors/python.d.plugin/adaptec_raid/)** - logical and physical devices health metrics.
-- **[ioping](../collectors/ioping.plugin/)** - to measure disk read/write latency.
+
+- **[block devices](../collectors/proc.plugin/)** - per disk: I/O, operations, backlog, utilization, space, etc.
+- **[BCACHE](../collectors/proc.plugin/)** - detailed performance of SSD caching devices.
+- **[DiskSpace](../collectors/proc.plugin/)** - monitors disk space usage.
+- **[mdstat](../collectors/proc.plugin/)** - software RAID.
+- **[hddtemp](../collectors/python.d.plugin/hddtemp/)** - disk temperatures.
+- **[smartd](../collectors/python.d.plugin/smartd_log/)** - disk S.M.A.R.T. values.
+- **[device mapper](../collectors/proc.plugin/)** - naming disks.
+- **[Veritas Volume Manager](../collectors/proc.plugin/)** - naming disks.
+- **[megacli](../collectors/python.d.plugin/megacli/)** - adapter, physical drives and battery stats.
+- **[adaptec_raid](../collectors/python.d.plugin/adaptec_raid/)** - logical and physical devices health metrics.
+- **[ioping](../collectors/ioping.plugin/)** - measures disk read/write latency.
#### Filesystems
-- **[BTRFS](../collectors/proc.plugin/)** - detailed disk space allocation and usage.
-- **[Ceph](../collectors/python.d.plugin/ceph/)** - OSD usage, Pool usage, number of objects, etc.
-- **[NFS file servers and clients](../collectors/proc.plugin/)** - NFS v2, v3, v4: I/O, cache, read ahead, RPC calls
-- **[Samba](../collectors/python.d.plugin/samba/)** - performance metrics of Samba SMB2 file sharing.
-- **[ZFS](../collectors/proc.plugin/)** - detailed performance and resource usage.
+
+- **[BTRFS](../collectors/proc.plugin/)** - detailed disk space allocation and usage.
+- **[Ceph](../collectors/python.d.plugin/ceph/)** - OSD usage, Pool usage, number of objects, etc.
+- **[NFS file servers and clients](../collectors/proc.plugin/)** - NFS v2, v3, v4: I/O, cache, read ahead, RPC calls.
+- **[Samba](../collectors/python.d.plugin/samba/)** - performance metrics of Samba SMB2 file sharing.
+- **[ZFS](../collectors/proc.plugin/)** - detailed performance and resource usage.
#### Networking
-- **[Network Stack](../collectors/proc.plugin/)** - everything about the networking stack (both IPv4 and IPv6 for all protocols: TCP, UDP, SCTP, UDPLite, ICMP, Multicast, Broadcast, etc), and all network interfaces (per interface: bandwidth, packets, errors, drops).
-- **[Netfilter](../collectors/proc.plugin/)** - everything about the netfilter connection tracker.
-- **[SynProxy](../collectors/proc.plugin/)** - collects performance data about the linux SYNPROXY (DDoS).
-- **[NFacct](../collectors/nfacct.plugin/)** - collects accounting data from iptables.
-- **[Network QoS](../collectors/tc.plugin/)** - the only tool that visualizes network `tc` classes in real-time
-- **[FPing](../collectors/fping.plugin/)** - to measure latency and packet loss between any number of hosts.
-- **[ISC dhcpd](../collectors/python.d.plugin/isc_dhcpd/)** - pools utilization, leases, etc.
-- **[AP](../collectors/charts.d.plugin/ap/)** - collects Linux access point performance data (`hostapd`).
-- **[SNMP](../collectors/node.d.plugin/snmp/)** - SNMP devices can be monitored too (although you will need to configure these).
-- **[port_check](../collectors/python.d.plugin/portcheck/)** - checks TCP ports for availability and response time.
+
+- **[Network Stack](../collectors/proc.plugin/)** - everything about the networking stack (both IPv4 and IPv6 for all protocols: TCP, UDP, SCTP, UDPLite, ICMP, Multicast, Broadcast, etc), and all network interfaces (per interface: bandwidth, packets, errors, drops).
+- **[Netfilter](../collectors/proc.plugin/)** - everything about the netfilter connection tracker.
+- **[SynProxy](../collectors/proc.plugin/)** - collects performance data about the Linux SYNPROXY (DDoS protection).
+- **[NFacct](../collectors/nfacct.plugin/)** - collects accounting data from iptables.
+- **[Network QoS](../collectors/tc.plugin/)** - the only tool that visualizes network `tc` classes in real-time.
+- **[FPing](../collectors/fping.plugin/)** - measures latency and packet loss between any number of hosts.
+- **[ISC dhcpd](../collectors/python.d.plugin/isc_dhcpd/)** - pools utilization, leases, etc.
+- **[AP](../collectors/charts.d.plugin/ap/)** - collects Linux access point performance data (`hostapd`).
+- **[SNMP](../collectors/node.d.plugin/snmp/)** - SNMP devices can be monitored too (although you will need to configure these).
+- **[port_check](../collectors/python.d.plugin/portcheck/)** - checks TCP ports for availability and response time.
#### Virtual Private Networks
-- **[OpenVPN](../collectors/python.d.plugin/ovpn_status_log/)** - collects status per tunnel.
-- **[LibreSwan](../collectors/charts.d.plugin/libreswan/)** - collects metrics per IPSEC tunnel.
-- **[Tor](../collectors/python.d.plugin/tor/)** - collects Tor traffic statistics.
+
+- **[OpenVPN](../collectors/python.d.plugin/ovpn_status_log/)** - collects status per tunnel.
+- **[LibreSwan](../collectors/charts.d.plugin/libreswan/)** - collects metrics per IPsec tunnel.
+- **[Tor](../collectors/python.d.plugin/tor/)** - collects Tor traffic statistics.
#### Processes
-- **[System Processes](../collectors/proc.plugin/)** - running, blocked, forks, active.
-- **[Applications](../collectors/apps.plugin/)** - by grouping the process tree and reporting CPU, memory, disk reads, disk writes, swap, threads, pipes, sockets - per process group.
-- **[systemd](../collectors/cgroups.plugin/)** - monitors systemd services using CGROUPS.
+
+- **[System Processes](../collectors/proc.plugin/)** - running, blocked, forks, active.
+- **[Applications](../collectors/apps.plugin/)** - by grouping the process tree and reporting CPU, memory, disk reads, disk writes, swap, threads, pipes, sockets - per process group.
+- **[systemd](../collectors/cgroups.plugin/)** - monitors systemd services using CGROUPS.
#### Users
-- **[Users and User Groups resource usage](../collectors/apps.plugin/)** - by summarizing the process tree per user and group, reporting: CPU, memory, disk reads, disk writes, swap, threads, pipes, sockets
-- **[logind](../collectors/python.d.plugin/logind/)** - collects sessions, users and seats connected.
+
+- **[Users and User Groups resource usage](../collectors/apps.plugin/)** - by summarizing the process tree per user and group, reporting: CPU, memory, disk reads, disk writes, swap, threads, pipes, sockets.
+- **[logind](../collectors/python.d.plugin/logind/)** - collects sessions, users and seats connected.
#### Containers and VMs
-- **[Containers](../collectors/cgroups.plugin/)** - collects resource usage for all kinds of containers, using CGROUPS (systemd-nspawn, lxc, lxd, docker, kubernetes, etc).
-- **[libvirt VMs](../collectors/cgroups.plugin/)** - collects resource usage for all kinds of VMs, using CGROUPS.
-- **[dockerd](../collectors/python.d.plugin/dockerd/)** - collects docker health metrics.
+
+- **[Containers](../collectors/cgroups.plugin/)** - collects resource usage for all kinds of containers, using CGROUPS (systemd-nspawn, lxc, lxd, docker, kubernetes, etc).
+- **[libvirt VMs](../collectors/cgroups.plugin/)** - collects resource usage for all kinds of VMs, using CGROUPS.
+- **[dockerd](../collectors/python.d.plugin/dockerd/)** - collects docker health metrics.
#### Web Servers
-- **[Apache and lighttpd](../collectors/python.d.plugin/apache/)** - `mod-status` (v2.2, v2.4) and cache log statistics, for multiple servers.
-- **[IPFS](../collectors/python.d.plugin/ipfs/)** - bandwidth, peers.
-- **[LiteSpeed](../collectors/python.d.plugin/litespeed/)** - reads the litespeed rtreport files to collect metrics.
-- **[Nginx](../collectors/python.d.plugin/nginx/)** - `stub-status`, for multiple servers.
-- **[Nginx+](../collectors/python.d.plugin/nginx_plus/)** - connects to multiple nginx_plus servers (local or remote) to collect real-time performance metrics.
-- **[PHP-FPM](../collectors/python.d.plugin/phpfpm/)** - multiple instances, each reporting connections, requests, performance, etc.
-- **[Tomcat](../collectors/python.d.plugin/tomcat/)** - accesses, threads, free memory, volume, etc.
-- **[web server `access.log` files](../collectors/python.d.plugin/web_log/)** - extracting in real-time, web server and proxy performance metrics and applying several health checks, etc.
-- **[HTTP check](../collectors/python.d.plugin/httpcheck/)** - checks one or more web servers for HTTP status code and returned content.
+
+- **[Apache and lighttpd](../collectors/python.d.plugin/apache/)** - `mod-status` (v2.2, v2.4) and cache log statistics, for multiple servers.
+- **[IPFS](../collectors/python.d.plugin/ipfs/)** - bandwidth, peers.
+- **[LiteSpeed](../collectors/python.d.plugin/litespeed/)** - reads the litespeed rtreport files to collect metrics.
+- **[Nginx](../collectors/python.d.plugin/nginx/)** - `stub-status`, for multiple servers.
+- **[Nginx+](../collectors/python.d.plugin/nginx_plus/)** - connects to multiple nginx_plus servers (local or remote) to collect real-time performance metrics.
+- **[PHP-FPM](../collectors/python.d.plugin/phpfpm/)** - multiple instances, each reporting connections, requests, performance, etc.
+- **[Tomcat](../collectors/python.d.plugin/tomcat/)** - accesses, threads, free memory, volume, etc.
+- **[web server `access.log` files](../collectors/python.d.plugin/web_log/)** - extracts web server and proxy performance metrics in real-time and applies several health checks.
+- **[HTTP check](../collectors/python.d.plugin/httpcheck/)** - checks one or more web servers for HTTP status code and returned content.
#### Proxies, Balancers, Accelerators
-- **[HAproxy](../collectors/python.d.plugin/haproxy/)** - bandwidth, sessions, backends, etc.
-- **[Squid](../collectors/python.d.plugin/squid/)** - multiple servers, each showing: clients bandwidth and requests, servers bandwidth and requests.
-- **[Traefik](../collectors/python.d.plugin/traefik/)** - connects to multiple traefik instances (local or remote) to collect API metrics (response status code, response time, average response time and server uptime).
-- **[Varnish](../collectors/python.d.plugin/varnish/)** - threads, sessions, hits, objects, backends, etc.
-- **[IPVS](../collectors/proc.plugin/)** - collects metrics from the Linux IPVS load balancer.
+
+- **[HAproxy](../collectors/python.d.plugin/haproxy/)** - bandwidth, sessions, backends, etc.
+- **[Squid](../collectors/python.d.plugin/squid/)** - multiple servers, each showing: clients bandwidth and requests, servers bandwidth and requests.
+- **[Traefik](../collectors/python.d.plugin/traefik/)** - connects to multiple traefik instances (local or remote) to collect API metrics (response status code, response time, average response time and server uptime).
+- **[Varnish](../collectors/python.d.plugin/varnish/)** - threads, sessions, hits, objects, backends, etc.
+- **[IPVS](../collectors/proc.plugin/)** - collects metrics from the Linux IPVS load balancer.
#### Database Servers
-- **[CouchDB](../collectors/python.d.plugin/couchdb/)** - reads/writes, request methods, status codes, tasks, replication, per-db, etc.
-- **[MemCached](../collectors/python.d.plugin/memcached/)** - multiple servers, each showing: bandwidth, connections, items, etc.
-- **[MongoDB](../collectors/python.d.plugin/mongodb/)** - operations, clients, transactions, cursors, connections, asserts, locks, etc.
-- **[MySQL and mariadb](../collectors/python.d.plugin/mysql/)** - multiple servers, each showing: bandwidth, queries/s, handlers, locks, issues, tmp operations, connections, binlog metrics, threads, innodb metrics, and more.
-- **[PostgreSQL](../collectors/python.d.plugin/postgres/)** - multiple servers, each showing: per database statistics (connections, tuples read - written - returned, transactions, locks), backend processes, indexes, tables, write ahead, background writer and more.
-- **[Proxy SQL](../collectors/python.d.plugin/proxysql/)** - collects Proxy SQL backend and frontend performance metrics.
-- **[Redis](../collectors/python.d.plugin/redis/)** - multiple servers, each showing: operations, hit rate, memory, keys, clients, slaves.
-- **[RethinkDB](../collectors/python.d.plugin/rethinkdbs/)** - connects to multiple rethinkdb servers (local or remote) to collect real-time metrics.
+
+- **[CouchDB](../collectors/python.d.plugin/couchdb/)** - reads/writes, request methods, status codes, tasks, replication, per-db, etc.
+- **[MemCached](../collectors/python.d.plugin/memcached/)** - multiple servers, each showing: bandwidth, connections, items, etc.
+- **[MongoDB](../collectors/python.d.plugin/mongodb/)** - operations, clients, transactions, cursors, connections, asserts, locks, etc.
+- **[MySQL and MariaDB](../collectors/python.d.plugin/mysql/)** - multiple servers, each showing: bandwidth, queries/s, handlers, locks, issues, tmp operations, connections, binlog metrics, threads, innodb metrics, and more.
+- **[PostgreSQL](../collectors/python.d.plugin/postgres/)** - multiple servers, each showing: per database statistics (connections, tuples read - written - returned, transactions, locks), backend processes, indexes, tables, write ahead, background writer and more.
+- **[Proxy SQL](../collectors/python.d.plugin/proxysql/)** - collects Proxy SQL backend and frontend performance metrics.
+- **[Redis](../collectors/python.d.plugin/redis/)** - multiple servers, each showing: operations, hit rate, memory, keys, clients, slaves.
+- **[RethinkDB](../collectors/python.d.plugin/rethinkdbs/)** - connects to multiple rethinkdb servers (local or remote) to collect real-time metrics.
#### Message Brokers
-- **[beanstalkd](../collectors/python.d.plugin/beanstalk/)** - global and per tube monitoring.
-- **[RabbitMQ](../collectors/python.d.plugin/rabbitmq/)** - performance and health metrics.
+
+- **[beanstalkd](../collectors/python.d.plugin/beanstalk/)** - global and per tube monitoring.
+- **[RabbitMQ](../collectors/python.d.plugin/rabbitmq/)** - performance and health metrics.
#### Search and Indexing
-- **[ElasticSearch](../collectors/python.d.plugin/elasticsearch/)** - search and index performance, latency, timings, cluster statistics, threads statistics, etc.
+
+- **[ElasticSearch](../collectors/python.d.plugin/elasticsearch/)** - search and index performance, latency, timings, cluster statistics, threads statistics, etc.
#### DNS Servers
-- **[bind_rndc](../collectors/python.d.plugin/bind_rndc/)** - parses `named.stats` dump file to collect real-time performance metrics. All versions of bind after 9.6 are supported.
-- **[dnsdist](../collectors/python.d.plugin/dnsdist/)** - performance and health metrics.
-- **[ISC Bind (named)](../collectors/node.d.plugin/named/)** - multiple servers, each showing: clients, requests, queries, updates, failures and several per view metrics. All versions of bind after 9.9.10 are supported.
-- **[NSD](../collectors/python.d.plugin/nsd/)** - queries, zones, protocols, query types, transfers, etc.
-- **[PowerDNS](../collectors/python.d.plugin/powerdns/)** - queries, answers, cache, latency, etc.
-- **[unbound](../collectors/python.d.plugin/unbound/)** - performance and resource usage metrics.
-- **[dns_query_time](../collectors/python.d.plugin/dns_query_time/)** - DNS query time statistics.
+
+- **[bind_rndc](../collectors/python.d.plugin/bind_rndc/)** - parses `named.stats` dump file to collect real-time performance metrics. All versions of bind after 9.6 are supported.
+- **[dnsdist](../collectors/python.d.plugin/dnsdist/)** - performance and health metrics.
+- **[ISC Bind (named)](../collectors/node.d.plugin/named/)** - multiple servers, each showing: clients, requests, queries, updates, failures and several per-view metrics. All versions of bind after 9.9.10 are supported.
+- **[NSD](../collectors/python.d.plugin/nsd/)** - queries, zones, protocols, query types, transfers, etc.
+- **[PowerDNS](../collectors/python.d.plugin/powerdns/)** - queries, answers, cache, latency, etc.
+- **[unbound](../collectors/python.d.plugin/unbound/)** - performance and resource usage metrics.
+- **[dns_query_time](../collectors/python.d.plugin/dns_query_time/)** - DNS query time statistics.
#### Time Servers
-- **[chrony](../collectors/python.d.plugin/chrony/)** - uses the `chronyc` command to collect chrony statistics (Frequency, Last offset, RMS offset, Residual freq, Root delay, Root dispersion, Skew, System time).
-- **[ntpd](../collectors/python.d.plugin/ntpd/)** - connects to multiple ntpd servers (local or remote) to provide statistics of system variables and optional also peer variables.
+
+- **[chrony](../collectors/python.d.plugin/chrony/)** - uses the `chronyc` command to collect chrony statistics (Frequency, Last offset, RMS offset, Residual freq, Root delay, Root dispersion, Skew, System time).
+- **[ntpd](../collectors/python.d.plugin/ntpd/)** - connects to multiple ntpd servers (local or remote) to provide statistics of system variables and, optionally, also peer variables.
#### Mail Servers
-- **[Dovecot](../collectors/python.d.plugin/dovecot/)** - POP3/IMAP servers.
-- **[Exim](../collectors/python.d.plugin/exim/)** - message queue (emails queued).
-- **[Postfix](../collectors/python.d.plugin/postfix/)** - message queue (entries, size).
+
+- **[Dovecot](../collectors/python.d.plugin/dovecot/)** - POP3/IMAP servers.
+- **[Exim](../collectors/python.d.plugin/exim/)** - message queue (emails queued).
+- **[Postfix](../collectors/python.d.plugin/postfix/)** - message queue (entries, size).
#### Hardware Sensors
-- **[IPMI](../collectors/freeipmi.plugin/)** - enterprise hardware sensors and events.
-- **[lm-sensors](../collectors/python.d.plugin/sensors/)** - temperature, voltage, fans, power, humidity, etc.
-- **[Nvidia](../collectors/python.d.plugin/nvidia_smi/)** - collects information for Nvidia GPUs.
-- **[RPi](../collectors/charts.d.plugin/sensors/)** - Raspberry Pi temperature sensors.
-- **[w1sensor](../collectors/python.d.plugin/w1sensor/)** - collects data from connected 1-Wire sensors.
+
+- **[IPMI](../collectors/freeipmi.plugin/)** - enterprise hardware sensors and events.
+- **[lm-sensors](../collectors/python.d.plugin/sensors/)** - temperature, voltage, fans, power, humidity, etc.
+- **[Nvidia](../collectors/python.d.plugin/nvidia_smi/)** - collects information for Nvidia GPUs.
+- **[RPi](../collectors/charts.d.plugin/sensors/)** - Raspberry Pi temperature sensors.
+- **[w1sensor](../collectors/python.d.plugin/w1sensor/)** - collects data from connected 1-Wire sensors.
#### UPSes
-- **[apcupsd](../collectors/charts.d.plugin/apcupsd/)** - load, charge, battery voltage, temperature, utility metrics, output metrics
-- **[NUT](../collectors/charts.d.plugin/nut/)** - load, charge, battery voltage, temperature, utility metrics, output metrics
-- **[Linux Power Supply](../collectors/proc.plugin/)** - collects metrics reported by power supply drivers on Linux.
+
+- **[apcupsd](../collectors/charts.d.plugin/apcupsd/)** - load, charge, battery voltage, temperature, utility metrics, output metrics.
+- **[NUT](../collectors/charts.d.plugin/nut/)** - load, charge, battery voltage, temperature, utility metrics, output metrics.
+- **[Linux Power Supply](../collectors/proc.plugin/)** - collects metrics reported by power supply drivers on Linux.
#### Social Sharing Servers
-- **[RetroShare](../collectors/python.d.plugin/retroshare/)** - connects to multiple retroshare servers (local or remote) to collect real-time performance metrics.
+
+- **[RetroShare](../collectors/python.d.plugin/retroshare/)** - connects to multiple RetroShare servers (local or remote) to collect real-time performance metrics.
#### Security
-- **[Fail2Ban](../collectors/python.d.plugin/fail2ban/)** - monitors the fail2ban log file to check all bans for all active jails.
+
+- **[Fail2Ban](../collectors/python.d.plugin/fail2ban/)** - monitors the fail2ban log file to check all bans for all active jails.
#### Authentication, Authorization, Accounting (AAA, RADIUS, LDAP) Servers
-- **[FreeRadius](../collectors/python.d.plugin/freeradius/)** - uses the `radclient` command to provide freeradius statistics (authentication, accounting, proxy-authentication, proxy-accounting).
+
+- **[FreeRadius](../collectors/python.d.plugin/freeradius/)** - uses the `radclient` command to provide freeradius statistics (authentication, accounting, proxy-authentication, proxy-accounting).
#### Telephony Servers
-- **[opensips](../collectors/charts.d.plugin/opensips/)** - connects to an opensips server (localhost only) to collect real-time performance metrics.
+
+- **[opensips](../collectors/charts.d.plugin/opensips/)** - connects to an opensips server (localhost only) to collect real-time performance metrics.
#### Household Appliances
-- **[SMA webbox](../collectors/node.d.plugin/sma_webbox/)** - connects to multiple remote SMA webboxes to collect real-time performance metrics of the photovoltaic (solar) power generation.
-- **[Fronius](../collectors/node.d.plugin/fronius/)** - connects to multiple remote Fronius Symo servers to collect real-time performance metrics of the photovoltaic (solar) power generation.
-- **[StiebelEltron](../collectors/node.d.plugin/stiebeleltron/)** - collects the temperatures and other metrics from your Stiebel Eltron heating system using their Internet Service Gateway (ISG web).
+
+- **[SMA webbox](../collectors/node.d.plugin/sma_webbox/)** - connects to multiple remote SMA webboxes to collect real-time performance metrics of the photovoltaic (solar) power generation.
+- **[Fronius](../collectors/node.d.plugin/fronius/)** - connects to multiple remote Fronius Symo servers to collect real-time performance metrics of the photovoltaic (solar) power generation.
+- **[StiebelEltron](../collectors/node.d.plugin/stiebeleltron/)** - collects the temperatures and other metrics from your Stiebel Eltron heating system using their Internet Service Gateway (ISG web).
#### Game Servers
-- **[SpigotMC](../collectors/python.d.plugin/spigotmc/)** - monitors Spigot Minecraft server ticks per second and number of online players using the Minecraft remote console.
+
+- **[SpigotMC](../collectors/python.d.plugin/spigotmc/)** - monitors Spigot Minecraft server ticks per second and number of online players using the Minecraft remote console.
#### Distributed Computing
-- **[BOINC](../collectors/python.d.plugin/boinc/)** - monitors task states for local and remote BOINC client software using the remote GUI RPC interface. Also provides alarms for a handful of error conditions.
+
+- **[BOINC](../collectors/python.d.plugin/boinc/)** - monitors task states for local and remote BOINC client software using the remote GUI RPC interface. Also provides alarms for a handful of error conditions.
#### Media Streaming Servers
-- **[IceCast](../collectors/python.d.plugin/icecast/)** - collects the number of listeners for active sources.
+
+- **[IceCast](../collectors/python.d.plugin/icecast/)** - collects the number of listeners for active sources.
#### Monitoring Systems
-- **[Monit](../collectors/python.d.plugin/monit/)** - collects metrics about monit targets (filesystems, applications, networks).
+
+- **[Monit](../collectors/python.d.plugin/monit/)** - collects metrics about monit targets (filesystems, applications, networks).
#### Provisioning Systems
-- **[Puppet](../collectors/python.d.plugin/puppet/)** - connects to multiple Puppet Server and Puppet DB instances (local or remote) to collect real-time status metrics.
+
+- **[Puppet](../collectors/python.d.plugin/puppet/)** - connects to multiple Puppet Server and PuppetDB instances (local or remote) to collect real-time status metrics.
You can easily extend Netdata by writing plugins that collect data from any source, using any computer language.
@@ -372,14 +405,14 @@ To report bugs, or get help, use [GitHub Issues](https://github.com/netdata/netd
You can also find Netdata on:
-- [Facebook](https://www.facebook.com/linuxnetdata/)
-- [Twitter](https://twitter.com/linuxnetdata)
-- [OpenHub](https://www.openhub.net/p/netdata)
-- [Repology](https://repology.org/metapackage/netdata/versions)
-- [StackShare](https://stackshare.io/netdata)
+- [Facebook](https://www.facebook.com/linuxnetdata/)
+- [Twitter](https://twitter.com/linuxnetdata)
+- [OpenHub](https://www.openhub.net/p/netdata)
+- [Repology](https://repology.org/metapackage/netdata/versions)
+- [StackShare](https://stackshare.io/netdata)
## License
Netdata is [GPLv3+](../LICENSE).
-Netdata re-distributes other open-source tools and libraries. Please check the [third party licenses](../REDISTRIBUTED.md). \ No newline at end of file
+Netdata re-distributes other open-source tools and libraries. Please check the [third party licenses](../REDISTRIBUTED.md).
diff --git a/docs/why-netdata/1s-granularity.md b/docs/why-netdata/1s-granularity.md
index 0d12a2d4..195a0d8f 100644
--- a/docs/why-netdata/1s-granularity.md
+++ b/docs/why-netdata/1s-granularity.md
@@ -4,9 +4,9 @@ High resolution metrics are required to effectively monitor and troubleshoot sys
## Why?
-- The world is going real-time. Today, customer experience is significantly affected by response time, so SLAs are tighter than ever before. It is just not practical to monitor a 2-second SLA with 10-second metrics.
+- The world is going real-time. Today, customer experience is significantly affected by response time, so SLAs are tighter than ever before. It is just not practical to monitor a 2-second SLA with 10-second metrics.
-- IT goes virtual. Unlike real hardware, virtual environments are not linear, nor predictable. You cannot expect resources to be available when your applications need them. They will eventually be, but not exactly at the time they are needed. The latency of virtual environments is affected by many factors, most of which are outside our control, like: the maintenance policy of the hosting provider, the work load of third party virtual machines running on the same physical servers combined with the resource allocation and throttling policy among virtual machines, the provisioning system of the hosting provider, etc.
+- IT goes virtual. Unlike real hardware, virtual environments are not linear, nor predictable. You cannot expect resources to be available when your applications need them. They will eventually be, but not exactly at the time they are needed. The latency of virtual environments is affected by many factors, most of which are outside our control, like the maintenance policy of the hosting provider, the workload of third-party virtual machines running on the same physical servers, the resource allocation and throttling policy among virtual machines, and the provisioning system of the hosting provider.
## What do others do?
@@ -16,9 +16,9 @@ They want to, but they can't, at least not massively.
The reasons lie in their design decisions:
-1. Time-series databases (prometheus, graphite, opentsdb, influxdb, etc) centralize all the metrics. At scale, these databases can easily become the bottleneck of the whole infrastructure.
+1. Time-series databases (prometheus, graphite, opentsdb, influxdb, etc) centralize all the metrics. At scale, these databases can easily become the bottleneck of the whole infrastructure.
-2. SaaS providers base their business models on centralizing all the metrics. On top of the time-series database bottleneck they also have increased bandwidth costs. So, massively supporting high resolution metrics, destroys their business model.
+2. SaaS providers base their business models on centralizing all the metrics. On top of the time-series database bottleneck, they also have increased bandwidth costs. So, massively supporting high-resolution metrics destroys their business model.
Of course, the world fixed this kind of scaling problem a couple of decades ago: instead of scaling up, scale out horizontally. That is, instead of investing in bigger and bigger central components, decentralize the application so that it can scale by adding more, smaller nodes to it.
@@ -30,9 +30,9 @@ Finally, per second data collection is a lot harder. Busy virtual environments h
So, the monitoring industry fails to massively provide high resolution metrics, mainly for 3 reasons:
-1. Centralization of metrics makes monitoring cost inefficient at that rate.
-2. Data collection needs optimization, otherwise it will significantly affect the monitored systems.
-3. Data collection is a lot harder, especially on busy virtual environments.
+1. Centralization of metrics makes monitoring cost inefficient at that rate.
+2. Data collection needs optimization, otherwise it will significantly affect the monitored systems.
+3. Data collection is a lot harder, especially on busy virtual environments.
## What does Netdata do differently?
@@ -45,9 +45,10 @@ To eliminate the error introduced by data collection latencies on busy virtual e
Finally, Netdata is really fast. Optimization is a core product feature. On modern hardware, Netdata can collect metrics at a rate above 1M metrics per second per core (this includes everything: parsing data sources, interpolating data, storing data in the time series database, etc). So, for a few thousand metrics per second per node, Netdata needs negligible CPU resources (just 1-2% of a single core).
Netdata has been designed to:
-- Solve the centralization problem of monitoring
-- Replace the console for performance troubleshooting.
+
+- Solve the centralization problem of monitoring
+- Replace the console for performance troubleshooting.
So, for Netdata, 1s granularity is easy: the natural outcome of its design.
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fwhy-netdata%2F1s-granularity&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fwhy-netdata%2F1s-granularity&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/why-netdata/README.md b/docs/why-netdata/README.md
index df8c0d02..1003b07a 100644
--- a/docs/why-netdata/README.md
+++ b/docs/why-netdata/README.md
@@ -6,25 +6,25 @@
Netdata is built around 4 principles:
-1. **[Per second data collection for all metrics.](1s-granularity.md)**
+1. **[Per second data collection for all metrics.](1s-granularity.md)**
- *It is impossible to monitor a 2 second SLA, with 10 second metrics.*
+   _It is impossible to monitor a 2-second SLA with 10-second metrics._
-2. **[Collect and visualize all the metrics from all possible sources.](unlimited-metrics.md)**
+2. **[Collect and visualize all the metrics from all possible sources.](unlimited-metrics.md)**
- *To troubleshoot slowdowns, we need all the available metrics. The console should not provide more metrics.*
+   _To troubleshoot slowdowns, we need all the available metrics. The console should not be able to provide more metrics than the monitoring tool._
-3. **[Meaningful presentation, optimized for visual anomaly detection.](meaningful-presentation.md)**
+3. **[Meaningful presentation, optimized for visual anomaly detection.](meaningful-presentation.md)**
- *Metrics are a lot more than name-value pairs over time. The monitoring tool should know all the metrics. Users should not!*
+ _Metrics are a lot more than name-value pairs over time. The monitoring tool should know all the metrics. Users should not!_
-4. **[Immediate results, just install and use.](immediate-results.md)**
+4. **[Immediate results, just install and use.](immediate-results.md)**
- *Most of our infrastructure is standardized. There is no point to configure everything metric by metric.*
+   _Most of our infrastructure is standardized. There is no point in configuring everything metric by metric._
Unlike other monitoring solutions that focus on metrics visualization,
Netdata helps us troubleshoot slowdowns without touching the console.
So, everything is a bit different.
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2FWhy-Netdata&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2FWhy-Netdata&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/why-netdata/immediate-results.md b/docs/why-netdata/immediate-results.md
index 12333671..f1f452ca 100644
--- a/docs/why-netdata/immediate-results.md
+++ b/docs/why-netdata/immediate-results.md
@@ -1,7 +1,7 @@
# Immediate results
Most of our infrastructure is based on standardized systems and applications.
-
+
It is a tremendous waste of time and effort, on a global scale, to require all users to configure their infrastructure dashboards and alarms metric by metric.
## Why?
@@ -22,20 +22,20 @@ Monitoring SaaS providers offer a very basic set of pre-configured metrics, dash
## What does Netdata do?
-1. Metrics are auto-detected, so for 99% of the cases data collection works out of the box.
-2. Metrics are converted to human readable units, right after data collection, before storing them into the database.
-3. Metrics are structured, organized in charts, families and applications, so that they can be browsed.
-4. Dashboards are automatically generated, so all metrics are available for exploration immediately after installation.
-5. Dashboards are not just visualizing metrics; they are a tool, optimized for visual anomaly detection.
-6. Hundreds of pre-configured alarm templates are automatically attached to collected metrics.
+1. Metrics are auto-detected, so for 99% of the cases data collection works out of the box.
+2. Metrics are converted to human-readable units right after data collection, before they are stored in the database.
+3. Metrics are structured, organized in charts, families and applications, so that they can be browsed.
+4. Dashboards are automatically generated, so all metrics are available for exploration immediately after installation.
+5. Dashboards are not just visualizing metrics; they are a tool, optimized for visual anomaly detection.
+6. Hundreds of pre-configured alarm templates are automatically attached to collected metrics.
The result is that Netdata can be used immediately after installation!
Netdata:
-- Helps engineers understand and learn what the metrics are.
-- Does not require any configuration. Of course there are thousands of options to tweak, but the defaults are pretty good for most systems.
-- Does not introduce any query languages or any other technology to be learned. Of course some familiarity with the tool is required, but nothing too complicated.
-- Includes all the community expertise and experience for monitoring systems and applications.
+- Helps engineers understand and learn what the metrics are.
+- Does not require any configuration. Of course there are thousands of options to tweak, but the defaults are pretty good for most systems.
+- Does not introduce any query languages or any other technology to be learned. Of course some familiarity with the tool is required, but nothing too complicated.
+- Includes all the community expertise and experience for monitoring systems and applications.
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fwhy-netdata%2Fimmediate-results&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fwhy-netdata%2Fimmediate-results&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/why-netdata/meaningful-presentation.md b/docs/why-netdata/meaningful-presentation.md
index f6fd0756..2623d152 100644
--- a/docs/why-netdata/meaningful-presentation.md
+++ b/docs/why-netdata/meaningful-presentation.md
@@ -15,20 +15,20 @@ The result is that for most of the world, monitoring sucks. It is incomplete, in
But even if all the metrics are collected, an even bigger challenge is revealed: What to do with them? How to use them?
The existing monitoring solutions, assume the engineers will:
-
-- Design dashboards
-- Configure alarms
-- Use a query language to investigate issues
+
+- Design dashboards
+- Configure alarms
+- Use a query language to investigate issues
However, all these have to be configured metric by metric.
The monitoring industry believes there is this "IT Operations Hero", a person combining these abilities:
-1. Has a deep understanding of IT architectures and is a skillful SysAdmin.
-2. Is a superb Network Administrator (can even read and understand the Linux kernel networking stack).
-3. Is a exceptional database administrator.
-4. Is fluent in software engineering, capable of understanding the internal workings of applications.
-5. Masters Data Science, statistical algorithms and is fluent in writing advanced mathematical queries to reveal the meaning of metrics.
+1. Has a deep understanding of IT architectures and is a skillful SysAdmin.
+2. Is a superb Network Administrator (can even read and understand the Linux kernel networking stack).
+3. Is an exceptional database administrator.
+4. Is fluent in software engineering, capable of understanding the internal workings of applications.
+5. Masters data science and statistical algorithms, and is fluent in writing advanced mathematical queries to reveal the meaning of metrics.
Of course this person does not exist!
@@ -46,11 +46,11 @@ So, they collect very limited metrics. Basic dashboards can be created with thes
In Netdata, the meaning of metrics is incorporated into the database:
-1. all metrics are converted and stored to human-friendly units. This is a data-collection process, not a visualization process. For example, cpu utilization in Netdata is stored as percentage, not as kernel ticks.
+1. all metrics are converted to human-friendly units and stored that way. This is a data-collection process, not a visualization process. For example, CPU utilization in Netdata is stored as a percentage, not as kernel ticks.
-2. all metrics are organized into human-friendly charts, sharing the same context and units (similar to what other monitoring solutions call `cardinality`). So, when Netdata developer collect metrics, they configure the correlation of the metrics right in data collection, which is stored in the database too.
+2. all metrics are organized into human-friendly charts, sharing the same context and units (similar to what other monitoring solutions call `cardinality`). So, when Netdata developers collect metrics, they configure the correlation of the metrics right in data collection, and this is stored in the database too.
-3. all charts are then organized in families, and chart families are organized in applications. These structures are responsible for providing the menu at the right side of Netdata dashboards for exploring the whole database.
+3. all charts are then organized in families, and chart families are organized in applications. These structures are responsible for providing the menu at the right side of Netdata dashboards for exploring the whole database.
The result is a system that can be browsed by humans, even if the database has 100,000 unique metrics. It is pretty natural for everyone to browse them, understand their meaning and their scope.
@@ -60,4 +60,4 @@ But it simplifies everything else. Data collection, metrics database and visuali
Netdata goes a step further, by enriching the dashboard with information that is useful for most people. So, to improve clarity and help users be more effective, Netdata includes the community knowledge and expertise about the metrics right in the dashboard, so that Netdata users can focus on solving their infrastructure problems, not on the technicalities of data collection and visualization.
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fwhy-netdata%2Fmeaningful-presentation&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fwhy-netdata%2Fmeaningful-presentation&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/why-netdata/unlimited-metrics.md b/docs/why-netdata/unlimited-metrics.md
index a4ecaf3f..827138ff 100644
--- a/docs/why-netdata/unlimited-metrics.md
+++ b/docs/why-netdata/unlimited-metrics.md
@@ -10,8 +10,8 @@ Unfortunately, this does not work! Filtering out most metrics is like reading a
For many people, monitoring is about:
-- Detecting outages
-- Capacity planning
+- Detecting outages
+- Capacity planning
However, **slowdowns are 10 times more common** than outages (check slide 14 of [Online Performance is Business Performance](https://www.slideshare.net/KenGodskind/alertsitetrac) reported by Trac Research/AlertSite). Designing a monitoring system targeting only outages and capacity planning solves just a tiny part of the operational problems we face. Check also [Downtime vs. Slowtime: Which Hurts More?](https://dzone.com/articles/downtime-vs-slowtime-which-hurts-more).
@@ -29,9 +29,9 @@ So, why do monitoring solutions and SaaS providers filter out metrics?
They can't do otherwise!
-1. Centralization of metrics depends on metrics filtering, to control monitoring costs. Time-series databases limit the number of metrics collected, because the number of metrics influences their performance significantly. They get congested at scale.
-2. It is a lot easier to provide an illusion of monitoring by using a few basic metrics.
-3. Troubleshooting slowdowns is the hardest IT problem to solve, so most solutions just avoid it.
+1. Centralization of metrics depends on metrics filtering to control monitoring costs. Time-series databases limit the number of metrics collected, because the number of metrics influences their performance significantly. They get congested at scale.
+2. It is a lot easier to provide an illusion of monitoring by using a few basic metrics.
+3. Troubleshooting slowdowns is the hardest IT problem to solve, so most solutions just avoid it.
## What does Netdata do?
@@ -41,4 +41,4 @@ Due to Netdata's distributed nature, the number of metrics collected does not ha
Of course, since Netdata is also about [meaningful presentation](meaningful-presentation.md), the number of metrics makes Netdata development slower. We, the Netdata developers, need to have a good understanding of the metrics before adding them into Netdata. We need to organize the metrics, add information related to them, configure alarms for them, so that you, the Netdata users, will have the best out-of-the-box experience and all the information required to kill the console for troubleshooting slowdowns.
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fwhy-netdata%2Funlimited-metrics&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fwhy-netdata%2Funlimited-metrics&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/health/Makefile.am b/health/Makefile.am
index 5310bd8a..e9fceddb 100644
--- a/health/Makefile.am
+++ b/health/Makefile.am
@@ -84,6 +84,7 @@ dist_healthconfig_DATA = \
health.d/tcp_resets.conf \
health.d/udp_errors.conf \
health.d/varnish.conf \
+ health.d/vsphere.conf \
health.d/web_log.conf \
health.d/wmi.conf \
health.d/x509check.conf \
diff --git a/health/Makefile.in b/health/Makefile.in
new file mode 100644
index 00000000..dca19e88
--- /dev/null
+++ b/health/Makefile.in
@@ -0,0 +1,861 @@
+# Makefile.in generated by automake 1.15.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2017 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+VPATH = @srcdir@
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = health
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(dist_healthconfig_DATA) \
+ $(dist_noinst_DATA) $(dist_userhealthconfig_DATA) \
+ $(am__DIST_COMMON)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \
+ ctags-recursive dvi-recursive html-recursive info-recursive \
+ install-data-recursive install-dvi-recursive \
+ install-exec-recursive install-html-recursive \
+ install-info-recursive install-pdf-recursive \
+ install-ps-recursive install-recursive installcheck-recursive \
+ installdirs-recursive pdf-recursive ps-recursive \
+ tags-recursive uninstall-recursive
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
+am__vpath_adj = case $$p in \
+ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
+ *) f=$$p;; \
+ esac;
+am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
+am__install_max = 40
+am__nobase_strip_setup = \
+ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
+am__nobase_strip = \
+ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
+am__nobase_list = $(am__nobase_strip_setup); \
+ for p in $$list; do echo "$$p $$p"; done | \
+ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
+ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
+ if (++n[$$2] == $(am__install_max)) \
+ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
+ END { for (dir in files) print dir, files[dir] }'
+am__base_list = \
+ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
+ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
+am__uninstall_files_from_dir = { \
+ test -z "$$files" \
+ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \
+ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \
+ $(am__cd) "$$dir" && rm -f $$files; }; \
+ }
+am__installdirs = "$(DESTDIR)$(healthconfigdir)" \
+ "$(DESTDIR)$(userhealthconfigdir)"
+DATA = $(dist_healthconfig_DATA) $(dist_noinst_DATA) \
+ $(dist_userhealthconfig_DATA)
+RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \
+ distclean-recursive maintainer-clean-recursive
+am__recursive_targets = \
+ $(RECURSIVE_TARGETS) \
+ $(RECURSIVE_CLEAN_TARGETS) \
+ $(am__extra_recursive_targets)
+AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \
+ distdir
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+# Read a list of newline-separated strings from the standard input,
+# and print each of them once, without duplicates. Input order is
+# *not* preserved.
+am__uniquify_input = $(AWK) '\
+ BEGIN { nonempty = 0; } \
+ { items[$$0] = 1; nonempty = 1; } \
+ END { if (nonempty) { for (i in items) print i; }; } \
+'
+# Make sure the list of sources is unique. This is necessary because,
+# e.g., the same source file might be shared among _SOURCES variables
+# for different programs/libraries.
+am__define_uniq_tagged_files = \
+ list='$(am__tagged_files)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | $(am__uniquify_input)`
+ETAGS = etags
+CTAGS = ctags
+DIST_SUBDIRS = $(SUBDIRS)
+am__DIST_COMMON = $(srcdir)/Makefile.in
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+am__relativize = \
+ dir0=`pwd`; \
+ sed_first='s,^\([^/]*\)/.*$$,\1,'; \
+ sed_rest='s,^[^/]*/*,,'; \
+ sed_last='s,^.*/\([^/]*\)$$,\1,'; \
+ sed_butlast='s,/*[^/]*$$,,'; \
+ while test -n "$$dir1"; do \
+ first=`echo "$$dir1" | sed -e "$$sed_first"`; \
+ if test "$$first" != "."; then \
+ if test "$$first" = ".."; then \
+ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \
+ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \
+ else \
+ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \
+ if test "$$first2" = "$$first"; then \
+ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \
+ else \
+ dir2="../$$dir2"; \
+ fi; \
+ dir0="$$dir0"/"$$first"; \
+ fi; \
+ fi; \
+ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \
+ done; \
+ reldir="$$dir2"
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CUPSCONFIG = @CUPSCONFIG@
+CXX = @CXX@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CXX_BINARY = @CXX_BINARY@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+JSON_CFLAGS = @JSON_CFLAGS@
+JSON_LIBS = @JSON_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
+LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
+LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
+LIBCURL_LIBS = @LIBCURL_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
+LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
+LIBSSL_LIBS = @LIBSSL_LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
+OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
+OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
+OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
+OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
+OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
+OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
+OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
+OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
+OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
+OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
+OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
+OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
+PROTOBUF_LIBS = @PROTOBUF_LIBS@
+PROTOC = @PROTOC@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
+XENLIGHT_LIBS = @XENLIGHT_LIBS@
+YAJL_CFLAGS = @YAJL_CFLAGS@
+YAJL_LIBS = @YAJL_LIBS@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+SUBDIRS = \
+ notifications \
+ $(NULL)
+
+CLEANFILES = \
+ $(NULL)
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
+
+userhealthconfigdir = $(configdir)/health.d
+dist_userhealthconfig_DATA = \
+ .keep \
+ $(NULL)
+
+healthconfigdir = $(libconfigdir)/health.d
+dist_healthconfig_DATA = \
+ health.d/adaptec_raid.conf \
+ health.d/apache.conf \
+ health.d/apcupsd.conf \
+ health.d/backend.conf \
+ health.d/bcache.conf \
+ health.d/beanstalkd.conf \
+ health.d/bind_rndc.conf \
+ health.d/boinc.conf \
+ health.d/btrfs.conf \
+ health.d/ceph.conf \
+ health.d/cgroups.conf \
+ health.d/cpu.conf \
+ health.d/couchdb.conf \
+ health.d/disks.conf \
+ health.d/dnsmasq_dhcp.conf \
+ health.d/dockerd.conf \
+ health.d/elasticsearch.conf \
+ health.d/entropy.conf \
+ health.d/fping.conf \
+ health.d/ioping.conf \
+ health.d/fronius.conf \
+ health.d/haproxy.conf \
+ health.d/httpcheck.conf \
+ health.d/ipc.conf \
+ health.d/ipfs.conf \
+ health.d/ipmi.conf \
+ health.d/isc_dhcpd.conf \
+ health.d/kubelet.conf \
+ health.d/lighttpd.conf \
+ health.d/linux_power_supply.conf \
+ health.d/load.conf \
+ health.d/mdstat.conf \
+ health.d/megacli.conf \
+ health.d/memcached.conf \
+ health.d/memory.conf \
+ health.d/mongodb.conf \
+ health.d/mysql.conf \
+ health.d/named.conf \
+ health.d/net.conf \
+ health.d/netfilter.conf \
+ health.d/nginx.conf \
+ health.d/nginx_plus.conf \
+ health.d/pihole.conf \
+ health.d/phpfpm.conf \
+ health.d/portcheck.conf \
+ health.d/postgres.conf \
+ health.d/processes.conf \
+ health.d/qos.conf \
+ health.d/ram.conf \
+ health.d/redis.conf \
+ health.d/retroshare.conf \
+ health.d/riakkv.conf \
+ health.d/softnet.conf \
+ health.d/squid.conf \
+ health.d/stiebeleltron.conf \
+ health.d/swap.conf \
+ health.d/tcp_conn.conf \
+ health.d/tcp_listen.conf \
+ health.d/tcp_mem.conf \
+ health.d/tcp_orphans.conf \
+ health.d/tcp_resets.conf \
+ health.d/udp_errors.conf \
+ health.d/varnish.conf \
+ health.d/vsphere.conf \
+ health.d/web_log.conf \
+ health.d/wmi.conf \
+ health.d/x509check.conf \
+ health.d/zfs.conf \
+ health.d/dbengine.conf \
+ $(NULL)
+
+all: all-recursive
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu health/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu health/Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+install-dist_healthconfigDATA: $(dist_healthconfig_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_healthconfig_DATA)'; test -n "$(healthconfigdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(healthconfigdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(healthconfigdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(healthconfigdir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(healthconfigdir)" || exit $$?; \
+ done
+
+uninstall-dist_healthconfigDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_healthconfig_DATA)'; test -n "$(healthconfigdir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(healthconfigdir)'; $(am__uninstall_files_from_dir)
+install-dist_userhealthconfigDATA: $(dist_userhealthconfig_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_userhealthconfig_DATA)'; test -n "$(userhealthconfigdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(userhealthconfigdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(userhealthconfigdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(userhealthconfigdir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(userhealthconfigdir)" || exit $$?; \
+ done
+
+uninstall-dist_userhealthconfigDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_userhealthconfig_DATA)'; test -n "$(userhealthconfigdir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(userhealthconfigdir)'; $(am__uninstall_files_from_dir)
+
+# This directory's subdirectories are mostly independent; you can cd
+# into them and run 'make' without going through this Makefile.
+# To change the values of 'make' variables: instead of editing Makefiles,
+# (1) if the variable is set in 'config.status', edit 'config.status'
+# (which will cause the Makefiles to be regenerated when you run 'make');
+# (2) otherwise, pass the desired values on the 'make' command line.
+$(am__recursive_targets):
+ @fail=; \
+ if $(am__make_keepgoing); then \
+ failcom='fail=yes'; \
+ else \
+ failcom='exit 1'; \
+ fi; \
+ dot_seen=no; \
+ target=`echo $@ | sed s/-recursive//`; \
+ case "$@" in \
+ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \
+ *) list='$(SUBDIRS)' ;; \
+ esac; \
+ for subdir in $$list; do \
+ echo "Making $$target in $$subdir"; \
+ if test "$$subdir" = "."; then \
+ dot_seen=yes; \
+ local_target="$$target-am"; \
+ else \
+ local_target="$$target"; \
+ fi; \
+ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \
+ || eval $$failcom; \
+ done; \
+ if test "$$dot_seen" = "no"; then \
+ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \
+ fi; test -z "$$fail"
+
+ID: $(am__tagged_files)
+ $(am__define_uniq_tagged_files); mkid -fID $$unique
+tags: tags-recursive
+TAGS: tags
+
+tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+ set x; \
+ here=`pwd`; \
+ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \
+ include_option=--etags-include; \
+ empty_fix=.; \
+ else \
+ include_option=--include; \
+ empty_fix=; \
+ fi; \
+ list='$(SUBDIRS)'; for subdir in $$list; do \
+ if test "$$subdir" = .; then :; else \
+ test ! -f $$subdir/TAGS || \
+ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \
+ fi; \
+ done; \
+ $(am__define_uniq_tagged_files); \
+ shift; \
+ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
+ test -n "$$unique" || unique=$$empty_fix; \
+ if test $$# -gt 0; then \
+ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+ "$$@" $$unique; \
+ else \
+ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+ $$unique; \
+ fi; \
+ fi
+ctags: ctags-recursive
+
+CTAGS: ctags
+ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+ $(am__define_uniq_tagged_files); \
+ test -z "$(CTAGS_ARGS)$$unique" \
+ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
+ $$unique
+
+GTAGS:
+ here=`$(am__cd) $(top_builddir) && pwd` \
+ && $(am__cd) $(top_srcdir) \
+ && gtags -i $(GTAGS_ARGS) "$$here"
+cscopelist: cscopelist-recursive
+
+cscopelist-am: $(am__tagged_files)
+ list='$(am__tagged_files)'; \
+ case "$(srcdir)" in \
+ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \
+ *) sdir=$(subdir)/$(srcdir) ;; \
+ esac; \
+ for i in $$list; do \
+ if test -f "$$i"; then \
+ echo "$(subdir)/$$i"; \
+ else \
+ echo "$$sdir/$$i"; \
+ fi; \
+ done >> $(top_builddir)/cscope.files
+
+distclean-tags:
+ -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+ @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \
+ if test "$$subdir" = .; then :; else \
+ $(am__make_dryrun) \
+ || test -d "$(distdir)/$$subdir" \
+ || $(MKDIR_P) "$(distdir)/$$subdir" \
+ || exit 1; \
+ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \
+ $(am__relativize); \
+ new_distdir=$$reldir; \
+ dir1=$$subdir; dir2="$(top_distdir)"; \
+ $(am__relativize); \
+ new_top_distdir=$$reldir; \
+ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \
+ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \
+ ($(am__cd) $$subdir && \
+ $(MAKE) $(AM_MAKEFLAGS) \
+ top_distdir="$$new_top_distdir" \
+ distdir="$$new_distdir" \
+ am__remove_distdir=: \
+ am__skip_length_check=: \
+ am__skip_mode_fix=: \
+ distdir) \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-recursive
+all-am: Makefile $(DATA)
+installdirs: installdirs-recursive
+installdirs-am:
+ for dir in "$(DESTDIR)$(healthconfigdir)" "$(DESTDIR)$(userhealthconfigdir)"; do \
+ test -z "$$dir" || $(MKDIR_P) "$$dir"; \
+ done
+install: install-recursive
+install-exec: install-exec-recursive
+install-data: install-data-recursive
+uninstall: uninstall-recursive
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-recursive
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+ -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES)
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-recursive
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-recursive
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic distclean-tags
+
+dvi: dvi-recursive
+
+dvi-am:
+
+html: html-recursive
+
+html-am:
+
+info: info-recursive
+
+info-am:
+
+install-data-am: install-dist_healthconfigDATA \
+ install-dist_userhealthconfigDATA
+
+install-dvi: install-dvi-recursive
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-recursive
+
+install-html-am:
+
+install-info: install-info-recursive
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-recursive
+
+install-pdf-am:
+
+install-ps: install-ps-recursive
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-recursive
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-recursive
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-recursive
+
+pdf-am:
+
+ps: ps-recursive
+
+ps-am:
+
+uninstall-am: uninstall-dist_healthconfigDATA \
+ uninstall-dist_userhealthconfigDATA
+
+.MAKE: $(am__recursive_targets) install-am install-strip
+
+.PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am check \
+ check-am clean clean-generic cscopelist-am ctags ctags-am \
+ distclean distclean-generic distclean-tags distdir dvi dvi-am \
+ html html-am info info-am install install-am install-data \
+ install-data-am install-dist_healthconfigDATA \
+ install-dist_userhealthconfigDATA install-dvi install-dvi-am \
+ install-exec install-exec-am install-html install-html-am \
+ install-info install-info-am install-man install-pdf \
+ install-pdf-am install-ps install-ps-am install-strip \
+ installcheck installcheck-am installdirs installdirs-am \
+ maintainer-clean maintainer-clean-generic mostlyclean \
+ mostlyclean-generic pdf pdf-am ps ps-am tags tags-am uninstall \
+ uninstall-am uninstall-dist_healthconfigDATA \
+ uninstall-dist_userhealthconfigDATA
+
+.PRECIOUS: Makefile
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/health/README.md b/health/README.md
index 345f7fc7..848c1bc3 100644
--- a/health/README.md
+++ b/health/README.md
@@ -1,9 +1,9 @@
# Health monitoring
-Each netdata node runs an independent thread evaluating health monitoring checks.
+Each Netdata node runs an independent thread evaluating health monitoring checks.
This thread has lock-free access to the database, so that it can operate as a watchdog.
-Health checks (alarms) are attached to netdata charts, allowing netdata to automatically
+Health checks (alarms) are attached to Netdata charts, allowing Netdata to automatically
activate an alarm as soon as a chart is created. This is very important for
netdata, since many charts are dynamically created during runtime (for example, the
chart tracking network interface packet drops is automatically created on the first
@@ -20,15 +20,15 @@ use expressions combining the latest value of any number of metrics.
## Health configuration reference
-Stock netdata health configuration is in `/usr/lib/netdata/conf.d/health.d`.
+Stock Netdata health configuration is in `/usr/lib/netdata/conf.d/health.d`.
These files can be overwritten by copying them and editing them in `/etc/netdata/health.d`
(run `/etc/netdata/edit-config` to edit them).
In `/etc/netdata/health.d` you can also put any number of files (in any number of sub-directories)
-with a suffix `.conf` to have them processed by netdata.
+with a suffix `.conf` to have them processed by Netdata.
-Health configuration can be reloaded at any time, without restarting netdata.
-Just send netdata the SIGUSR2 signal, like this:
+Health configuration can be reloaded at any time, without restarting Netdata.
+Just send Netdata the SIGUSR2 signal, like this:
```sh
killall -USR2 netdata
@@ -38,11 +38,11 @@ killall -USR2 netdata
There are 2 entities:
-1. **alarms**, which are attached to specific charts, and
+1. **alarms**, which are attached to specific charts, and
-1. **templates**, which define rules that should be applied to all charts having a
- specific `context`. You can use this feature to apply **alarms** to all disks,
- all network interfaces, all mysql databases, all nginx web servers, etc.
+2. **templates**, which define rules that should be applied to all charts having a
+ specific `context`. You can use this feature to apply **alarms** to all disks,
+ all network interfaces, all mysql databases, all nginx web servers, etc.
Both of these entities have exactly the same format and feature set.
The only difference is the label `alarm` or `template`.
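
To make the distinction concrete, here is a minimal sketch of one of each (the names, charts, contexts and thresholds below are illustrative, not stock configuration):

```
# an alarm is attached to one specific chart
 alarm: my_cpu_is_busy
    on: system.cpu
lookup: average -1m unaligned of user,system
 every: 10s
  warn: $this > 75

# a template applies to every chart with the given context
template: my_disk_is_full
      on: disk.space
    calc: $used * 100 / ($used + $avail)
   every: 1m
    warn: $this > 90
```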
@@ -50,7 +50,7 @@ The only difference is the label `alarm` or `template`.
Netdata supports overriding **templates** with **alarms**.
For example, when a template is defined for a set of charts, an alarm with exactly the
same name attached to the same chart the template matches will have higher precedence
-(i.e. netdata will use the alarm on this chart and prevent the template from being applied
+(i.e. Netdata will use the alarm on this chart and prevent the template from being applied
to it).
### The format
@@ -135,7 +135,7 @@ hosts: server1 server2 database* !redis3 redis*
The above says: use this alarm on all hosts named `server1`, `server2`, `database*`, and
all `redis*` except `redis3`.
-This is useful when you centralize metrics from multiple hosts, to one netdata.
+This is useful when you centralize metrics from multiple hosts, to one Netdata.
---
@@ -168,27 +168,27 @@ lookup: METHOD AFTER [at BEFORE] [every DURATION] [OPTIONS] [of DIMENSIONS]
Everything is the same with [badges](../web/api/badges/). In short:
-- `METHOD` is one of `average`, `min`, `max`, `sum`, `incremental-sum`.
- This is required.
+- `METHOD` is one of `average`, `min`, `max`, `sum`, `incremental-sum`.
+ This is required.
-- `AFTER` is a relative number of seconds, but it also accepts a single letter for changing
- the units, like `-1s` = 1 second in the past, `-1m` = 1 minute in the past, `-1h` = 1 hour
- in the past, `-1d` = 1 day in the past. You need a negative number (i.e. how far in the past
- to look for the value). **This is required**.
+- `AFTER` is a relative number of seconds, but it also accepts a single letter for changing
+ the units, like `-1s` = 1 second in the past, `-1m` = 1 minute in the past, `-1h` = 1 hour
+ in the past, `-1d` = 1 day in the past. You need a negative number (i.e. how far in the past
+ to look for the value). **This is required**.
-- `at BEFORE` is by default 0 and is not required. Using this you can define the end of the
- lookup. So data will be evaluated between `AFTER` and `BEFORE`.
+- `at BEFORE` is by default 0 and is not required. Using this you can define the end of the
+ lookup. So data will be evaluated between `AFTER` and `BEFORE`.
-- `every DURATION` sets the updated frequency of the lookup (supports single letter units as
- above too).
+- `every DURATION` sets the update frequency of the lookup (supports single letter units as
+ above too).
-- `OPTIONS` is a space separated list of `percentage`, `absolute`, `min2max`, `unaligned`,
- `match-ids`, `match-names`. Check the badges documentation for more info.
+- `OPTIONS` is a space separated list of `percentage`, `absolute`, `min2max`, `unaligned`,
+ `match-ids`, `match-names`. Check the badges documentation for more info.
-- `of DIMENSIONS` is optional and has to be the last parameter. Dimensions have to be separated
- by `,` or `|`. The space characters found in dimensions will be kept as-is (a few dimensions
- have spaces in their names). This accepts netdata simple patterns and the `match-ids` and
- `match-names` options affect the searches for dimensions.
+- `of DIMENSIONS` is optional and has to be the last parameter. Dimensions have to be separated
+ by `,` or `|`. The space characters found in dimensions will be kept as-is (a few dimensions
+ have spaces in their names). This accepts Netdata simple patterns and the `match-ids` and
+ `match-names` options affect the searches for dimensions.
The result of the lookup will be available as `$this` and `$NAME` in expressions.
The timestamps of the timeframe evaluated by the database lookup are available as variables
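
For instance, a hypothetical lookup line combining most of these parts (the dimension names are illustrative):

```
# average over the last minute, refreshed every 10 seconds, of two
# illustrative dimensions, expressed as a percentage of the chart total
lookup: average -1m every 10s unaligned percentage of user,system
```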
@@ -259,6 +259,7 @@ Format:
warn: EXPRESSION
crit: EXPRESSION
```
+
Check [Expressions](#expressions) for more information.
---
@@ -289,8 +290,8 @@ Format:
exec: SCRIPT
```
-The default `SCRIPT` is netdata's `alarm-notify.sh`, which supports all the notifications
-methods netdata supports, including custom hooks.
+The default `SCRIPT` is Netdata's `alarm-notify.sh`, which supports all the notification
+methods Netdata supports, including custom hooks.
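
For example, to route notifications to a custom handler instead (the script path here is hypothetical):

```
exec: /etc/netdata/my-alarm-handler.sh
```

Whatever script is set receives the same command-line arguments that `alarm-notify.sh` would get (they can be seen assembled in `health_alarm_execute()` in `health/health.c`, part of this same diff).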
---
@@ -306,40 +307,41 @@ Format:
delay: [[[up U] [down D] multiplier M] max X]
```
-- `up U` defines the delay to be applied to a notification for an alarm that raised its status
- (i.e. CLEAR to WARNING, CLEAR to CRITICAL, WARNING to CRITICAL). For example, `up 10s`, the
- notification for this event will be sent 10 seconds after the actual event. This is used in
- hope the alarm will get back to its previous state within the duration given. The default `U`
- is zero.
+- `up U` defines the delay to be applied to a notification for an alarm that raised its status
+  (i.e. CLEAR to WARNING, CLEAR to CRITICAL, WARNING to CRITICAL). For example, with `up 10s` the
+  notification for this event will be sent 10 seconds after the actual event. This is used in the
+  hope that the alarm will get back to its previous state within the duration given. The default `U`
+  is zero.
+
+- `down D` defines the delay to be applied to a notification for an alarm that moves to a lower
+  state (i.e. CRITICAL to WARNING, CRITICAL to CLEAR, WARNING to CLEAR). For example, `down 1m`
+ will delay the notification by 1 minute. This is used to prevent notifications for flapping
+ alarms. The default `D` is zero.
-- `down D` defines the delay to be applied to a notification for an alarm that moves to lower
- state (i.e. CRITICAL to WARNING, CRITICAL to CLEAR, WARNING to CLEAR). For example, `down 1m`
- will delay the notification by 1 minute. This is used to prevent notifications for flapping
- alarms. The default `D` is zero.
+- `multiplier M` multiplies `U` and `D` when an alarm changes state, while a notification is
+ delayed. The default multiplier is `1.0`.
-- `mutliplier M` multiplies `U` and `D` when an alarm changes state, while a notification is
- delayed. The default multiplier is `1.0`.
+- `max X` defines the maximum absolute notification delay an alarm may get. The default `X`
+ is `max(U * M, D * M)` (i.e. the max duration of `U` or `D` multiplied once with `M`).
-- `max X` defines the maximum absolute notification delay an alarm may get. The default `X`
- is `max(U * M, D * M)` (i.e. the max duration of `U` or `D` multiplied once with `M`).
+ Example:
- Example:
+ `delay: up 10s down 15m multiplier 2 max 1h`
- `delay: up 10s down 15m multiplier 2 max 1h`
+ The time is `00:00:00` and the status of the alarm is CLEAR.
- The time is `00:00:00` and the status of the alarm is CLEAR.
+ | time of event | new status | delay | notification will be sent|why|
+ |-------------|----------|:---:|-------------------------|---|
+ | 00:00:01 | WARNING | `up 10s` | 00:00:11|first state switch|
+ | 00:00:05 | CLEAR | `down 15m x2` | 00:30:05|the alarm changes state while a notification is delayed, so it was multiplied|
+ | 00:00:06 | WARNING | `up 10s x2 x2` | 00:00:26|multiplied twice|
+ | 00:00:07 | CLEAR | `down 15m x2 x2 x2` | 00:45:07|multiplied 3 times.|
- time of event|new status|delay|notification will be sent|why
- -------------|----------|:---:|-------------------------|---
- 00:00:01 | WARNING | `up 10s` | 00:00:11 |first state switch
- 00:00:05 | CLEAR | `down 15m x2`| 00:30:05 |the alarm changes state while a notification is delayed, so it was multiplied
- 00:00:06 | WARNING | `up 10s x2 x2` | 00:00:26 |multiplied twice
- 00:00:07|CLEAR|`down 15m x2 x2 x2`|00:45:07|multiplied 3 times.
+ So:
- So:
- - `U` and `D` are multiplied by `M` every time the alarm changes state (any state, not just
- their matching one) and a delay is in place.
- - All are reset to their defaults when the alarm switches state without a delay in place.
+ - `U` and `D` are multiplied by `M` every time the alarm changes state (any state, not just
+ their matching one) and a delay is in place.
+ - All are reset to their defaults when the alarm switches state without a delay in place.
---
@@ -353,9 +355,9 @@ Format:
repeat: [off] [warning DURATION] [critical DURATION]
```
-* `off`: Turns off the repeating feature for the current alarm. This is effective when the default repeat settings has been enabled in health configuration.
-* `warning DURATION`: Defines the interval when the alarm is in WARNING state. Use `0s` to turn off the repeating notification for WARNING mode.
-* `critical DURATION`: Defines the interval when the alarm is in CRITICAL state. Use `0s` to turn off the repeating notification for CRITICAL mode.
+- `off`: Turns off the repeating feature for the current alarm. This is effective when the default repeat settings have been enabled in health configuration.
+- `warning DURATION`: Defines the interval when the alarm is in WARNING state. Use `0s` to turn off the repeating notification for WARNING mode.
+- `critical DURATION`: Defines the interval when the alarm is in CRITICAL state. Use `0s` to turn off the repeating notification for CRITICAL mode.
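
A hypothetical example, re-sending WARNING notifications every 30 minutes and CRITICAL ones every 10 minutes:

```
repeat: warning 30m critical 10m
```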
---
@@ -373,19 +375,17 @@ For some alarms we need compare two time-frames, to detect anomalies. For exampl
### Expressions
-netdata has an internal [infix expression parser](../libnetdata/eval).
+Netdata has an internal [infix expression parser](../libnetdata/eval).
This parses expressions and creates an internal structure that allows fast execution of them.
These operators are supported `+`, `-`, `*`, `/`, `<`, `<=`, `<>`, `!=`, `>`, `>=`, `&&`, `||`,
`!`, `AND`, `OR`, `NOT`. Boolean operators result in either `1` (true) or `0` (false).
-The conditional evaluation operator `?` is supported too. Using this operator IF-THEN-ELSE
-conditional statements can be specified. The format is: `(condition) ? (true expression) :
-(false expression)`. So, netdata will first evaluate the `condition` and based on the result
-will either evaluate `true expression` or `false expression`.
+The conditional evaluation operator `?` is supported too. Using this operator, IF-THEN-ELSE conditional statements can be specified. The format is: `(condition) ? (true expression) : (false expression)`. So, Netdata will first evaluate the `condition` and based on the result will either evaluate `true expression` or `false expression`.
+
Example: `($this > 0) ? ($avail * 2) : ($used / 2)`.
-Nested such expressions are also supported (i.e. `true expression` and `false expression` can
-contain conditional evaluations).
+
+Such expressions can also be nested (i.e. `true expression` and `false expression` can contain conditional evaluations).
Expressions also support the `abs()` function.
@@ -393,9 +393,9 @@ Expressions can have variables. Variables start with `$`. Check below for more i
There are two special values you can use:
-- `nan`, for example `$this != nan` will check if the variable `this` is available. A variable can be `nan` if the database lookup failed. All calculations (i.e. addition, multiplication, etc) with a `nan` result in a `nan`.
+- `nan`, for example `$this != nan` will check if the variable `this` is available. A variable can be `nan` if the database lookup failed. All calculations (i.e. addition, multiplication, etc) with a `nan` result in a `nan`.
-- `inf`, for example `$this != inf` will check if `this` is not infinite. A value or variable can be infinite if divided by zero. All calculations (i.e. addition, multiplication, etc) with a `inf` result in a `inf`.
+- `inf`, for example `$this != inf` will check if `this` is not infinite. A value or variable can be infinite if divided by zero. All calculations (i.e. addition, multiplication, etc) with an `inf` result in an `inf`.
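
A sketch of how these guards are typically combined with a threshold (the threshold value is illustrative):

```
warn: $this != nan AND $this != inf AND $this > 75
```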
---
@@ -407,7 +407,7 @@ or warning thresholds. This usage helps to avoid bogus messages resulting from
variations in the value when it is varying regularly but staying close to the threshold
value, without needing to delay sending messages at all.
-An example of such usage from the default CPU usage alarms bundled with netdata is:
+An example of such usage from the default CPU usage alarms bundled with Netdata is:
```
warn: $this > (($status >= $WARNING) ? (75) : (85))
@@ -415,25 +415,27 @@ crit: $this > (($status == $CRITICAL) ? (85) : (95))
```
The above says:
-* If the alarm is currently a warning, then the threshold for being considered a warning
- is 75, otherwise it's 85.
-* If the alarm is currently critical, then the threshold for being considered critical
- is 85, otherwise it's 95.
+- If the alarm is currently a warning, then the threshold for being considered a warning
+ is 75, otherwise it's 85.
+
+- If the alarm is currently critical, then the threshold for being considered critical
+ is 85, otherwise it's 95.
Which, in turn, results in the following behavior:
-* While the value is rising, it will trigger a warning when it exceeds 85, and a critical
- alert when it exceeds 95.
-* While the value is falling, it will return to a warning state when it goes below 85,
- and a normal state when it goes below 75.
+- While the value is rising, it will trigger a warning when it exceeds 85, and a critical
+ alert when it exceeds 95.
+
+- While the value is falling, it will return to a warning state when it goes below 85,
+ and a normal state when it goes below 75.
-* If the value is constantly varying between 80 and 90, then it will trigger a warning the
- first time it goes above 85, but will remain a warning until it goes below 75 (or goes above 85).
+- If the value is constantly varying between 80 and 90, then it will trigger a warning the
+ first time it goes above 85, but will remain a warning until it goes below 75 (or goes above 85).
-* If the value is constantly varying between 90 and 100, then it will trigger a critical alert
- the first time it goes above 95, but will remain a critical alert goes below 85 (at which
- point it will return to being a warning).
+- If the value is constantly varying between 90 and 100, then it will trigger a critical alert
+  the first time it goes above 95, but will remain a critical alert until it goes below 85 (at which
+  point it will return to being a warning).
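
Since the code base in this diff is C, here is a small self-contained sketch (not Netdata code, just the logic described above) of the warning-side hysteresis with the 75/85 thresholds from the example:

```c
#include <stdio.h>

/* warning-side hysteresis, mirroring
 *   warn: $this > (($status >= $WARNING) ? (75) : (85))
 * the threshold is 75 while already in WARNING, 85 otherwise */
static int evaluate_warning(int was_warning, double value) {
    double threshold = was_warning ? 75.0 : 85.0;
    return value > threshold;
}

int main(void) {
    double samples[] = { 70, 86, 80, 74, 90 };
    int warning = 0;
    for (size_t i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
        warning = evaluate_warning(warning, samples[i]);
        printf("value=%5.1f -> %s\n", samples[i], warning ? "WARNING" : "CLEAR");
    }
    return 0;
}
```

Note how the sample at 80 keeps the WARNING state alive: it is below the trigger threshold (85) but above the release threshold (75).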
---
@@ -445,21 +447,21 @@ Example: [variables for the `system.cpu` chart of the registry](https://registry
_Hint: If you don't know how to find the CHART_NAME, you can read about it [here](../docs/Charts.md#charts)._
-
Netdata supports 3 internal indexes for variables that will be used in health monitoring.
+
<details markdown="1"><summary>The variables below can be used in both chart alarms and context templates.</summary>
Although the `alarm_variables` link shows you variables for a particular chart, the same variables can also be used in templates for charts belonging to the same [context](../docs/Charts.md#contexts). The reason is that all charts of a given context are essentially identical, with the only difference being the [family](../docs/Charts.md#families) that identifies a particular hardware or software instance. Charts and templates do not apply to specific families anyway, unless you explicitly limit an alarm with the [alarm line `families`](#alarm-line-families).
</details>
- - **chart local variables**. All the dimensions of the chart are exposed as local variables. The value of $this for the other configured alarms of the chart also appears, under the name of each configured alarm.
+- **chart local variables**. All the dimensions of the chart are exposed as local variables. The value of $this for the other configured alarms of the chart also appears, under the name of each configured alarm.
Charts also define a few special variables:
- - `$last_collected_t` is the unix timestamp of the last data collection
- - `$collected_total_raw` is the sum of all the dimensions (their last collected values)
- - `$update_every` is the update frequency of the chart
- - `$green` and `$red` the threshold defined in alarms (these are per chart - the charts
- inherits them from the the first alarm that defined them)
+ - `$last_collected_t` is the unix timestamp of the last data collection
+ - `$collected_total_raw` is the sum of all the dimensions (their last collected values)
+ - `$update_every` is the update frequency of the chart
+  - `$green` and `$red`, the thresholds defined in alarms (these are per chart - the chart
+    inherits them from the first alarm that defined them)
Chart dimensions define their last calculated (i.e. interpolated) value, exactly as
shown on the charts, but also a variable with their name and suffix `_raw` that resolves
@@ -467,49 +469,49 @@ Although the `alarm_variables` link shows you variables for a particular chart,
that resolves to the unix timestamp the dimension was last collected (there may be dimensions
that fail to be collected while others continue normally).
- - **family variables**. Families are used to group charts together. For example all `eth0`
+- **family variables**. Families are used to group charts together. For example all `eth0`
charts have `family = eth0`. This index includes all local variables, but if there are
overlapping variables, only the first are exposed.
- - **host variables**. All the dimensions of all charts, including all alarms, in fullname.
+- **host variables**. All the dimensions of all charts, including all alarms, in fullname.
Fullname is `CHART.VARIABLE`, where `CHART` is either the chart id or the chart name (both
are supported).
- - **special variables*** are:
+- **special variables\*** are:
- - `$this`, which is resolved to the value of the current alarm.
+ - `$this`, which is resolved to the value of the current alarm.
- - `$status`, which is resolved to the current status of the alarm (the current = the last
- status, i.e. before the current database lookup and the evaluation of the `calc` line).
- This values can be compared with `$REMOVED`, `$UNINITIALIZED`, `$UNDEFINED`, `$CLEAR`,
- `$WARNING`, `$CRITICAL`. These values are incremental, ie. `$status > $CLEAR` works as
- expected.
+ - `$status`, which is resolved to the current status of the alarm (the current = the last
+ status, i.e. before the current database lookup and the evaluation of the `calc` line).
+    These values can be compared with `$REMOVED`, `$UNINITIALIZED`, `$UNDEFINED`, `$CLEAR`,
+    `$WARNING`, `$CRITICAL`. These values are incremental, i.e. `$status > $CLEAR` works as
+ expected.
- - `$now`, which is resolved to current unix timestamp.
+  - `$now`, which is resolved to the current unix timestamp.
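
A tiny illustrative combination of these variables (the thresholds are made up):

```
 calc: $now - $last_collected_t
 warn: $this > (($status >= $WARNING) ? (30) : (60))
```

This computes the seconds since the last collection and, via `$status`, once raised the alarm only clears when the value drops back below the lower threshold.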
## Alarm Statuses
Alarms can have the following statuses:
- - `REMOVED` - the alarm has been deleted (this happens when a SIGUSR2 is sent to netdata
+- `REMOVED` - the alarm has been deleted (this happens when a SIGUSR2 is sent to Netdata
to reload health configuration)
- - `UNINITIALIZED` - the alarm is not initialized yet
+- `UNINITIALIZED` - the alarm is not initialized yet
- - `UNDEFINED` - the alarm failed to be calculated (i.e. the database lookup failed,
+- `UNDEFINED` - the alarm failed to be calculated (i.e. the database lookup failed,
a division by zero occurred, etc)
- - `CLEAR` - the alarm is not armed / raised (i.e. is OK)
+- `CLEAR` - the alarm is not armed / raised (i.e. is OK)
- - `WARNING` - the warning expression resulted in true or non-zero
+- `WARNING` - the warning expression resulted in true or non-zero
- - `CRITICAL` - the critical expression resulted in true or non-zero
+- `CRITICAL` - the critical expression resulted in true or non-zero
The external script will be called for all status changes.
## Examples
-Check the `health/health.d/` directory for all alarms shipped with netdata.
+Check the `health/health.d/` directory for all alarms shipped with Netdata.
Here are a few examples:
@@ -526,7 +528,7 @@ template: apache_last_collected_secs
crit: $this > (10 * $update_every)
```
-The above checks that netdata is able to collect data from apache. In detail:
+The above checks that Netdata is able to collect data from apache. In detail:
```
template: apache_last_collected_secs
@@ -547,10 +549,10 @@ The above applies the **template** to all charts that have `context = apache.req
calc: $now - $last_collected_t
```
-- `$now` is a standard variable that resolves to the current timestamp.
+- `$now` is a standard variable that resolves to the current timestamp.
-- `$last_collected_t` is the last data collection timestamp of the chart.
- So this calculation gives the number of seconds passed since the last data collection.
+- `$last_collected_t` is the last data collection timestamp of the chart.
+ So this calculation gives the number of seconds passed since the last data collection.
```
every: 10s
@@ -565,8 +567,8 @@ The alarm will be evaluated every 10 seconds.
If these result in non-zero or true, they trigger the alarm.
-- `$this` refers to the value of this alarm (i.e. the result of the `calc` line.
- We could also use `$apache_last_collected_secs`.
+- `$this` refers to the value of this alarm (i.e. the result of the `calc` line).
+  We could also use `$apache_last_collected_secs`.
`$update_every` is the update frequency of the chart, in seconds.
@@ -653,12 +655,12 @@ The `lookup` line will calculate the sum of the all dropped packets in the last
The `crit` line will issue a critical alarm if even a single packet has been dropped.
Note that the drops chart does not exist if a network interface has never dropped a single packet.
-When netdata detects a dropped packet, it will add the chart and it will automatically attach this
+When Netdata detects a dropped packet, it will add the chart and it will automatically attach this
alarm to it.
## Troubleshooting
-You can compile netdata with [debugging](../daemon#debugging) and then set in `netdata.conf`:
+You can compile Netdata with [debugging](../daemon#debugging) and then set in `netdata.conf`:
```
[global]
@@ -671,10 +673,10 @@ Important: this will generate a lot of output in debug.log.
You can find the context of charts by looking up the chart in either
`http://your.netdata:19999/netdata.conf` or `http://your.netdata:19999/api/v1/charts`.
-You can find how netdata interpreted the expressions by examining the alarm at `http://your.netdata:19999/api/v1/alarms?all`. For each expression, netdata will return the expression as given in its config file, and the same expression with additional parentheses added to indicate the evaluation flow of the expression.
+You can find how Netdata interpreted the expressions by examining the alarm at `http://your.netdata:19999/api/v1/alarms?all`. For each expression, Netdata will return the expression as given in its config file, and the same expression with additional parentheses added to indicate the evaluation flow of the expression.
## Disabling health checks or silencing notifications at runtime
It's currently not possible to schedule notifications from within the alarm template. For those scenarios where you need to temporarily disable notifications (for instance when running backups triggers a disk alert) you can disable or silence notifications at runtime. The health checks can be controlled at runtime via the [health management api](../web/api/health/#health-management-api).
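
As a sketch of what that looks like (the commands and the authorization token are documented in the linked health management api page; `Mytoken` is a placeholder):

```sh
# silence notifications for all alarms at runtime
curl "http://localhost:19999/api/v1/manage/health?cmd=SILENCE ALL" -H "X-Auth-Token: Mytoken"

# undo it
curl "http://localhost:19999/api/v1/manage/health?cmd=RESET" -H "X-Auth-Token: Mytoken"
```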
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fhealth%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fhealth%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/health/health.c b/health/health.c
index 55bd7284..1ee1a372 100644
--- a/health/health.c
+++ b/health/health.c
@@ -235,15 +235,15 @@ static inline void health_alarm_execute(RRDHOST *host, ALARM_ENTRY *ae) {
const char *exec = (ae->exec) ? ae->exec : host->health_default_exec;
const char *recipient = (ae->recipient) ? ae->recipient : host->health_default_recipient;
- int n_warn=0, n_crit=0;
- RRDCALC *rc;
+ int n_warn=0, n_crit=0;
+ RRDCALC *rc;
EVAL_EXPRESSION *expr=NULL;
- for(rc = host->alarms; rc ; rc = rc->next) {
- if(unlikely(!rc->rrdset || !rc->rrdset->last_collected_time.tv_sec))
- continue;
+ for(rc = host->alarms; rc ; rc = rc->next) {
+ if(unlikely(!rc->rrdset || !rc->rrdset->last_collected_time.tv_sec))
+ continue;
- if(unlikely(rc->status == RRDCALC_STATUS_WARNING)) {
+ if(unlikely(rc->status == RRDCALC_STATUS_WARNING)) {
n_warn++;
if (ae->alarm_id == rc->id)
expr=rc->warning;
@@ -254,8 +254,8 @@ static inline void health_alarm_execute(RRDHOST *host, ALARM_ENTRY *ae) {
} else if (unlikely(rc->status == RRDCALC_STATUS_CLEAR)) {
if (ae->alarm_id == rc->id)
expr=rc->warning;
- }
- }
+ }
+ }
snprintfz(command_to_run, ALARM_EXEC_COMMAND_LENGTH, "exec %s '%s' '%s' '%u' '%u' '%u' '%lu' '%s' '%s' '%s' '%s' '%s' '" CALCULATED_NUMBER_FORMAT_ZERO "' '" CALCULATED_NUMBER_FORMAT_ZERO "' '%s' '%u' '%u' '%s' '%s' '%s' '%s' '%s' '%s' '%d' '%d'",
exec,
@@ -467,7 +467,7 @@ static void health_main_cleanup(void *ptr) {
}
SILENCE_TYPE check_silenced(RRDCALC *rc, char* host, SILENCERS *silencers) {
- SILENCER *s;
+ SILENCER *s;
debug(D_HEALTH, "Checking if alarm was silenced via the command API. Alarm info name:%s context:%s chart:%s host:%s family:%s",
rc->name, (rc->rrdset)?rc->rrdset->context:"", rc->chart, host, (rc->rrdset)?rc->rrdset->family:"");
@@ -509,32 +509,32 @@ SILENCE_TYPE check_silenced(RRDCALC *rc, char* host, SILENCERS *silencers) {
* @return It returns 1 in case rrdcalc_flags is DISABLED or 0 otherwise
*/
int update_disabled_silenced(RRDHOST *host, RRDCALC *rc) {
- uint32_t rrdcalc_flags_old = rc->rrdcalc_flags;
- // Clear the flags
- rc->rrdcalc_flags &= ~(RRDCALC_FLAG_DISABLED | RRDCALC_FLAG_SILENCED);
- if (unlikely(silencers->all_alarms)) {
- if (silencers->stype == STYPE_DISABLE_ALARMS) rc->rrdcalc_flags |= RRDCALC_FLAG_DISABLED;
- else if (silencers->stype == STYPE_SILENCE_NOTIFICATIONS) rc->rrdcalc_flags |= RRDCALC_FLAG_SILENCED;
- } else {
- SILENCE_TYPE st = check_silenced(rc, host->hostname, silencers);
- if (st == STYPE_DISABLE_ALARMS) rc->rrdcalc_flags |= RRDCALC_FLAG_DISABLED;
- else if (st == STYPE_SILENCE_NOTIFICATIONS) rc->rrdcalc_flags |= RRDCALC_FLAG_SILENCED;
- }
-
- if (rrdcalc_flags_old != rc->rrdcalc_flags) {
- info("Alarm silencing changed for host '%s' alarm '%s': Disabled %s->%s Silenced %s->%s",
- host->hostname,
- rc->name,
- (rrdcalc_flags_old & RRDCALC_FLAG_DISABLED)?"true":"false",
- (rc->rrdcalc_flags & RRDCALC_FLAG_DISABLED)?"true":"false",
- (rrdcalc_flags_old & RRDCALC_FLAG_SILENCED)?"true":"false",
- (rc->rrdcalc_flags & RRDCALC_FLAG_SILENCED)?"true":"false"
- );
- }
- if (rc->rrdcalc_flags & RRDCALC_FLAG_DISABLED)
- return 1;
- else
- return 0;
+ uint32_t rrdcalc_flags_old = rc->rrdcalc_flags;
+ // Clear the flags
+ rc->rrdcalc_flags &= ~(RRDCALC_FLAG_DISABLED | RRDCALC_FLAG_SILENCED);
+ if (unlikely(silencers->all_alarms)) {
+ if (silencers->stype == STYPE_DISABLE_ALARMS) rc->rrdcalc_flags |= RRDCALC_FLAG_DISABLED;
+ else if (silencers->stype == STYPE_SILENCE_NOTIFICATIONS) rc->rrdcalc_flags |= RRDCALC_FLAG_SILENCED;
+ } else {
+ SILENCE_TYPE st = check_silenced(rc, host->hostname, silencers);
+ if (st == STYPE_DISABLE_ALARMS) rc->rrdcalc_flags |= RRDCALC_FLAG_DISABLED;
+ else if (st == STYPE_SILENCE_NOTIFICATIONS) rc->rrdcalc_flags |= RRDCALC_FLAG_SILENCED;
+ }
+
+ if (rrdcalc_flags_old != rc->rrdcalc_flags) {
+ info("Alarm silencing changed for host '%s' alarm '%s': Disabled %s->%s Silenced %s->%s",
+ host->hostname,
+ rc->name,
+ (rrdcalc_flags_old & RRDCALC_FLAG_DISABLED)?"true":"false",
+ (rc->rrdcalc_flags & RRDCALC_FLAG_DISABLED)?"true":"false",
+ (rrdcalc_flags_old & RRDCALC_FLAG_SILENCED)?"true":"false",
+ (rc->rrdcalc_flags & RRDCALC_FLAG_SILENCED)?"true":"false"
+ );
+ }
+ if (rc->rrdcalc_flags & RRDCALC_FLAG_DISABLED)
+ return 1;
+ else
+ return 0;
}
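
A minimal standalone sketch of the clear-then-set flag pattern used above (names simplified, not part of the patch):

#include <stdint.h>

#define FLAG_DISABLED  (1u << 0)
#define FLAG_SILENCED  (1u << 1)

/* clear both flags first, then set only the ones that apply now,
 * so state from a previous health iteration can never linger */
static uint32_t refresh_flags(uint32_t flags, int disable, int silence) {
    flags &= ~(FLAG_DISABLED | FLAG_SILENCED);
    if (disable)      flags |= FLAG_DISABLED;
    else if (silence) flags |= FLAG_SILENCED;
    return flags;
}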
/**
@@ -557,290 +557,290 @@ void *health_main(void *ptr) {
unsigned int loop = 0;
while(!netdata_exit) {
- loop++;
- debug(D_HEALTH, "Health monitoring iteration no %u started", loop);
+ loop++;
+ debug(D_HEALTH, "Health monitoring iteration no %u started", loop);
+
+ int runnable = 0, apply_hibernation_delay = 0;
+ time_t next_run = now + min_run_every;
+ RRDCALC *rc;
+
+ if (unlikely(check_if_resumed_from_suspention())) {
+ apply_hibernation_delay = 1;
+
+ info("Postponing alarm checks for %ld seconds, because it seems that the system was just resumed from suspension.",
+ hibernation_delay
+ );
+ }
+
+ if (unlikely(silencers->all_alarms && silencers->stype == STYPE_DISABLE_ALARMS)) {
+ static int logged=0;
+ if (!logged) {
+ info("Skipping health checks, because all alarms are disabled via a %s command.",
+ HEALTH_CMDAPI_CMD_DISABLEALL);
+ logged = 1;
+ }
+ }
+
+ rrd_rdlock();
+
+ RRDHOST *host;
+ rrdhost_foreach_read(host) {
+ if (unlikely(!host->health_enabled))
+ continue;
+
+ if (unlikely(apply_hibernation_delay)) {
+
+ info("Postponing health checks for %ld seconds, on host '%s'.", hibernation_delay, host->hostname
+ );
+
+ host->health_delay_up_to = now + hibernation_delay;
+ }
+
+ if (unlikely(host->health_delay_up_to)) {
+ if (unlikely(now < host->health_delay_up_to))
+ continue;
+
+ info("Resuming health checks on host '%s'.", host->hostname);
+ host->health_delay_up_to = 0;
+ }
+
+ rrdhost_rdlock(host);
+
+ // the first loop is to lookup values from the db
+ for (rc = host->alarms; rc; rc = rc->next) {
+
+ if (update_disabled_silenced(host, rc))
+ continue;
+
+ if (unlikely(!rrdcalc_isrunnable(rc, now, &next_run))) {
+ if (unlikely(rc->rrdcalc_flags & RRDCALC_FLAG_RUNNABLE))
+ rc->rrdcalc_flags &= ~RRDCALC_FLAG_RUNNABLE;
+ continue;
+ }
+
+ runnable++;
+ rc->old_value = rc->value;
+ rc->rrdcalc_flags |= RRDCALC_FLAG_RUNNABLE;
+
+ // ------------------------------------------------------------
+ // if there is database lookup, do it
+
+ if (unlikely(RRDCALC_HAS_DB_LOOKUP(rc))) {
+ /* time_t old_db_timestamp = rc->db_before; */
+ int value_is_null = 0;
+
+ int ret = rrdset2value_api_v1(rc->rrdset, NULL, &rc->value, rc->dimensions, 1, rc->after,
+ rc->before, rc->group, 0, rc->options, &rc->db_after,
+ &rc->db_before, &value_is_null
+ );
+
+ if (unlikely(ret != 200)) {
+ // database lookup failed
+ rc->value = NAN;
+ rc->rrdcalc_flags |= RRDCALC_FLAG_DB_ERROR;
+
+ debug(D_HEALTH, "Health on host '%s', alarm '%s.%s': database lookup returned error %d",
+ host->hostname, rc->chart ? rc->chart : "NOCHART", rc->name, ret
+ );
+ } else
+ rc->rrdcalc_flags &= ~RRDCALC_FLAG_DB_ERROR;
+
+ /* - RRDCALC_FLAG_DB_STALE not currently used
+ if (unlikely(old_db_timestamp == rc->db_before)) {
+ // database is stale
+
+ debug(D_HEALTH, "Health on host '%s', alarm '%s.%s': database is stale", host->hostname, rc->chart?rc->chart:"NOCHART", rc->name);
+
+ if (unlikely(!(rc->rrdcalc_flags & RRDCALC_FLAG_DB_STALE))) {
+ rc->rrdcalc_flags |= RRDCALC_FLAG_DB_STALE;
+ error("Health on host '%s', alarm '%s.%s': database is stale", host->hostname, rc->chart?rc->chart:"NOCHART", rc->name);
+ }
+ }
+ else if (unlikely(rc->rrdcalc_flags & RRDCALC_FLAG_DB_STALE))
+ rc->rrdcalc_flags &= ~RRDCALC_FLAG_DB_STALE;
+ */
+
+ if (unlikely(value_is_null)) {
+ // collected value is null
+ rc->value = NAN;
+ rc->rrdcalc_flags |= RRDCALC_FLAG_DB_NAN;
+
+ debug(D_HEALTH,
+ "Health on host '%s', alarm '%s.%s': database lookup returned empty value (possibly value is not collected yet)",
+ host->hostname, rc->chart ? rc->chart : "NOCHART", rc->name
+ );
+ } else
+ rc->rrdcalc_flags &= ~RRDCALC_FLAG_DB_NAN;
+
+ debug(D_HEALTH, "Health on host '%s', alarm '%s.%s': database lookup gave value "
+ CALCULATED_NUMBER_FORMAT, host->hostname, rc->chart ? rc->chart : "NOCHART", rc->name,
+ rc->value
+ );
+ }
+
+ // ------------------------------------------------------------
+ // if there is calculation expression, run it
+
+ if (unlikely(rc->calculation)) {
+ if (unlikely(!expression_evaluate(rc->calculation))) {
+ // calculation failed
+ rc->value = NAN;
+ rc->rrdcalc_flags |= RRDCALC_FLAG_CALC_ERROR;
+
+ debug(D_HEALTH, "Health on host '%s', alarm '%s.%s': expression '%s' failed: %s",
+ host->hostname, rc->chart ? rc->chart : "NOCHART", rc->name,
+ rc->calculation->parsed_as, buffer_tostring(rc->calculation->error_msg)
+ );
+ } else {
+ rc->rrdcalc_flags &= ~RRDCALC_FLAG_CALC_ERROR;
- int runnable = 0, apply_hibernation_delay = 0;
- time_t next_run = now + min_run_every;
- RRDCALC *rc;
-
- if (unlikely(check_if_resumed_from_suspention())) {
- apply_hibernation_delay = 1;
-
- info("Postponing alarm checks for %ld seconds, because it seems that the system was just resumed from suspension.",
- hibernation_delay
- );
- }
-
- if (unlikely(silencers->all_alarms && silencers->stype == STYPE_DISABLE_ALARMS)) {
- static int logged=0;
- if (!logged) {
- info("Skipping health checks, because all alarms are disabled via a %s command.",
- HEALTH_CMDAPI_CMD_DISABLEALL);
- logged = 1;
- }
- }
-
- rrd_rdlock();
-
- RRDHOST *host;
- rrdhost_foreach_read(host) {
- if (unlikely(!host->health_enabled))
- continue;
-
- if (unlikely(apply_hibernation_delay)) {
-
- info("Postponing health checks for %ld seconds, on host '%s'.", hibernation_delay, host->hostname
- );
-
- host->health_delay_up_to = now + hibernation_delay;
- }
-
- if (unlikely(host->health_delay_up_to)) {
- if (unlikely(now < host->health_delay_up_to))
- continue;
-
- info("Resuming health checks on host '%s'.", host->hostname);
- host->health_delay_up_to = 0;
- }
-
- rrdhost_rdlock(host);
-
- // the first loop is to lookup values from the db
- for (rc = host->alarms; rc; rc = rc->next) {
-
- if (update_disabled_silenced(host, rc))
- continue;
-
- if (unlikely(!rrdcalc_isrunnable(rc, now, &next_run))) {
- if (unlikely(rc->rrdcalc_flags & RRDCALC_FLAG_RUNNABLE))
- rc->rrdcalc_flags &= ~RRDCALC_FLAG_RUNNABLE;
- continue;
- }
-
- runnable++;
- rc->old_value = rc->value;
- rc->rrdcalc_flags |= RRDCALC_FLAG_RUNNABLE;
-
- // ------------------------------------------------------------
- // if there is database lookup, do it
-
- if (unlikely(RRDCALC_HAS_DB_LOOKUP(rc))) {
- /* time_t old_db_timestamp = rc->db_before; */
- int value_is_null = 0;
-
- int ret = rrdset2value_api_v1(rc->rrdset, NULL, &rc->value, rc->dimensions, 1, rc->after,
- rc->before, rc->group, 0, rc->options, &rc->db_after,
- &rc->db_before, &value_is_null
- );
-
- if (unlikely(ret != 200)) {
- // database lookup failed
- rc->value = NAN;
- rc->rrdcalc_flags |= RRDCALC_FLAG_DB_ERROR;
-
- debug(D_HEALTH, "Health on host '%s', alarm '%s.%s': database lookup returned error %d",
- host->hostname, rc->chart ? rc->chart : "NOCHART", rc->name, ret
- );
- } else
- rc->rrdcalc_flags &= ~RRDCALC_FLAG_DB_ERROR;
-
- /* - RRDCALC_FLAG_DB_STALE not currently used
- if (unlikely(old_db_timestamp == rc->db_before)) {
- // database is stale
-
- debug(D_HEALTH, "Health on host '%s', alarm '%s.%s': database is stale", host->hostname, rc->chart?rc->chart:"NOCHART", rc->name);
-
- if (unlikely(!(rc->rrdcalc_flags & RRDCALC_FLAG_DB_STALE))) {
- rc->rrdcalc_flags |= RRDCALC_FLAG_DB_STALE;
- error("Health on host '%s', alarm '%s.%s': database is stale", host->hostname, rc->chart?rc->chart:"NOCHART", rc->name);
- }
- }
- else if (unlikely(rc->rrdcalc_flags & RRDCALC_FLAG_DB_STALE))
- rc->rrdcalc_flags &= ~RRDCALC_FLAG_DB_STALE;
- */
-
- if (unlikely(value_is_null)) {
- // collected value is null
- rc->value = NAN;
- rc->rrdcalc_flags |= RRDCALC_FLAG_DB_NAN;
-
- debug(D_HEALTH,
- "Health on host '%s', alarm '%s.%s': database lookup returned empty value (possibly value is not collected yet)",
- host->hostname, rc->chart ? rc->chart : "NOCHART", rc->name
- );
- } else
- rc->rrdcalc_flags &= ~RRDCALC_FLAG_DB_NAN;
-
- debug(D_HEALTH, "Health on host '%s', alarm '%s.%s': database lookup gave value "
- CALCULATED_NUMBER_FORMAT, host->hostname, rc->chart ? rc->chart : "NOCHART", rc->name,
- rc->value
- );
- }
-
- // ------------------------------------------------------------
- // if there is calculation expression, run it
-
- if (unlikely(rc->calculation)) {
- if (unlikely(!expression_evaluate(rc->calculation))) {
- // calculation failed
- rc->value = NAN;
- rc->rrdcalc_flags |= RRDCALC_FLAG_CALC_ERROR;
-
- debug(D_HEALTH, "Health on host '%s', alarm '%s.%s': expression '%s' failed: %s",
- host->hostname, rc->chart ? rc->chart : "NOCHART", rc->name,
- rc->calculation->parsed_as, buffer_tostring(rc->calculation->error_msg)
- );
- } else {
- rc->rrdcalc_flags &= ~RRDCALC_FLAG_CALC_ERROR;
-
- debug(D_HEALTH, "Health on host '%s', alarm '%s.%s': expression '%s' gave value "
- CALCULATED_NUMBER_FORMAT
- ": %s (source: %s)", host->hostname, rc->chart ? rc->chart : "NOCHART", rc->name,
- rc->calculation->parsed_as, rc->calculation->result,
- buffer_tostring(rc->calculation->error_msg), rc->source
- );
-
- rc->value = rc->calculation->result;
-
- if (rc->local) rc->local->last_updated = now;
- if (rc->family) rc->family->last_updated = now;
- if (rc->hostid) rc->hostid->last_updated = now;
- if (rc->hostname) rc->hostname->last_updated = now;
- }
- }
- }
-
- rrdhost_unlock(host);
-
- if (unlikely(runnable && !netdata_exit)) {
- rrdhost_rdlock(host);
-
- for (rc = host->alarms; rc; rc = rc->next) {
- if (unlikely(!(rc->rrdcalc_flags & RRDCALC_FLAG_RUNNABLE)))
- continue;
-
- if (rc->rrdcalc_flags & RRDCALC_FLAG_DISABLED) {
- continue;
- }
- RRDCALC_STATUS warning_status = RRDCALC_STATUS_UNDEFINED;
- RRDCALC_STATUS critical_status = RRDCALC_STATUS_UNDEFINED;
-
- // --------------------------------------------------------
- // check the warning expression
-
- if (likely(rc->warning)) {
- if (unlikely(!expression_evaluate(rc->warning))) {
- // calculation failed
- rc->rrdcalc_flags |= RRDCALC_FLAG_WARN_ERROR;
-
- debug(D_HEALTH,
- "Health on host '%s', alarm '%s.%s': warning expression failed with error: %s",
- host->hostname, rc->chart ? rc->chart : "NOCHART", rc->name,
- buffer_tostring(rc->warning->error_msg)
- );
- } else {
- rc->rrdcalc_flags &= ~RRDCALC_FLAG_WARN_ERROR;
- debug(D_HEALTH, "Health on host '%s', alarm '%s.%s': warning expression gave value "
- CALCULATED_NUMBER_FORMAT
- ": %s (source: %s)", host->hostname, rc->chart ? rc->chart : "NOCHART",
- rc->name, rc->warning->result, buffer_tostring(rc->warning->error_msg), rc->source
- );
- warning_status = rrdcalc_value2status(rc->warning->result);
- }
- }
-
- // --------------------------------------------------------
- // check the critical expression
-
- if (likely(rc->critical)) {
- if (unlikely(!expression_evaluate(rc->critical))) {
- // calculation failed
- rc->rrdcalc_flags |= RRDCALC_FLAG_CRIT_ERROR;
-
- debug(D_HEALTH,
- "Health on host '%s', alarm '%s.%s': critical expression failed with error: %s",
- host->hostname, rc->chart ? rc->chart : "NOCHART", rc->name,
- buffer_tostring(rc->critical->error_msg)
- );
- } else {
- rc->rrdcalc_flags &= ~RRDCALC_FLAG_CRIT_ERROR;
- debug(D_HEALTH, "Health on host '%s', alarm '%s.%s': critical expression gave value "
- CALCULATED_NUMBER_FORMAT
- ": %s (source: %s)", host->hostname, rc->chart ? rc->chart : "NOCHART",
- rc->name, rc->critical->result, buffer_tostring(rc->critical->error_msg),
- rc->source
- );
- critical_status = rrdcalc_value2status(rc->critical->result);
- }
- }
-
- // --------------------------------------------------------
- // decide the final alarm status
-
- RRDCALC_STATUS status = RRDCALC_STATUS_UNDEFINED;
-
- switch (warning_status) {
- case RRDCALC_STATUS_CLEAR:
- status = RRDCALC_STATUS_CLEAR;
- break;
-
- case RRDCALC_STATUS_RAISED:
- status = RRDCALC_STATUS_WARNING;
- break;
-
- default:
- break;
- }
-
- switch (critical_status) {
- case RRDCALC_STATUS_CLEAR:
- if (status == RRDCALC_STATUS_UNDEFINED)
- status = RRDCALC_STATUS_CLEAR;
- break;
-
- case RRDCALC_STATUS_RAISED:
- status = RRDCALC_STATUS_CRITICAL;
- break;
-
- default:
- break;
- }
-
- // --------------------------------------------------------
- // check if the new status and the old differ
-
- if (status != rc->status) {
- int delay = 0;
-
- // apply trigger hysteresis
-
- if (now > rc->delay_up_to_timestamp) {
- rc->delay_up_current = rc->delay_up_duration;
- rc->delay_down_current = rc->delay_down_duration;
- rc->delay_last = 0;
- rc->delay_up_to_timestamp = 0;
- } else {
- rc->delay_up_current = (int) (rc->delay_up_current * rc->delay_multiplier);
- if (rc->delay_up_current > rc->delay_max_duration)
- rc->delay_up_current = rc->delay_max_duration;
-
- rc->delay_down_current = (int) (rc->delay_down_current * rc->delay_multiplier);
- if (rc->delay_down_current > rc->delay_max_duration)
- rc->delay_down_current = rc->delay_max_duration;
- }
-
- if (status > rc->status)
- delay = rc->delay_up_current;
- else
- delay = rc->delay_down_current;
-
- // COMMENTED: because we do need to send raising alarms
- // if(now + delay < rc->delay_up_to_timestamp)
- // delay = (int)(rc->delay_up_to_timestamp - now);
-
- rc->delay_last = delay;
- rc->delay_up_to_timestamp = now + delay;
+ debug(D_HEALTH, "Health on host '%s', alarm '%s.%s': expression '%s' gave value "
+ CALCULATED_NUMBER_FORMAT
+ ": %s (source: %s)", host->hostname, rc->chart ? rc->chart : "NOCHART", rc->name,
+ rc->calculation->parsed_as, rc->calculation->result,
+ buffer_tostring(rc->calculation->error_msg), rc->source
+ );
+
+ rc->value = rc->calculation->result;
+
+ if (rc->local) rc->local->last_updated = now;
+ if (rc->family) rc->family->last_updated = now;
+ if (rc->hostid) rc->hostid->last_updated = now;
+ if (rc->hostname) rc->hostname->last_updated = now;
+ }
+ }
+ }
+
+ rrdhost_unlock(host);
+
+ if (unlikely(runnable && !netdata_exit)) {
+ rrdhost_rdlock(host);
+
+ for (rc = host->alarms; rc; rc = rc->next) {
+ if (unlikely(!(rc->rrdcalc_flags & RRDCALC_FLAG_RUNNABLE)))
+ continue;
+
+ if (rc->rrdcalc_flags & RRDCALC_FLAG_DISABLED) {
+ continue;
+ }
+ RRDCALC_STATUS warning_status = RRDCALC_STATUS_UNDEFINED;
+ RRDCALC_STATUS critical_status = RRDCALC_STATUS_UNDEFINED;
+
+ // --------------------------------------------------------
+ // check the warning expression
+
+ if (likely(rc->warning)) {
+ if (unlikely(!expression_evaluate(rc->warning))) {
+ // calculation failed
+ rc->rrdcalc_flags |= RRDCALC_FLAG_WARN_ERROR;
+
+ debug(D_HEALTH,
+ "Health on host '%s', alarm '%s.%s': warning expression failed with error: %s",
+ host->hostname, rc->chart ? rc->chart : "NOCHART", rc->name,
+ buffer_tostring(rc->warning->error_msg)
+ );
+ } else {
+ rc->rrdcalc_flags &= ~RRDCALC_FLAG_WARN_ERROR;
+ debug(D_HEALTH, "Health on host '%s', alarm '%s.%s': warning expression gave value "
+ CALCULATED_NUMBER_FORMAT
+ ": %s (source: %s)", host->hostname, rc->chart ? rc->chart : "NOCHART",
+ rc->name, rc->warning->result, buffer_tostring(rc->warning->error_msg), rc->source
+ );
+ warning_status = rrdcalc_value2status(rc->warning->result);
+ }
+ }
+
+ // --------------------------------------------------------
+ // check the critical expression
+
+ if (likely(rc->critical)) {
+ if (unlikely(!expression_evaluate(rc->critical))) {
+ // calculation failed
+ rc->rrdcalc_flags |= RRDCALC_FLAG_CRIT_ERROR;
+
+ debug(D_HEALTH,
+ "Health on host '%s', alarm '%s.%s': critical expression failed with error: %s",
+ host->hostname, rc->chart ? rc->chart : "NOCHART", rc->name,
+ buffer_tostring(rc->critical->error_msg)
+ );
+ } else {
+ rc->rrdcalc_flags &= ~RRDCALC_FLAG_CRIT_ERROR;
+ debug(D_HEALTH, "Health on host '%s', alarm '%s.%s': critical expression gave value "
+ CALCULATED_NUMBER_FORMAT
+ ": %s (source: %s)", host->hostname, rc->chart ? rc->chart : "NOCHART",
+ rc->name, rc->critical->result, buffer_tostring(rc->critical->error_msg),
+ rc->source
+ );
+ critical_status = rrdcalc_value2status(rc->critical->result);
+ }
+ }
+
+ // --------------------------------------------------------
+ // decide the final alarm status
+
+ RRDCALC_STATUS status = RRDCALC_STATUS_UNDEFINED;
+
+ switch (warning_status) {
+ case RRDCALC_STATUS_CLEAR:
+ status = RRDCALC_STATUS_CLEAR;
+ break;
+
+ case RRDCALC_STATUS_RAISED:
+ status = RRDCALC_STATUS_WARNING;
+ break;
+
+ default:
+ break;
+ }
+
+ switch (critical_status) {
+ case RRDCALC_STATUS_CLEAR:
+ if (status == RRDCALC_STATUS_UNDEFINED)
+ status = RRDCALC_STATUS_CLEAR;
+ break;
+
+ case RRDCALC_STATUS_RAISED:
+ status = RRDCALC_STATUS_CRITICAL;
+ break;
+
+ default:
+ break;
+ }
+
+ // --------------------------------------------------------
+ // check if the new status and the old differ
+
+ if (status != rc->status) {
+ int delay = 0;
+
+ // apply trigger hysteresis
+
+ if (now > rc->delay_up_to_timestamp) {
+ rc->delay_up_current = rc->delay_up_duration;
+ rc->delay_down_current = rc->delay_down_duration;
+ rc->delay_last = 0;
+ rc->delay_up_to_timestamp = 0;
+ } else {
+ rc->delay_up_current = (int) (rc->delay_up_current * rc->delay_multiplier);
+ if (rc->delay_up_current > rc->delay_max_duration)
+ rc->delay_up_current = rc->delay_max_duration;
+
+ rc->delay_down_current = (int) (rc->delay_down_current * rc->delay_multiplier);
+ if (rc->delay_down_current > rc->delay_max_duration)
+ rc->delay_down_current = rc->delay_max_duration;
+ }
+
+ if (status > rc->status)
+ delay = rc->delay_up_current;
+ else
+ delay = rc->delay_down_current;
+
+ // COMMENTED: because we do need to send raising alarms
+ // if(now + delay < rc->delay_up_to_timestamp)
+ // delay = (int)(rc->delay_up_to_timestamp - now);
+
+ rc->delay_last = delay;
+ rc->delay_up_to_timestamp = now + delay;
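                        /* worked example with assumed numbers (not from this patch):
                         * with delay_up_duration = 60s, delay_multiplier = 1.5 and
                         * delay_max_duration = 300s, status changes that keep landing
                         * before delay_up_to_timestamp escalate the applied delay
                         * 60 -> 90 -> 135 -> 202 -> 300 (capped); it resets to 60
                         * once 'now' passes delay_up_to_timestamp. */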
if(likely(!rrdcalc_isrepeating(rc))) {
ALARM_ENTRY *ae = health_create_alarm_entry(
@@ -858,14 +858,14 @@ void *health_main(void *ptr) {
rc->last_status_change = now;
rc->old_status = rc->status;
rc->status = status;
- }
+ }
- rc->last_updated = now;
- rc->next_update = now + rc->update_every;
+ rc->last_updated = now;
+ rc->next_update = now + rc->update_every;
- if (next_run > rc->next_update)
- next_run = rc->next_update;
- }
+ if (next_run > rc->next_update)
+ next_run = rc->next_update;
+ }
// process repeating alarms
RRDCALC *rc;
@@ -896,22 +896,22 @@ void *health_main(void *ptr) {
}
}
- rrdhost_unlock(host);
- }
+ rrdhost_unlock(host);
+ }
- if (unlikely(netdata_exit))
- break;
+ if (unlikely(netdata_exit))
+ break;
- // execute notifications
- // and cleanup
- health_alarm_log_process(host);
+ // execute notifications
+ // and cleanup
+ health_alarm_log_process(host);
- if (unlikely(netdata_exit))
- break;
+ if (unlikely(netdata_exit))
+ break;
- } /* rrdhost_foreach */
+ } /* rrdhost_foreach */
- rrd_unlock();
+ rrd_unlock();
if(unlikely(netdata_exit))
diff --git a/health/health.d/vsphere.conf b/health/health.d/vsphere.conf
new file mode 100644
index 00000000..d8b2be19
--- /dev/null
+++ b/health/health.d/vsphere.conf
@@ -0,0 +1,157 @@
+
+# you can disable an alarm notification by setting the 'to' line to: silent
+
+# -----------------------------------------------VM Specific------------------------------------------------------------
+# Memory
+
+template: vsphere_vm_mem_usage
+ on: vsphere.vm_mem_usage_percentage
+ hosts: *
+ calc: $used
+ units: %
+ every: 20s
+ warn: $this > (($status >= $WARNING) ? (80) : (90))
+ crit: $this > (($status == $CRITICAL) ? (90) : (98))
+ delay: down 15m multiplier 1.5 max 1h
+ info: used RAM
+
+# -----------------------------------------------HOST Specific----------------------------------------------------------
+# Memory
+
+template: vsphere_host_mem_usage
+ on: vsphere.host_mem_usage_percentage
+ hosts: *
+ calc: $used
+ units: %
+ every: 20s
+ warn: $this > (($status >= $WARNING) ? (80) : (90))
+ crit: $this > (($status == $CRITICAL) ? (90) : (98))
+ delay: down 15m multiplier 1.5 max 1h
+ info: used RAM
+
+# Network errors
+
+template: vsphere_inbound_packets_errors
+ on: vsphere.net_errors_total
+ hosts: *
+families: *
+ lookup: sum -10m unaligned absolute match-names of rx
+ units: packets
+ every: 1m
+ warn: $this >= 5
+ delay: down 1h multiplier 1.5 max 2h
+ info: interface inbound errors in the last 10 minutes
+ to: sysadmin
+
+template: vsphere_outbound_packets_errors
+ on: vsphere.net_errors_total
+ hosts: *
+families: *
+ lookup: sum -10m unaligned absolute match-names of tx
+ units: packets
+ every: 1m
+ warn: $this >= 5
+ delay: down 1h multiplier 1.5 max 2h
+ info: interface outbound errors in the last 10 minutes
+ to: sysadmin
+
+# Network errors ratio
+
+template: vsphere_inbound_packets_errors_ratio
+ on: vsphere.net_packets_total
+ hosts: *
+families: *
+ lookup: sum -10m unaligned absolute match-names of rx
+ calc: (($vsphere_inbound_packets_errors != nan AND $this > 0) ? ($vsphere_inbound_packets_errors * 100 / $this) : (0))
+ units: %
+ every: 1m
+ warn: $this >= 0.1
+ crit: $this >= 2
+ delay: down 1h multiplier 1.5 max 2h
+ info: the ratio of inbound errors vs the total number of received packets of the network interface, during the last 10 minutes
+ to: sysadmin
+
+template: vsphere_outbound_packets_errors_ratio
+ on: vsphere.net_packets_total
+ hosts: *
+families: *
+ lookup: sum -10m unaligned absolute match-names of tx
+ calc: (($vsphere_outbound_packets_errors != nan AND $this > 0) ? ($vsphere_outbound_packets_errors * 100 / $this) : (0))
+ units: %
+ every: 1m
+ warn: $this >= 0.1
+ crit: $this >= 2
+ delay: down 1h multiplier 1.5 max 2h
+ info: the ratio of outbound errors vs the total number of sent packets of the network interface, during the last 10 minutes
+ to: sysadmin
+
+# -----------------------------------------------Common-------------------------------------------------------------------
+# CPU
+
+template: vsphere_cpu_usage
+ on: vsphere.cpu_usage_total
+ hosts: *
+ lookup: average -10m unaligned match-names of used
+ units: %
+ every: 20s
+ warn: $this > (($status >= $WARNING) ? (75) : (85))
+ crit: $this > (($status == $CRITICAL) ? (85) : (95))
+ delay: down 15m multiplier 1.5 max 1h
+ info: cpu utilization for the last 10 minutes
+ to: sysadmin
+
+# Network drops
+
+template: vsphere_inbound_packets_dropped
+ on: vsphere.net_drops_total
+ hosts: *
+families: *
+ lookup: sum -10m unaligned absolute match-names of rx
+ units: packets
+ every: 1m
+ warn: $this >= 5
+ delay: down 1h multiplier 1.5 max 2h
+ info: interface inbound dropped packets in the last 10 minutes
+ to: sysadmin
+
+template: vsphere_outbound_packets_dropped
+ on: vsphere.net_drops_total
+ hosts: *
+families: *
+ lookup: sum -10m unaligned absolute match-names of tx
+ units: packets
+ every: 1m
+ warn: $this >= 5
+ delay: down 1h multiplier 1.5 max 2h
+ info: interface outbound dropped packets in the last 10 minutes
+ to: sysadmin
+
+# Network drops ratio
+
+template: vsphere_inbound_packets_dropped_ratio
+ on: vsphere.net_packets_total
+ hosts: *
+families: *
+ lookup: sum -10m unaligned absolute match-names of rx
+ calc: (($vsphere_inbound_packets_dropped != nan AND $this > 0) ? ($vsphere_inbound_packets_dropped * 100 / $this) : (0))
+ units: %
+ every: 1m
+ warn: $this >= 0.1
+ crit: $this >= 2
+ delay: down 1h multiplier 1.5 max 2h
+ info: the ratio of inbound dropped packets vs the total number of received packets of the network interface, during the last 10 minutes
+ to: sysadmin
+
+template: vsphere_outbound_packets_dropped_ratio
+ on: vsphere.net_packets_total
+ hosts: *
+families: *
+ lookup: sum -10m unaligned absolute match-names of tx
+ calc: (($vsphere_outbound_packets_dropped != nan AND $this > 0) ? ($vsphere_outbound_packets_dropped * 100 / $this) : (0))
+ units: %
+ every: 1m
+ warn: $this >= 0.1
+ crit: $this >= 2
+ delay: down 1h multiplier 1.5 max 2h
+ info: the ratio of outbound dropped packets vs the total number of sent packets of the network interface, during the last 10 minutes
+ to: sysadmin
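
The warn/crit expressions above are status-dependent on purpose: the raise threshold is higher than the clear threshold, which gives each alarm hysteresis. A minimal sketch with a hypothetical chart (not part of this file):

template: example_hysteresis
      on: example.some_metric
    calc: $used
   units: %
   every: 20s
# raises WARNING above 90%, and once raised only clears below 80%
    warn: $this > (($status >= $WARNING) ? (80) : (90))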
diff --git a/health/health.h b/health/health.h
index 6920d12d..6c000bf4 100644
--- a/health/health.h
+++ b/health/health.h
@@ -58,10 +58,12 @@ extern void *health_main(void *ptr);
extern void health_reload(void);
extern int health_variable_lookup(const char *variable, uint32_t hash, RRDCALC *rc, calculated_number *result);
+extern void health_aggregate_alarms(RRDHOST *host, BUFFER *wb, BUFFER* context, RRDCALC_STATUS status);
extern void health_alarms2json(RRDHOST *host, BUFFER *wb, int all);
extern void health_alarm_log2json(RRDHOST *host, BUFFER *wb, uint32_t after);
void health_api_v1_chart_variables2json(RRDSET *st, BUFFER *buf);
+void health_api_v1_chart_custom_variables2json(RRDSET *st, BUFFER *buf);
extern int health_alarm_log_open(RRDHOST *host);
extern void health_alarm_log_close(RRDHOST *host);
diff --git a/health/health_json.c b/health/health_json.c
index e923b05c..f6ff1b1a 100644
--- a/health/health_json.c
+++ b/health/health_json.c
@@ -231,6 +231,43 @@ static inline void health_rrdcalc2json_nolock(RRDHOST *host, BUFFER *wb, RRDCALC
//
//}
+void health_aggregate_alarms(RRDHOST *host, BUFFER *wb, BUFFER* contexts, RRDCALC_STATUS status) {
+ RRDCALC *rc;
+ int numberOfAlarms = 0;
+ char *tok = NULL;
+ char *p = NULL;
+
+ rrdhost_rdlock(host);
+
+ if (contexts) {
+ p = (char*)buffer_tostring(contexts);
+ while(p && *p && (tok = mystrsep(&p, ", |"))) {
+ if(!*tok) continue;
+
+ for(rc = host->alarms; rc ; rc = rc->next) {
+ if(unlikely(!rc->rrdset || !rc->rrdset->last_collected_time.tv_sec))
+ continue;
+ if(unlikely(rc->rrdset && rc->rrdset->hash_context == simple_hash(tok)
+ && !strcmp(rc->rrdset->context, tok)
+ && ((status==RRDCALC_STATUS_RAISED)?(rc->status >= RRDCALC_STATUS_WARNING):rc->status == status)))
+ numberOfAlarms++;
+ }
+ }
+ }
+ else {
+ for(rc = host->alarms; rc ; rc = rc->next) {
+ if(unlikely(!rc->rrdset || !rc->rrdset->last_collected_time.tv_sec))
+ continue;
+
+ if(unlikely((status==RRDCALC_STATUS_RAISED)?(rc->status >= RRDCALC_STATUS_WARNING):rc->status == status))
+ numberOfAlarms++;
+ }
+ }
+
+ buffer_sprintf(wb, "%d", numberOfAlarms);
+ rrdhost_unlock(host);
+}
+
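A hedged usage sketch for the new helper (the surrounding host pointer and the buffer size are assumed):

    // count currently raised alarms (WARNING or worse) across all charts of
    // 'host' and print the count into 'wb'; pass a BUFFER of context names
    // as the third argument to count only matching charts
    BUFFER *wb = buffer_create(100);
    health_aggregate_alarms(host, wb, NULL, RRDCALC_STATUS_RAISED);
    // buffer_tostring(wb) now holds a plain integer, e.g. "3"
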
void health_alarms2json(RRDHOST *host, BUFFER *wb, int all) {
int i;
diff --git a/health/notifications/Makefile.in b/health/notifications/Makefile.in
new file mode 100644
index 00000000..16a89b58
--- /dev/null
+++ b/health/notifications/Makefile.in
@@ -0,0 +1,821 @@
+# Makefile.in generated by automake 1.15.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2017 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+
+VPATH = @srcdir@
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = health/notifications
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(dist_plugins_SCRIPTS) \
+ $(dist_libconfig_DATA) $(dist_noinst_DATA) $(am__DIST_COMMON)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
+am__vpath_adj = case $$p in \
+ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
+ *) f=$$p;; \
+ esac;
+am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
+am__install_max = 40
+am__nobase_strip_setup = \
+ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
+am__nobase_strip = \
+ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
+am__nobase_list = $(am__nobase_strip_setup); \
+ for p in $$list; do echo "$$p $$p"; done | \
+ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
+ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
+ if (++n[$$2] == $(am__install_max)) \
+ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
+ END { for (dir in files) print dir, files[dir] }'
+am__base_list = \
+ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
+ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
+am__uninstall_files_from_dir = { \
+ test -z "$$files" \
+ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \
+ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \
+ $(am__cd) "$$dir" && rm -f $$files; }; \
+ }
+am__installdirs = "$(DESTDIR)$(pluginsdir)" \
+ "$(DESTDIR)$(libconfigdir)"
+SCRIPTS = $(dist_plugins_SCRIPTS)
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_libconfig_DATA) $(dist_noinst_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+am__DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/alerta/Makefile.inc \
+ $(srcdir)/awssns/Makefile.inc $(srcdir)/custom/Makefile.inc \
+ $(srcdir)/discord/Makefile.inc $(srcdir)/email/Makefile.inc \
+ $(srcdir)/flock/Makefile.inc $(srcdir)/irc/Makefile.inc \
+ $(srcdir)/kavenegar/Makefile.inc \
+ $(srcdir)/messagebird/Makefile.inc \
+ $(srcdir)/pagerduty/Makefile.inc \
+ $(srcdir)/pushbullet/Makefile.inc \
+ $(srcdir)/pushover/Makefile.inc \
+ $(srcdir)/rocketchat/Makefile.inc $(srcdir)/slack/Makefile.inc \
+ $(srcdir)/smstools3/Makefile.inc $(srcdir)/syslog/Makefile.inc \
+ $(srcdir)/telegram/Makefile.inc $(srcdir)/twilio/Makefile.inc \
+ $(srcdir)/web/Makefile.inc $(top_srcdir)/build/subst.inc
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CUPSCONFIG = @CUPSCONFIG@
+CXX = @CXX@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CXX_BINARY = @CXX_BINARY@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+JSON_CFLAGS = @JSON_CFLAGS@
+JSON_LIBS = @JSON_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
+LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
+LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
+LIBCURL_LIBS = @LIBCURL_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
+LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
+LIBSSL_LIBS = @LIBSSL_LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
+OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
+OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
+OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
+OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
+OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
+OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
+OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
+OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
+OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
+OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
+OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
+OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
+PROTOBUF_LIBS = @PROTOBUF_LIBS@
+PROTOC = @PROTOC@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
+XENLIGHT_LIBS = @XENLIGHT_LIBS@
+YAJL_CFLAGS = @YAJL_CFLAGS@
+YAJL_LIBS = @YAJL_LIBS@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+CLEANFILES = \
+ alarm-notify.sh \
+ $(NULL)
+
+SUFFIXES = .in
+dist_libconfig_DATA = \
+ health_alarm_notify.conf \
+ health_email_recipients.conf \
+ $(NULL)
+
+dist_plugins_SCRIPTS = \
+ alarm-notify.sh \
+ alarm-email.sh \
+ alarm-test.sh \
+ $(NULL)
+
+
+# install these files
+dist_noinst_DATA = alarm-notify.sh.in README.md $(NULL) \
+ alerta/README.md alerta/Makefile.inc $(NULL) awssns/README.md \
+ awssns/Makefile.inc $(NULL) discord/README.md \
+ discord/Makefile.inc $(NULL) email/README.md \
+ email/Makefile.inc $(NULL) flock/README.md flock/Makefile.inc \
+ $(NULL) irc/README.md irc/Makefile.inc $(NULL) \
+ kavenegar/README.md kavenegar/Makefile.inc $(NULL) \
+ messagebird/README.md messagebird/Makefile.inc $(NULL) \
+ pagerduty/README.md pagerduty/Makefile.inc $(NULL) \
+ pushbullet/README.md pushbullet/Makefile.inc $(NULL) \
+ pushover/README.md pushover/Makefile.inc $(NULL) \
+ rocketchat/README.md rocketchat/Makefile.inc $(NULL) \
+ slack/README.md slack/Makefile.inc $(NULL) smstools3/README.md \
+ smstools3/Makefile.inc $(NULL) syslog/README.md \
+ syslog/Makefile.inc $(NULL) telegram/README.md \
+ telegram/Makefile.inc $(NULL) twilio/README.md \
+ twilio/Makefile.inc $(NULL) web/README.md web/Makefile.inc \
+ $(NULL) custom/README.md custom/Makefile.inc $(NULL)
+all: all-am
+
+.SUFFIXES:
+.SUFFIXES: .in
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(top_srcdir)/build/subst.inc $(srcdir)/alerta/Makefile.inc $(srcdir)/awssns/Makefile.inc $(srcdir)/discord/Makefile.inc $(srcdir)/email/Makefile.inc $(srcdir)/flock/Makefile.inc $(srcdir)/irc/Makefile.inc $(srcdir)/kavenegar/Makefile.inc $(srcdir)/messagebird/Makefile.inc $(srcdir)/pagerduty/Makefile.inc $(srcdir)/pushbullet/Makefile.inc $(srcdir)/pushover/Makefile.inc $(srcdir)/rocketchat/Makefile.inc $(srcdir)/slack/Makefile.inc $(srcdir)/smstools3/Makefile.inc $(srcdir)/syslog/Makefile.inc $(srcdir)/telegram/Makefile.inc $(srcdir)/twilio/Makefile.inc $(srcdir)/web/Makefile.inc $(srcdir)/custom/Makefile.inc $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu health/notifications/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu health/notifications/Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+$(top_srcdir)/build/subst.inc $(srcdir)/alerta/Makefile.inc $(srcdir)/awssns/Makefile.inc $(srcdir)/discord/Makefile.inc $(srcdir)/email/Makefile.inc $(srcdir)/flock/Makefile.inc $(srcdir)/irc/Makefile.inc $(srcdir)/kavenegar/Makefile.inc $(srcdir)/messagebird/Makefile.inc $(srcdir)/pagerduty/Makefile.inc $(srcdir)/pushbullet/Makefile.inc $(srcdir)/pushover/Makefile.inc $(srcdir)/rocketchat/Makefile.inc $(srcdir)/slack/Makefile.inc $(srcdir)/smstools3/Makefile.inc $(srcdir)/syslog/Makefile.inc $(srcdir)/telegram/Makefile.inc $(srcdir)/twilio/Makefile.inc $(srcdir)/web/Makefile.inc $(srcdir)/custom/Makefile.inc $(am__empty):
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+install-dist_pluginsSCRIPTS: $(dist_plugins_SCRIPTS)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(pluginsdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(pluginsdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \
+ done | \
+ sed -e 'p;s,.*/,,;n' \
+ -e 'h;s|.*|.|' \
+ -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \
+ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \
+ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \
+ if ($$2 == $$4) { files[d] = files[d] " " $$1; \
+ if (++n[d] == $(am__install_max)) { \
+ print "f", d, files[d]; n[d] = 0; files[d] = "" } } \
+ else { print "f", d "/" $$4, $$1 } } \
+ END { for (d in files) print "f", d, files[d] }' | \
+ while read type dir files; do \
+ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \
+ test -z "$$files" || { \
+ echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pluginsdir)$$dir'"; \
+ $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pluginsdir)$$dir" || exit $$?; \
+ } \
+ ; done
+
+uninstall-dist_pluginsSCRIPTS:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || exit 0; \
+ files=`for p in $$list; do echo "$$p"; done | \
+ sed -e 's,.*/,,;$(transform)'`; \
+ dir='$(DESTDIR)$(pluginsdir)'; $(am__uninstall_files_from_dir)
+install-dist_libconfigDATA: $(dist_libconfig_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_libconfig_DATA)'; test -n "$(libconfigdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(libconfigdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(libconfigdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(libconfigdir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(libconfigdir)" || exit $$?; \
+ done
+
+uninstall-dist_libconfigDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_libconfig_DATA)'; test -n "$(libconfigdir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(libconfigdir)'; $(am__uninstall_files_from_dir)
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(SCRIPTS) $(DATA)
+installdirs:
+ for dir in "$(DESTDIR)$(pluginsdir)" "$(DESTDIR)$(libconfigdir)"; do \
+ test -z "$$dir" || $(MKDIR_P) "$$dir"; \
+ done
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+ -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES)
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am: install-dist_libconfigDATA \
+ install-dist_pluginsSCRIPTS
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am: uninstall-dist_libconfigDATA \
+ uninstall-dist_pluginsSCRIPTS
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dist_libconfigDATA \
+ install-dist_pluginsSCRIPTS install-dvi install-dvi-am \
+ install-exec install-exec-am install-html install-html-am \
+ install-info install-info-am install-man install-pdf \
+ install-pdf-am install-ps install-ps-am install-strip \
+ installcheck installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags-am uninstall uninstall-am \
+ uninstall-dist_libconfigDATA uninstall-dist_pluginsSCRIPTS
+
+.PRECIOUS: Makefile
+
+.in:
+ if sed \
+ -e 's#[@]localstatedir_POST@#$(localstatedir)#g' \
+ -e 's#[@]sbindir_POST@#$(sbindir)#g' \
+ -e 's#[@]pluginsdir_POST@#$(pluginsdir)#g' \
+ -e 's#[@]configdir_POST@#$(configdir)#g' \
+ -e 's#[@]libconfigdir_POST@#$(libconfigdir)#g' \
+ -e 's#[@]cachedir_POST@#$(cachedir)#g' \
+ -e 's#[@]registrydir_POST@#$(registrydir)#g' \
+ -e 's#[@]varlibdir_POST@#$(varlibdir)#g' \
+ $< > $@.tmp; then \
+ mv "$@.tmp" "$@"; \
+ else \
+ rm -f "$@.tmp"; \
+ false; \
+ fi
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/health/notifications/README.md b/health/notifications/README.md
index 8c7ab66f..a0065729 100644
--- a/health/notifications/README.md
+++ b/health/notifications/README.md
@@ -7,9 +7,9 @@ You can change the default script globally by editing `/etc/netdata/netdata.conf
`alarm-notify.sh` is capable of sending notifications:
-- to multiple recipients
-- using multiple notification methods
-- filtering severity per recipient
+- to multiple recipients
+- using multiple notification methods
+- filtering severity per recipient
It uses **roles**. For example `sysadmin`, `webmaster`, `dba`, etc.
@@ -23,23 +23,23 @@ Each role may have one or more destinations.
So, for example the `sysadmin` role may send:
-1. emails to admin1@example.com and admin2@example.com
-2. pushover.net notifications to USERTOKENS `A`, `B` and `C`.
-3. pushbullet.com push notifications to admin1@example.com and admin2@example.com
-4. messages to slack.com channel `#alarms` and `#systems`.
-5. messages to Discord channels `#alarms` and `#systems`.
+1. emails to admin1@example.com and admin2@example.com
+2. pushover.net notifications to USERTOKENS `A`, `B` and `C`.
+3. pushbullet.com push notifications to admin1@example.com and admin2@example.com
+4. messages to slack.com channel `#alarms` and `#systems`.
+5. messages to Discord channels `#alarms` and `#systems`.
## Configuration
Edit [`/etc/netdata/health_alarm_notify.conf`](health_alarm_notify.conf)
by running `/etc/netdata/edit-config health_alarm_notify.conf`:
-- settings per notification method:
+- settings per notification method:
- all notification methods except email, require some configuration
- (i.e. API keys, tokens, destination rooms, channels, etc).
+   all notification methods except email require some configuration
+   (i.e. API keys, tokens, destination rooms, channels, etc.).
-2. **recipients** per **role** per **notification method**
+2. **recipients** per **role** per **notification method**
## Testing Notifications
@@ -62,8 +62,10 @@ export NETDATA_ALARM_NOTIFY_DEBUG=1
Note that in versions before 1.16, the plugins.d directory may be installed in a different location on certain OSes (e.g. under `/usr/lib/netdata`). You can always find the location of the alarm-notify.sh script in `netdata.conf`.
If you need to dig even deeper, you can trace the execution with `bash -x`. Note that in test mode, alarm-notify.sh calls itself with many more arguments. So first do
- ```sh
- bash -x /usr/libexec/netdata/plugins.d/alarm-notify.sh test
- ```
+
+```sh
+bash -x /usr/libexec/netdata/plugins.d/alarm-notify.sh test
+```
+
Then look in the output for the alarm-notify.sh calls and run the one you want to trace with `bash -x`.
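
For example, a trace of one such call might look like this (the copied arguments are illustrative placeholders):

```sh
bash -x /usr/libexec/netdata/plugins.d/alarm-notify.sh <arguments copied from the test output>
```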
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fhealth%2Fnotifications%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fhealth%2Fnotifications%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/health/notifications/alarm-notify.sh b/health/notifications/alarm-notify.sh
new file mode 100644
index 00000000..745a5257
--- /dev/null
+++ b/health/notifications/alarm-notify.sh
@@ -0,0 +1,2298 @@
+#!/usr/bin/env bash
+#shellcheck source=/dev/null disable=SC2086,SC2154
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2017 Costa Tsaousis <costa@tsaousis.gr>
+# SPDX-License-Identifier: GPL-3.0-or-later
+#
+# Script to send alarm notifications for netdata
+#
+# Features:
+# - multiple notification methods
+# - multiple roles per alarm
+# - multiple recipients per role
+# - severity filtering per recipient
+#
+# Supported notification methods:
+# - emails by @ktsaou
+# - slack.com notifications by @ktsaou
+# - alerta.io notifications by @kattunga
+# - discordapp.com notifications by @lowfive
+# - pushover.net notifications by @ktsaou
+# - pushbullet.com push notifications by Tiago Peralta @tperalta82 #1070
+# - telegram.org notifications by @hashworks #1002
+# - twilio.com notifications by Levi Blaney @shadycuz #1211
+# - kafka notifications by @ktsaou #1342
+# - pagerduty.com notifications by Jim Cooley @jimcooley #1373
+# - messagebird.com notifications by @tech_no_logical #1453
+# - hipchat notifications by @ktsaou #1561
+# - fleep notifications by @Ferroin
+# - prowlapp.com notifications by @Ferroin
+# - custom notifications by @ktsaou
+# - syslog messages by @Ferroin
+# - Microsoft Teams notifications by @tioumen
+# - RocketChat notifications by @Hermsi1337 #3777
+
+# -----------------------------------------------------------------------------
+# testing notifications
+
+if { [ "${1}" = "test" ] || [ "${2}" = "test" ]; } && [ "${#}" -le 2 ]; then
+ if [ "${2}" = "test" ]; then
+ recipient="${1}"
+ else
+ recipient="${2}"
+ fi
+
+ [ -z "${recipient}" ] && recipient="sysadmin"
+
+ id=1
+ last="CLEAR"
+ test_res=0
+ for x in "WARNING" "CRITICAL" "CLEAR"; do
+ echo >&2
+ echo >&2 "# SENDING TEST ${x} ALARM TO ROLE: ${recipient}"
+
+ "${0}" "${recipient}" "$(hostname)" 1 1 "${id}" "$(date +%s)" "test_alarm" "test.chart" "test.family" "${x}" "${last}" 100 90 "${0}" 1 $((0 + id)) "units" "this is a test alarm to verify notifications work" "new value" "old value" "evaluated expression" "expression variable values" 0 0
+ #shellcheck disable=SC2181
+ if [ $? -ne 0 ]; then
+ echo >&2 "# FAILED"
+ test_res=1
+ else
+ echo >&2 "# OK"
+ fi
+
+ last="${x}"
+ id=$((id + 1))
+ done
+
+ exit $test_res
+fi
+
+export PATH="${PATH}:/sbin:/usr/sbin:/usr/local/sbin"
+export LC_ALL=C
+
+# -----------------------------------------------------------------------------
+
+PROGRAM_NAME="$(basename "${0}")"
+
+logdate() {
+ date "+%Y-%m-%d %H:%M:%S"
+}
+
+log() {
+ local status="${1}"
+ shift
+
+ echo >&2 "$(logdate): ${PROGRAM_NAME}: ${status}: ${*}"
+
+}
+
+warning() {
+ log WARNING "${@}"
+}
+
+error() {
+ log ERROR "${@}"
+}
+
+info() {
+ log INFO "${@}"
+}
+
+fatal() {
+ log FATAL "${@}"
+ exit 1
+}
+
+debug=${NETDATA_ALARM_NOTIFY_DEBUG-0}
+debug() {
+ [ "${debug}" = "1" ] && log DEBUG "${@}"
+}
+
+docurl() {
+ if [ -z "${curl}" ]; then
+ error "${curl} is unset."
+ return 1
+ fi
+
+ if [ "${debug}" = "1" ]; then
+ echo >&2 "--- BEGIN curl command ---"
+ printf >&2 "%q " ${curl} "${@}"
+ echo >&2
+ echo >&2 "--- END curl command ---"
+
+ local out code ret
+ out=$(mktemp /tmp/netdata-health-alarm-notify-XXXXXXXX)
+ code=$(${curl} ${curl_options} --write-out "%{http_code}" --output "${out}" --silent --show-error "${@}")
+ ret=$?
+ echo >&2 "--- BEGIN received response ---"
+ cat >&2 "${out}"
+ echo >&2
+ echo >&2 "--- END received response ---"
+ echo >&2 "RECEIVED HTTP RESPONSE CODE: ${code}"
+ rm "${out}"
+ echo "${code}"
+ return ${ret}
+ fi
+
+ ${curl} ${curl_options} --write-out "%{http_code}" --output /dev/null --silent --show-error "${@}"
+ return $?
+}
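+
+# Typical usage, as done by the senders below:
+#   httpcode=$(docurl -X POST --data "${payload}" "${webhook}")
+# prints the HTTP response code on stdout and returns curl's exit status.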
+
+# -----------------------------------------------------------------------------
+# List of all the notification mechanisms we support.
+# Used in a couple of places to write more compact code.
+
+method_names="
+email
+pushover
+pushbullet
+telegram
+slack
+alerta
+flock
+discord
+hipchat
+twilio
+messagebird
+pd
+fleep
+syslog
+custom
+msteam
+kavenegar
+prowl
+awssns
+rocketchat
+sms
+"
+
+# -----------------------------------------------------------------------------
+# this is to be overwritten by the config file
+
+custom_sender() {
+ info "not sending custom notification for ${status} of '${host}.${chart}.${name}'"
+}
+
+# -----------------------------------------------------------------------------
+
+# check for BASH v4+ (required for associative arrays)
+if [ ${BASH_VERSINFO[0]} -lt 4 ]; then
+ fatal "BASH version 4 or later is required (this is ${BASH_VERSION})."
+fi
+
+# -----------------------------------------------------------------------------
+# defaults to allow running this script by hand
+
+[ -z "${NETDATA_USER_CONFIG_DIR}" ] && NETDATA_USER_CONFIG_DIR="/etc/netdata"
+[ -z "${NETDATA_STOCK_CONFIG_DIR}" ] && NETDATA_STOCK_CONFIG_DIR="/usr/lib/netdata/conf.d"
+[ -z "${NETDATA_CACHE_DIR}" ] && NETDATA_CACHE_DIR="/var/cache/netdata"
+[ -z "${NETDATA_REGISTRY_URL}" ] && NETDATA_REGISTRY_URL="https://registry.my-netdata.io"
+[ -z "${NETDATA_REGISTRY_CLOUD_BASE_URL}" ] && NETDATA_REGISTRY_CLOUD_BASE_URL="https://netdata.cloud"
+
+# -----------------------------------------------------------------------------
+# parse command line parameters
+
+if [ "${1}" = "unittest" ]; then
+ unittest=1 # enable unit testing mode
+ roles="${2}" # the role that should be used for unit testing
+ cfgfile="${3}" # the location of the config file to use for unit testing
+ status="${4}" # the current status : REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL
+ old_status="${5}" # the previous status: REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL
+else
+ roles="${1}" # the roles that should be notified for this event
+ args_host="${2}" # the host generated this event
+ unique_id="${3}" # the unique id of this event
+ alarm_id="${4}" # the unique id of the alarm that generated this event
+ event_id="${5}" # the incremental id of the event, for this alarm id
+ when="${6}" # the timestamp this event occurred
+ name="${7}" # the name of the alarm, as given in netdata health.d entries
+ chart="${8}" # the name of the chart (type.id)
+ family="${9}" # the family of the chart
+ status="${10}" # the current status : REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL
+ old_status="${11}" # the previous status: REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL
+ value="${12}" # the current value of the alarm
+ old_value="${13}" # the previous value of the alarm
+ src="${14}" # the line number and file the alarm has been configured
+ duration="${15}" # the duration in seconds of the previous alarm state
+ non_clear_duration="${16}" # the total duration in seconds this is/was non-clear
+ units="${17}" # the units of the value
+ info="${18}" # a short description of the alarm
+ value_string="${19}" # friendly value (with units)
+ # shellcheck disable=SC2034
+ # variable is unused, but https://github.com/netdata/netdata/pull/5164#discussion_r255572947
+ old_value_string="${20}" # friendly old value (with units), previously named "old_value_string"
+ calc_expression="${21}" # contains the expression that was evaluated to trigger the alarm
+ calc_param_values="${22}" # the values of the parameters in the expression, at the time of the evaluation
+ total_warnings="${23}" # Total number of alarms in WARNING state
+ total_critical="${24}" # Total number of alarms in CRITICAL state
+fi
+
+# -----------------------------------------------------------------------------
+# find a suitable hostname to use, if netdata did not supply a hostname
+
+if [ -z "${args_host}" ]; then
+ this_host=$(hostname -s 2>/dev/null)
+ host="${this_host}"
+ args_host="${this_host}"
+else
+ host="${args_host}"
+fi
+
+# -----------------------------------------------------------------------------
+# screen out statuses for which we don't need to send a notification
+
+# don't do anything if this is not WARNING, CRITICAL or CLEAR
+if [ "${status}" != "WARNING" ] && [ "${status}" != "CRITICAL" ] && [ "${status}" != "CLEAR" ]; then
+ info "not sending notification for ${status} of '${host}.${chart}.${name}'"
+ exit 1
+fi
+
+# don't do anything if this is CLEAR, but it was not WARNING or CRITICAL
+if [ "${clear_alarm_always}" != "YES" ] && [ "${old_status}" != "WARNING" ] && [ "${old_status}" != "CRITICAL" ] && [ "${status}" = "CLEAR" ]; then
+ info "not sending notification for ${status} of '${host}.${chart}.${name}' (last status was ${old_status})"
+ exit 1
+fi
+
+# -----------------------------------------------------------------------------
+# load configuration
+
+# By default, fetch images from the global public registry.
+# This is the default because all notification methods need to download
+# images via the Internet, and private registries might not be reachable.
+# This can be overridden in the configuration file.
+images_base_url="https://registry.my-netdata.io"
+
+# curl options to use
+curl_options=""
+
+# hostname handling
+use_fqdn="NO"
+
+# needed commands
+# if empty they will be searched in the system path
+curl=
+sendmail=
+
+# enable / disable features
+for method_name in ${method_names^^}; do
+ declare SEND_${method_name}="YES"
+ declare DEFAULT_RECIPIENT_${method_name}
+done
+
+for method_name in ${method_names}; do
+ declare -A role_recipients_${method_name}
+done
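+
+# For example, the "slack" entry above results in:
+#   SEND_SLACK="YES"            (the per-method feature switch)
+#   DEFAULT_RECIPIENT_SLACK     (the fallback recipient list)
+#   role_recipients_slack       (associative array mapping role -> recipients)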
+
+# slack configs
+SLACK_WEBHOOK_URL=
+
+# Microsoft Teams configs
+MSTEAM_WEBHOOK_URL=
+
+# rocketchat configs
+ROCKETCHAT_WEBHOOK_URL=
+
+# alerta configs
+ALERTA_WEBHOOK_URL=
+ALERTA_API_KEY=
+
+# flock configs
+FLOCK_WEBHOOK_URL=
+
+# discord configs
+DISCORD_WEBHOOK_URL=
+
+# pushover configs
+PUSHOVER_APP_TOKEN=
+
+# pushbullet configs
+PUSHBULLET_ACCESS_TOKEN=
+PUSHBULLET_SOURCE_DEVICE=
+
+# twilio configs
+TWILIO_ACCOUNT_SID=
+TWILIO_ACCOUNT_TOKEN=
+TWILIO_NUMBER=
+
+# hipchat configs
+HIPCHAT_SERVER=
+HIPCHAT_AUTH_TOKEN=
+
+# messagebird configs
+MESSAGEBIRD_ACCESS_KEY=
+MESSAGEBIRD_NUMBER=
+
+# kavenegar configs
+KAVENEGAR_API_KEY=
+KAVENEGAR_SENDER=
+
+# telegram configs
+TELEGRAM_BOT_TOKEN=
+
+# kafka configs
+SEND_KAFKA="YES"
+KAFKA_URL=
+KAFKA_SENDER_IP=
+
+# pagerduty.com configs
+PD_SERVICE_KEY=
+
+# fleep.io configs
+FLEEP_SENDER="${host}"
+
+# Amazon SNS configs
+AWSSNS_MESSAGE_FORMAT=
+
+# syslog configs
+SYSLOG_FACILITY=
+
+# email configs
+EMAIL_SENDER=
+EMAIL_CHARSET=$(locale charmap 2>/dev/null)
+EMAIL_THREADING=
+EMAIL_PLAINTEXT_ONLY=
+
+# irc configs
+IRC_NICKNAME=
+IRC_REALNAME=
+IRC_NETWORK=
+
+# load the stock and user configuration files
+# these will overwrite the variables above
+
+if [ -n "${unittest}" ]; then
+ if ! source "${cfgfile}"; then
+ error "Failed to load requested config file."
+ exit 1
+ fi
+else
+ for CONFIG in "${NETDATA_STOCK_CONFIG_DIR}/health_alarm_notify.conf" "${NETDATA_USER_CONFIG_DIR}/health_alarm_notify.conf"; do
+ if [ -f "${CONFIG}" ]; then
+ debug "Loading config file '${CONFIG}'..."
+ source "${CONFIG}" || error "Failed to load config file '${CONFIG}'."
+ else
+ warning "Cannot find file '${CONFIG}'."
+ fi
+ done
+fi
+
+# If we didn't autodetect the character set for e-mail and it wasn't
+# set by the user, we need to set it to a reasonable default. UTF-8
+# should be correct for almost all modern UNIX systems.
+if [ -z "${EMAIL_CHARSET}" ]; then
+ EMAIL_CHARSET="UTF-8"
+fi
+
+# If we've been asked to use FQDNs for the URLs in the alarm, do so,
+# unless we're sending an alarm for a slave system whose FQDN we cannot
+# easily determine.
+if [ "${use_fqdn}" = "YES" ] && [ "${host}" = "$(hostname -s 2>/dev/null)" ]; then
+ host="$(hostname -f 2>/dev/null)"
+fi
+
+# -----------------------------------------------------------------------------
+# filter a recipient based on alarm event severity
+
+filter_recipient_by_criticality() {
+ local method="${1}" x="${2}" r s
+ shift
+
+ r="${x/|*/}" # the recipient
+ s="${x/*|/}" # the severity required for notifying this recipient
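+ # Example: an entry "admin@example.com|critical" yields r="admin@example.com"
+ # and s="critical"; a plain "admin@example.com" yields r == s, which means
+ # no severity filtering for that recipient.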
+
+ # no severity filtering for this person
+ [ "${r}" = "${s}" ] && return 0
+
+ # the severity is invalid
+ s="${s^^}"
+ if [ "${s}" != "CRITICAL" ]; then
+ error "SEVERITY FILTERING for ${x} VIA ${method}: invalid severity '${s,,}', only 'critical' is supported."
+ return 0
+ fi
+
+ # create the status tracking directory for this user
+ [ ! -d "${NETDATA_CACHE_DIR}/alarm-notify/${method}/${r}" ] &&
+ mkdir -p "${NETDATA_CACHE_DIR}/alarm-notify/${method}/${r}"
+
+ case "${status}" in
+ CRITICAL)
+ # make sure the recipient will get future notifications for this alarm too
+ touch "${NETDATA_CACHE_DIR}/alarm-notify/${method}/${r}/${alarm_id}"
+ debug "SEVERITY FILTERING for ${x} VIA ${method}: ALLOW: the alarm is CRITICAL (will now receive next status change)"
+ return 0
+ ;;
+
+ WARNING)
+ if [ -f "${NETDATA_CACHE_DIR}/alarm-notify/${method}/${r}/${alarm_id}" ]; then
+ # we do not remove the file, so that the recipient will get future notifications of this alarm
+ debug "SEVERITY FILTERING for ${x} VIA ${method}: ALLOW: recipient has been notified for this alarm in the past (will still receive next status change)"
+ return 0
+ fi
+ ;;
+
+ *)
+ if [ -f "${NETDATA_CACHE_DIR}/alarm-notify/${method}/${r}/${alarm_id}" ]; then
+ # remove the file, so that the recipient will only receive notifications for CRITICAL states for this alarm
+ rm "${NETDATA_CACHE_DIR}/alarm-notify/${method}/${r}/${alarm_id}"
+ debug "SEVERITY FILTERING for ${x} VIA ${method}: ALLOW: recipient has been notified for this alarm (will only receive CRITICAL notifications from now on)"
+ return 0
+ fi
+ ;;
+ esac
+
+ debug "SEVERITY FILTERING for ${x} VIA ${method}: BLOCK: recipient should not receive this notification"
+ return 1
+}
+
+# -----------------------------------------------------------------------------
+# verify the delivery methods supported
+
+# check slack
+[ -z "${SLACK_WEBHOOK_URL}" ] && SEND_SLACK="NO"
+
+# check rocketchat
+[ -z "${ROCKETCHAT_WEBHOOK_URL}" ] && SEND_ROCKETCHAT="NO"
+
+# check alerta
+[ -z "${ALERTA_WEBHOOK_URL}" ] && SEND_ALERTA="NO"
+
+# check flock
+[ -z "${FLOCK_WEBHOOK_URL}" ] && SEND_FLOCK="NO"
+
+# check discord
+[ -z "${DISCORD_WEBHOOK_URL}" ] && SEND_DISCORD="NO"
+
+# check pushover
+[ -z "${PUSHOVER_APP_TOKEN}" ] && SEND_PUSHOVER="NO"
+
+# check pushbullet
+[ -z "${PUSHBULLET_ACCESS_TOKEN}" ] && SEND_PUSHBULLET="NO"
+
+# check twilio
+{ [ -z "${TWILIO_ACCOUNT_TOKEN}" ] || [ -z "${TWILIO_ACCOUNT_SID}" ] || [ -z "${TWILIO_NUMBER}" ]; } && SEND_TWILIO="NO"
+
+# check hipchat
+[ -z "${HIPCHAT_AUTH_TOKEN}" ] && SEND_HIPCHAT="NO"
+
+# check messagebird
+{ [ -z "${MESSAGEBIRD_ACCESS_KEY}" ] || [ -z "${MESSAGEBIRD_NUMBER}" ]; } && SEND_MESSAGEBIRD="NO"
+
+# check kavenegar
+{ [ -z "${KAVENEGAR_API_KEY}" ] || [ -z "${KAVENEGAR_SENDER}" ]; } && SEND_KAVENEGAR="NO"
+
+# check telegram
+[ -z "${TELEGRAM_BOT_TOKEN}" ] && SEND_TELEGRAM="NO"
+
+# check kafka
+{ [ -z "${KAFKA_URL}" ] || [ -z "${KAFKA_SENDER_IP}" ]; } && SEND_KAFKA="NO"
+
+# check irc
+[ -z "${IRC_NETWORK}" ] && SEND_IRC="NO"
+
+# check fleep
+#shellcheck disable=SC2153
+{ [ -z "${FLEEP_SERVER}" ] || [ -z "${FLEEP_SENDER}" ]; } && SEND_FLEEP="NO"
+
+if [ "${SEND_PUSHOVER}" = "YES" ] ||
+ [ "${SEND_SLACK}" = "YES" ] ||
+ [ "${SEND_ROCKETCHAT}" = "YES" ] ||
+ [ "${SEND_ALERTA}" = "YES" ] ||
+ [ "${SEND_PD}" = "YES" ] ||
+ [ "${SEND_FLOCK}" = "YES" ] ||
+ [ "${SEND_DISCORD}" = "YES" ] ||
+ [ "${SEND_HIPCHAT}" = "YES" ] ||
+ [ "${SEND_TWILIO}" = "YES" ] ||
+ [ "${SEND_MESSAGEBIRD}" = "YES" ] ||
+ [ "${SEND_KAVENEGAR}" = "YES" ] ||
+ [ "${SEND_TELEGRAM}" = "YES" ] ||
+ [ "${SEND_PUSHBULLET}" = "YES" ] ||
+ [ "${SEND_KAFKA}" = "YES" ] ||
+ [ "${SEND_FLEEP}" = "YES" ] ||
+ [ "${SEND_PROWL}" = "YES" ] ||
+ [ "${SEND_CUSTOM}" = "YES" ] ||
+ [ "${SEND_MSTEAM}" = "YES" ]; then
+ # if we need curl, check for the curl command
+ if [ -z "${curl}" ]; then
+ curl="$(command -v curl 2>/dev/null)"
+ fi
+ if [ -z "${curl}" ]; then
+ error "Cannot find curl command in the system path. Disabling all curl based notifications."
+ SEND_PUSHOVER="NO"
+ SEND_PUSHBULLET="NO"
+ SEND_TELEGRAM="NO"
+ SEND_SLACK="NO"
+ SEND_MSTEAM="NO"
+ SEND_ROCKETCHAT="NO"
+ SEND_ALERTA="NO"
+ SEND_PD="NO"
+ SEND_FLOCK="NO"
+ SEND_DISCORD="NO"
+ SEND_TWILIO="NO"
+ SEND_HIPCHAT="NO"
+ SEND_MESSAGEBIRD="NO"
+ SEND_KAVENEGAR="NO"
+ SEND_KAFKA="NO"
+ SEND_FLEEP="NO"
+ SEND_PROWL="NO"
+ SEND_CUSTOM="NO"
+ fi
+fi
+
+if [ "${SEND_SMS}" = "YES" ]; then
+ if [ -z "${sendsms}" ]; then
+ sendsms="$(command -v sendsms 2>/dev/null)"
+ fi
+ if [ -z "${sendsms}" ]; then
+ SEND_SMS="NO"
+ fi
+fi
+# if we need sendmail, check for the sendmail command
+if [ "${SEND_EMAIL}" = "YES" ] && [ -z "${sendmail}" ]; then
+ sendmail="$(command -v sendmail 2>/dev/null)"
+ if [ -z "${sendmail}" ]; then
+ debug "Cannot find sendmail command in the system path. Disabling email notifications."
+ SEND_EMAIL="NO"
+ fi
+fi
+
+# if we need logger, check for the logger command
+if [ "${SEND_SYSLOG}" = "YES" ] && [ -z "${logger}" ]; then
+ logger="$(command -v logger 2>/dev/null)"
+ if [ -z "${logger}" ]; then
+ debug "Cannot find logger command in the system path. Disabling syslog notifications."
+ SEND_SYSLOG="NO"
+ fi
+fi
+
+# if we need aws, check for the aws command
+if [ "${SEND_AWSSNS}" = "YES" ] && [ -z "${aws}" ]; then
+ aws="$(command -v aws 2>/dev/null)"
+ if [ -z "${aws}" ]; then
+ debug "Cannot find aws command in the system path. Disabling Amazon SNS notifications."
+ SEND_AWSSNS="NO"
+ fi
+fi
+
+# -----------------------------------------------------------------------------
+# find the recipients' addresses per method
+
+# netdata may call us with multiple roles, and roles may have multiple but
+# overlapping recipients - so, here we find the unique recipients.
+for method_name in ${method_names}; do
+ send_var="SEND_${method_name^^}"
+ if [ "${!send_var}" = "NO" ]; then
+ continue
+ fi
+
+ declare -A arr_var=()
+
+ for x in ${roles//,/ }; do
+ # the roles 'silent' and 'disabled' mean:
+ # don't send a notification for this role
+ if [ "${x}" = "silent" ] || [ "${x}" = "disabled" ]; then
+ continue
+ fi
+
+ role_recipients="role_recipients_${method_name}[$x]"
+ default_recipient_var="DEFAULT_RECIPIENT_${method_name^^}"
+
+ a="${!role_recipients}"
+ [ -z "${a}" ] && a="${!default_recipient_var}"
+ for r in ${a//,/ }; do
+ [ "${r}" != "disabled" ] && filter_recipient_by_criticality ${method_name} "${r}" && arr_var[${r/|*/}]="1"
+ done
+ done
+
+ # build the list of recipients
+ to_var="to_${method_name}"
+ declare to_${method_name}="${!arr_var[*]}"
+
+ [ -z "${!to_var}" ] && declare ${send_var}="NO"
+done
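+
+# Example: with roles="sysadmin,webmaster", if both roles map the slack method
+# to "alarms" and webmaster also adds "ops", the associative array above
+# deduplicates the overlap and to_slack becomes "alarms ops" (the key order of
+# an associative array is not guaranteed).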
+
+# -----------------------------------------------------------------------------
+# handle fixup of the email recipient list.
+
+fix_to_email() {
+ to_email=
+ while [ -n "${1}" ]; do
+ [ -n "${to_email}" ] && to_email="${to_email}, "
+ to_email="${to_email}${1}"
+ shift 1
+ done
+}
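+
+# Example: fix_to_email admin1@example.com admin2@example.com
+# sets to_email to "admin1@example.com, admin2@example.com".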
+
+# ${to_email} without quotes here
+fix_to_email ${to_email}
+
+# -----------------------------------------------------------------------------
+# handle output if we're running in unit test mode
+if [ -n "${unittest}" ]; then
+ for method_name in ${method_names}; do
+ to_var="to_${method_name}"
+ echo "results: ${method_name}: ${!to_var}"
+ done
+ exit 0
+fi
+
+# -----------------------------------------------------------------------------
+# check that we have at least a method enabled
+proceed=0
+for method in "${SEND_EMAIL}" \
+ "${SEND_PUSHOVER}" \
+ "${SEND_TELEGRAM}" \
+ "${SEND_SLACK}" \
+ "${SEND_ROCKETCHAT}" \
+ "${SEND_ALERTA}" \
+ "${SEND_FLOCK}" \
+ "${SEND_DISCORD}" \
+ "${SEND_TWILIO}" \
+ "${SEND_HIPCHAT}" \
+ "${SEND_MESSAGEBIRD}" \
+ "${SEND_KAVENEGAR}" \
+ "${SEND_PUSHBULLET}" \
+ "${SEND_KAFKA}" \
+ "${SEND_PD}" \
+ "${SEND_FLEEP}" \
+ "${SEND_PROWL}" \
+ "${SEND_CUSTOM}" \
+ "${SEND_IRC}" \
+ "${SEND_AWSSNS}" \
+ "${SEND_SYSLOG}" \
+ "${SEND_SMS}" \
+ "${SEND_MSTEAM}"; do
+ if [ "${method}" == "YES" ]; then
+ proceed=1
+ break
+ fi
+done
+if [ "$proceed" -eq 0 ]; then
+ fatal "All notification methods are disabled. Not sending notification for host '${host}', chart '${chart}' to '${roles}' for '${name}' = '${value}' for status '${status}'."
+fi
+
+# -----------------------------------------------------------------------------
+# get the date the alarm happened
+
+date=$(date --date=@${when} "${date_format}" 2>/dev/null)
+[ -z "${date}" ] && date=$(date "${date_format}" 2>/dev/null)
+[ -z "${date}" ] && date=$(date --date=@${when} 2>/dev/null)
+[ -z "${date}" ] && date=$(date 2>/dev/null)
+
+# ----------------------------------------------------------------------------
+# prepare some extra headers if we've been asked to thread e-mails
+if [ "${SEND_EMAIL}" == "YES" ] && [ "${EMAIL_THREADING}" != "NO" ]; then
+ email_thread_headers="In-Reply-To: <${chart}-${name}@${host}>\\r\\nReferences: <${chart}-${name}@${host}>"
+else
+ email_thread_headers=
+fi
+
+# -----------------------------------------------------------------------------
+# function to URL encode a string
+
+urlencode() {
+ local string="${1}" strlen encoded pos c o
+
+ strlen=${#string}
+ for ((pos = 0; pos < strlen; pos++)); do
+ c=${string:pos:1}
+ case "${c}" in
+ [-_.~a-zA-Z0-9])
+ o="${c}"
+ ;;
+
+ *)
+ printf -v o '%%%02x' "'${c}"
+ ;;
+ esac
+ encoded+="${o}"
+ done
+
+ REPLY="${encoded}"
+ echo "${REPLY}"
+}
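+
+# Example: urlencode "hello world" prints "hello%20world" and also leaves the
+# encoded value in ${REPLY} for callers that redirect stdout.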
+
+# -----------------------------------------------------------------------------
+# function to convert a duration in seconds, to a human readable duration
+# using DAYS, MINUTES, SECONDS
+
+duration4human() {
+ local s="${1}" d=0 h=0 m=0 ds="day" hs="hour" ms="minute" ss="second" ret
+ d=$((s / 86400))
+ s=$((s - (d * 86400)))
+ h=$((s / 3600))
+ s=$((s - (h * 3600)))
+ m=$((s / 60))
+ s=$((s - (m * 60)))
+
+ if [ ${d} -gt 0 ]; then
+ [ ${m} -ge 30 ] && h=$((h + 1))
+ [ ${d} -gt 1 ] && ds="days"
+ [ ${h} -gt 1 ] && hs="hours"
+ if [ ${h} -gt 0 ]; then
+ ret="${d} ${ds} and ${h} ${hs}"
+ else
+ ret="${d} ${ds}"
+ fi
+ elif [ ${h} -gt 0 ]; then
+ [ ${s} -ge 30 ] && m=$((m + 1))
+ [ ${h} -gt 1 ] && hs="hours"
+ [ ${m} -gt 1 ] && ms="minutes"
+ if [ ${m} -gt 0 ]; then
+ ret="${h} ${hs} and ${m} ${ms}"
+ else
+ ret="${h} ${hs}"
+ fi
+ elif [ ${m} -gt 0 ]; then
+ [ ${m} -gt 1 ] && ms="minutes"
+ [ ${s} -gt 1 ] && ss="seconds"
+ if [ ${s} -gt 0 ]; then
+ ret="${m} ${ms} and ${s} ${ss}"
+ else
+ ret="${m} ${ms}"
+ fi
+ else
+ [ ${s} -gt 1 ] && ss="seconds"
+ ret="${s} ${ss}"
+ fi
+
+ REPLY="${ret}"
+ echo "${REPLY}"
+}
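+
+# Examples: duration4human 3661 prints "1 hour and 1 minute";
+# duration4human 45 prints "45 seconds".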
+
+# -----------------------------------------------------------------------------
+# email sender
+
+send_email() {
+ local ret opts=() sender_email="${EMAIL_SENDER}" sender_name=
+ if [ "${SEND_EMAIL}" = "YES" ]; then
+
+ if [ -n "${EMAIL_SENDER}" ]; then
+ if [[ ${EMAIL_SENDER} =~ ^\".*\"\ \<.*\>$ ]]; then
+ # the name includes double quotes
+ sender_email="$(echo "${EMAIL_SENDER}" | cut -d '<' -f 2 | cut -d '>' -f 1)"
+ sender_name="$(echo "${EMAIL_SENDER}" | cut -d '"' -f 2)"
+ elif [[ ${EMAIL_SENDER} =~ ^\'.*\'\ \<.*\>$ ]]; then
+ # the name includes single quotes
+ sender_email="$(echo "${EMAIL_SENDER}" | cut -d '<' -f 2 | cut -d '>' -f 1)"
+ sender_name="$(echo "${EMAIL_SENDER}" | cut -d "'" -f 2)"
+ elif [[ ${EMAIL_SENDER} =~ ^.*\ \<.*\>$ ]]; then
+ # the name does not have any quotes
+ sender_email="$(echo "${EMAIL_SENDER}" | cut -d '<' -f 2 | cut -d '>' -f 1)"
+ sender_name="$(echo "${EMAIL_SENDER}" | cut -d '<' -f 1)"
+ fi
+ fi
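+
+ # The branches above accept, respectively (illustrative values):
+ #   EMAIL_SENDER='"Netdata Alerts" <netdata@example.com>'
+ #   EMAIL_SENDER="'Netdata Alerts' <netdata@example.com>"
+ #   EMAIL_SENDER='Netdata Alerts <netdata@example.com>'
+ # In each case sender_email becomes "netdata@example.com" and sender_name
+ # the "Netdata Alerts" display name.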
+
+ [ -n "${sender_email}" ] && opts+=(-f "${sender_email}")
+ [ -n "${sender_name}" ] && opts+=(-F "${sender_name}")
+
+ if [ "${debug}" = "1" ]; then
+ echo >&2 "--- BEGIN sendmail command ---"
+ printf >&2 "%q " "${sendmail}" -t "${opts[@]}"
+ echo >&2
+ echo >&2 "--- END sendmail command ---"
+ fi
+
+ "${sendmail}" -t "${opts[@]}"
+ ret=$?
+
+ if [ ${ret} -eq 0 ]; then
+ info "sent email notification for: ${host} ${chart}.${name} is ${status} to '${to_email}'"
+ return 0
+ else
+ error "failed to send email notification for: ${host} ${chart}.${name} is ${status} to '${to_email}' with error code ${ret}."
+ return 1
+ fi
+ fi
+
+ return 1
+}
+
+# -----------------------------------------------------------------------------
+# pushover sender
+
+send_pushover() {
+ local apptoken="${1}" usertokens="${2}" when="${3}" url="${4}" status="${5}" title="${6}" message="${7}" httpcode sent=0 user priority
+
+ if [ "${SEND_PUSHOVER}" = "YES" ] && [ -n "${apptoken}" ] && [ -n "${usertokens}" ] && [ -n "${title}" ] && [ -n "${message}" ]; then
+
+ # https://pushover.net/api
+ priority=-2
+ case "${status}" in
+ CLEAR) priority=-1 ;; # low priority: no sound or vibration
+ WARNING) priority=0 ;; # normal priority: respect quiet hours
+ CRITICAL) priority=1 ;; # high priority: bypass quiet hours
+ *) priority=-2 ;; # lowest priority: no notification at all
+ esac
+
+ for user in ${usertokens}; do
+ httpcode=$(docurl \
+ --form-string "token=${apptoken}" \
+ --form-string "user=${user}" \
+ --form-string "html=1" \
+ --form-string "title=${title}" \
+ --form-string "message=${message}" \
+ --form-string "timestamp=${when}" \
+ --form-string "url=${url}" \
+ --form-string "url_title=Open netdata dashboard to view the alarm" \
+ --form-string "priority=${priority}" \
+ https://api.pushover.net/1/messages.json)
+
+ if [ "${httpcode}" = "200" ]; then
+ info "sent pushover notification for: ${host} ${chart}.${name} is ${status} to '${user}'"
+ sent=$((sent + 1))
+ else
+ error "failed to send pushover notification for: ${host} ${chart}.${name} is ${status} to '${user}' with HTTP error code ${httpcode}."
+ fi
+ done
+
+ [ ${sent} -gt 0 ] && return 0
+ fi
+
+ return 1
+}
+
+# -----------------------------------------------------------------------------
+# pushbullet sender
+
+send_pushbullet() {
+ local userapikey="${1}" source_device="${2}" recipients="${3}" url="${4}" title="${5}" message="${6}" httpcode sent=0 user
+ if [ "${SEND_PUSHBULLET}" = "YES" ] && [ -n "${userapikey}" ] && [ -n "${recipients}" ] && [ -n "${message}" ] && [ -n "${title}" ]; then
+ #https://docs.pushbullet.com/#create-push
+ for user in ${recipients}; do
+ httpcode=$(docurl \
+ --header "Access-Token: ${userapikey}" \
+ --header 'Content-Type: application/json' \
+ --data-binary @<(
+ cat <<EOF
+ {"title": "${title}",
+ "type": "link",
+ "email": "${user}",
+ "body": "$(echo -n ${message})",
+ "url": "${url}",
+ "source_device_iden": "${source_device}"}
+EOF
+ ) "https://api.pushbullet.com/v2/pushes" -X POST)
+
+ if [ "${httpcode}" = "200" ]; then
+ info "sent pushbullet notification for: ${host} ${chart}.${name} is ${status} to '${user}'"
+ sent=$((sent + 1))
+ else
+ error "failed to send pushbullet notification for: ${host} ${chart}.${name} is ${status} to '${user}' with HTTP error code ${httpcode}."
+ fi
+ done
+
+ [ ${sent} -gt 0 ] && return 0
+ fi
+
+ return 1
+}
+
+# -----------------------------------------------------------------------------
+# kafka sender
+
+send_kafka() {
+ local httpcode sent=0
+ if [ "${SEND_KAFKA}" = "YES" ]; then
+ httpcode=$(docurl -X POST \
+ --data "{host_ip:\"${KAFKA_SENDER_IP}\",when:${when},name:\"${name}\",chart:\"${chart}\",family:\"${family}\",status:\"${status}\",old_status:\"${old_status}\",value:${value},old_value:${old_value},duration:${duration},non_clear_duration:${non_clear_duration},units:\"${units}\",info:\"${info}\"}" \
+ "${KAFKA_URL}")
+
+ if [ "${httpcode}" = "204" ]; then
+ info "sent kafka data for: ${host} ${chart}.${name} is ${status} and ip '${KAFKA_SENDER_IP}'"
+ sent=$((sent + 1))
+ else
+ error "failed to send kafka data for: ${host} ${chart}.${name} is ${status} and ip '${KAFKA_SENDER_IP}' with HTTP error code ${httpcode}."
+ fi
+
+ [ ${sent} -gt 0 ] && return 0
+ fi
+
+ return 1
+}
+
+# -----------------------------------------------------------------------------
+# pagerduty.com sender
+
+send_pd() {
+ local recipients="${1}" sent=0
+ unset t
+ case ${status} in
+ CLEAR) t='resolve' ;;
+ WARNING) t='trigger' ;;
+ CRITICAL) t='trigger' ;;
+ esac
+
+ if [ "${SEND_PD}" = "YES" ] && [ -n "${t}" ]; then
+ for PD_SERVICE_KEY in ${recipients}; do
+ d="${status} ${name} = ${value_string} - ${host}, ${family}"
+ payload="$(
+ cat <<EOF
+ {
+ "service_key": "${PD_SERVICE_KEY}",
+ "event_type": "${t}",
+ "incident_key" : "${alarm_id}",
+ "description": "${d}",
+ "details": {
+ "value_w_units": "${value_string}",
+ "when": "${when}",
+ "duration" : "${duration}",
+ "roles": "${roles}",
+ "alarm_id" : "${alarm_id}",
+ "name" : "${name}",
+ "chart" : "${chart}",
+ "family" : "${family}",
+ "status" : "${status}",
+ "old_status" : "${old_status}",
+ "value" : "${value}",
+ "old_value" : "${old_value}",
+ "src" : "${src}",
+ "non_clear_duration" : "${non_clear_duration}",
+ "units" : "${units}",
+ "info" : "${info}"
+ }
+ }
+EOF
+ )"
+ httpcode=$(docurl -X POST --data "${payload}" "https://events.pagerduty.com/generic/2010-04-15/create_event.json")
+ if [ "${httpcode}" = "200" ]; then
+ info "sent pagerduty notification for: ${host} ${chart}.${name} is ${status}'"
+ sent=$((sent + 1))
+ else
+ error "failed to send pagerduty notification for: ${host} ${chart}.${name} is ${status}, with HTTP error code ${httpcode}."
+ fi
+ done
+
+ [ ${sent} -gt 0 ] && return 0
+ fi
+
+ return 1
+}
+
+# -----------------------------------------------------------------------------
+# twilio sender
+
+send_twilio() {
+ local accountsid="${1}" accounttoken="${2}" twilionumber="${3}" recipients="${4}" title="${5}" message="${6}" httpcode sent=0 user
+ if [ "${SEND_TWILIO}" = "YES" ] && [ -n "${accountsid}" ] && [ -n "${accounttoken}" ] && [ -n "${twilionumber}" ] && [ -n "${recipients}" ] && [ -n "${message}" ] && [ -n "${title}" ]; then
+ #https://www.twilio.com/packages/labs/code/bash/twilio-sms
+ for user in ${recipients}; do
+ httpcode=$(docurl -X POST \
+ --data-urlencode "From=${twilionumber}" \
+ --data-urlencode "To=${user}" \
+ --data-urlencode "Body=${title} ${message}" \
+ -u "${accountsid}:${accounttoken}" \
+ "https://api.twilio.com/2010-04-01/Accounts/${accountsid}/Messages.json")
+
+ if [ "${httpcode}" = "201" ]; then
+ info "sent Twilio SMS for: ${host} ${chart}.${name} is ${status} to '${user}'"
+ sent=$((sent + 1))
+ else
+ error "failed to send Twilio SMS for: ${host} ${chart}.${name} is ${status} to '${user}' with HTTP error code ${httpcode}."
+ fi
+ done
+
+ [ ${sent} -gt 0 ] && return 0
+ fi
+
+ return 1
+}
+
+# -----------------------------------------------------------------------------
+# hipchat sender
+
+send_hipchat() {
+ local authtoken="${1}" recipients="${2}" message="${3}" httpcode sent=0 room color msg_format notify
+
+ # remove <small></small> from the message
+ message="${message//<small>/}"
+ message="${message//<\/small>/}"
+
+ if [ "${SEND_HIPCHAT}" = "YES" ] && [ -n "${HIPCHAT_SERVER}" ] && [ -n "${authtoken}" ] && [ -n "${recipients}" ] && [ -n "${message}" ]; then
+ # Valid values: html, text.
+ # Defaults to 'html'.
+ msg_format="html"
+
+ # Background color for message. Valid values: yellow, green, red, purple, gray, random. Defaults to 'yellow'.
+ case "${status}" in
+ WARNING) color="yellow" ;;
+ CRITICAL) color="red" ;;
+ CLEAR) color="green" ;;
+ *) color="gray" ;;
+ esac
+
+ # Whether this message should trigger a user notification (change the tab color, play a sound, notify mobile phones, etc).
+ # Each recipient's notification preferences are taken into account.
+ # Defaults to false.
+ notify="true"
+
+ for room in ${recipients}; do
+ httpcode=$(docurl -X POST \
+ -H "Content-type: application/json" \
+ -H "Authorization: Bearer ${authtoken}" \
+ -d "{\"color\": \"${color}\", \"from\": \"${host}\", \"message_format\": \"${msg_format}\", \"message\": \"${message}\", \"notify\": \"${notify}\"}" \
+ "https://${HIPCHAT_SERVER}/v2/room/${room}/notification")
+
+ if [ "${httpcode}" = "204" ]; then
+ info "sent HipChat notification for: ${host} ${chart}.${name} is ${status} to '${room}'"
+ sent=$((sent + 1))
+ else
+ error "failed to send HipChat notification for: ${host} ${chart}.${name} is ${status} to '${room}' with HTTP error code ${httpcode}."
+ fi
+ done
+
+ [ ${sent} -gt 0 ] && return 0
+ fi
+
+ return 1
+}
+
+# -----------------------------------------------------------------------------
+# messagebird sender
+
+send_messagebird() {
+ local accesskey="${1}" messagebirdnumber="${2}" recipients="${3}" title="${4}" message="${5}" httpcode sent=0 user
+ if [ "${SEND_MESSAGEBIRD}" = "YES" ] && [ -n "${accesskey}" ] && [ -n "${messagebirdnumber}" ] && [ -n "${recipients}" ] && [ -n "${message}" ] && [ -n "${title}" ]; then
+ #https://developers.messagebird.com/docs/messaging
+ for user in ${recipients}; do
+ httpcode=$(docurl -X POST \
+ --data-urlencode "originator=${messagebirdnumber}" \
+ --data-urlencode "recipients=${user}" \
+ --data-urlencode "body=${title} ${message}" \
+ --data-urlencode "datacoding=auto" \
+ -H "Authorization: AccessKey ${accesskey}" \
+ "https://rest.messagebird.com/messages")
+
+ if [ "${httpcode}" = "201" ]; then
+ info "sent Messagebird SMS for: ${host} ${chart}.${name} is ${status} to '${user}'"
+ sent=$((sent + 1))
+ else
+ error "failed to send Messagebird SMS for: ${host} ${chart}.${name} is ${status} to '${user}' with HTTP error code ${httpcode}."
+ fi
+ done
+
+ [ ${sent} -gt 0 ] && return 0
+ fi
+
+ return 1
+}
+
+# -----------------------------------------------------------------------------
+# kavenegar sender
+
+send_kavenegar() {
+ local API_KEY="${1}" kavenegarsender="${2}" recipients="${3}" title="${4}" message="${5}" httpcode sent=0 user
+ if [ "${SEND_KAVENEGAR}" = "YES" ] && [ -n "${API_KEY}" ] && [ -n "${kavenegarsender}" ] && [ -n "${recipients}" ] && [ -n "${message}" ] && [ -n "${title}" ]; then
+ # http://api.kavenegar.com/v1/{API-KEY}/sms/send.json
+ for user in ${recipients}; do
+ httpcode=$(docurl -X POST http://api.kavenegar.com/v1/${API_KEY}/sms/send.json \
+ --data-urlencode "sender=${kavenegarsender}" \
+ --data-urlencode "receptor=${user}" \
+ --data-urlencode "message=${title} ${message}")
+
+ if [ "${httpcode}" = "200" ]; then
+ info "sent Kavenegar SMS for: ${host} ${chart}.${name} is ${status} to '${user}'"
+ sent=$((sent + 1))
+ else
+ error "failed to send Kavenegar SMS for: ${host} ${chart}.${name} is ${status} to '${user}' with HTTP error code ${httpcode}."
+ fi
+ done
+
+ [ ${sent} -gt 0 ] && return 0
+ fi
+
+ return 1
+}
+
+# -----------------------------------------------------------------------------
+# telegram sender
+
+send_telegram() {
+ local bottoken="${1}" chatids="${2}" message="${3}" httpcode sent=0 chatid emoji disableNotification=""
+
+ if [ "${status}" = "CLEAR" ]; then disableNotification="--data-urlencode disable_notification=true"; fi
+
+ case "${status}" in
+ WARNING) emoji="⚠️" ;;
+ CRITICAL) emoji="🔴" ;;
+ CLEAR) emoji="✅" ;;
+ *) emoji="⚪️" ;;
+ esac
+
+ if [ "${SEND_TELEGRAM}" = "YES" ] && [ -n "${bottoken}" ] && [ -n "${chatids}" ] && [ -n "${message}" ]; then
+ for chatid in ${chatids}; do
+ # https://core.telegram.org/bots/api#sendmessage
+ httpcode=$(docurl ${disableNotification} \
+ --data-urlencode "parse_mode=HTML" \
+ --data-urlencode "disable_web_page_preview=true" \
+ --data-urlencode "text=${emoji} ${message}" \
+ "https://api.telegram.org/bot${bottoken}/sendMessage?chat_id=${chatid}")
+
+ if [ "${httpcode}" = "200" ]; then
+ info "sent telegram notification for: ${host} ${chart}.${name} is ${status} to '${chatid}'"
+ sent=$((sent + 1))
+ elif [ "${httpcode}" = "401" ]; then
+ error "failed to send telegram notification for: ${host} ${chart}.${name} is ${status} to '${chatid}': Wrong bot token."
+ else
+ error "failed to send telegram notification for: ${host} ${chart}.${name} is ${status} to '${chatid}' with HTTP error code ${httpcode}."
+ fi
+ done
+
+ [ ${sent} -gt 0 ] && return 0
+ fi
+
+ return 1
+}
+
+# -----------------------------------------------------------------------------
+# Microsoft Teams sender
+
+send_msteam() {
+
+ local webhook="${1}" channels="${2}" httpcode sent=0 channel color payload icon channel_webhook
+
+ [ "${SEND_MSTEAM}" != "YES" ] && return 1
+
+ case "${status}" in
+ WARNING) icon="${MSTEAM_ICON_WARNING}" && color="${MSTEAM_COLOR_WARNING}" ;;
+ CRITICAL) icon="${MSTEAM_ICON_CRITICAL}" && color="${MSTEAM_COLOR_CRITICAL}" ;;
+ CLEAR) icon="${MSTEAM_ICON_CLEAR}" && color="${MSTEAM_COLOR_CLEAR}" ;;
+ *) icon="${MSTEAM_ICON_DEFAULT}" && color="${MSTEAM_COLOR_DEFAULT}" ;;
+ esac
+
+ for channel in ${channels}; do
+ ## More details about the payload syntax options are available here: https://docs.microsoft.com/en-us/outlook/actionable-messages/message-card-reference
+ ## Online designer: https://acdesignerbeta.azurewebsites.net/
+ payload="$(
+ cat <<EOF
+ {
+ "@context": "http://schema.org/extensions",
+ "@type": "MessageCard",
+ "themeColor": "${color}",
+ "title": "$icon Alert ${status} from netdata for ${host}",
+ "text": "${host} ${status_message}, ${chart} (_${family}_), *${alarm}*",
+ "potentialAction": [
+ {
+ "@type": "OpenUri",
+ "name": "Netdata",
+ "targets": [
+ { "os": "default", "uri": "${goto_url}" }
+ ]
+ }
+ ]
+ }
+EOF
+ )"
+
+ # Replace the CHANNEL placeholder in the webhook URL with the MS Teams
+ # channel name from the config file. Use a per-channel copy so that each
+ # channel in the list gets its own substitution.
+ channel_webhook="${webhook//CHANNEL/${channel}}"
+
+ httpcode=$(docurl -H "Content-Type: application/json" -d "${payload}" "${channel_webhook}")
+
+ if [ "${httpcode}" = "200" ]; then
+ info "sent Microsoft Teams notification for: ${host} ${chart}.${name} is ${status} to '${channel_webhook}'"
+ sent=$((sent + 1))
+ else
+ error "failed to send Microsoft Teams notification for: ${host} ${chart}.${name} is ${status} to '${channel_webhook}', with HTTP error code ${httpcode}."
+ fi
+ done
+
+ [ ${sent} -gt 0 ] && return 0
+
+ return 1
+}
+
+# -----------------------------------------------------------------------------
+# slack sender
+
+send_slack() {
+ local webhook="${1}" channels="${2}" httpcode sent=0 channel color payload
+
+ [ "${SEND_SLACK}" != "YES" ] && return 1
+
+ case "${status}" in
+ WARNING) color="warning" ;;
+ CRITICAL) color="danger" ;;
+ CLEAR) color="good" ;;
+ *) color="#777777" ;;
+ esac
+
+ for channel in ${channels}; do
+ # Default entry in the recipient is without a hash in front (backwards-compatible). Accept specification of channel or user.
+ if [ "${channel::1}" != "#" ] && [ "${channel::1}" != "@" ]; then channel="#$channel"; fi
+
+ # If channel is equal to "#" then do not send the channel attribute at all. Slack also defines channels and users in webhooks.
+ if [ "${channel}" = "#" ]; then
+ ch=""
+ chstr="without specifying a channel"
+ else
+ ch="\"channel\": \"${channel}\","
+ chstr="to '${channel}'"
+ fi
+
+ payload="$(
+ cat <<EOF
+ {
+ $ch
+ "username": "netdata on ${host}",
+ "icon_url": "${images_base_url}/images/banner-icon-144x144.png",
+ "text": "${host} ${status_message}, \`${chart}\` (_${family}_), *${alarm}*",
+ "attachments": [
+ {
+ "fallback": "${alarm} - ${chart} (${family}) - ${info}",
+ "color": "${color}",
+ "title": "${alarm}",
+ "title_link": "${goto_url}",
+ "text": "${info}",
+ "fields": [
+ {
+ "title": "${chart}",
+ "short": true
+ },
+ {
+ "title": "${family}",
+ "short": true
+ }
+ ],
+ "thumb_url": "${image}",
+ "footer": "by ${host}",
+ "ts": ${when}
+ }
+ ]
+ }
+EOF
+ )"
+
+ httpcode=$(docurl -X POST --data-urlencode "payload=${payload}" "${webhook}")
+ if [ "${httpcode}" = "200" ]; then
+ info "sent slack notification for: ${host} ${chart}.${name} is ${status} ${chstr}"
+ sent=$((sent + 1))
+ else
+ error "failed to send slack notification for: ${host} ${chart}.${name} is ${status} ${chstr}, with HTTP error code ${httpcode}."
+ fi
+ done
+
+ [ ${sent} -gt 0 ] && return 0
+
+ return 1
+}
+
+# -----------------------------------------------------------------------------
+# rocketchat sender
+
+send_rocketchat() {
+ local webhook="${1}" channels="${2}" httpcode sent=0 channel color payload
+
+ [ "${SEND_ROCKETCHAT}" != "YES" ] && return 1
+
+ case "${status}" in
+ WARNING) color="warning" ;;
+ CRITICAL) color="danger" ;;
+ CLEAR) color="good" ;;
+ *) color="#777777" ;;
+ esac
+
+ for channel in ${channels}; do
+ payload="$(
+ cat <<EOF
+ {
+ "channel": "#${channel}",
+ "alias": "netdata on ${host}",
+ "avatar": "${images_base_url}/images/banner-icon-144x144.png",
+ "text": "${host} ${status_message}, \`${chart}\` (_${family}_), *${alarm}*",
+ "attachments": [
+ {
+ "color": "${color}",
+ "title": "${alarm}",
+ "title_link": "${goto_url}",
+ "text": "${info}",
+ "fields": [
+ {
+ "title": "${chart}",
+ "short": true,
+ "value": "chart"
+ },
+ {
+ "title": "${family}",
+ "short": true,
+ "value": "family"
+ }
+ ],
+ "thumb_url": "${image}",
+ "ts": "${when}"
+ }
+ ]
+ }
+EOF
+ )"
+
+ httpcode=$(docurl -X POST --data-urlencode "payload=${payload}" "${webhook}")
+ if [ "${httpcode}" = "200" ]; then
+ info "sent rocketchat notification for: ${host} ${chart}.${name} is ${status} to '${channel}'"
+ sent=$((sent + 1))
+ else
+ error "failed to send rocketchat notification for: ${host} ${chart}.${name} is ${status} to '${channel}', with HTTP error code ${httpcode}."
+ fi
+ done
+
+ [ ${sent} -gt 0 ] && return 0
+
+ return 1
+}
+
+# -----------------------------------------------------------------------------
+# alerta sender
+
+send_alerta() {
+ local webhook="${1}" channels="${2}" httpcode sent=0 channel severity resource event payload auth
+
+ [ "${SEND_ALERTA}" != "YES" ] && return 1
+
+ case "${status}" in
+ CRITICAL) severity="critical" ;;
+ WARNING) severity="warning" ;;
+ CLEAR) severity="cleared" ;;
+ *) severity="indeterminate" ;;
+ esac
+
+ if [[ ${chart} == httpcheck* ]]; then
+ resource=$chart
+ event=$name
+ else
+ resource="${host}:${family}"
+ event="${chart}.${name}"
+ fi
+
+ for channel in ${channels}; do
+ payload="$(
+ cat <<EOF
+ {
+ "resource": "${resource}",
+ "event": "${event}",
+ "environment": "${channel}",
+ "severity": "${severity}",
+ "service": ["Netdata"],
+ "group": "Performance",
+ "value": "${value_string}",
+ "text": "${info}",
+ "tags": ["alarm_id:${alarm_id}"],
+ "attributes": {
+ "roles": "${roles}",
+ "name": "${name}",
+ "chart": "${chart}",
+ "family": "${family}",
+ "source": "${src}",
+ "moreInfo": "<a href=\"${goto_url}\">View Netdata</a>"
+ },
+ "origin": "netdata/${host}",
+ "type": "netdataAlarm",
+ "rawData": "${BASH_ARGV[@]}"
+ }
+EOF
+ )"
+
+ if [ -n "${ALERTA_API_KEY}" ]; then
+ auth="Key ${ALERTA_API_KEY}"
+ fi
+
+ httpcode=$(docurl -X POST "${webhook}/alert" -H "Content-Type: application/json" -H "Authorization: $auth" --data "${payload}")
+
+ if [ "${httpcode}" = "200" ] || [ "${httpcode}" = "201" ]; then
+ info "sent alerta notification for: ${host} ${chart}.${name} is ${status} to '${channel}'"
+ sent=$((sent + 1))
+ elif [ "${httpcode}" = "202" ]; then
+ info "suppressed alerta notification for: ${host} ${chart}.${name} is ${status} to '${channel}'"
+ else
+ error "failed to send alerta notification for: ${host} ${chart}.${name} is ${status} to '${channel}', with HTTP error code ${httpcode}."
+ fi
+ done
+
+ [ ${sent} -gt 0 ] && return 0
+
+ return 1
+}
+
+# -----------------------------------------------------------------------------
+# flock sender
+
+send_flock() {
+ local webhook="${1}" channels="${2}" httpcode sent=0 channel color payload
+
+ [ "${SEND_FLOCK}" != "YES" ] && return 1
+
+ case "${status}" in
+ WARNING) color="warning" ;;
+ CRITICAL) color="danger" ;;
+ CLEAR) color="good" ;;
+ *) color="#777777" ;;
+ esac
+
+ for channel in ${channels}; do
+ httpcode=$(docurl -X POST "${webhook}" -H "Content-Type: application/json" -d "{
+ \"sendAs\": {
+ \"name\" : \"netdata on ${host}\",
+ \"profileImage\" : \"${images_base_url}/images/banner-icon-144x144.png\"
+ },
+ \"text\": \"${host} *${status_message}*\",
+ \"timestamp\": \"${when}\",
+ \"attachments\": [
+ {
+ \"description\": \"${chart} (${family}) - ${info}\",
+ \"color\": \"${color}\",
+ \"title\": \"${alarm}\",
+ \"url\": \"${goto_url}\",
+ \"text\": \"${info}\",
+ \"views\": {
+ \"image\": {
+ \"original\": { \"src\": \"${image}\", \"width\": 400, \"height\": 400 },
+ \"thumbnail\": { \"src\": \"${image}\", \"width\": 50, \"height\": 50 },
+ \"filename\": \"${image}\"
+ }
+ }
+ }
+ ]
+ }")
+ if [ "${httpcode}" = "200" ]; then
+ info "sent flock notification for: ${host} ${chart}.${name} is ${status} to '${channel}'"
+ sent=$((sent + 1))
+ else
+ error "failed to send flock notification for: ${host} ${chart}.${name} is ${status} to '${channel}', with HTTP error code ${httpcode}."
+ fi
+ done
+
+ [ ${sent} -gt 0 ] && return 0
+
+ return 1
+}
+
+# -----------------------------------------------------------------------------
+# discord sender
+
+send_discord() {
+ local webhook="${1}/slack" channels="${2}" httpcode sent=0 channel color payload username
+
+ [ "${SEND_DISCORD}" != "YES" ] && return 1
+
+ case "${status}" in
+ WARNING) color="warning" ;;
+ CRITICAL) color="danger" ;;
+ CLEAR) color="good" ;;
+ *) color="#777777" ;;
+ esac
+
+ for channel in ${channels}; do
+ username="netdata on ${host}"
+ [ ${#username} -gt 32 ] && username="${username:0:29}..."
+
+ payload="$(
+ cat <<EOF
+ {
+ "channel": "#${channel}",
+ "username": "${username}",
+ "text": "${host} ${status_message}, \`${chart}\` (_${family}_), *${alarm}*",
+ "icon_url": "${images_base_url}/images/banner-icon-144x144.png",
+ "attachments": [
+ {
+ "color": "${color}",
+ "title": "${alarm}",
+ "title_link": "${goto_url}",
+ "text": "${info}",
+ "fields": [
+ {
+ "title": "${chart}",
+ "value": "${family}"
+ }
+ ],
+ "thumb_url": "${image}",
+ "footer_icon": "${images_base_url}/images/banner-icon-144x144.png",
+ "footer": "${host}",
+ "ts": ${when}
+ }
+ ]
+ }
+EOF
+ )"
+
+ httpcode=$(docurl -X POST --data-urlencode "payload=${payload}" "${webhook}")
+ if [ "${httpcode}" = "200" ]; then
+ info "sent discord notification for: ${host} ${chart}.${name} is ${status} to '${channel}'"
+ sent=$((sent + 1))
+ else
+ error "failed to send discord notification for: ${host} ${chart}.${name} is ${status} to '${channel}', with HTTP error code ${httpcode}."
+ fi
+ done
+
+ [ ${sent} -gt 0 ] && return 0
+
+ return 1
+}
+
+# -----------------------------------------------------------------------------
+# fleep sender
+
+send_fleep() {
+ local httpcode sent=0 webhooks="${1}" data message
+ if [ "${SEND_FLEEP}" = "YES" ]; then
+ message="${host} ${status_message}, \`${chart}\` (${family}), *${alarm}*\\n${info}"
+
+ for hook in ${webhooks}; do
+ data="{ "
+ data="${data} 'message': '${message}', "
+ data="${data} 'user': '${FLEEP_SENDER}' "
+ data="${data} }"
+
+ httpcode=$(docurl -X POST --data "${data}" "https://fleep.io/hook/${hook}")
+
+ if [ "${httpcode}" = "200" ]; then
+ info "sent fleep data for: ${host} ${chart}.${name} is ${status} and user '${FLEEP_SENDER}'"
+ sent=$((sent + 1))
+ else
+ error "failed to send fleep data for: ${host} ${chart}.${name} is ${status} and user '${FLEEP_SENDER}' with HTTP error code ${httpcode}."
+ fi
+ done
+
+ [ ${sent} -gt 0 ] && return 0
+ fi
+
+ return 1
+}
+
+# -----------------------------------------------------------------------------
+# Prowl sender
+
+send_prowl() {
+ local httpcode sent=0 data message keys prio=0 alarm_url event
+ if [ "${SEND_PROWL}" = "YES" ]; then
+ message="$(urlencode "${host} ${status_message}, \`${chart}\` (${family}), *${alarm}*\\n${info}")"
+ message="description=${message}"
+ keys="$(urlencode "$(echo "${1}" | tr ' ' ,)")"
+ keys="apikey=${keys}"
+ app="application=Netdata"
+
+ case "${status}" in
+ CRITICAL)
+ prio=2
+ ;;
+ WARNING)
+ prio=1
+ ;;
+ esac
+ prio="priority=${prio}"
+
+ alarm_url="$(urlencode ${goto_url})"
+ alarm_url="url=${alarm_url}"
+ event="$(urlencode "${host} ${status_message}")"
+ event="event=${event}"
+
+ data="${keys}&${prio}&${alarm_url}&${app}&${event}&${message}"
+
+ httpcode=$(docurl -X POST --data "${data}" "https://api.prowlapp.com/publicapi/add")
+
+ if [ "${httpcode}" = "200" ]; then
+ info "sent prowl data for: ${host} ${chart}.${name} is ${status}"
+ sent=1
+ else
+ error "failed to send prowl data for: ${host} ${chart}.${name} is ${status} with with error code ${httpcode}."
+ fi
+
+ [ ${sent} -gt 0 ] && return 0
+ fi
+
+ return 1
+}
+
+# -----------------------------------------------------------------------------
+# irc sender
+
+send_irc() {
+ local NICKNAME="${1}" REALNAME="${2}" CHANNELS="${3}" NETWORK="${4}" SERVERNAME="${5}" MESSAGE="${6}" sent=0 channel color send_alarm reply_codes error
+
+ if [ "${SEND_IRC}" = "YES" ] && [ -n "${NICKNAME}" ] && [ -n "${REALNAME}" ] && [ -n "${CHANNELS}" ] && [ -n "${NETWORK}" ] && [ -n "${SERVERNAME}" ]; then
+ case "${status}" in
+ WARNING) color="warning" ;;
+ CRITICAL) color="danger" ;;
+ CLEAR) color="good" ;;
+ *) color="#777777" ;;
+ esac
+
+ for CHANNEL in ${CHANNELS}; do
+ error=0
+ send_alarm=$(echo -e "USER ${NICKNAME} guest ${REALNAME} ${SERVERNAME}\\nNICK ${NICKNAME}\\nJOIN ${CHANNEL}\\nPRIVMSG ${CHANNEL} :${MESSAGE}\\nQUIT\\n" | nc "${NETWORK}" 6667)
+ reply_codes=$(echo "${send_alarm}" | cut -d ' ' -f 2 | grep -o '[0-9]*')
+ for code in ${reply_codes}; do
+ if [ "${code}" -ge 400 ] && [ "${code}" -le 599 ]; then
+ error=1
+ break
+ fi
+ done
+
+ if [ "${error}" -eq 0 ]; then
+ info "sent irc notification for: ${host} ${chart}.${name} is ${status} to '${CHANNEL}'"
+ sent=$((sent + 1))
+ else
+ error "failed to send irc notification for: ${host} ${chart}.${name} is ${status} to '${CHANNEL}', with error code ${code}."
+ fi
+ done
+ fi
+
+ [ ${sent} -gt 0 ] && return 0
+
+ return 1
+}
+
+# -----------------------------------------------------------------------------
+# Amazon SNS sender
+
+send_awssns() {
+ local targets="${1}" message='' sent=0 region=''
+ local default_format="${status} on ${host} at ${date}: ${chart} ${value_string}"
+
+ [ "${SEND_AWSSNS}" = "YES" ] || return 1
+
+ message=${AWSSNS_MESSAGE_FORMAT:-${default_format}}
+
+ for target in ${targets}; do
+ # Extract the region from the target ARN. We need to explicitly specify the region so that it matches up correctly.
+ region="$(echo ${target} | cut -f 4 -d ':')"
+ if ${aws} sns publish --region "${region}" --subject "${host} ${status_message} - ${name//_/ } - ${chart}" --message "${message}" --target-arn ${target} &>/dev/null; then
+ info "sent Amazon SNS notification for: ${host} ${chart}.${name} is ${status} to '${target}'"
+ sent=$((sent + 1))
+ else
+ error "failed to send Amazon SNS notification for: ${host} ${chart}.${name} is ${status} to '${target}'"
+ fi
+ done
+
+ [ ${sent} -gt 0 ] && return 0
+
+ return 1
+}
+
+# -----------------------------------------------------------------------------
+# syslog sender
+
+send_syslog() {
+ local facility=${SYSLOG_FACILITY:-"local6"} level='info' targets="${1}"
+ local priority='' message='' host='' port='' prefix=''
+ local temp1='' temp2=''
+
+ [ "${SEND_SYSLOG}" = "YES" ] || return 1
+
+ if [ "${status}" = "CRITICAL" ]; then
+ level='crit'
+ elif [ "${status}" = "WARNING" ]; then
+ level='warning'
+ fi
+
+ for target in ${targets}; do
+ priority="${facility}.${level}"
+ message=''
+ host=''
+ port=''
+ prefix=''
+ temp1=''
+ temp2=''
+
+ prefix=$(echo ${target} | cut -d '/' -f 2)
+ temp1=$(echo ${target} | cut -d '/' -f 1)
+
+ if [ "${prefix}" != "${temp1}" ]; then
+ if (echo ${temp1} | grep -q '@'); then
+ temp2=$(echo ${temp1} | cut -d '@' -f 1)
+ host=$(echo ${temp1} | cut -d '@' -f 2)
+
+ if [ "${temp2}" != "${host}" ]; then
+ priority=${temp2}
+ fi
+
+ port=$(echo ${host} | rev | cut -d ':' -f 1 | rev)
+
+ if (echo ${host} | grep -E -q '\[.*\]'); then
+ if (echo ${port} | grep -q ']'); then
+ port=''
+ else
+ host=$(echo ${host} | rev | cut -d ':' -f 2- | rev)
+ fi
+ else
+ if [ "${port}" = "${host}" ]; then
+ port=''
+ else
+ host=$(echo ${host} | cut -d ':' -f 1)
+ fi
+ fi
+ else
+ priority=${temp1}
+ fi
+ fi
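+
+ # Example: a target of "local0.err@loghost:514/netdata" parses to
+ # priority="local0.err", host="loghost", port="514" and prefix="netdata",
+ # while a bare "netdata" target keeps the default facility and level and
+ # logs locally with prefix="netdata".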
+
+ message="${prefix} ${status} on ${host} at ${date}: ${chart} ${value_string}"
+
+ if [ -n "${host}" ]; then
+ logger_options="${logger_options} -n ${host}"
+ if [ -n "${port}" ]; then
+ logger_options="${logger_options} -P ${port}"
+ fi
+ fi
+
+ ${logger} -p ${priority} ${logger_options} "${message}"
+ done
+
+ return $?
+}
+
+# -----------------------------------------------------------------------------
+# SMS sender
+
+send_sms() {
+ local recipients="${1}" errcode errmessage sent=0
+
+ # Human readable SMS
+ local msg="${host} ${status_message}: ${chart} (${family}), ${alarm}"
+
+ # limit it to 160 characters
+ msg="${msg:0:160}"
+
+ if [ "${SEND_SMS}" = "YES" ] && [ -n "${sendsms}" ] && [ -n "${recipients}" ] && [ -n "${msg}" ]; then
+ # http://api.kavenegar.com/v1/{API-KEY}/sms/send.json
+ for phone in ${recipients}; do
+ errmessage=$($sendsms $phone "$msg" 2>&1)
+ errcode=$?
+ if [ ${errcode} -eq 0 ]; then
+ info "sent smstools3 SMS for: ${host} ${chart}.${name} is ${status} to '${user}'"
+ sent=$((sent + 1))
+ else
+ error "failed to send smstools3 SMS for: ${host} ${chart}.${name} is ${status} to '${user}' with error code ${errcode}: ${errmessage}."
+ fi
+ done
+
+ [ ${sent} -gt 0 ] && return 0
+ fi
+
+ return 1
+}
+
+# -----------------------------------------------------------------------------
+# prepare the content of the notification
+
+# the url to send the user on click
+urlencode "${args_host}" >/dev/null
+url_host="${REPLY}"
+urlencode "${chart}" >/dev/null
+url_chart="${REPLY}"
+urlencode "${family}" >/dev/null
+url_family="${REPLY}"
+urlencode "${name}" >/dev/null
+url_name="${REPLY}"
+
+redirect_params="host=${url_host}&chart=${url_chart}&family=${url_family}&alarm=${url_name}&alarm_unique_id=${unique_id}&alarm_id=${alarm_id}&alarm_event_id=${event_id}"
+GOTOCLOUD=0
+
+if [ "${NETDATA_REGISTRY_URL}" == "https://registry.my-netdata.io" ]; then
+ if [ -z "${NETDATA_REGISTRY_UNIQUE_ID}" ]; then
+ if [ -f "/var/lib/netdata/registry/netdata.public.unique.id" ]; then
+ NETDATA_REGISTRY_UNIQUE_ID="$(cat "/var/lib/netdata/registry/netdata.public.unique.id")"
+ fi
+ fi
+ if [ -n "${NETDATA_REGISTRY_UNIQUE_ID}" ]; then
+ GOTOCLOUD=1
+ fi
+fi
+
+if [ ${GOTOCLOUD} -eq 0 ]; then
+ goto_url="${NETDATA_REGISTRY_URL}/goto-host-from-alarm.html?${redirect_params}"
+else
+ goto_url="${NETDATA_REGISTRY_CLOUD_BASE_URL}/alarms/redirect?agentID=${NETDATA_REGISTRY_UNIQUE_ID}&${redirect_params}"
+fi
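+
+# Example (illustrative): without cloud, goto_url ends up looking like
+# ${NETDATA_REGISTRY_URL}/goto-host-from-alarm.html?host=myhost&chart=system.cpu&...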
+
+# the severity of the alarm
+severity="${status}"
+
+# the time the alarm was raised
+duration4human ${duration} >/dev/null
+duration_txt="${REPLY}"
+duration4human ${non_clear_duration} >/dev/null
+non_clear_duration_txt="${REPLY}"
+raised_for="(was ${old_status,,} for ${duration_txt})"
+
+# the key status message
+status_message="status unknown"
+
+# the color of the alarm
+color="grey"
+
+# the alarm value
+alarm="${name//_/ } = ${value_string}"
+
+# the image of the alarm
+image="${images_base_url}/images/banner-icon-144x144.png"
+
+# prepare the title based on status
+case "${status}" in
+CRITICAL)
+ image="${images_base_url}/images/alert-128-red.png"
+ status_message="is critical"
+ color="#ca414b"
+ ;;
+
+WARNING)
+ image="${images_base_url}/images/alert-128-orange.png"
+ status_message="needs attention"
+ color="#ffc107"
+ ;;
+
+CLEAR)
+ image="${images_base_url}/images/check-mark-2-128-green.png"
+ status_message="recovered"
+ color="#77ca6d"
+ ;;
+esac
+
+if [ "${status}" = "CLEAR" ]; then
+ severity="Recovered from ${old_status}"
+ if [ ${non_clear_duration} -gt ${duration} ]; then
+ raised_for="(alarm was raised for ${non_clear_duration_txt})"
+ fi
+
+ # don't show the value when the status is CLEAR
+ # for certain alarms, this value might not have any meaning
+ alarm="${name//_/ } ${raised_for}"
+
+elif { [ "${old_status}" = "WARNING" ] && [ "${status}" = "CRITICAL" ]; }; then
+ severity="Escalated to ${status}"
+ if [ ${non_clear_duration} -gt ${duration} ]; then
+ raised_for="(alarm is raised for ${non_clear_duration_txt})"
+ fi
+
+elif { [ "${old_status}" = "CRITICAL" ] && [ "${status}" = "WARNING" ]; }; then
+ severity="Demoted to ${status}"
+ if [ ${non_clear_duration} -gt ${duration} ]; then
+ raised_for="(alarm is raised for ${non_clear_duration_txt})"
+ fi
+
+else
+ raised_for=
+fi
+
+# prepare HTML versions of elements
+info_html=
+[ -n "${info}" ] && info_html=" <small><br/>${info}</small>"
+
+raised_for_html=
+[ -n "${raised_for}" ] && raised_for_html="<br/><small>${raised_for}</small>"
+
+# -----------------------------------------------------------------------------
+# send the slack notification
+
+# slack aggregates posts from the same username
+# so we use "${host} ${status}" as the bot username, to make them distinct
+
+send_slack "${SLACK_WEBHOOK_URL}" "${to_slack}"
+SENT_SLACK=$?
+
+# -----------------------------------------------------------------------------
+# send the Microsoft Teams notification
+
+# Microsoft Teams aggregates posts from the same username
+# so we use "${host} ${status}" as the bot username, to make them distinct
+
+send_msteam "${MSTEAM_WEBHOOK_URL}" "${to_msteam}"
+SENT_MSTEAM=$?
+
+# -----------------------------------------------------------------------------
+# send the rocketchat notification
+
+# rocketchat aggregates posts from the same username
+# so we use "${host} ${status}" as the bot username, to make them distinct
+
+send_rocketchat "${ROCKETCHAT_WEBHOOK_URL}" "${to_rocketchat}"
+SENT_ROCKETCHAT=$?
+
+# -----------------------------------------------------------------------------
+# send the alerta notification
+
+# alerta aggregates posts from the same username
+# so we use "${host} ${status}" as the bot username, to keep them distinct
+
+send_alerta "${ALERTA_WEBHOOK_URL}" "${to_alerta}"
+SENT_ALERTA=$?
+
+# -----------------------------------------------------------------------------
+# send the flock notification
+
+# flock aggregates posts from the same username
+# so we use "${host} ${status}" as the bot username, to keep them distinct
+
+send_flock "${FLOCK_WEBHOOK_URL}" "${to_flock}"
+SENT_FLOCK=$?
+
+# -----------------------------------------------------------------------------
+# send the discord notification
+
+# discord aggregates posts from the same username
+# so we use "${host} ${status}" as the bot username, to keep them distinct
+
+send_discord "${DISCORD_WEBHOOK_URL}" "${to_discord}"
+SENT_DISCORD=$?
+
+# -----------------------------------------------------------------------------
+# send the pushover notification
+
+send_pushover "${PUSHOVER_APP_TOKEN}" "${to_pushover}" "${when}" "${goto_url}" "${status}" "${host} ${status_message} - ${name//_/ } - ${chart}" "
+<font color=\"${color}\"><b>${alarm}</b></font>${info_html}<br/>&nbsp;
+<small><b>${chart}</b><br/>Chart<br/>&nbsp;</small>
+<small><b>${family}</b><br/>Family<br/>&nbsp;</small>
+<small><b>${severity}</b><br/>Severity<br/>&nbsp;</small>
+<small><b>${date}${raised_for_html}</b><br/>Time<br/>&nbsp;</small>
+<a href=\"${goto_url}\">View Netdata</a><br/>&nbsp;
+<small><small>The source of this alarm is line ${src}</small></small>
+"
+
+SENT_PUSHOVER=$?
+
+# -----------------------------------------------------------------------------
+# send the pushbullet notification
+
+send_pushbullet "${PUSHBULLET_ACCESS_TOKEN}" "${PUSHBULLET_SOURCE_DEVICE}" "${to_pushbullet}" "${goto_url}" "${host} ${status_message} - ${name//_/ } - ${chart}" "${alarm}\\n
+Severity: ${severity}\\n
+Chart: ${chart}\\n
+Family: ${family}\\n
+${date}\\n
+The source of this alarm is line ${src}"
+
+SENT_PUSHBULLET=$?
+
+# -----------------------------------------------------------------------------
+# send the twilio SMS
+
+send_twilio "${TWILIO_ACCOUNT_SID}" "${TWILIO_ACCOUNT_TOKEN}" "${TWILIO_NUMBER}" "${to_twilio}" "${host} ${status_message} - ${name//_/ } - ${chart}" "${alarm}
+Severity: ${severity}
+Chart: ${chart}
+Family: ${family}
+${info}"
+
+SENT_TWILIO=$?
+
+# -----------------------------------------------------------------------------
+# send the messagebird SMS
+
+send_messagebird "${MESSAGEBIRD_ACCESS_KEY}" "${MESSAGEBIRD_NUMBER}" "${to_messagebird}" "${host} ${status_message} - ${name//_/ } - ${chart}" "${alarm}
+Severity: ${severity}
+Chart: ${chart}
+Family: ${family}
+${info}"
+
+SENT_MESSAGEBIRD=$?
+
+# -----------------------------------------------------------------------------
+# send the kavenegar SMS
+
+send_kavenegar "${KAVENEGAR_API_KEY}" "${KAVENEGAR_SENDER}" "${to_kavenegar}" "${host} ${status_message} - ${name//_/ } - ${chart}" "${alarm}
+Severity: ${severity}
+Chart: ${chart}
+Family: ${family}
+${info}"
+
+SENT_KAVENEGAR=$?
+
+# -----------------------------------------------------------------------------
+# send the telegram.org message
+
+# https://core.telegram.org/bots/api#formatting-options
+send_telegram "${TELEGRAM_BOT_TOKEN}" "${to_telegram}" "${host} ${status_message} - <b>${name//_/ }</b>
+${chart} (${family})
+<a href=\"${goto_url}\">${alarm}</a>
+<i>${info}</i>"
+
+SENT_TELEGRAM=$?
+
+# -----------------------------------------------------------------------------
+# send the kafka message
+
+send_kafka
+SENT_KAFKA=$?
+
+# -----------------------------------------------------------------------------
+# send the pagerduty.com message
+
+send_pd "${to_pd}"
+SENT_PD=$?
+
+# -----------------------------------------------------------------------------
+# send the fleep message
+
+send_fleep "${to_fleep}"
+SENT_FLEEP=$?
+
+# -----------------------------------------------------------------------------
+# send the Prowl message
+
+send_prowl "${to_prowl}"
+SENT_PROWL=$?
+
+# -----------------------------------------------------------------------------
+# send the irc message
+
+send_irc "${IRC_NICKNAME}" "${IRC_REALNAME}" "${to_irc}" "${IRC_NETWORK}" "${host}" "${host} ${status_message} - ${name//_/ } - ${chart} ----- ${alarm}
+Severity: ${severity}
+Chart: ${chart}
+Family: ${family}
+${info}"
+
+SENT_IRC=$?
+
+# -----------------------------------------------------------------------------
+# send the SMS message with smstools3
+
+send_sms "${to_sms}"
+
+SENT_SMS=$?
+
+# -----------------------------------------------------------------------------
+# send the custom message
+
+send_custom() {
+ # is it enabled?
+ [ "${SEND_CUSTOM}" != "YES" ] && return 1
+
+ # do we have any sender?
+ [ -z "${1}" ] && return 1
+
+ # call the custom_sender function
+ custom_sender "${@}"
+}
+
+send_custom "${to_custom}"
+SENT_CUSTOM=$?
+
+# -----------------------------------------------------------------------------
+# send hipchat message
+
+send_hipchat "${HIPCHAT_AUTH_TOKEN}" "${to_hipchat}" " \
+${host} ${status_message}<br/> \
+<b>${alarm}</b> ${info_html}<br/> \
+<b>${chart}</b> (family <b>${family}</b>)<br/> \
+<b>${date}${raised_for_html}</b><br/> \
+<a href=\\\"${goto_url}\\\">View netdata dashboard</a> \
+(source of alarm ${src}) \
+"
+
+SENT_HIPCHAT=$?
+
+# -----------------------------------------------------------------------------
+# send the Amazon SNS message
+
+send_awssns "${to_awssns}"
+
+SENT_AWSSNS=$?
+
+# -----------------------------------------------------------------------------
+# send the syslog message
+
+send_syslog "${to_syslog}"
+
+SENT_SYSLOG=$?
+
+# -----------------------------------------------------------------------------
+# send the email
+
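+# the email is built as multipart/alternative: the plain text part is always
+# included; an HTML part is added unless EMAIL_PLAINTEXT_ONLY is "YES"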
+IFS='' read -r -d '' email_plaintext_part <<EOF
+Content-Type: text/plain; charset=${EMAIL_CHARSET}
+Content-Disposition: inline
+Content-Transfer-Encoding: 8bit
+
+${host} ${status_message}
+
+${alarm} ${info}
+${raised_for}
+
+Chart : ${chart}
+Family : ${family}
+Severity: ${severity}
+URL : ${goto_url}
+Source : ${src}
+Date : ${date}
+Notification generated on ${host}
+
+Evaluated Expression : ${calc_expression}
+Expression Variables : ${calc_param_values}
+
+The host has ${total_warnings} WARNING and ${total_critical} CRITICAL alarm(s) raised.
+EOF
+
+if [[ "${EMAIL_PLAINTEXT_ONLY}" == "YES" ]]; then
+
+send_email <<EOF
+To: ${to_email}
+Subject: ${host} ${status_message} - ${name//_/ } - ${chart}
+MIME-Version: 1.0
+Content-Type: multipart/alternative; boundary="multipart-boundary"
+${email_thread_headers}
+
+This is a MIME-encoded multipart message
+
+--multipart-boundary
+${email_plaintext_part}
+--multipart-boundary--
+EOF
+
+else
+
+IFS='' read -r -d '' email_html_part <<EOF
+Content-Type: text/html; charset=${EMAIL_CHARSET}
+Content-Disposition: inline
+Content-Transfer-Encoding: 8bit
+
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml" style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 14px; margin: 0; padding: 0;">
+<body style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 14px; width: 100% !important; min-height: 100%; line-height: 1.6; background: #f6f6f6; margin:0; padding: 0;">
+<table>
+ <tbody>
+ <tr>
+ <td style="vertical-align: top;" valign="top"></td>
+ <td width="700" style="vertical-align: top; display: block !important; max-width: 700px !important; clear: both !important; margin: 0 auto; padding: 0;" valign="top">
+ <div style="max-width: 700px; display: block; margin: 0 auto; padding: 20px;">
+ <table width="100%" cellpadding="0" cellspacing="0" style="background: #fff; border: 1px solid #e9e9e9;">
+ <tbody>
+ <tr>
+ <td bgcolor="#eee" style="padding: 5px 20px 5px 20px; background-color: #eee;">
+ <div style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 20px; color: #777; font-weight: bold;">netdata notification</div>
+ </td>
+ </tr>
+ <tr>
+ <td bgcolor="${color}" style="font-size: 16px; vertical-align: top; font-weight: 400; text-align: center; margin: 0; padding: 10px; color: #ffffff; background: ${color} !important; border: 1px solid ${color}; border-top-color: ${color};" align="center" valign="top">
+ <h1 style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-weight: 400; margin: 0;">${host} ${status_message}</h1>
+ </td>
+ </tr>
+ <tr>
+ <td style="vertical-align: top;" valign="top">
+ <div style="margin: 0; padding: 20px; max-width: 700px;">
+ <table width="100%" cellpadding="0" cellspacing="0" style="max-width:700px">
+ <tbody>
+ <tr>
+ <td style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 18px; vertical-align: top; margin: 0; padding:0 0 20px;" align="left" valign="top">
+ <span>${chart}</span>
+ <span style="display: block; color: #666666; font-size: 12px; font-weight: 300; line-height: 1; text-transform: uppercase;">Chart</span>
+ </td>
+ </tr>
+ <tr style="margin: 0; padding: 0;">
+ <td style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 18px; vertical-align: top; margin: 0; padding: 0 0 20px;" align="left" valign="top">
+ <span><b>${alarm}</b>${info_html}</span>
+ <span style="display: block; color: #666666; font-size: 12px; font-weight: 300; line-height: 1; text-transform: uppercase;">Alarm</span>
+ </td>
+ </tr>
+ <tr>
+ <td style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 18px; vertical-align: top; margin: 0; padding: 0 0 20px;" align="left" valign="top">
+ <span>${family}</span>
+ <span style="display: block; color: #666666; font-size: 12px; font-weight: 300; line-height: 1; text-transform: uppercase;">Family</span>
+ </td>
+ </tr>
+ <tr style="margin: 0; padding: 0;">
+ <td style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 18px; vertical-align: top; margin: 0; padding: 0 0 20px;" align="left" valign="top">
+ <span>${severity}</span>
+ <span style="display: block; color: #666666; font-size: 12px; font-weight: 300; line-height: 1; text-transform: uppercase;">Severity</span>
+ </td>
+ </tr>
+ <tr style="margin: 0; padding: 0;">
+ <td style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 18px; vertical-align: top; margin: 0; padding: 0 0 20px;" align="left" valign="top"><span>${date}</span>
+ <span>${raised_for_html}</span> <span style="display: block; color: #666666; font-size: 12px; font-weight: 300; line-height: 1; text-transform: uppercase;">Time</span>
+ </td>
+ </tr>
+ <tr style="margin: 0; padding: 0;">
+ <td style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 18px; vertical-align: top; margin: 0; padding: 0 0 20px;" align="left" valign="top">
+ <span>${calc_expression}</span>
+ <span style="display: block; color: #666666; font-size: 12px; font-weight: 300; line-height: 1; text-transform: uppercase;">Evaluated Expression</span>
+ </td>
+ </tr>
+ <tr style="margin: 0; padding: 0;">
+ <td style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 18px; vertical-align: top; margin: 0; padding: 0 0 20px;" align="left" valign="top">
+ <span>${calc_param_values}</span>
+ <span style="display: block; color: #666666; font-size: 12px; font-weight: 300; line-height: 1; text-transform: uppercase;">Expression Variables</span>
+ </td>
+ </tr>
+ <tr style="margin: 0; padding: 0;">
+ <td style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 18px; vertical-align: top; margin: 0; padding: 0 0 20px;" align="left" valign="top">
+ The host has ${total_warnings} WARNING and ${total_critical} CRITICAL alarm(s) raised.
+ </td>
+ </tr>
+
+ <tr style="margin: 0; padding: 0;">
+ <td style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 18px; vertical-align: top; margin: 0; padding: 0 0 20px;">
+ <a href="${goto_url}" style="font-size: 14px; color: #ffffff; text-decoration: none; line-height: 1.5; font-weight: bold; text-align: center; display: inline-block; text-transform: capitalize; background: #35568d; border-width: 1px; border-style: solid; border-color: #2b4c86; margin: 0; padding: 10px 15px;" target="_blank">View Netdata</a>
+ </td>
+ </tr>
+ <tr style="text-align: center; margin: 0; padding: 0;">
+ <td style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 11px; vertical-align: top; margin: 0; padding: 10px 0 0 0; color: #666666;" align="center" valign="bottom">The source of this alarm is line <code>${src}</code><br/>(alarms are configurable, edit this file to adapt the alarm to your needs)
+ </td>
+ </tr>
+ <tr style="text-align: center; margin: 0; padding: 0;">
+ <td style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 12px; vertical-align: top; margin:0; padding: 20px 0 0 0; color: #666666; border-top: 1px solid #f0f0f0;" align="center" valign="bottom">Sent by
+ <a href="https://mynetdata.io/" target="_blank">netdata</a>, the real-time performance and health monitoring, on <code>${host}</code>.
+ </td>
+ </tr>
+ </tbody>
+ </table>
+ </div>
+ </td>
+ </tr>
+ </tbody>
+ </table>
+ </div>
+ </td>
+ </tr>
+ </tbody>
+</table>
+</body>
+</html>
+EOF
+
+send_email <<EOF
+To: ${to_email}
+Subject: ${host} ${status_message} - ${name//_/ } - ${chart}
+MIME-Version: 1.0
+Content-Type: multipart/alternative; boundary="multipart-boundary"
+${email_thread_headers}
+
+This is a MIME-encoded multipart message
+
+--multipart-boundary
+${email_plaintext_part}
+--multipart-boundary
+${email_html_part}
+--multipart-boundary--
+EOF
+
+fi
+
+SENT_EMAIL=$?
+
+# -----------------------------------------------------------------------------
+# let netdata know
+for state in "${SENT_EMAIL}" \
+ "${SENT_PUSHOVER}" \
+ "${SENT_TELEGRAM}" \
+ "${SENT_SLACK}" \
+ "${SENT_ROCKETCHAT}" \
+ "${SENT_ALERTA}" \
+ "${SENT_FLOCK}" \
+ "${SENT_DISCORD}" \
+ "${SENT_TWILIO}" \
+ "${SENT_HIPCHAT}" \
+ "${SENT_MESSAGEBIRD}" \
+ "${SENT_KAVENEGAR}" \
+ "${SENT_PUSHBULLET}" \
+ "${SENT_KAFKA}" \
+ "${SENT_PD}" \
+ "${SENT_FLEEP}" \
+ "${SENT_PROWL}" \
+ "${SENT_CUSTOM}" \
+ "${SENT_IRC}" \
+ "${SENT_AWSSNS}" \
+ "${SENT_SYSLOG}" \
+ "${SENT_SMS}" \
+ "${SENT_MSTEAM}"; do
+ if [ "${state}" -eq 0 ]; then
+ # we sent something
+ exit 0
+ fi
+done
+# we did not send anything
+exit 1
diff --git a/health/notifications/alerta/README.md b/health/notifications/alerta/README.md
index 2826fe77..a34d2b79 100644
--- a/health/notifications/alerta/README.md
+++ b/health/notifications/alerta/README.md
@@ -26,7 +26,9 @@ configurations are out of scope of this tutorial but information
about different deployment scenarios can be found in the [docs][3].
[1]: https://hub.docker.com/r/alerta/alerta-web/
+
[2]: http://alerta.readthedocs.io/en/latest/gettingstarted/tutorial-1-deploy-alerta.html
+
[3]: http://docs.alerta.io/en/latest/deployment.html
## Send alarms to Alerta
@@ -42,7 +44,9 @@ Step 2. configure Netdata to send alarms to Alerta
On your system run:
- $ /etc/netdata/edit-config health_alarm_notify.conf
+```sh
+/etc/netdata/edit-config health_alarm_notify.conf
+```
and modify the file as below:
@@ -70,13 +74,15 @@ DEFAULT_RECIPIENT_ALERTA="Production"
We can test alarms using the standard approach:
- $ /opt/netdata/netdata-plugins/plugins.d/alarm-notify.sh test
+```sh
+/opt/netdata/netdata-plugins/plugins.d/alarm-notify.sh test
+```
Note: Netdata will send 3 alarms, and because the last alarm is "CLEAR"
you will not see them in the main Alerta page; you need to select to see
"closed" alarms in the top-right lookup. A little change in `alarm-notify.sh`
that lets us test each state one by one would be useful.
-For more information see [https://docs.alerta.io](https://docs.alerta.io)
+For more information see <https://docs.alerta.io>
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fhealth%2Fnotifications%2Falerta%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fhealth%2Fnotifications%2Falerta%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/health/notifications/awssns/README.md b/health/notifications/awssns/README.md
index 5205d4cb..ed838dac 100644
--- a/health/notifications/awssns/README.md
+++ b/health/notifications/awssns/README.md
@@ -2,23 +2,25 @@
As part of its AWS suite, Amazon provides a notification broker service called 'Simple Notification Service' or SNS. Amazon SNS works kind of similarly to Netdata's own notification system, allowing dispatch of a single notification to multiple subscribers of different types. Among other things, SNS supports sending notifications to:
-* Email addresses.
-* Mobile Phones via SMS.
-* HTTP or HTTPS web hooks.
-* AWS Lambda functions.
-* AWS SQS queues.
-* Mobile applications via push notifications.
+- Email addresses.
+- Mobile Phones via SMS.
+- HTTP or HTTPS web hooks.
+- AWS Lambda functions.
+- AWS SQS queues.
+- Mobile applications via push notifications.
To get this working, you will need:
-* The Amazon Web Services CLI tools. Most distributions provide these with the package name `awscli`.
-* An actual home directory for the user you run Netdata as, instead of just using `/` as a home directory. Setup of this is distribution specific. `/var/lib/netdata` is the recommended directory (because the permissions will already be correct) if you are using a dedicated user (which is how most distributions work).
-* An Amazon SNS topic to send notifications to with one or more subscribers. The [Getting Started](https://docs.aws.amazon.com/sns/latest/dg/GettingStarted.html) section of the Amazon SNS documentation covers the basics of how to set this up. Make note of the Topic ARN when you create the topic.
-* While not mandatory, it is highly recommended to create a dedicated IAM user on your account for netdata to send notifications. This user needs to have programmatic access, and should only allow access to SNS. If you're really paranoid, you can create one for each system or group of systems.
+- The Amazon Web Services CLI tools. Most distributions provide these with the package name `awscli`.
+- An actual home directory for the user you run Netdata as, instead of just using `/` as a home directory. Setup of this is distribution specific. `/var/lib/netdata` is the recommended directory (because the permissions will already be correct) if you are using a dedicated user (which is how most distributions work).
+- An Amazon SNS topic to send notifications to with one or more subscribers. The [Getting Started](https://docs.aws.amazon.com/sns/latest/dg/GettingStarted.html) section of the Amazon SNS documentation covers the basics of how to set this up. Make note of the Topic ARN when you create the topic.
+- While not mandatory, it is highly recommended to create a dedicated IAM user on your account for Netdata to send notifications. This user needs to have programmatic access, and should only allow access to SNS. If you're really paranoid, you can create one for each system or group of systems.
-Once you have all the above, run the follwing command as the user netdata runs under:
+Once you have all the above, run the following command as the user Netdata runs under:
- aws configure
+```sh
+aws configure
+```
This will prompt you for the access key and secret key for accessing Amazon SNS (as well as the default region and output format, but you can leave those blank because we don't use them).
@@ -26,8 +28,8 @@ Once that's done, you're ready to go and can specify the desired topic ARN as a
Notes:
- * Netdata's native email notification support is far better in almost all respects than it's support through Amazon SNS. If you want email notifications, use the native support, not SNS.
- * If you need to change the notification format for SNS notifications, you can do so by specifying the format in `AWSSNS_MESSAGE_FORMAT` in the configuration. This variable supports all the same vairiables you can use in custom notifications.
- * While Amazon SNS supports sending differently formatted messages for different delivery methods, netdata does not currently support this functionality.
+- Netdata's native email notification support is far better in almost all respects than its support through Amazon SNS. If you want email notifications, use the native support, not SNS.
+- If you need to change the notification format for SNS notifications, you can do so by specifying the format in `AWSSNS_MESSAGE_FORMAT` in the configuration. This variable supports all the same variables you can use in custom notifications.
+- While Amazon SNS supports sending differently formatted messages for different delivery methods, Netdata does not currently support this functionality.
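+
+For instance, a minimal sketch of the relevant settings in `health_alarm_notify.conf` (the topic ARN below is a made-up placeholder):
+
+```sh
+SEND_AWSSNS="YES"
+# the recipient is the ARN of the SNS topic to publish to
+DEFAULT_RECIPIENT_AWSSNS="arn:aws:sns:us-east-1:123456789012:netdata-alarms"
+```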
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fhealth%2Fnotifications%2Fawssns%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fhealth%2Fnotifications%2Fawssns%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/health/notifications/custom/README.md b/health/notifications/custom/README.md
index eeaad8a6..18df7954 100644
--- a/health/notifications/custom/README.md
+++ b/health/notifications/custom/README.md
@@ -39,45 +39,45 @@ The following is a sample `custom_sender` function in `health_alarm_notify.conf`
Variables available to the custom_sender:
- - `${to_custom}` the list of recipients for the alarm
- - `${host}` the host generated this event
- - `${url_host}` same as `${host}` but URL encoded
- - `${unique_id}` the unique id of this event
- - `${alarm_id}` the unique id of the alarm that generated this event
- - `${event_id}` the incremental id of the event, for this alarm id
- - `${when}` the timestamp this event occurred
- - `${name}` the name of the alarm, as given in netdata health.d entries
- - `${url_name}` same as `${name}` but URL encoded
- - `${chart}` the name of the chart (type.id)
- - `${url_chart}` same as `${chart}` but URL encoded
- - `${family}` the family of the chart
- - `${url_family}` same as `${family}` but URL encoded
- - `${status}` the current status : REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL
- - `${old_status}` the previous status: REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL
- - `${value}` the current value of the alarm
- - `${old_value}` the previous value of the alarm
- - `${src}` the line number and file the alarm has been configured
- - `${duration}` the duration in seconds of the previous alarm state
- - `${duration_txt}` same as `${duration}` for humans
- - `${non_clear_duration}` the total duration in seconds this is/was non-clear
- - `${non_clear_duration_txt}` same as `${non_clear_duration}` for humans
- - `${units}` the units of the value
- - `${info}` a short description of the alarm
- - `${value_string}` friendly value (with units)
- - `${old_value_string}` friendly old value (with units)
- - `${image}` the URL of an image to represent the status of the alarm
- - `${color}` a color in #AABBCC format for the alarm
- - `${goto_url}` the URL the user can click to see the netdata dashboard
- - `${calc_expression}` the expression evaluated to provide the value for the alarm
- - `${calc_param_values}` the value of the variables in the evaluated expression
- - `${total_warnings}` the total number of alarms in WARNING state on the host
- - `${total_critical}` the total number of alarms in CRITICAL state on the host
+- `${to_custom}` the list of recipients for the alarm
+- `${host}` the host that generated this event
+- `${url_host}` same as `${host}` but URL encoded
+- `${unique_id}` the unique id of this event
+- `${alarm_id}` the unique id of the alarm that generated this event
+- `${event_id}` the incremental id of the event, for this alarm id
+- `${when}` the timestamp this event occurred
+- `${name}` the name of the alarm, as given in Netdata health.d entries
+- `${url_name}` same as `${name}` but URL encoded
+- `${chart}` the name of the chart (type.id)
+- `${url_chart}` same as `${chart}` but URL encoded
+- `${family}` the family of the chart
+- `${url_family}` same as `${family}` but URL encoded
+- `${status}` the current status: REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL
+- `${old_status}` the previous status: REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL
+- `${value}` the current value of the alarm
+- `${old_value}` the previous value of the alarm
+- `${src}` the line number and file where the alarm has been configured
+- `${duration}` the duration in seconds of the previous alarm state
+- `${duration_txt}` same as `${duration}` for humans
+- `${non_clear_duration}` the total duration in seconds this is/was non-clear
+- `${non_clear_duration_txt}` same as `${non_clear_duration}` for humans
+- `${units}` the units of the value
+- `${info}` a short description of the alarm
+- `${value_string}` friendly value (with units)
+- `${old_value_string}` friendly old value (with units)
+- `${image}` the URL of an image to represent the status of the alarm
+- `${color}` a color in #AABBCC format for the alarm
+- `${goto_url}` the URL the user can click to see the Netdata dashboard
+- `${calc_expression}` the expression evaluated to provide the value for the alarm
+- `${calc_param_values}` the value of the variables in the evaluated expression
+- `${total_warnings}` the total number of alarms in WARNING state on the host
+- `${total_critical}` the total number of alarms in CRITICAL state on the host
The following are more human friendly:
- - `${alarm}` like "name = value units"
- - `${status_message}` like "needs attention", "recovered", "is critical"
- - `${severity}` like "Escalated to CRITICAL", "Recovered from WARNING"
- - `${raised_for}` like "(alarm was raised for 10 minutes)"
+- `${alarm}` like "name = value units"
+- `${status_message}` like "needs attention", "recovered", "is critical"
+- `${severity}` like "Escalated to CRITICAL", "Recovered from WARNING"
+- `${raised_for}` like "(alarm was raised for 10 minutes)"
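+
+As an illustration, here is a minimal `custom_sender` sketch that posts a one-line summary for each recipient. The webhook URL is a made-up placeholder; `docurl` is the `curl` wrapper defined inside `alarm-notify.sh`:
+
+```sh
+custom_sender() {
+    local httpcode to sent=0
+    for to in ${to_custom}; do
+        # post a short, human readable summary for this recipient
+        httpcode=$(docurl -X POST \
+            --data-urlencode "text=${host} ${status_message}: ${alarm} ${raised_for}" \
+            "https://example.com/hooks/${to}")
+        # docurl prints the HTTP response code of the request
+        [ "${httpcode}" = "200" ] && sent=$((sent + 1))
+    done
+    # returning 0 tells alarm-notify.sh that the notification was delivered
+    [ ${sent} -gt 0 ] && return 0
+    return 1
+}
+```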
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fhealth%2Fnotifications%2Fcustom%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fhealth%2Fnotifications%2Fcustom%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/health/notifications/discord/README.md b/health/notifications/discord/README.md
index 7694fef4..88e0a970 100644
--- a/health/notifications/discord/README.md
+++ b/health/notifications/discord/README.md
@@ -6,8 +6,8 @@ This is what you will get:
You need:
-1. The **incoming webhook URL** as given by Discord. Create a webhook by following the official [Discord documentation](https://support.discordapp.com/hc/en-us/articles/228383668-Intro-to-Webhooks). You can use the same on all your netdata servers (or you can have multiple if you like - your decision).
-2. One or more Discord channels to post the messages to.
+1. The **incoming webhook URL** as given by Discord. Create a webhook by following the official [Discord documentation](https://support.discordapp.com/hc/en-us/articles/228383668-Intro-to-Webhooks). You can use the same on all your Netdata servers (or you can have multiple if you like - your decision).
+2. One or more Discord channels to post the messages to.
Set them in `/etc/netdata/health_alarm_notify.conf` (to edit it on your system run `/etc/netdata/edit-config health_alarm_notify.conf`), like this:
@@ -29,7 +29,6 @@ DISCORD_WEBHOOK_URL="https://discordapp.com/api/webhooks/XXXXXXXXXXXXX/XXXXXXXXX
# this discord channel (empty = do not send a notification for unconfigured
# roles):
DEFAULT_RECIPIENT_DISCORD="alarms"
-
```
You can define multiple channels like this: `alarms systems`.
@@ -43,4 +42,4 @@ role_recipients_discord[webmaster]="marketing development"
The keywords `systems`, `databases`, `marketing`, `development` are discordapp.com channels (they should already exist within your discord server).
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fhealth%2Fnotifications%2Fdiscord%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fhealth%2Fnotifications%2Fdiscord%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/health/notifications/email/README.md b/health/notifications/email/README.md
index 84a9e0ce..92916d19 100644
--- a/health/notifications/email/README.md
+++ b/health/notifications/email/README.md
@@ -2,7 +2,7 @@
You need a working `sendmail` command for email alerts to work. Almost all MTAs provide a `sendmail` interface.
-netdata sends all emails as user `netdata`, so make sure your `sendmail` works for local users.
+Netdata sends all emails as user `netdata`, so make sure your `sendmail` works for local users.
email notifications look like this:
@@ -16,7 +16,7 @@ You can configure recipients in [`/etc/netdata/health_alarm_notify.conf`](https:
You can also configure per role recipients [in the same file, a few lines below](https://github.com/netdata/netdata/blob/99d44b7d0c4e006b11318a28ba4a7e7d3f9b3bae/conf.d/health_alarm_notify.conf#L313).
-Changes to this file do not require netdata restart.
+Changes to this file do not require a Netdata restart.
You can test your configuration by issuing the commands:
@@ -30,6 +30,7 @@ sudo su -s /bin/bash netdata
Where `[ROLE]` is the role you want to test. The default (if you don't give a `[ROLE]`) is `sysadmin`.
-Note that in versions before 1.16, the plugins.d directory may be installed in a different location in certain OSs (e.g. under `/usr/lib/netdata`). You can always find the location of the alarm-notify.sh script in `netdata.conf`.
+Note that in versions before 1.16, the plugins.d directory may be installed in a different location in certain OSs (e.g. under `/usr/lib/netdata`).
+You can always find the location of the alarm-notify.sh script in `netdata.conf`.
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fhealth%2Fnotifications%2Femail%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fhealth%2Fnotifications%2Femail%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/health/notifications/flock/README.md b/health/notifications/flock/README.md
index 0d679ce6..658fc7b8 100644
--- a/health/notifications/flock/README.md
+++ b/health/notifications/flock/README.md
@@ -2,14 +2,14 @@
This is what you will get:
-
![Flock](https://i.imgur.com/ok9bRzw.png)
You need:
-The **incoming webhook URL** as given by flock.com. You can use the same on all your netdata servers (or you can have multiple if you like - your decision).
+The **incoming webhook URL** as given by flock.com.
+You can use the same on all your Netdata servers (or you can have multiple if you like - your decision).
-Get them here: https://admin.flock.com/webhooks
+Get them here: <https://admin.flock.com/webhooks>
Set them in `/etc/netdata/health_alarm_notify.conf` (to edit it on your system run `/etc/netdata/edit-config health_alarm_notify.conf`), like this:
@@ -21,13 +21,12 @@ Set them in `/etc/netdata/health_alarm_notify.conf` (to edit it on your system r
SEND_FLOCK="YES"
# Login to flock.com and create an incoming webhook.
-# You need only one for all your netdata servers.
-# Without it, netdata cannot send flock notifications.
+# You need only one for all your Netdata servers.
+# Without it, Netdata cannot send flock notifications.
FLOCK_WEBHOOK_URL="https://api.flock.com/hooks/sendMessage/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
# if a role recipient is not configured, no notification will be sent
DEFAULT_RECIPIENT_FLOCK="alarms"
-
```
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fhealth%2Fnotifications%2Fflock%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fhealth%2Fnotifications%2Fflock%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/health/notifications/irc/README.md b/health/notifications/irc/README.md
index 9ea86e92..36590eb7 100644
--- a/health/notifications/irc/README.md
+++ b/health/notifications/irc/README.md
@@ -2,15 +2,15 @@
This is what you will get:
-IRCCloud web client:
+IRCCloud web client:\
![image](https://user-images.githubusercontent.com/31221999/36793487-3735673e-1ca6-11e8-8880-d1d8b6cd3bc0.png)
Irssi terminal client:
![image](https://user-images.githubusercontent.com/31221999/36793486-3713ada6-1ca6-11e8-8c12-70d956ad801e.png)
-
You need:
-1. The `nc` utility. If you do not set the path, netdata will search for it in your system `$PATH`.
+
+1. The `nc` utility. If you do not set the path, Netdata will search for it in your system `$PATH`.
Set the path for `nc` in `/etc/netdata/health_alarm_notify.conf` (to edit it on your system run `/etc/netdata/edit-config health_alarm_notify.conf`), like this:
@@ -22,12 +22,11 @@ Set the path for `nc` in `/etc/netdata/health_alarm_notify.conf` (to edit it on
# If empty, the system $PATH will be searched for it.
# If not found, irc notifications will be silently disabled.
nc="/usr/bin/nc"
-
```
-2. Αn `IRC_NETWORK` to which your preffered channels belong to.
-3. One or more channels ( `DEFAULT_RECIPIENT_IRC` ) to post the messages to.
-4. An `IRC_NICKNAME` and an `IRC_REALNAME` to identify in IRC.
+2. An `IRC_NETWORK` to which your preferred channels belong.
+3. One or more channels (`DEFAULT_RECIPIENT_IRC`) to post the messages to.
+4. An `IRC_NICKNAME` and an `IRC_REALNAME` to identify in IRC.
Set them in `/etc/netdata/health_alarm_notify.conf` (to edit it on your system run `/etc/netdata/edit-config health_alarm_notify.conf`), like this:
@@ -57,11 +56,10 @@ IRC_NICKNAME="netdata-alarm-user"
# The irc realname which is required in order to make the connection and is an
# extra identifier.
IRC_REALNAME="netdata-user"
-
```
-You can define multiple channels like this: `#system-alarms #networking-alarms`.
-You can also filter the notifications like this: `#system-alarms|critical`.
+You can define multiple channels like this: `#system-alarms #networking-alarms`.\
+You can also filter the notifications like this: `#system-alarms|critical`.\
You can give different channels per **role** using these (at the same file):
```
@@ -72,4 +70,4 @@ role_recipients_irc[webmaster]="#networking-alarms"
The keywords `#user-alarms`, `#networking-alarms`, `#system-alarms`, `#databases-alarms` are irc channels which belong to the specified IRC network.
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fhealth%2Fnotifications%2Firc%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fhealth%2Fnotifications%2Firc%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/health/notifications/kavenegar/README.md b/health/notifications/kavenegar/README.md
index d833eef8..495b5338 100644
--- a/health/notifications/kavenegar/README.md
+++ b/health/notifications/kavenegar/README.md
@@ -8,10 +8,10 @@ Will look like this on your Android device:
You will need:
-1. Signup and Login to kavenegar.com
-2. Get your APIKEY and Sender from http://panel.kavenegar.com/client/setting/account
-3. Fill in KAVENEGAR_API_KEY="" KAVENEGAR_SENDER=""
-4. Add the recipient phone numbers to DEFAULT_RECIPIENT_KAVENEGAR=""
+1. Sign up and log in to kavenegar.com
+2. Get your APIKEY and Sender from <http://panel.kavenegar.com/client/setting/account>
+3. Fill in KAVENEGAR_API_KEY="" KAVENEGAR_SENDER=""
+4. Add the recipient phone numbers to DEFAULT_RECIPIENT_KAVENEGAR=""
Set them in `/etc/netdata/health_alarm_notify.conf` (to edit it on your system run `/etc/netdata/edit-config health_alarm_notify.conf`), like this:
@@ -32,10 +32,10 @@ SEND_KAVENEGAR="YES"
# copy your api key. You can generate new API Key too.
# You can find and select kevenegar sender number from this place.
-# Without an API key, netdata cannot send KAVENEGAR text messages.
+# Without an API key, Netdata cannot send KAVENEGAR text messages.
KAVENEGAR_API_KEY=""
KAVENEGAR_SENDER=""
DEFAULT_RECIPIENT_KAVENEGAR=""
```
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fhealth%2Fnotifications%2Fkavenegar%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fhealth%2Fnotifications%2Fkavenegar%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/health/notifications/messagebird/README.md b/health/notifications/messagebird/README.md
index cdb3e8dc..30d52a4f 100644
--- a/health/notifications/messagebird/README.md
+++ b/health/notifications/messagebird/README.md
@@ -6,12 +6,12 @@ The messagebird notifications will look like this on your Android device:
You will need:
-1. Signup and Login to messagebird.com
-2. Pick an SMS capable number after sign up to get some free credits
-3. Go to <https://www.messagebird.com/app/settings/developers/access>
-4. Create a new access key under 'API ACCESS (REST)' (you will want a live key)
-3. Fill in MESSAGEBIRD_ACCESS_KEY="XXXXXXXX" MESSAGEBIRD_NUMBER="+XXXXXXXXXXX"
-4. Add the recipient phone numbers to DEFAULT_RECIPIENT_MESSAGEBIRD="+XXXXXXXXXXX"
+1. Sign up and log in to messagebird.com
+2. Pick an SMS capable number after sign up to get some free credits
+3. Go to <https://www.messagebird.com/app/settings/developers/access>
+4. Create a new access key under 'API ACCESS (REST)' (you will want a live key)
+5. Fill in MESSAGEBIRD_ACCESS_KEY="XXXXXXXX" MESSAGEBIRD_NUMBER="+XXXXXXXXXXX"
+6. Add the recipient phone numbers to DEFAULT_RECIPIENT_MESSAGEBIRD="+XXXXXXXXXXX"
Set them in `/etc/netdata/health_alarm_notify.conf` (to edit it on your system run `/etc/netdata/edit-config health_alarm_notify.conf`), like this:
@@ -31,11 +31,10 @@ SEND_MESSAGEBIRD="YES"
# to get the API key, click on 'API' in the sidebar, then 'API Access (REST)'
# click 'Add access key' and fill in data (you want a live key to send SMS)
-# Without an access key, netdata cannot send Messagebird text messages.
+# Without an access key, Netdata cannot send Messagebird text messages.
MESSAGEBIRD_ACCESS_KEY="XXXXXXXX"
MESSAGEBIRD_NUMBER="XXXXXXX"
DEFAULT_RECIPIENT_MESSAGEBIRD="XXXXXXX"
-
```
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fhealth%2Fnotifications%2Fmessagebird%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fhealth%2Fnotifications%2Fmessagebird%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/health/notifications/pagerduty/README.md b/health/notifications/pagerduty/README.md
index 884b9792..59a48515 100644
--- a/health/notifications/pagerduty/README.md
+++ b/health/notifications/pagerduty/README.md
@@ -2,13 +2,13 @@
[PagerDuty](https://www.pagerduty.com/company/) is the enterprise incident resolution service that integrates with ITOps and DevOps monitoring stacks to improve operational reliability and agility. From enriching and aggregating events to correlating them into incidents, PagerDuty streamlines the incident management process by reducing alert noise and resolution times.
-Here is an example of a PagerDuty dashboard with netdata notifications:
+Here is an example of a PagerDuty dashboard with Netdata notifications:
-![PagerDuty dashboard with netdata notifications](https://cloud.githubusercontent.com/assets/19278582/21233877/b466a08a-c2a5-11e6-8d66-ee6eed43818f.png)
+![PagerDuty dashboard with Netdata notifications](https://cloud.githubusercontent.com/assets/19278582/21233877/b466a08a-c2a5-11e6-8d66-ee6eed43818f.png)
-To have netdata send notifications to PagerDuty, you'll first need to set up a PagerDuty `Generic API` service and install the PagerDuty agent on the host running netdata. See the following guide for details:
+To have Netdata send notifications to PagerDuty, you'll first need to set up a PagerDuty `Generic API` service and install the PagerDuty agent on the host running Netdata. See the following guide for details:
-https://www.pagerduty.com/docs/guides/agent-install-guide/
+<https://www.pagerduty.com/docs/guides/agent-install-guide/>
During the setup of the `Generic API` PagerDuty service, you'll obtain a `pagerduty service key`. Keep this **service key** handy.
@@ -34,4 +34,4 @@ SEND_PD="YES"
DEFAULT_RECIPIENT_PD="<service key>"
```
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fhealth%2Fnotifications%2Fpagerduty%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fhealth%2Fnotifications%2Fpagerduty%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/health/notifications/prowl/Makefile.inc b/health/notifications/prowl/Makefile.inc
deleted file mode 100644
index 08e4c2e5..00000000
--- a/health/notifications/prowl/Makefile.inc
+++ /dev/null
@@ -1,12 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# install these files
-dist_noinst_DATA += \
- prowl/README.md \
- prowl/Makefile.inc \
- $(NULL)
-
diff --git a/health/notifications/prowl/README.md b/health/notifications/prowl/README.md
deleted file mode 100644
index 1f060edc..00000000
--- a/health/notifications/prowl/README.md
+++ /dev/null
@@ -1,22 +0,0 @@
-# prowl
-
-(Prowl)[1] is a push notification service for iOS devices. Netdata
-supprots delivering notifications to iOS devices through Prowl.
-
-Because of how Netdata integrates with Prowl, there is a hard limit of
-at most 1000 notifications per hour (starting from the first notification
-sent). Any alerts beyond the first thousand in an hour will be dropped.
-
-Warning messages will be sent with the 'High' priority, critical messages
-will be sent with the 'Emergency' priority, and all other messages will
-be sent with the normal priority. Opening the notification's associated
-URL will take you to the Netdata dashboard of the system that issued
-the alert, directly to the chart that it triggered on.
-
-## configuration
-
-To use this, you will need a Prowl API key, which can be rquested through
-the Prowl website after registering.
-
-Once you have an API key, simply specify that as a recipient for Prowl
-notifications.
diff --git a/health/notifications/pushbullet/README.md b/health/notifications/pushbullet/README.md
index 42b343e4..f5673eca 100644
--- a/health/notifications/pushbullet/README.md
+++ b/health/notifications/pushbullet/README.md
@@ -5,16 +5,15 @@ Will look like this on your browser:
And like this on your Android device:
-
![image](https://cloud.githubusercontent.com/assets/4300670/19109635/278a1dde-8aee-11e6-9984-0bc87a13312d.png)
You will need:
-1. Signup and Login to pushbullet.com
-2. Get your Access Token, go to https://www.pushbullet.com/#settings/account and create a new one
-3. Fill in the PUSHBULLET_ACCESS_TOKEN with that value
-4. Add the recipient emails to DEFAULT_RECIPIENT_PUSHBULLET
-!!PLEASE NOTE THAT IF THE RECIPIENT DOES NOT HAVE A PUSHBULLET ACCOUNT, PUSHBULLET SERVICE WILL SEND AN EMAIL!!
+1. Sign up and log in to pushbullet.com
+2. Get your Access Token, go to <https://www.pushbullet.com/#settings/account> and create a new one
+3. Fill in the PUSHBULLET_ACCESS_TOKEN with that value
+4. Add the recipient emails to DEFAULT_RECIPIENT_PUSHBULLET
+ !!PLEASE NOTE THAT IF THE RECIPIENT DOES NOT HAVE A PUSHBULLET ACCOUNT, PUSHBULLET SERVICE WILL SEND AN EMAIL!!
Set them in `/etc/netdata/health_alarm_notify.conf` (to edit it on your system run `/etc/netdata/edit-config health_alarm_notify.conf`), like this:
@@ -36,9 +35,9 @@ SEND_PUSHBULLET="YES"
# not have a pushbullet account, the pushbullet service will send an email
# to that address instead
-# Without an access token, netdata cannot send pushbullet notifications.
+# Without an access token, Netdata cannot send pushbullet notifications.
PUSHBULLET_ACCESS_TOKEN="o.Sometokenhere"
DEFAULT_RECIPIENT_PUSHBULLET="admin1@example.com admin3@somemail.com"
```
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fhealth%2Fnotifications%2Fpushbullet%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fhealth%2Fnotifications%2Fpushbullet%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/health/notifications/pushover/README.md b/health/notifications/pushover/README.md
index 1debf5dc..2d488d1a 100644
--- a/health/notifications/pushover/README.md
+++ b/health/notifications/pushover/README.md
@@ -2,12 +2,12 @@
pushover.net allows you to receive push notifications on your mobile phone. The service seems free for up to 7,500 messages per month.
-netdata will send warning messages with priority `0` and critical messages with priority `1`. pushover.net allows you to select do-not-disturb hours. The way this is configured, critical notifications will ring and vibrate your phone, even during the do-not-disturb-hours. All other notifications will be delivered silently.
+Netdata will send warning messages with priority `0` and critical messages with priority `1`. pushover.net allows you to select do-not-disturb hours. The way this is configured, critical notifications will ring and vibrate your phone, even during the do-not-disturb-hours. All other notifications will be delivered silently.
You need:
-1. APP TOKEN. You can use the same on all your netdata servers.
-2. USER TOKEN for each user you are going to send notifications to. This is the actual recipient of the notification.
+1. APP TOKEN. You can use the same on all your Netdata servers.
+2. USER TOKEN for each user you are going to send notifications to. This is the actual recipient of the notification.
The configuration is like above (slack messages).
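+
+For reference, a minimal sketch of the corresponding settings in `health_alarm_notify.conf` (both tokens are placeholders):
+
+```sh
+SEND_PUSHOVER="YES"
+PUSHOVER_APP_TOKEN="XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
+# the recipient is the pushover.net USER TOKEN of the person to notify
+DEFAULT_RECIPIENT_PUSHOVER="uXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
+```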
@@ -15,4 +15,4 @@ pushover.net notifications look like this:
![image](https://cloud.githubusercontent.com/assets/2662304/18407319/839c10c4-7715-11e6-92c0-12f8215128d3.png)
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fhealth%2Fnotifications%2Fpushover%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fhealth%2Fnotifications%2Fpushover%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/health/notifications/rocketchat/README.md b/health/notifications/rocketchat/README.md
index f05e73f0..47ac5e3f 100644
--- a/health/notifications/rocketchat/README.md
+++ b/health/notifications/rocketchat/README.md
@@ -4,10 +4,10 @@ This is what you will get:
![Netdata on RocketChat](https://i.imgur.com/Zu4t3j3.png)
You need:
-1. The **incoming webhook URL** as given by RocketChat. You can use the same on all your netdata servers (or you can have multiple if you like - your decision).
-2. One or more channels to post the messages to.
+1. The **incoming webhook URL** as given by RocketChat. You can use the same on all your Netdata servers (or you can have multiple if you like - your decision).
+2. One or more channels to post the messages to.
-Get them here: https://rocket.chat/docs/administrator-guides/integrations/index.html#how-to-create-a-new-incoming-webhook
+Get them here: <https://rocket.chat/docs/administrator-guides/integrations/index.html#how-to-create-a-new-incoming-webhook>
Set them in `/etc/netdata/health_alarm_notify.conf` (to edit it on your system run `/etc/netdata/edit-config health_alarm_notify.conf`), like this:
@@ -22,15 +22,14 @@ Set them in `/etc/netdata/health_alarm_notify.conf` (to edit it on your system r
SEND_ROCKETCHAT="YES"
# Login to rocket.chat and create an incoming webhook. You need only one for all
-# your netdata servers (or you can have one for each of your netdata).
-# Without it, netdata cannot send rocketchat notifications.
+# your Netdata servers (or you can have one for each of your Netdata).
+# Without it, Netdata cannot send rocketchat notifications.
ROCKETCHAT_WEBHOOK_URL="<your_incoming_webhook_url>"
# if a role's recipients are not configured, a notification will be send to
# this rocketchat channel (empty = do not send a notification for unconfigured
# roles).
DEFAULT_RECIPIENT_ROCKETCHAT="monitoring_alarms"
-
```
You can define multiple channels like this: `alarms systems`.
@@ -45,4 +44,4 @@ role_recipients_rocketchat[webmaster]="marketing development"
The keywords `systems`, `databases`, `marketing`, `development` are RocketChat channels (they should already exist).
Both public and private channels can be used, even if they differ from the channel configured in your RocketChat incoming webhook.
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fhealth%2Fnotifications%2Frocketchat%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fhealth%2Fnotifications%2Frocketchat%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/health/notifications/slack/README.md b/health/notifications/slack/README.md
index 0616de46..2352d27c 100644
--- a/health/notifications/slack/README.md
+++ b/health/notifications/slack/README.md
@@ -5,11 +5,11 @@ This is what you will get:
You need:
-1. The **incoming webhook URL** as given by slack.com. You can use the same on all your netdata servers (or you can have multiple if you like - your decision).
-2. One or more channels to post the messages to.
+1. The **incoming webhook URL** as given by slack.com. You can use the same on all your Netdata servers (or you can have multiple if you like - your decision).
+2. One or more channels to post the messages to.
To get a webhook that works on multiple channels, you will need to login to your slack.com workspace and create an incoming webhook using the [Incoming Webhooks App](https://slack.com/apps/A0F7XDUAZ-incoming-webhooks).
-Do NOT use the instructions in https://api.slack.com/incoming-webhooks#enable_webhooks, as the particular webhooks work only for a single channel.
+Do NOT use the instructions in <https://api.slack.com/incoming-webhooks#enable_webhooks>, as the particular webhooks work only for a single channel.
Set the webhook and the recipients in `/etc/netdata/health_alarm_notify.conf` (to edit it on your system run `/etc/netdata/edit-config health_alarm_notify.conf`), like this:
@@ -24,15 +24,15 @@ SLACK_WEBHOOK_URL="https://hooks.slack.com/services/XXXXXXXX/XXXXXXXX/XXXXXXXXXX
# - The channel or user defined in slack for the webhook (syntax: '#')
# empty = do not send a notification for unconfigured roles
DEFAULT_RECIPIENT_SLACK="alarms"
-
```
You can define multiple recipients like this: `# #alarms systems @myuser`.
This example will send the alarm to:
-- The recipient defined in slack for the webhook (not known to netdata)
-- The channel 'alarms'
-- The channel 'systems'
-- The user @myuser
+
+- The recipient defined in slack for the webhook (not known to Netdata)
+- The channel 'alarms'
+- The channel 'systems'
+- The user @myuser
You can give different recipients per **role** using these (at the same file):
@@ -42,4 +42,4 @@ role_recipients_slack[dba]="databases systems"
role_recipients_slack[webmaster]="marketing development"
```
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fhealth%2Fnotifications%2Fslack%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fhealth%2Fnotifications%2Fslack%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/health/notifications/smstools3/README.md b/health/notifications/smstools3/README.md
index 4a146985..28184b3c 100644
--- a/health/notifications/smstools3/README.md
+++ b/health/notifications/smstools3/README.md
@@ -2,12 +2,12 @@
The [SMS Server Tools 3](http://smstools3.kekekasvi.com/) is an SMS gateway software package that can send and receive short messages through GSM modems and mobile phones.
-To have netdata send notifications via SMS Server Tools 3, you'll first need to [install](http://smstools3.kekekasvi.com/index.php?p=compiling) and [configure](http://smstools3.kekekasvi.com/index.php?p=configure) smsd.
+To have Netdata send notifications via SMS Server Tools 3, you'll first need to [install](http://smstools3.kekekasvi.com/index.php?p=compiling) and [configure](http://smstools3.kekekasvi.com/index.php?p=configure) smsd.
Ensure that the user `netdata` can execute `sendsms`. Any user executing `sendsms` needs to:
-* Have write permissions to `/tmp` and `/var/spool/sms/outgoing`
-* Be a member of group `smsd`
+- Have write permissions to `/tmp` and `/var/spool/sms/outgoing`
+- Be a member of group `smsd`
To ensure that the steps above are successful, just `su netdata` and execute `sendsms phone message`.
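+
+For example (the phone number is a placeholder, and this assumes the `netdata` user has a usable shell):
+
+```sh
+su -s /bin/bash netdata
+sendsms 491701234567 "test message from netdata"
+```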
@@ -36,4 +36,4 @@ Netdata uses the script `sendsms` that is installed by `smstools3` and just pass
sendsms=""
```
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fhealth%2Fnotifications%2Fsmstools3%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fhealth%2Fnotifications%2Fsmstools3%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/health/notifications/syslog/README.md b/health/notifications/syslog/README.md
index 597db0cd..5f1d5d8b 100644
--- a/health/notifications/syslog/README.md
+++ b/health/notifications/syslog/README.md
@@ -4,7 +4,9 @@ You need a working `logger` command for this to work. This is the case on prett
Logged messages will look like this:
- netdata WARNING on hostname at Tue Apr 3 09:00:00 EDT 2018: disk_space._ out of disk space time = 5h
+```
+netdata WARNING on hostname at Tue Apr 3 09:00:00 EDT 2018: disk_space._ out of disk space time = 5h
+```
## configuration
@@ -14,12 +16,14 @@ You can also configure per-role targets in the same file a bit further down.
Targets are defined as follows:
- [[facility.level][@host[:port]]/]prefix
+```
+[[facility.level][@host[:port]]/]prefix
+```
`prefix` defines what the log messages are prefixed with. By default, all lines are prefixed with 'netdata'.
-The `facility` and `level` are the standard syslog facility and level options, for more info on them see your local `logger` and `syslog` documentation. By default, netdata will log to the `local6` facility, with a log level dependent on the type of message (`crit` for CRITICAL, `warning` for WARNING, and `info` for everything else).
+The `facility` and `level` are the standard syslog facility and level options, for more info on them see your local `logger` and `syslog` documentation. By default, Netdata will log to the `local6` facility, with a log level dependent on the type of message (`crit` for CRITICAL, `warning` for WARNING, and `info` for everything else).
You can configure sending directly to remote log servers by specifying a host (and optionally a port). However, this has a somewhat high overhead, so it is much preferred to use your local syslog daemon to handle the forwarding of messages to remote systems (pretty much all of them allow at least simple forwarding, and most of the really popular ones support complex queueing and routing of messages to remote log servers).
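To make the format concrete, a few example targets are sketched below (the hostname and facilities are placeholders; `DEFAULT_RECIPIENT_SYSLOG` is assumed to be the matching setting in `health_alarm_notify.conf`):

```
# [[facility.level][@host[:port]]/]prefix examples
DEFAULT_RECIPIENT_SYSLOG="netdata"                         # defaults: local6 facility, 'netdata' prefix
DEFAULT_RECIPIENT_SYSLOG="local0.info/netdata"             # explicit facility and level
DEFAULT_RECIPIENT_SYSLOG="local0.info@loghost:514/netdata" # log directly to a remote server
```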
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fhealth%2Fnotifications%2Fsyslog%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fhealth%2Fnotifications%2Fsyslog%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/health/notifications/telegram/README.md b/health/notifications/telegram/README.md
index 9d652542..981030ef 100644
--- a/health/notifications/telegram/README.md
+++ b/health/notifications/telegram/README.md
@@ -4,13 +4,13 @@
With Telegram, you can send messages, photos, videos and files of any type (doc, zip, mp3, etc), as well as create groups for up to 30,000 people or channels for broadcasting to unlimited audiences. You can write to your phone contacts and find people by their usernames. As a result, Telegram is like SMS and email combined — and can take care of all your personal or business messaging needs.
-netdata will send warning messages without vibration.
+Netdata will send warning messages without vibration.
You need:
-1. A bot token. To get one, contact the [@BotFather](https://t.me/BotFather) bot and send the command `/newbot`. Follow the instructions.
-2. A chat id for every chat you want to send messages to. Contact the [@myidbot](https://t.me/myidbot) bot and send the command `/getid` to get your personal chat id or invite him into a group and issue the same command to get the group chat id.
-3. Start a conversation with your bot or invite him into a group you want to sent messages to.
+1. A bot token. To get one, contact the [@BotFather](https://t.me/BotFather) bot and send the command `/newbot`. Follow the instructions.
+2. A chat id for every chat you want to send messages to. Contact the [@myidbot](https://t.me/myidbot) bot and send the command `/getid` to get your personal chat id, or invite it into a group and issue the same command to get the group chat id.
+3. Start a conversation with your bot, or invite it into a group you want to send messages to.
See the Slack notification method for configuration details.
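As a sketch, the corresponding settings in `health_alarm_notify.conf` could look like this (the token and chat id are placeholders; `SEND_TELEGRAM`, `TELEGRAM_BOT_TOKEN` and `DEFAULT_RECIPIENT_TELEGRAM` are assumed to be the variable names Netdata uses for this method):

```
SEND_TELEGRAM="YES"
TELEGRAM_BOT_TOKEN="000000000:XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
# one or more chat ids, as returned by @myidbot
DEFAULT_RECIPIENT_TELEGRAM="-100123456789"
```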
@@ -18,4 +18,4 @@ Telegram messages look like this:
![image](https://fb.hash.works/ytl/preview.jpg)
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fhealth%2Fnotifications%2Ftelegram%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fhealth%2Fnotifications%2Ftelegram%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/health/notifications/twilio/README.md b/health/notifications/twilio/README.md
index 743f54e3..8c8d7cfb 100644
--- a/health/notifications/twilio/README.md
+++ b/health/notifications/twilio/README.md
@@ -6,11 +6,11 @@ Will look like this on your Android device:
You will need:
-1. Signup and Login to twilio.com
-2. Pick an SMS capable number during sign up.
-3. Get your SID, and Token from <https://www.twilio.com/console>
-3. Fill in TWILIO_ACCOUNT_SID="XXXXXXXX" TWILIO_ACCOUNT_TOKEN="XXXXXXXXX" TWILIO_NUMBER="+XXXXXXXXXXX"
-4. Add the recipient phone numbers to DEFAULT_RECIPIENT_TWILIO="+XXXXXXXXXXX"
+1. Sign up and log in to twilio.com.
+2. Pick an SMS-capable number during sign up.
+3. Get your SID and token from <https://www.twilio.com/console>
+4. Fill in TWILIO_ACCOUNT_SID="XXXXXXXX" TWILIO_ACCOUNT_TOKEN="XXXXXXXXX" TWILIO_NUMBER="+XXXXXXXXXXX"
+5. Add the recipient phone numbers to DEFAULT_RECIPIENT_TWILIO="+XXXXXXXXXXX"
!!PLEASE NOTE THAT IF YOUR ACCOUNT IS A TRIAL ACCOUNT YOU WILL ONLY BE ABLE TO SEND NOTIFICATIONS TO THE NUMBER YOU SIGNED UP WITH
@@ -32,11 +32,11 @@ SEND_TWILIO="YES"
# Then just set the recipients' phone numbers.
# The trial account is only allowed to use the number specified when set up.
-# Without an account sid and token, netdata cannot send Twilio text messages.
+# Without an account sid and token, Netdata cannot send Twilio text messages.
TWILIO_ACCOUNT_SID="xxxxxxxxx"
TWILIO_ACCOUNT_TOKEN="xxxxxxxxxx"
TWILIO_NUMBER="xxxxxxxxxxx"
DEFAULT_RECIPIENT_TWILIO="+15555555555"
```
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fhealth%2Fnotifications%2Ftwilio%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fhealth%2Fnotifications%2Ftwilio%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/health/notifications/web/README.md b/health/notifications/web/README.md
index 0aac9419..8eed06bf 100644
--- a/health/notifications/web/README.md
+++ b/health/notifications/web/README.md
@@ -1,8 +1,8 @@
# Dashboard
-The netdata dashboard shows HTML notifications, when it is open.
+The Netdata dashboard shows HTML notifications while it is open.
Such web notifications look like this:
![image](https://cloud.githubusercontent.com/assets/2662304/18407279/82bac6a6-7714-11e6-847e-c2e84eeacbfb.png)
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fhealth%2Fnotifications%2Fweb%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fhealth%2Fnotifications%2Fweb%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/install-sh b/install-sh
new file mode 100755
index 00000000..59990a10
--- /dev/null
+++ b/install-sh
@@ -0,0 +1,508 @@
+#!/bin/sh
+# install - install a program, script, or datafile
+
+scriptversion=2014-09-12.12; # UTC
+
+# This originates from X11R5 (mit/util/scripts/install.sh), which was
+# later released in X11R6 (xc/config/util/install.sh) with the
+# following copyright and license.
+#
+# Copyright (C) 1994 X Consortium
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to
+# deal in the Software without restriction, including without limitation the
+# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+# sell copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# X CONSORTIUM BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+# AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNEC-
+# TION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+#
+# Except as contained in this notice, the name of the X Consortium shall not
+# be used in advertising or otherwise to promote the sale, use or other deal-
+# ings in this Software without prior written authorization from the X Consor-
+# tium.
+#
+#
+# FSF changes to this file are in the public domain.
+#
+# Calling this script install-sh is preferred over install.sh, to prevent
+# 'make' implicit rules from creating a file called install from it
+# when there is no Makefile.
+#
+# This script is compatible with the BSD install script, but was written
+# from scratch.
+
+tab=' '
+nl='
+'
+IFS=" $tab$nl"
+
+# Set DOITPROG to "echo" to test this script.
+
+doit=${DOITPROG-}
+doit_exec=${doit:-exec}
+
+# Put in absolute file names if you don't have them in your path;
+# or use environment vars.
+
+chgrpprog=${CHGRPPROG-chgrp}
+chmodprog=${CHMODPROG-chmod}
+chownprog=${CHOWNPROG-chown}
+cmpprog=${CMPPROG-cmp}
+cpprog=${CPPROG-cp}
+mkdirprog=${MKDIRPROG-mkdir}
+mvprog=${MVPROG-mv}
+rmprog=${RMPROG-rm}
+stripprog=${STRIPPROG-strip}
+
+posix_mkdir=
+
+# Desired mode of installed file.
+mode=0755
+
+chgrpcmd=
+chmodcmd=$chmodprog
+chowncmd=
+mvcmd=$mvprog
+rmcmd="$rmprog -f"
+stripcmd=
+
+src=
+dst=
+dir_arg=
+dst_arg=
+
+copy_on_change=false
+is_target_a_directory=possibly
+
+usage="\
+Usage: $0 [OPTION]... [-T] SRCFILE DSTFILE
+ or: $0 [OPTION]... SRCFILES... DIRECTORY
+ or: $0 [OPTION]... -t DIRECTORY SRCFILES...
+ or: $0 [OPTION]... -d DIRECTORIES...
+
+In the 1st form, copy SRCFILE to DSTFILE.
+In the 2nd and 3rd, copy all SRCFILES to DIRECTORY.
+In the 4th, create DIRECTORIES.
+
+Options:
+ --help display this help and exit.
+ --version display version info and exit.
+
+ -c (ignored)
+ -C install only if different (preserve the last data modification time)
+ -d create directories instead of installing files.
+ -g GROUP $chgrpprog installed files to GROUP.
+ -m MODE $chmodprog installed files to MODE.
+ -o USER $chownprog installed files to USER.
+ -s $stripprog installed files.
+ -t DIRECTORY install into DIRECTORY.
+ -T report an error if DSTFILE is a directory.
+
+Environment variables override the default commands:
+ CHGRPPROG CHMODPROG CHOWNPROG CMPPROG CPPROG MKDIRPROG MVPROG
+ RMPROG STRIPPROG
+"
+
+while test $# -ne 0; do
+ case $1 in
+ -c) ;;
+
+ -C) copy_on_change=true;;
+
+ -d) dir_arg=true;;
+
+ -g) chgrpcmd="$chgrpprog $2"
+ shift;;
+
+ --help) echo "$usage"; exit $?;;
+
+ -m) mode=$2
+ case $mode in
+ *' '* | *"$tab"* | *"$nl"* | *'*'* | *'?'* | *'['*)
+ echo "$0: invalid mode: $mode" >&2
+ exit 1;;
+ esac
+ shift;;
+
+ -o) chowncmd="$chownprog $2"
+ shift;;
+
+ -s) stripcmd=$stripprog;;
+
+ -t)
+ is_target_a_directory=always
+ dst_arg=$2
+ # Protect names problematic for 'test' and other utilities.
+ case $dst_arg in
+ -* | [=\(\)!]) dst_arg=./$dst_arg;;
+ esac
+ shift;;
+
+ -T) is_target_a_directory=never;;
+
+ --version) echo "$0 $scriptversion"; exit $?;;
+
+ --) shift
+ break;;
+
+ -*) echo "$0: invalid option: $1" >&2
+ exit 1;;
+
+ *) break;;
+ esac
+ shift
+done
+
+# We allow the use of options -d and -T together, by making -d
+# take the precedence; this is for compatibility with GNU install.
+
+if test -n "$dir_arg"; then
+ if test -n "$dst_arg"; then
+ echo "$0: target directory not allowed when installing a directory." >&2
+ exit 1
+ fi
+fi
+
+if test $# -ne 0 && test -z "$dir_arg$dst_arg"; then
+ # When -d is used, all remaining arguments are directories to create.
+ # When -t is used, the destination is already specified.
+ # Otherwise, the last argument is the destination. Remove it from $@.
+ for arg
+ do
+ if test -n "$dst_arg"; then
+ # $@ is not empty: it contains at least $arg.
+ set fnord "$@" "$dst_arg"
+ shift # fnord
+ fi
+ shift # arg
+ dst_arg=$arg
+ # Protect names problematic for 'test' and other utilities.
+ case $dst_arg in
+ -* | [=\(\)!]) dst_arg=./$dst_arg;;
+ esac
+ done
+fi
+
+if test $# -eq 0; then
+ if test -z "$dir_arg"; then
+ echo "$0: no input file specified." >&2
+ exit 1
+ fi
+ # It's OK to call 'install-sh -d' without argument.
+ # This can happen when creating conditional directories.
+ exit 0
+fi
+
+if test -z "$dir_arg"; then
+ if test $# -gt 1 || test "$is_target_a_directory" = always; then
+ if test ! -d "$dst_arg"; then
+ echo "$0: $dst_arg: Is not a directory." >&2
+ exit 1
+ fi
+ fi
+fi
+
+if test -z "$dir_arg"; then
+ do_exit='(exit $ret); exit $ret'
+ trap "ret=129; $do_exit" 1
+ trap "ret=130; $do_exit" 2
+ trap "ret=141; $do_exit" 13
+ trap "ret=143; $do_exit" 15
+
+ # Set umask so as not to create temps with too-generous modes.
+ # However, 'strip' requires both read and write access to temps.
+ case $mode in
+ # Optimize common cases.
+ *644) cp_umask=133;;
+ *755) cp_umask=22;;
+
+ *[0-7])
+ if test -z "$stripcmd"; then
+ u_plus_rw=
+ else
+ u_plus_rw='% 200'
+ fi
+ cp_umask=`expr '(' 777 - $mode % 1000 ')' $u_plus_rw`;;
+ *)
+ if test -z "$stripcmd"; then
+ u_plus_rw=
+ else
+ u_plus_rw=,u+rw
+ fi
+ cp_umask=$mode$u_plus_rw;;
+ esac
+fi
+
+for src
+do
+ # Protect names problematic for 'test' and other utilities.
+ case $src in
+ -* | [=\(\)!]) src=./$src;;
+ esac
+
+ if test -n "$dir_arg"; then
+ dst=$src
+ dstdir=$dst
+ test -d "$dstdir"
+ dstdir_status=$?
+ else
+
+ # Waiting for this to be detected by the "$cpprog $src $dsttmp" command
+ # might cause directories to be created, which would be especially bad
+ # if $src (and thus $dsttmp) contains '*'.
+ if test ! -f "$src" && test ! -d "$src"; then
+ echo "$0: $src does not exist." >&2
+ exit 1
+ fi
+
+ if test -z "$dst_arg"; then
+ echo "$0: no destination specified." >&2
+ exit 1
+ fi
+ dst=$dst_arg
+
+ # If destination is a directory, append the input filename; won't work
+ # if double slashes aren't ignored.
+ if test -d "$dst"; then
+ if test "$is_target_a_directory" = never; then
+ echo "$0: $dst_arg: Is a directory" >&2
+ exit 1
+ fi
+ dstdir=$dst
+ dst=$dstdir/`basename "$src"`
+ dstdir_status=0
+ else
+ dstdir=`dirname "$dst"`
+ test -d "$dstdir"
+ dstdir_status=$?
+ fi
+ fi
+
+ obsolete_mkdir_used=false
+
+ if test $dstdir_status != 0; then
+ case $posix_mkdir in
+ '')
+ # Create intermediate dirs using mode 755 as modified by the umask.
+ # This is like FreeBSD 'install' as of 1997-10-28.
+ umask=`umask`
+ case $stripcmd.$umask in
+ # Optimize common cases.
+ *[2367][2367]) mkdir_umask=$umask;;
+ .*0[02][02] | .[02][02] | .[02]) mkdir_umask=22;;
+
+ *[0-7])
+ mkdir_umask=`expr $umask + 22 \
+ - $umask % 100 % 40 + $umask % 20 \
+ - $umask % 10 % 4 + $umask % 2
+ `;;
+ *) mkdir_umask=$umask,go-w;;
+ esac
+
+ # With -d, create the new directory with the user-specified mode.
+ # Otherwise, rely on $mkdir_umask.
+ if test -n "$dir_arg"; then
+ mkdir_mode=-m$mode
+ else
+ mkdir_mode=
+ fi
+
+ posix_mkdir=false
+ case $umask in
+ *[123567][0-7][0-7])
+ # POSIX mkdir -p sets u+wx bits regardless of umask, which
+ # is incompatible with FreeBSD 'install' when (umask & 300) != 0.
+ ;;
+ *)
+ # $RANDOM is not portable (e.g. dash); use it when possible to
+ # lower collision chance
+ tmpdir=${TMPDIR-/tmp}/ins$RANDOM-$$
+ trap 'ret=$?; rmdir "$tmpdir/a/b" "$tmpdir/a" "$tmpdir" 2>/dev/null; exit $ret' 0
+
+ # As "mkdir -p" follows symlinks and we work in /tmp possibly; so
+ # create the $tmpdir first (and fail if unsuccessful) to make sure
+ # that nobody tries to guess the $tmpdir name.
+ if (umask $mkdir_umask &&
+ $mkdirprog $mkdir_mode "$tmpdir" &&
+ exec $mkdirprog $mkdir_mode -p -- "$tmpdir/a/b") >/dev/null 2>&1
+ then
+ if test -z "$dir_arg" || {
+ # Check for POSIX incompatibilities with -m.
+ # HP-UX 11.23 and IRIX 6.5 mkdir -m -p sets group- or
+ # other-writable bit of parent directory when it shouldn't.
+ # FreeBSD 6.1 mkdir -m -p sets mode of existing directory.
+ test_tmpdir="$tmpdir/a"
+ ls_ld_tmpdir=`ls -ld "$test_tmpdir"`
+ case $ls_ld_tmpdir in
+ d????-?r-*) different_mode=700;;
+ d????-?--*) different_mode=755;;
+ *) false;;
+ esac &&
+ $mkdirprog -m$different_mode -p -- "$test_tmpdir" && {
+ ls_ld_tmpdir_1=`ls -ld "$test_tmpdir"`
+ test "$ls_ld_tmpdir" = "$ls_ld_tmpdir_1"
+ }
+ }
+ then posix_mkdir=:
+ fi
+ rmdir "$tmpdir/a/b" "$tmpdir/a" "$tmpdir"
+ else
+ # Remove any dirs left behind by ancient mkdir implementations.
+ rmdir ./$mkdir_mode ./-p ./-- "$tmpdir" 2>/dev/null
+ fi
+ trap '' 0;;
+ esac;;
+ esac
+
+ if
+ $posix_mkdir && (
+ umask $mkdir_umask &&
+ $doit_exec $mkdirprog $mkdir_mode -p -- "$dstdir"
+ )
+ then :
+ else
+
+ # The umask is ridiculous, or mkdir does not conform to POSIX,
+ # or it failed possibly due to a race condition. Create the
+ # directory the slow way, step by step, checking for races as we go.
+
+ case $dstdir in
+ /*) prefix='/';;
+ [-=\(\)!]*) prefix='./';;
+ *) prefix='';;
+ esac
+
+ oIFS=$IFS
+ IFS=/
+ set -f
+ set fnord $dstdir
+ shift
+ set +f
+ IFS=$oIFS
+
+ prefixes=
+
+ for d
+ do
+ test X"$d" = X && continue
+
+ prefix=$prefix$d
+ if test -d "$prefix"; then
+ prefixes=
+ else
+ if $posix_mkdir; then
+ (umask=$mkdir_umask &&
+ $doit_exec $mkdirprog $mkdir_mode -p -- "$dstdir") && break
+ # Don't fail if two instances are running concurrently.
+ test -d "$prefix" || exit 1
+ else
+ case $prefix in
+ *\'*) qprefix=`echo "$prefix" | sed "s/'/'\\\\\\\\''/g"`;;
+ *) qprefix=$prefix;;
+ esac
+ prefixes="$prefixes '$qprefix'"
+ fi
+ fi
+ prefix=$prefix/
+ done
+
+ if test -n "$prefixes"; then
+ # Don't fail if two instances are running concurrently.
+ (umask $mkdir_umask &&
+ eval "\$doit_exec \$mkdirprog $prefixes") ||
+ test -d "$dstdir" || exit 1
+ obsolete_mkdir_used=true
+ fi
+ fi
+ fi
+
+ if test -n "$dir_arg"; then
+ { test -z "$chowncmd" || $doit $chowncmd "$dst"; } &&
+ { test -z "$chgrpcmd" || $doit $chgrpcmd "$dst"; } &&
+ { test "$obsolete_mkdir_used$chowncmd$chgrpcmd" = false ||
+ test -z "$chmodcmd" || $doit $chmodcmd $mode "$dst"; } || exit 1
+ else
+
+ # Make a couple of temp file names in the proper directory.
+ dsttmp=$dstdir/_inst.$$_
+ rmtmp=$dstdir/_rm.$$_
+
+ # Trap to clean up those temp files at exit.
+ trap 'ret=$?; rm -f "$dsttmp" "$rmtmp" && exit $ret' 0
+
+ # Copy the file name to the temp name.
+ (umask $cp_umask && $doit_exec $cpprog "$src" "$dsttmp") &&
+
+ # and set any options; do chmod last to preserve setuid bits.
+ #
+ # If any of these fail, we abort the whole thing. If we want to
+ # ignore errors from any of these, just make sure not to ignore
+ # errors from the above "$doit $cpprog $src $dsttmp" command.
+ #
+ { test -z "$chowncmd" || $doit $chowncmd "$dsttmp"; } &&
+ { test -z "$chgrpcmd" || $doit $chgrpcmd "$dsttmp"; } &&
+ { test -z "$stripcmd" || $doit $stripcmd "$dsttmp"; } &&
+ { test -z "$chmodcmd" || $doit $chmodcmd $mode "$dsttmp"; } &&
+
+ # If -C, don't bother to copy if it wouldn't change the file.
+ if $copy_on_change &&
+ old=`LC_ALL=C ls -dlL "$dst" 2>/dev/null` &&
+ new=`LC_ALL=C ls -dlL "$dsttmp" 2>/dev/null` &&
+ set -f &&
+ set X $old && old=:$2:$4:$5:$6 &&
+ set X $new && new=:$2:$4:$5:$6 &&
+ set +f &&
+ test "$old" = "$new" &&
+ $cmpprog "$dst" "$dsttmp" >/dev/null 2>&1
+ then
+ rm -f "$dsttmp"
+ else
+ # Rename the file to the real destination.
+ $doit $mvcmd -f "$dsttmp" "$dst" 2>/dev/null ||
+
+ # The rename failed, perhaps because mv can't rename something else
+ # to itself, or perhaps because mv is so ancient that it does not
+ # support -f.
+ {
+ # Now remove or move aside any old file at destination location.
+ # We try this two ways since rm can't unlink itself on some
+ # systems and the destination file might be busy for other
+ # reasons. In this case, the final cleanup might fail but the new
+ # file should still install successfully.
+ {
+ test ! -f "$dst" ||
+ $doit $rmcmd -f "$dst" 2>/dev/null ||
+ { $doit $mvcmd -f "$dst" "$rmtmp" 2>/dev/null &&
+ { $doit $rmcmd -f "$rmtmp" 2>/dev/null; :; }
+ } ||
+ { echo "$0: cannot unlink or rename $dst" >&2
+ (exit 1); exit 1
+ }
+ } &&
+
+ # Now rename the file to the real destination.
+ $doit $mvcmd "$dsttmp" "$dst"
+ }
+ fi || exit 1
+
+ trap '' 0
+ fi
+done
+
+# Local variables:
+# eval: (add-hook 'write-file-hooks 'time-stamp)
+# time-stamp-start: "scriptversion="
+# time-stamp-format: "%:y-%02m-%02d.%02H"
+# time-stamp-time-zone: "UTC"
+# time-stamp-end: "; # UTC"
+# End:
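To make the four forms in the script's usage text concrete, a few typical invocations are sketched below (all paths and file names are placeholders):

```
# 4th form: create directories
./install-sh -d /usr/local/lib/netdata/plugins

# 1st form: copy one file to a destination, with an explicit mode
./install-sh -c -m 0644 netdata.conf /etc/netdata/netdata.conf

# 3rd form: install several files into a directory
./install-sh -t /usr/local/bin script1.sh script2.sh
```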
diff --git a/libnetdata/Makefile.in b/libnetdata/Makefile.in
new file mode 100644
index 00000000..064637fe
--- /dev/null
+++ b/libnetdata/Makefile.in
@@ -0,0 +1,716 @@
+# Makefile.in generated by automake 1.15.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2017 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+VPATH = @srcdir@
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = libnetdata
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(dist_noinst_DATA) \
+ $(am__DIST_COMMON)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \
+ ctags-recursive dvi-recursive html-recursive info-recursive \
+ install-data-recursive install-dvi-recursive \
+ install-exec-recursive install-html-recursive \
+ install-info-recursive install-pdf-recursive \
+ install-ps-recursive install-recursive installcheck-recursive \
+ installdirs-recursive pdf-recursive ps-recursive \
+ tags-recursive uninstall-recursive
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_noinst_DATA)
+RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \
+ distclean-recursive maintainer-clean-recursive
+am__recursive_targets = \
+ $(RECURSIVE_TARGETS) \
+ $(RECURSIVE_CLEAN_TARGETS) \
+ $(am__extra_recursive_targets)
+AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \
+ distdir
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+# Read a list of newline-separated strings from the standard input,
+# and print each of them once, without duplicates. Input order is
+# *not* preserved.
+am__uniquify_input = $(AWK) '\
+ BEGIN { nonempty = 0; } \
+ { items[$$0] = 1; nonempty = 1; } \
+ END { if (nonempty) { for (i in items) print i; }; } \
+'
+# Make sure the list of sources is unique. This is necessary because,
+# e.g., the same source file might be shared among _SOURCES variables
+# for different programs/libraries.
+am__define_uniq_tagged_files = \
+ list='$(am__tagged_files)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | $(am__uniquify_input)`
+ETAGS = etags
+CTAGS = ctags
+DIST_SUBDIRS = $(SUBDIRS)
+am__DIST_COMMON = $(srcdir)/Makefile.in
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+am__relativize = \
+ dir0=`pwd`; \
+ sed_first='s,^\([^/]*\)/.*$$,\1,'; \
+ sed_rest='s,^[^/]*/*,,'; \
+ sed_last='s,^.*/\([^/]*\)$$,\1,'; \
+ sed_butlast='s,/*[^/]*$$,,'; \
+ while test -n "$$dir1"; do \
+ first=`echo "$$dir1" | sed -e "$$sed_first"`; \
+ if test "$$first" != "."; then \
+ if test "$$first" = ".."; then \
+ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \
+ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \
+ else \
+ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \
+ if test "$$first2" = "$$first"; then \
+ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \
+ else \
+ dir2="../$$dir2"; \
+ fi; \
+ dir0="$$dir0"/"$$first"; \
+ fi; \
+ fi; \
+ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \
+ done; \
+ reldir="$$dir2"
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CUPSCONFIG = @CUPSCONFIG@
+CXX = @CXX@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CXX_BINARY = @CXX_BINARY@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+JSON_CFLAGS = @JSON_CFLAGS@
+JSON_LIBS = @JSON_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
+LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
+LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
+LIBCURL_LIBS = @LIBCURL_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
+LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
+LIBSSL_LIBS = @LIBSSL_LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
+OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
+OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
+OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
+OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
+OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
+OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
+OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
+OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
+OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
+OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
+OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
+OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
+PROTOBUF_LIBS = @PROTOBUF_LIBS@
+PROTOC = @PROTOC@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
+XENLIGHT_LIBS = @XENLIGHT_LIBS@
+YAJL_CFLAGS = @YAJL_CFLAGS@
+YAJL_LIBS = @YAJL_LIBS@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+SUBDIRS = \
+ adaptive_resortable_list \
+ avl \
+ buffer \
+ clocks \
+ config \
+ dictionary \
+ eval \
+ json \
+ health \
+ locks \
+ log \
+ popen \
+ procfile \
+ simple_pattern \
+ socket \
+ statistical \
+ storage_number \
+ threads \
+ url \
+ $(NULL)
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
+
+all: all-recursive
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu libnetdata/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu libnetdata/Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+
+# This directory's subdirectories are mostly independent; you can cd
+# into them and run 'make' without going through this Makefile.
+# To change the values of 'make' variables: instead of editing Makefiles,
+# (1) if the variable is set in 'config.status', edit 'config.status'
+# (which will cause the Makefiles to be regenerated when you run 'make');
+# (2) otherwise, pass the desired values on the 'make' command line.
+$(am__recursive_targets):
+ @fail=; \
+ if $(am__make_keepgoing); then \
+ failcom='fail=yes'; \
+ else \
+ failcom='exit 1'; \
+ fi; \
+ dot_seen=no; \
+ target=`echo $@ | sed s/-recursive//`; \
+ case "$@" in \
+ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \
+ *) list='$(SUBDIRS)' ;; \
+ esac; \
+ for subdir in $$list; do \
+ echo "Making $$target in $$subdir"; \
+ if test "$$subdir" = "."; then \
+ dot_seen=yes; \
+ local_target="$$target-am"; \
+ else \
+ local_target="$$target"; \
+ fi; \
+ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \
+ || eval $$failcom; \
+ done; \
+ if test "$$dot_seen" = "no"; then \
+ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \
+ fi; test -z "$$fail"
+
+ID: $(am__tagged_files)
+ $(am__define_uniq_tagged_files); mkid -fID $$unique
+tags: tags-recursive
+TAGS: tags
+
+tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+ set x; \
+ here=`pwd`; \
+ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \
+ include_option=--etags-include; \
+ empty_fix=.; \
+ else \
+ include_option=--include; \
+ empty_fix=; \
+ fi; \
+ list='$(SUBDIRS)'; for subdir in $$list; do \
+ if test "$$subdir" = .; then :; else \
+ test ! -f $$subdir/TAGS || \
+ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \
+ fi; \
+ done; \
+ $(am__define_uniq_tagged_files); \
+ shift; \
+ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
+ test -n "$$unique" || unique=$$empty_fix; \
+ if test $$# -gt 0; then \
+ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+ "$$@" $$unique; \
+ else \
+ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+ $$unique; \
+ fi; \
+ fi
+ctags: ctags-recursive
+
+CTAGS: ctags
+ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+ $(am__define_uniq_tagged_files); \
+ test -z "$(CTAGS_ARGS)$$unique" \
+ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
+ $$unique
+
+GTAGS:
+ here=`$(am__cd) $(top_builddir) && pwd` \
+ && $(am__cd) $(top_srcdir) \
+ && gtags -i $(GTAGS_ARGS) "$$here"
+cscopelist: cscopelist-recursive
+
+cscopelist-am: $(am__tagged_files)
+ list='$(am__tagged_files)'; \
+ case "$(srcdir)" in \
+ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \
+ *) sdir=$(subdir)/$(srcdir) ;; \
+ esac; \
+ for i in $$list; do \
+ if test -f "$$i"; then \
+ echo "$(subdir)/$$i"; \
+ else \
+ echo "$$sdir/$$i"; \
+ fi; \
+ done >> $(top_builddir)/cscope.files
+
+distclean-tags:
+ -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+ @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \
+ if test "$$subdir" = .; then :; else \
+ $(am__make_dryrun) \
+ || test -d "$(distdir)/$$subdir" \
+ || $(MKDIR_P) "$(distdir)/$$subdir" \
+ || exit 1; \
+ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \
+ $(am__relativize); \
+ new_distdir=$$reldir; \
+ dir1=$$subdir; dir2="$(top_distdir)"; \
+ $(am__relativize); \
+ new_top_distdir=$$reldir; \
+ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \
+ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \
+ ($(am__cd) $$subdir && \
+ $(MAKE) $(AM_MAKEFLAGS) \
+ top_distdir="$$new_top_distdir" \
+ distdir="$$new_distdir" \
+ am__remove_distdir=: \
+ am__skip_length_check=: \
+ am__skip_mode_fix=: \
+ distdir) \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-recursive
+all-am: Makefile $(DATA)
+installdirs: installdirs-recursive
+installdirs-am:
+install: install-recursive
+install-exec: install-exec-recursive
+install-data: install-data-recursive
+uninstall: uninstall-recursive
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-recursive
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-recursive
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-recursive
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic distclean-tags
+
+dvi: dvi-recursive
+
+dvi-am:
+
+html: html-recursive
+
+html-am:
+
+info: info-recursive
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-recursive
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-recursive
+
+install-html-am:
+
+install-info: install-info-recursive
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-recursive
+
+install-pdf-am:
+
+install-ps: install-ps-recursive
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-recursive
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-recursive
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-recursive
+
+pdf-am:
+
+ps: ps-recursive
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: $(am__recursive_targets) install-am install-strip
+
+.PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am check \
+ check-am clean clean-generic cscopelist-am ctags ctags-am \
+ distclean distclean-generic distclean-tags distdir dvi dvi-am \
+ html html-am info info-am install install-am install-data \
+ install-data-am install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs installdirs-am maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags tags-am uninstall uninstall-am
+
+.PRECIOUS: Makefile
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/libnetdata/README.md b/libnetdata/README.md
index 9892d670..8610cfd1 100644
--- a/libnetdata/README.md
+++ b/libnetdata/README.md
@@ -1,8 +1,5 @@
# libnetdata
-`libnetdata` is a collection of library code that is used by all netdata `C` programs.
+`libnetdata` is a collection of library code that is used by all Netdata `C` programs.
-
-
-
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Flibnetdata%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Flibnetdata%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/libnetdata/adaptive_resortable_list/Makefile.in b/libnetdata/adaptive_resortable_list/Makefile.in
new file mode 100644
index 00000000..16129a28
--- /dev/null
+++ b/libnetdata/adaptive_resortable_list/Makefile.in
@@ -0,0 +1,514 @@
+# Makefile.in generated by automake 1.15.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2017 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+VPATH = @srcdir@
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = libnetdata/adaptive_resortable_list
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(dist_noinst_DATA) \
+ $(am__DIST_COMMON)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_noinst_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+am__DIST_COMMON = $(srcdir)/Makefile.in
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CUPSCONFIG = @CUPSCONFIG@
+CXX = @CXX@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CXX_BINARY = @CXX_BINARY@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+JSON_CFLAGS = @JSON_CFLAGS@
+JSON_LIBS = @JSON_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
+LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
+LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
+LIBCURL_LIBS = @LIBCURL_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
+LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
+LIBSSL_LIBS = @LIBSSL_LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
+OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
+OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
+OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
+OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
+OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
+OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
+OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
+OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
+OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
+OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
+OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
+OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
+PROTOBUF_LIBS = @PROTOBUF_LIBS@
+PROTOC = @PROTOC@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
+XENLIGHT_LIBS = @XENLIGHT_LIBS@
+YAJL_CFLAGS = @YAJL_CFLAGS@
+YAJL_LIBS = @YAJL_LIBS@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
+
+all: all-am
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu libnetdata/adaptive_resortable_list/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu libnetdata/adaptive_resortable_list/Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(DATA)
+installdirs:
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags-am uninstall uninstall-am
+
+.PRECIOUS: Makefile
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/libnetdata/adaptive_resortable_list/README.md b/libnetdata/adaptive_resortable_list/README.md
index ab0d7c5a..0bb99b81 100644
--- a/libnetdata/adaptive_resortable_list/README.md
+++ b/libnetdata/adaptive_resortable_list/README.md
@@ -1,10 +1,9 @@
-
# Adaptive Re-sortable List (ARL)
-This library allows netdata to read a series of `name - value` pairs
+This library allows Netdata to read a series of `name - value` pairs
in the **fastest possible way**.
-ARLs are used all over netdata, as they are the most
+ARLs are used all over Netdata, as they are the most
CPU-efficient way to process `/proc` files. They are used to
process both vertical (CSV-like) and horizontal (one pair per line) `name - value` pairs.
@@ -19,9 +18,9 @@ linked list will adapt at the next iteration.
During initialization (just once), the caller:
-- calls `arl_create()` to create the ARL
+- calls `arl_create()` to create the ARL
-- calls `arl_expect()` multiple times to register the expected keywords
+- calls `arl_expect()` multiple times to register the expected keywords
The library will call the `processor()` function (given to
`arl_create()`), for each expected keyword found.
@@ -34,16 +33,16 @@ Each `name` keyword may have a different `processor()` (by calling
For each iteration through the data source, the caller:
-- calls `arl_begin()` to initiate a data collection iteration.
- This is to be called just ONCE every time the source is re-evaluated.
+- calls `arl_begin()` to initiate a data collection iteration.
+ This is to be called just ONCE every time the source is re-evaluated.
-- calls `arl_check()` for each entry read from the file.
+- calls `arl_check()` for each entry read from the file.
### Cleanup
When the caller exits:
-- calls `arl_free()` to destroy this and free all memory.
+- calls `arl_free()` to destroy this and free all memory.
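
For orientation, here is a minimal sketch of the workflow above, assuming the signatures declared in `adaptive_resortable_list.h` (`arl_create()`, `arl_expect()`, `arl_begin()`, `arl_check()`, `arl_free()`); the keywords, the pre-tokenized source, and the variable names are illustrative only:

```c
#include "libnetdata/libnetdata.h"   // assumption: umbrella header declaring the ARL API

static unsigned long long MemTotal = 0, MemFree = 0;

void arl_example(void) {
    // initialization (just once): create the ARL and register expectations;
    // NULL selects the default processor, which parses the value and stores
    // it into the registered destination
    ARL_BASE *arl = arl_create("meminfo", NULL, 60);
    arl_expect(arl, "MemTotal", &MemTotal);
    arl_expect(arl, "MemFree",  &MemFree);

    // an illustrative, already tokenized source (normally /proc/meminfo)
    const char *names[]  = { "MemTotal", "MemFree", "Buffers" };
    const char *values[] = { "8124856",  "2341234", "102400"  };

    arl_begin(arl);                  // once per iteration over the source
    for(size_t i = 0; i < 3; i++) {
        // arl_check() returns non-zero once all expected keywords have been
        // found, so the caller can stop reading the source early
        if(arl_check(arl, names[i], values[i])) break;
    }

    arl_free(arl);                   // cleanup, when the caller exits
}
```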
### Performance
@@ -68,22 +67,22 @@ In the following test we used alternative methods to process, **1M times**,
a data source like `/proc/meminfo`, already tokenized, in memory,
to extract the same number of expected metrics:
-test|code|string comparison|number parsing|duration
-:---:|:---:|:---:|:---:|:---:|
-1|if-else-if-else-if|`strcmp()`|`strtoull()`|4630.337 ms
-2|nested loops|inline `simple_hash()` and `strcmp()`|`strtoull()`|1597.481 ms
-3|nested loops|inline `simple_hash()` and `strcmp()`|`str2ull()`|923.523 ms
-4|if-else-if-else-if|inline `simple_hash()` and `strcmp()`|`strtoull()`| 854.574 ms
-5|if-else-if-else-if|statement expression `simple_hash()` and `strcmp()`|`strtoull()`|912.013 ms
-6|if-continue|inline `simple_hash()` and `strcmp()`|`strtoull()`|842.279 ms
-7|if-else-if-else-if|inline `simple_hash()` and `strcmp()`|`str2ull()`|602.837 ms
-8|ARL|ARL|`strtoull()`|350.360 ms
-9|ARL|ARL|`str2ull()`|157.237 ms
+|test|code|string comparison|number parsing|duration|
+|:--:|:--:|:---------------:|:------------:|:------:|
+|1|if-else-if-else-if|`strcmp()`|`strtoull()`|4630.337 ms|
+|2|nested loops|inline `simple_hash()` and `strcmp()`|`strtoull()`|1597.481 ms|
+|3|nested loops|inline `simple_hash()` and `strcmp()`|`str2ull()`|923.523 ms|
+|4|if-else-if-else-if|inline `simple_hash()` and `strcmp()`|`strtoull()`|854.574 ms|
+|5|if-else-if-else-if|statement expression `simple_hash()` and `strcmp()`|`strtoull()`|912.013 ms|
+|6|if-continue|inline `simple_hash()` and `strcmp()`|`strtoull()`|842.279 ms|
+|7|if-else-if-else-if|inline `simple_hash()` and `strcmp()`|`str2ull()`|602.837 ms|
+|8|ARL|ARL|`strtoull()`|350.360 ms|
+|9|ARL|ARL|`str2ull()`|157.237 ms|
Compared to the unoptimized code (test No 1: 4.6 sec):
-
- - before ARL netdata was using test No **7** with hashing and a custom `str2ull()` to achieve 602ms.
- - the current ARL implementation is test No **9** that needs only 157ms (29 times faster vs unoptimized code, about 4 times faster vs optimized code).
+
+- Before ARL, Netdata used test No **7**, with hashing and a custom `str2ull()`, to achieve 602 ms.
+- The current ARL implementation is test No **9**, which needs only 157 ms (29 times faster than the unoptimized code and about 4 times faster than the optimized code).
[Check the source code of this test](../../tests/profile/benchmark-value-pairs.c).
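
The gap between tests 8 and 9 comes down to the number parser: `strtoull()` must handle multiple bases, locale rules, and error reporting, while a purpose-built parser only walks ASCII digits. A sketch in the spirit of `str2ull()` (illustrative, not Netdata's exact implementation):

```c
// Minimal positive-integer parser: no base detection, no locale handling,
// no overflow or error reporting -- which is where the test 8 -> test 9
// speedup comes from.
static inline unsigned long long example_str2ull(const char *s) {
    unsigned long long n = 0;
    while(*s >= '0' && *s <= '9')
        n = n * 10 + (unsigned long long)(*s++ - '0');
    return n;
}
```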
@@ -92,4 +91,4 @@ Compared to unoptimized code (test No 1: 4.6sec):
Do not use ARL if a name/keyword may appear more than once in the
source data.
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Flibnetdata%2Fadaptive_resortable_list%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Flibnetdata%2Fadaptive_resortable_list%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/libnetdata/avl/Makefile.in b/libnetdata/avl/Makefile.in
new file mode 100644
index 00000000..1a16827f
--- /dev/null
+++ b/libnetdata/avl/Makefile.in
@@ -0,0 +1,514 @@
+# Makefile.in generated by automake 1.15.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2017 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+VPATH = @srcdir@
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = libnetdata/avl
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(dist_noinst_DATA) \
+ $(am__DIST_COMMON)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_noinst_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+am__DIST_COMMON = $(srcdir)/Makefile.in
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CUPSCONFIG = @CUPSCONFIG@
+CXX = @CXX@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CXX_BINARY = @CXX_BINARY@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+JSON_CFLAGS = @JSON_CFLAGS@
+JSON_LIBS = @JSON_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
+LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
+LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
+LIBCURL_LIBS = @LIBCURL_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
+LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
+LIBSSL_LIBS = @LIBSSL_LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
+OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
+OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
+OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
+OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
+OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
+OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
+OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
+OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
+OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
+OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
+OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
+OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
+PROTOBUF_LIBS = @PROTOBUF_LIBS@
+PROTOC = @PROTOC@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
+XENLIGHT_LIBS = @XENLIGHT_LIBS@
+YAJL_CFLAGS = @YAJL_CFLAGS@
+YAJL_LIBS = @YAJL_LIBS@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
+
+all: all-am
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu libnetdata/avl/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu libnetdata/avl/Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(DATA)
+installdirs:
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags-am uninstall uninstall-am
+
+.PRECIOUS: Makefile
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/libnetdata/avl/README.md b/libnetdata/avl/README.md
index c4c72ff4..6a96884a 100644
--- a/libnetdata/avl/README.md
+++ b/libnetdata/avl/README.md
@@ -9,4 +9,4 @@ use any memory allocations and their memory footprint is optimized
In addition to the above, this version of AVL provides versions using locks
and traversal functions.
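
A minimal sketch of the locked variant, assuming the declarations in `avl.h` (`avl_init_lock()`, `avl_insert_lock()`, `avl_search_lock()`) and the convention that the `avl` member is the first field of the indexed structure, so returned pointers can be cast back to it; `MY_ENTRY` and its fields are illustrative:

```c
#include "libnetdata/libnetdata.h"   // assumption: umbrella header declaring the AVL API

typedef struct my_entry {
    avl avl;            // must be first: the tree stores and returns
                        // pointers to this member
    const char *name;
} MY_ENTRY;

static int my_entry_compare(void *a, void *b) {
    return strcmp(((MY_ENTRY *)a)->name, ((MY_ENTRY *)b)->name);
}

static avl_tree_lock my_index;

void avl_example(void) {
    avl_init_lock(&my_index, my_entry_compare);

    static MY_ENTRY cpu = { .name = "cpu" };
    avl_insert_lock(&my_index, (avl *)&cpu);    // insert under the tree's lock

    MY_ENTRY needle = { .name = "cpu" };
    MY_ENTRY *found = (MY_ENTRY *)avl_search_lock(&my_index, (avl *)&needle);
    (void)found;
}
```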
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Flibnetdata%2Favl%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Flibnetdata%2Favl%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/libnetdata/buffer/Makefile.in b/libnetdata/buffer/Makefile.in
new file mode 100644
index 00000000..3e124ae6
--- /dev/null
+++ b/libnetdata/buffer/Makefile.in
@@ -0,0 +1,514 @@
+# Makefile.in generated by automake 1.15.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2017 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+VPATH = @srcdir@
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = libnetdata/buffer
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(dist_noinst_DATA) \
+ $(am__DIST_COMMON)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_noinst_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+am__DIST_COMMON = $(srcdir)/Makefile.in
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CUPSCONFIG = @CUPSCONFIG@
+CXX = @CXX@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CXX_BINARY = @CXX_BINARY@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+JSON_CFLAGS = @JSON_CFLAGS@
+JSON_LIBS = @JSON_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
+LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
+LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
+LIBCURL_LIBS = @LIBCURL_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
+LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
+LIBSSL_LIBS = @LIBSSL_LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
+OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
+OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
+OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
+OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
+OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
+OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
+OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
+OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
+OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
+OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
+OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
+OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
+PROTOBUF_LIBS = @PROTOBUF_LIBS@
+PROTOC = @PROTOC@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
+XENLIGHT_LIBS = @XENLIGHT_LIBS@
+YAJL_CFLAGS = @YAJL_CFLAGS@
+YAJL_LIBS = @YAJL_LIBS@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
+
+all: all-am
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu libnetdata/buffer/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu libnetdata/buffer/Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(DATA)
+installdirs:
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags-am uninstall uninstall-am
+
+.PRECIOUS: Makefile
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/libnetdata/buffer/README.md b/libnetdata/buffer/README.md
index 48072e96..c89889c2 100644
--- a/libnetdata/buffer/README.md
+++ b/libnetdata/buffer/README.md
@@ -9,4 +9,4 @@ is just a lookup (it does not traverse the string).
Netdata uses `BUFFER`s for preparing web responses and buffering data to be sent upstream or
to backend databases.
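
A minimal sketch of the pattern, assuming the declarations in `buffer.h` (`buffer_create()`, `buffer_sprintf()`, `buffer_strlen()`, `buffer_tostring()`, `buffer_free()`):

```c
#include "libnetdata/libnetdata.h"   // assumption: umbrella header declaring the BUFFER API

void buffer_example(void) {
    BUFFER *wb = buffer_create(1024);           // initial size; grows as needed
    buffer_sprintf(wb, "hello %s", "world");    // appends, updating the length
    size_t len = buffer_strlen(wb);             // O(1): the length is tracked
    const char *s = buffer_tostring(wb);        // always NULL-terminated
    (void)len; (void)s;
    buffer_free(wb);
}
```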
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Flibnetdata%2Fbuffer%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Flibnetdata%2Fbuffer%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/libnetdata/clocks/Makefile.in b/libnetdata/clocks/Makefile.in
new file mode 100644
index 00000000..c3913c16
--- /dev/null
+++ b/libnetdata/clocks/Makefile.in
@@ -0,0 +1,514 @@
+# Makefile.in generated by automake 1.15.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2017 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+VPATH = @srcdir@
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = libnetdata/clocks
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(dist_noinst_DATA) \
+ $(am__DIST_COMMON)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_noinst_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+am__DIST_COMMON = $(srcdir)/Makefile.in
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CUPSCONFIG = @CUPSCONFIG@
+CXX = @CXX@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CXX_BINARY = @CXX_BINARY@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+JSON_CFLAGS = @JSON_CFLAGS@
+JSON_LIBS = @JSON_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
+LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
+LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
+LIBCURL_LIBS = @LIBCURL_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
+LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
+LIBSSL_LIBS = @LIBSSL_LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
+OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
+OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
+OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
+OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
+OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
+OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
+OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
+OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
+OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
+OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
+OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
+OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
+PROTOBUF_LIBS = @PROTOBUF_LIBS@
+PROTOC = @PROTOC@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
+XENLIGHT_LIBS = @XENLIGHT_LIBS@
+YAJL_CFLAGS = @YAJL_CFLAGS@
+YAJL_LIBS = @YAJL_LIBS@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
+
+all: all-am
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu libnetdata/clocks/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu libnetdata/clocks/Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(DATA)
+installdirs:
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags-am uninstall uninstall-am
+
+.PRECIOUS: Makefile
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/libnetdata/clocks/README.md b/libnetdata/clocks/README.md
index c4215a75..e4c424f3 100644
--- a/libnetdata/clocks/README.md
+++ b/libnetdata/clocks/README.md
@@ -1,2 +1 @@
-
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Flibnetdata%2Fclocks%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Flibnetdata%2Fclocks%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/libnetdata/clocks/clocks.c b/libnetdata/clocks/clocks.c
index f7d21717..161225a9 100644
--- a/libnetdata/clocks/clocks.c
+++ b/libnetdata/clocks/clocks.c
@@ -210,3 +210,68 @@ int sleep_usec(usec_t usec) {
return ret;
#endif
}
+
+static inline collected_number uptime_from_boottime(void) {
+#ifdef CLOCK_BOOTTIME_IS_AVAILABLE
+ return now_boottime_usec() / 1000;
+#else
+ error("uptime cannot be read from CLOCK_BOOTTIME on this system.");
+ return 0;
+#endif
+}
+
+static procfile *read_proc_uptime_ff = NULL;
+static inline collected_number read_proc_uptime(char *filename) {
+ if(unlikely(!read_proc_uptime_ff)) {
+ read_proc_uptime_ff = procfile_open(filename, " \t", PROCFILE_FLAG_DEFAULT);
+ if(unlikely(!read_proc_uptime_ff)) return 0;
+ }
+
+ read_proc_uptime_ff = procfile_readall(read_proc_uptime_ff);
+ if(unlikely(!read_proc_uptime_ff)) return 0;
+
+ if(unlikely(procfile_lines(read_proc_uptime_ff) < 1)) {
+ error("/proc/uptime has no lines.");
+ return 0;
+ }
+ if(unlikely(procfile_linewords(read_proc_uptime_ff, 0) < 1)) {
+ error("/proc/uptime has less than 1 word in it.");
+ return 0;
+ }
+
+ return (collected_number)(strtold(procfile_lineword(read_proc_uptime_ff, 0, 0), NULL) * 1000.0);
+}
+
+inline collected_number uptime_msec(char *filename){
+ static int use_boottime = -1;
+
+ if(unlikely(use_boottime == -1)) {
+ collected_number uptime_boottime = uptime_from_boottime();
+ collected_number uptime_proc = read_proc_uptime(filename);
+
+ long long delta = (long long)uptime_boottime - (long long)uptime_proc;
+ if(delta < 0) delta = -delta;
+
+ if(delta <= 1000 && uptime_boottime != 0) {
+ procfile_close(read_proc_uptime_ff);
+ info("Using now_boottime_usec() for uptime (dt is %lld ms)", delta);
+ use_boottime = 1;
+ }
+ else if(uptime_proc != 0) {
+ info("Using /proc/uptime for uptime (dt is %lld ms)", delta);
+ use_boottime = 0;
+ }
+ else {
+ error("Cannot find any way to read uptime on this system.");
+ return 1;
+ }
+ }
+
+ collected_number uptime;
+ if(use_boottime)
+ uptime = uptime_from_boottime();
+ else
+ uptime = read_proc_uptime(filename);
+
+ return uptime;
+}
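For readers tracing the patch, the selection logic above can be tried outside the agent. A minimal standalone C sketch of the same strategy follows -- prefer CLOCK_BOOTTIME when it agrees with /proc/uptime within 1000 ms, otherwise fall back to /proc/uptime. It assumes a Linux system exposing CLOCK_BOOTTIME and uses plain stdio instead of Netdata's procfile helpers; it is illustrative only, not part of the patch:

    /* uptime_sketch.c -- illustrative only, not part of the patch. */
    #define _DEFAULT_SOURCE   /* expose CLOCK_BOOTTIME on glibc */
    #include <stdio.h>
    #include <time.h>

    static long long boottime_msec(void) {
        struct timespec ts;
        if (clock_gettime(CLOCK_BOOTTIME, &ts) == -1) return 0;
        return (long long)ts.tv_sec * 1000LL + ts.tv_nsec / 1000000LL;
    }

    static long long proc_uptime_msec(void) {
        FILE *fp = fopen("/proc/uptime", "r");
        if (!fp) return 0;
        double up = 0.0;
        int ok = fscanf(fp, "%lf", &up);   /* first word: uptime in seconds */
        fclose(fp);
        return (ok == 1) ? (long long)(up * 1000.0) : 0;
    }

    int main(void) {
        long long boot  = boottime_msec();
        long long proc  = proc_uptime_msec();
        long long delta = boot > proc ? boot - proc : proc - boot;

        if (boot != 0 && delta <= 1000)    /* same 1000 ms tolerance as the patch */
            printf("uptime: %lld ms (CLOCK_BOOTTIME, dt=%lld ms)\n", boot, delta);
        else if (proc != 0)
            printf("uptime: %lld ms (/proc/uptime, dt=%lld ms)\n", proc, delta);
        else
            fprintf(stderr, "cannot read uptime on this system\n");
        return 0;
    }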
diff --git a/libnetdata/clocks/clocks.h b/libnetdata/clocks/clocks.h
index 47aa148c..4af451d6 100644
--- a/libnetdata/clocks/clocks.h
+++ b/libnetdata/clocks/clocks.h
@@ -136,4 +136,6 @@ extern int sleep_usec(usec_t usec);
*/
void test_clock_boottime(void);
+extern collected_number uptime_msec(char *filename);
+
#endif /* NETDATA_CLOCKS_H */
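With the declaration exported here, collectors can obtain the boot-relative clock with a single call; a hedged usage sketch (the path shown is the usual default, callers pass whatever their configuration resolves to):

    collected_number uptime = uptime_msec("/proc/uptime");  /* milliseconds since boot */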
diff --git a/libnetdata/config/Makefile.in b/libnetdata/config/Makefile.in
new file mode 100644
index 00000000..ba8ee3b5
--- /dev/null
+++ b/libnetdata/config/Makefile.in
@@ -0,0 +1,514 @@
+# Makefile.in generated by automake 1.15.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2017 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+VPATH = @srcdir@
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = libnetdata/config
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(dist_noinst_DATA) \
+ $(am__DIST_COMMON)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_noinst_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+am__DIST_COMMON = $(srcdir)/Makefile.in
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CUPSCONFIG = @CUPSCONFIG@
+CXX = @CXX@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CXX_BINARY = @CXX_BINARY@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+JSON_CFLAGS = @JSON_CFLAGS@
+JSON_LIBS = @JSON_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
+LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
+LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
+LIBCURL_LIBS = @LIBCURL_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
+LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
+LIBSSL_LIBS = @LIBSSL_LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
+OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
+OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
+OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
+OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
+OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
+OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
+OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
+OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
+OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
+OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
+OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
+OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
+PROTOBUF_LIBS = @PROTOBUF_LIBS@
+PROTOC = @PROTOC@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
+XENLIGHT_LIBS = @XENLIGHT_LIBS@
+YAJL_CFLAGS = @YAJL_CFLAGS@
+YAJL_LIBS = @YAJL_LIBS@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
+
+all: all-am
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu libnetdata/config/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu libnetdata/config/Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(DATA)
+installdirs:
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags-am uninstall uninstall-am
+
+.PRECIOUS: Makefile
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/libnetdata/config/README.md b/libnetdata/config/README.md
index f0a27d95..51216b6d 100644
--- a/libnetdata/config/README.md
+++ b/libnetdata/config/README.md
@@ -1,6 +1,6 @@
-# netdata ini config files
+# Netdata ini config files
-Configuration files `netdata.conf` and `stream.conf` are netdata ini files.
+Configuration files `netdata.conf` and `stream.conf` are Netdata ini files.
## Motivation
@@ -17,9 +17,9 @@ developers and the users.
So, we did this:
-1. No configuration is required to run netdata
-2. There are plenty of options to tweak
-3. There is minimal documentation (or no at all)
+1. No configuration is required to run Netdata
+2. There are plenty of options to tweak
+3. There is minimal documentation (or none at all)
## Why this works?
@@ -33,11 +33,12 @@ In all places, in the code, there are both the `names` and their
file, the default is used. The lookup is made using B-Trees and hashes
(no string comparisons), so they are super fast. Also the `names` of the
settings can be `my super duper setting that once set to yes, will turn the world upside down = no`
-- so goodbye to most of the documentation involved.
-Next, netdata can generate a valid configuration for the user to edit.
+- so goodbye to most of the documentation involved.
+
+Next, Netdata can generate a valid configuration for the user to edit.
No need to remember anything or copy and paste settings. Just get the
-configuration from the server (`/netdata.conf` on your netdata server),
+configuration from the server (`/netdata.conf` on your Netdata server),
edit it and save it.
Last, what about options you believe you have set, but you misspelled?
@@ -45,4 +46,4 @@ When you get the configuration file from the server, there will be a
comment above all `name = value` pairs the server does not use.
So you know that whatever you wrote there, is not used.
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Flibnetdata%2Fconfig%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Flibnetdata%2Fconfig%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
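To make the behaviour described above concrete, here is an illustrative fragment of a server-generated netdata.conf; the section and option names are hypothetical examples and the comment wording is not the agent's exact output:

    [global]
        update every = 2
        # the server does not use the following misspelled option
        # updat every = 1

Fetching /netdata.conf, editing values like these, and saving the file back is the whole workflow this README describes.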
diff --git a/libnetdata/dictionary/Makefile.in b/libnetdata/dictionary/Makefile.in
new file mode 100644
index 00000000..deabfeb9
--- /dev/null
+++ b/libnetdata/dictionary/Makefile.in
@@ -0,0 +1,514 @@
+# Makefile.in generated by automake 1.15.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2017 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+VPATH = @srcdir@
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = libnetdata/dictionary
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(dist_noinst_DATA) \
+ $(am__DIST_COMMON)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_noinst_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+am__DIST_COMMON = $(srcdir)/Makefile.in
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CUPSCONFIG = @CUPSCONFIG@
+CXX = @CXX@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CXX_BINARY = @CXX_BINARY@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+JSON_CFLAGS = @JSON_CFLAGS@
+JSON_LIBS = @JSON_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
+LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
+LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
+LIBCURL_LIBS = @LIBCURL_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
+LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
+LIBSSL_LIBS = @LIBSSL_LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
+OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
+OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
+OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
+OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
+OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
+OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
+OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
+OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
+OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
+OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
+OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
+OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
+PROTOBUF_LIBS = @PROTOBUF_LIBS@
+PROTOC = @PROTOC@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
+XENLIGHT_LIBS = @XENLIGHT_LIBS@
+YAJL_CFLAGS = @YAJL_CFLAGS@
+YAJL_LIBS = @YAJL_LIBS@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
+
+all: all-am
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu libnetdata/dictionary/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu libnetdata/dictionary/Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(DATA)
+installdirs:
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags-am uninstall uninstall-am
+
+.PRECIOUS: Makefile
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/libnetdata/dictionary/README.md b/libnetdata/dictionary/README.md
index 9c705227..5622b691 100644
--- a/libnetdata/dictionary/README.md
+++ b/libnetdata/dictionary/README.md
@@ -1,2 +1 @@
-
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Flibnetdata%2Fdictionary%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Flibnetdata%2Fdictionary%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/libnetdata/eval/Makefile.in b/libnetdata/eval/Makefile.in
new file mode 100644
index 00000000..a7da5911
--- /dev/null
+++ b/libnetdata/eval/Makefile.in
@@ -0,0 +1,514 @@
+# Makefile.in generated by automake 1.15.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2017 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+VPATH = @srcdir@
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = libnetdata/eval
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(dist_noinst_DATA) \
+ $(am__DIST_COMMON)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_noinst_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+am__DIST_COMMON = $(srcdir)/Makefile.in
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CUPSCONFIG = @CUPSCONFIG@
+CXX = @CXX@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CXX_BINARY = @CXX_BINARY@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+JSON_CFLAGS = @JSON_CFLAGS@
+JSON_LIBS = @JSON_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
+LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
+LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
+LIBCURL_LIBS = @LIBCURL_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
+LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
+LIBSSL_LIBS = @LIBSSL_LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
+OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
+OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
+OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
+OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
+OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
+OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
+OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
+OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
+OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
+OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
+OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
+OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
+PROTOBUF_LIBS = @PROTOBUF_LIBS@
+PROTOC = @PROTOC@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
+XENLIGHT_LIBS = @XENLIGHT_LIBS@
+YAJL_CFLAGS = @YAJL_CFLAGS@
+YAJL_LIBS = @YAJL_LIBS@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
+
+all: all-am
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu libnetdata/eval/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu libnetdata/eval/Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(DATA)
+installdirs:
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags-am uninstall uninstall-am
+
+.PRECIOUS: Makefile
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/libnetdata/eval/README.md b/libnetdata/eval/README.md
index e7b4579a..72b4089a 100644
--- a/libnetdata/eval/README.md
+++ b/libnetdata/eval/README.md
@@ -1,2 +1 @@
-
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Flibnetdata%2Feval%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Flibnetdata%2Feval%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/libnetdata/health/Makefile.in b/libnetdata/health/Makefile.in
new file mode 100644
index 00000000..863b86a2
--- /dev/null
+++ b/libnetdata/health/Makefile.in
@@ -0,0 +1,513 @@
+# Makefile.in generated by automake 1.15.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2017 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+VPATH = @srcdir@
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = libnetdata/health
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(dist_noinst_DATA) \
+ $(am__DIST_COMMON)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_noinst_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+am__DIST_COMMON = $(srcdir)/Makefile.in
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CUPSCONFIG = @CUPSCONFIG@
+CXX = @CXX@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CXX_BINARY = @CXX_BINARY@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+JSON_CFLAGS = @JSON_CFLAGS@
+JSON_LIBS = @JSON_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
+LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
+LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
+LIBCURL_LIBS = @LIBCURL_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
+LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
+LIBSSL_LIBS = @LIBSSL_LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
+OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
+OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
+OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
+OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
+OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
+OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
+OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
+OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
+OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
+OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
+OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
+OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
+PROTOBUF_LIBS = @PROTOBUF_LIBS@
+PROTOC = @PROTOC@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
+XENLIGHT_LIBS = @XENLIGHT_LIBS@
+YAJL_CFLAGS = @YAJL_CFLAGS@
+YAJL_LIBS = @YAJL_LIBS@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+dist_noinst_DATA = \
+ $(NULL)
+
+all: all-am
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu libnetdata/health/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu libnetdata/health/Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(DATA)
+installdirs:
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags-am uninstall uninstall-am
+
+.PRECIOUS: Makefile
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/libnetdata/json/Makefile.in b/libnetdata/json/Makefile.in
new file mode 100644
index 00000000..5a419013
--- /dev/null
+++ b/libnetdata/json/Makefile.in
@@ -0,0 +1,514 @@
+# Makefile.in generated by automake 1.15.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2017 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+VPATH = @srcdir@
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = libnetdata/json
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(dist_noinst_DATA) \
+ $(am__DIST_COMMON)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_noinst_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+am__DIST_COMMON = $(srcdir)/Makefile.in
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CUPSCONFIG = @CUPSCONFIG@
+CXX = @CXX@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CXX_BINARY = @CXX_BINARY@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+JSON_CFLAGS = @JSON_CFLAGS@
+JSON_LIBS = @JSON_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
+LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
+LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
+LIBCURL_LIBS = @LIBCURL_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
+LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
+LIBSSL_LIBS = @LIBSSL_LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
+OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
+OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
+OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
+OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
+OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
+OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
+OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
+OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
+OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
+OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
+OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
+OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
+PROTOBUF_LIBS = @PROTOBUF_LIBS@
+PROTOC = @PROTOC@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
+XENLIGHT_LIBS = @XENLIGHT_LIBS@
+YAJL_CFLAGS = @YAJL_CFLAGS@
+YAJL_LIBS = @YAJL_LIBS@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
+
+all: all-am
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu libnetdata/json/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu libnetdata/json/Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(DATA)
+installdirs:
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags-am uninstall uninstall-am
+
+.PRECIOUS: Makefile
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/libnetdata/json/README.md b/libnetdata/json/README.md
index fd6cb0f3..e723cede 100644
--- a/libnetdata/json/README.md
+++ b/libnetdata/json/README.md
@@ -1,5 +1,5 @@
# json
-`json` contains a parser for json strings, based on `jsmn` (https://github.com/zserge/jsmn), but case you have installed the JSON-C library, the installation script will prefer it, you can also force its use with `--enable-jsonc` in the compilation time.
+`json` contains a parser for JSON strings, based on `jsmn` (<https://github.com/zserge/jsmn>). If the JSON-C library is installed, the installation script will prefer it; you can also force its use with `--enable-jsonc` at compile time.
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Flibnetdata%2Fjson%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Flibnetdata%2Fjson%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/libnetdata/locks/Makefile.in b/libnetdata/locks/Makefile.in
new file mode 100644
index 00000000..125a608a
--- /dev/null
+++ b/libnetdata/locks/Makefile.in
@@ -0,0 +1,514 @@
+# Makefile.in generated by automake 1.15.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2017 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+VPATH = @srcdir@
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = libnetdata/locks
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(dist_noinst_DATA) \
+ $(am__DIST_COMMON)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_noinst_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+am__DIST_COMMON = $(srcdir)/Makefile.in
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CUPSCONFIG = @CUPSCONFIG@
+CXX = @CXX@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CXX_BINARY = @CXX_BINARY@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+JSON_CFLAGS = @JSON_CFLAGS@
+JSON_LIBS = @JSON_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
+LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
+LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
+LIBCURL_LIBS = @LIBCURL_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
+LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
+LIBSSL_LIBS = @LIBSSL_LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
+OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
+OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
+OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
+OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
+OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
+OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
+OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
+OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
+OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
+OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
+OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
+OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
+PROTOBUF_LIBS = @PROTOBUF_LIBS@
+PROTOC = @PROTOC@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
+XENLIGHT_LIBS = @XENLIGHT_LIBS@
+YAJL_CFLAGS = @YAJL_CFLAGS@
+YAJL_LIBS = @YAJL_LIBS@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
+
+all: all-am
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu libnetdata/locks/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu libnetdata/locks/Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(DATA)
+installdirs:
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags-am uninstall uninstall-am
+
+.PRECIOUS: Makefile
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/libnetdata/locks/README.md b/libnetdata/locks/README.md
index 0f01e8c5..d06f5b0b 100644
--- a/libnetdata/locks/README.md
+++ b/libnetdata/locks/README.md
@@ -1,2 +1 @@
-
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Flibnetdata%2Flocks%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Flibnetdata%2Flocks%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/libnetdata/log/Makefile.in b/libnetdata/log/Makefile.in
new file mode 100644
index 00000000..5f6709ae
--- /dev/null
+++ b/libnetdata/log/Makefile.in
@@ -0,0 +1,514 @@
+# Makefile.in generated by automake 1.15.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2017 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+VPATH = @srcdir@
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = libnetdata/log
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(dist_noinst_DATA) \
+ $(am__DIST_COMMON)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_noinst_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+am__DIST_COMMON = $(srcdir)/Makefile.in
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CUPSCONFIG = @CUPSCONFIG@
+CXX = @CXX@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CXX_BINARY = @CXX_BINARY@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+JSON_CFLAGS = @JSON_CFLAGS@
+JSON_LIBS = @JSON_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
+LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
+LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
+LIBCURL_LIBS = @LIBCURL_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
+LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
+LIBSSL_LIBS = @LIBSSL_LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
+OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
+OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
+OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
+OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
+OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
+OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
+OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
+OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
+OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
+OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
+OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
+OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
+PROTOBUF_LIBS = @PROTOBUF_LIBS@
+PROTOC = @PROTOC@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
+XENLIGHT_LIBS = @XENLIGHT_LIBS@
+YAJL_CFLAGS = @YAJL_CFLAGS@
+YAJL_LIBS = @YAJL_LIBS@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
+
+all: all-am
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu libnetdata/log/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu libnetdata/log/Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(DATA)
+installdirs:
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags-am uninstall uninstall-am
+
+.PRECIOUS: Makefile
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/libnetdata/log/README.md b/libnetdata/log/README.md
index 28e3c3f7..d10f817f 100644
--- a/libnetdata/log/README.md
+++ b/libnetdata/log/README.md
@@ -1,2 +1 @@
-
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Flibnetdata%2Flog%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Flibnetdata%2Flog%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/libnetdata/popen/Makefile.in b/libnetdata/popen/Makefile.in
new file mode 100644
index 00000000..d62fb352
--- /dev/null
+++ b/libnetdata/popen/Makefile.in
@@ -0,0 +1,514 @@
+# Makefile.in generated by automake 1.15.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2017 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+VPATH = @srcdir@
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = libnetdata/popen
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(dist_noinst_DATA) \
+ $(am__DIST_COMMON)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_noinst_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+am__DIST_COMMON = $(srcdir)/Makefile.in
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CUPSCONFIG = @CUPSCONFIG@
+CXX = @CXX@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CXX_BINARY = @CXX_BINARY@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+JSON_CFLAGS = @JSON_CFLAGS@
+JSON_LIBS = @JSON_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
+LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
+LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
+LIBCURL_LIBS = @LIBCURL_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
+LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
+LIBSSL_LIBS = @LIBSSL_LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
+OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
+OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
+OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
+OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
+OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
+OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
+OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
+OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
+OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
+OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
+OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
+OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
+PROTOBUF_LIBS = @PROTOBUF_LIBS@
+PROTOC = @PROTOC@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
+XENLIGHT_LIBS = @XENLIGHT_LIBS@
+YAJL_CFLAGS = @YAJL_CFLAGS@
+YAJL_LIBS = @YAJL_LIBS@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
+
+all: all-am
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu libnetdata/popen/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu libnetdata/popen/Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(DATA)
+installdirs:
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags-am uninstall uninstall-am
+
+.PRECIOUS: Makefile
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/libnetdata/popen/README.md b/libnetdata/popen/README.md
index 5a83b602..63002d1c 100644
--- a/libnetdata/popen/README.md
+++ b/libnetdata/popen/README.md
@@ -1,2 +1 @@
-
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Flibnetdata%2Fpopen%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Flibnetdata%2Fpopen%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/libnetdata/procfile/Makefile.in b/libnetdata/procfile/Makefile.in
new file mode 100644
index 00000000..dadd97ae
--- /dev/null
+++ b/libnetdata/procfile/Makefile.in
@@ -0,0 +1,514 @@
+# Makefile.in generated by automake 1.15.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2017 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+VPATH = @srcdir@
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = libnetdata/procfile
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(dist_noinst_DATA) \
+ $(am__DIST_COMMON)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_noinst_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+am__DIST_COMMON = $(srcdir)/Makefile.in
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CUPSCONFIG = @CUPSCONFIG@
+CXX = @CXX@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CXX_BINARY = @CXX_BINARY@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+JSON_CFLAGS = @JSON_CFLAGS@
+JSON_LIBS = @JSON_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
+LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
+LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
+LIBCURL_LIBS = @LIBCURL_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
+LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
+LIBSSL_LIBS = @LIBSSL_LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
+OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
+OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
+OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
+OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
+OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
+OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
+OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
+OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
+OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
+OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
+OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
+OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
+PROTOBUF_LIBS = @PROTOBUF_LIBS@
+PROTOC = @PROTOC@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
+XENLIGHT_LIBS = @XENLIGHT_LIBS@
+YAJL_CFLAGS = @YAJL_CFLAGS@
+YAJL_LIBS = @YAJL_LIBS@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
+
+all: all-am
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu libnetdata/procfile/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu libnetdata/procfile/Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(DATA)
+installdirs:
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags-am uninstall uninstall-am
+
+.PRECIOUS: Makefile
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/libnetdata/procfile/README.md b/libnetdata/procfile/README.md
index 7037dc4e..7487e6c6 100644
--- a/libnetdata/procfile/README.md
+++ b/libnetdata/procfile/README.md
@@ -1,4 +1,3 @@
-
# PROCFILE
procfile is a library for reading text data files (i.e. `/proc` files) in the fastest possible way.
@@ -15,49 +14,49 @@ The library also supported quoted words (i.e. strings within of which the separa
Initially the caller:
-- calls `procfile_open()` to open the file and allocate the structures needed.
+- calls `procfile_open()` to open the file and allocate the structures needed.
### Iterations
For each iteration, the caller:
-- calls `procfile_readall()` to read updated contents.
- This call also rewinds (`lseek()` to 0) before reading it.
+- calls `procfile_readall()` to read updated contents.
+  This call also rewinds the file (`lseek()` to 0) before reading it.
+
+ For every file, a [BUFFER](../buffer/) is used that is automatically adjusted to fit
+  the entire contents of the file. So the file is read with a single `read()` call
+ (providing atomicity / consistency when the data are read from the kernel).
+
+ Once the data are read, 2 arrays of pointers are updated:
- For every file, a [BUFFER](../buffer/) is used that is automatically adjusted to fit
- the entire file contents of the file. So the file is read with a single `read()` call
- (providing atomicity / consistency when the data are read from the kernel).
+ - a `words` array, pointing to each word in the data read
+ - a `lines` array, pointing to the first word for each line
- Once the data are read, 2 arrays of pointers are updated:
+ This is highly optimized. Both arrays are automatically adjusted to
+  fit all contents and are updated in a single pass over the data.
- - a `words` array, pointing to each word in the data read
- - a `lines` array, pointing to the first word for each line
+ The library provides a number of macros:
- This is highly optimized. Both arrays are automatically adjusted to
- fit all contents and are updated in a single pass on the data.
-
- The library provides a number of macros:
-
- - `procfile_lines()` returns the # of lines read
- - `procfile_linewords()` returns the # of words in the given line
- - `procfile_word()` returns a pointer the given word #
- - `procfile_line()` returns a pointer to the first word of the given line #
- - `procfile_lineword()` returns a pointer to the given word # of the given line #
+ - `procfile_lines()` returns the # of lines read
+ - `procfile_linewords()` returns the # of words in the given line
+  - `procfile_word()` returns a pointer to the given word #
+ - `procfile_line()` returns a pointer to the first word of the given line #
+ - `procfile_lineword()` returns a pointer to the given word # of the given line #
### Cleanup
When the caller exits:
-- calls `procfile_free()` to close the file and free all memory used.
+- calls `procfile_free()` to close the file and free all memory used.
### Performance
-- a **raspberry Pi 1** (the oldest single core one) can process 5.000+ `/proc` files per second.
-- a **J1900 Celeron** processor can process 23.000+ `/proc` files per second per core.
+- a **Raspberry Pi 1** (the oldest single-core model) can process 5,000+ `/proc` files per second.
+- a **J1900 Celeron** processor can process 23,000+ `/proc` files per second per core.
To achieve this kind of performance, the library tries to work in batches so that the code
and the data are inside the processor's caches.
-This library is extensively used in netdata and its plugins.
+This library is extensively used in Netdata and its plugins.
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Flibnetdata%2Fprocfile%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Flibnetdata%2Fprocfile%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
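Editor's note (not part of the patch): the procfile README hunk above describes an open → readall → free lifecycle plus word/line accessor macros. Below is a minimal sketch of how a caller could wire those together. Only the call names come from the README; the umbrella header path, `PROCFILE_FLAG_DEFAULT`, and the exact signatures are assumptions that should be checked against `libnetdata/procfile/procfile.h`.

```c
/* Editorial sketch: the lifecycle described in libnetdata/procfile/README.md.
 * Call names come from the README; the header path, PROCFILE_FLAG_DEFAULT and
 * the exact signatures are assumptions -- verify against procfile.h. */
#include <stdio.h>
#include "libnetdata/libnetdata.h"

static void dump_proc_file(const char *path) {
    /* open once: allocates the BUFFER and the words/lines pointer arrays */
    procfile *ff = procfile_open(path, " \t", PROCFILE_FLAG_DEFAULT);
    if(!ff) return;

    /* each iteration: rewind to offset 0, then read the file in one read() */
    ff = procfile_readall(ff);
    if(!ff) return;    /* assumption: the handle is released on failure */

    /* walk the parsed contents through the lines/words index arrays */
    size_t lines = procfile_lines(ff);
    for(size_t l = 0; l < lines; l++) {
        size_t words = procfile_linewords(ff, l);
        for(size_t w = 0; w < words; w++)
            printf("%s%c", procfile_lineword(ff, l, w), (w + 1 < words) ? ' ' : '\n');
    }

    /* cleanup, as the README describes: close the file and free all memory */
    procfile_free(ff);
}
```

Because `procfile_readall()` performs a single `read()` into one buffer, each iteration sees an atomic snapshot of the kernel data, which is the property the README emphasizes.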
diff --git a/libnetdata/simple_pattern/Makefile.in b/libnetdata/simple_pattern/Makefile.in
new file mode 100644
index 00000000..dfc92086
--- /dev/null
+++ b/libnetdata/simple_pattern/Makefile.in
@@ -0,0 +1,514 @@
+# Makefile.in generated by automake 1.15.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2017 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+VPATH = @srcdir@
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = libnetdata/simple_pattern
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(dist_noinst_DATA) \
+ $(am__DIST_COMMON)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_noinst_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+am__DIST_COMMON = $(srcdir)/Makefile.in
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CUPSCONFIG = @CUPSCONFIG@
+CXX = @CXX@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CXX_BINARY = @CXX_BINARY@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+JSON_CFLAGS = @JSON_CFLAGS@
+JSON_LIBS = @JSON_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
+LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
+LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
+LIBCURL_LIBS = @LIBCURL_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
+LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
+LIBSSL_LIBS = @LIBSSL_LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
+OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
+OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
+OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
+OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
+OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
+OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
+OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
+OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
+OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
+OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
+OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
+OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
+PROTOBUF_LIBS = @PROTOBUF_LIBS@
+PROTOC = @PROTOC@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
+XENLIGHT_LIBS = @XENLIGHT_LIBS@
+YAJL_CFLAGS = @YAJL_CFLAGS@
+YAJL_LIBS = @YAJL_LIBS@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
+
+all: all-am
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu libnetdata/simple_pattern/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu libnetdata/simple_pattern/Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(DATA)
+installdirs:
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags-am uninstall uninstall-am
+
+.PRECIOUS: Makefile
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/libnetdata/simple_pattern/README.md b/libnetdata/simple_pattern/README.md
index 79a71316..53ed947e 100644
--- a/libnetdata/simple_pattern/README.md
+++ b/libnetdata/simple_pattern/README.md
@@ -1,9 +1,9 @@
-## netdata simple patterns
+## Netdata simple patterns
Unix prefers regular expressions. But they are just too hard, too cryptic
to use, write and understand.
-So, netdata supports **simple patterns**.
+So, Netdata supports **simple patterns**.
Simple patterns are a space-separated list of words that can have `*`
as a wildcard. Each word may use any number of `*`. Simple patterns
@@ -16,7 +16,7 @@ Simple patterns are quite powerful: `pattern = *foobar* !foo* !*bar *`
matches everything containing `foobar`, except strings that start
with `foo` or end with `bar`.
-You can use the netdata command line to check simple patterns,
+You can use the Netdata command line to check simple patterns,
like this:
```sh
@@ -30,9 +30,8 @@ RESULT: NOT MATCHED - pattern '*foobar* !foo* !*bar *' does not match 'hello wor
RESULT: MATCHED - pattern '*foobar* !foo* !*bar *' matches 'hello world foobar'
```
-netdata stops processing to the first positive or negative match
+Netdata stops processing at the first positive or negative match
(left to right). If it is not matched by either positive or negative
patterns, it is denied at the end.
-
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Flibnetdata%2Fsimple_pattern%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Flibnetdata%2Fsimple_pattern%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
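
The left-to-right, first-match-wins rule above is easy to sketch in code. The following is a minimal illustration of the semantics this README describes — it is *not* Netdata's implementation (that lives in `libnetdata/simple_pattern`) — and it borrows POSIX `fnmatch(3)` as a stand-in for the `*` wildcard handling:

```c
/* Minimal sketch of simple-pattern semantics: space-separated words,
 * optional '!' prefix for negative words, evaluated left to right,
 * first match decides, no match means denied. Note: fnmatch() also
 * interprets '?' and '[', which Netdata's own matcher does not. */
#include <fnmatch.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool simple_pattern_matches(const char *pattern, const char *str) {
    char buf[1024];
    strncpy(buf, pattern, sizeof(buf) - 1);   /* strtok() modifies its input */
    buf[sizeof(buf) - 1] = '\0';

    for (char *word = strtok(buf, " "); word; word = strtok(NULL, " ")) {
        bool negative = (*word == '!');
        if (negative)
            word++;                           /* skip the '!' prefix */

        if (fnmatch(word, str, 0) == 0)       /* first match decides */
            return !negative;
    }
    return false;                             /* matched by nothing: denied */
}

int main(void) {
    const char *pattern = "*foobar* !foo* !*bar *";
    printf("%s\n", simple_pattern_matches(pattern, "hello world foobar")
                       ? "MATCHED" : "NOT MATCHED");   /* contains 'foobar' */
    printf("%s\n", simple_pattern_matches(pattern, "foo something")
                       ? "MATCHED" : "NOT MATCHED");   /* starts with 'foo' */
    return 0;
}
```

The first-match rule is what makes a trailing `*` behave as a catch-all "allow", while the absence of any match falls through to the default deny.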
diff --git a/libnetdata/socket/Makefile.in b/libnetdata/socket/Makefile.in
new file mode 100644
index 00000000..29245cf8
--- /dev/null
+++ b/libnetdata/socket/Makefile.in
@@ -0,0 +1,514 @@
+# Makefile.in generated by automake 1.15.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2017 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+VPATH = @srcdir@
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = libnetdata/socket
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(dist_noinst_DATA) \
+ $(am__DIST_COMMON)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_noinst_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+am__DIST_COMMON = $(srcdir)/Makefile.in
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CUPSCONFIG = @CUPSCONFIG@
+CXX = @CXX@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CXX_BINARY = @CXX_BINARY@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+JSON_CFLAGS = @JSON_CFLAGS@
+JSON_LIBS = @JSON_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
+LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
+LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
+LIBCURL_LIBS = @LIBCURL_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
+LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
+LIBSSL_LIBS = @LIBSSL_LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
+OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
+OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
+OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
+OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
+OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
+OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
+OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
+OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
+OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
+OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
+OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
+OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
+PROTOBUF_LIBS = @PROTOBUF_LIBS@
+PROTOC = @PROTOC@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
+XENLIGHT_LIBS = @XENLIGHT_LIBS@
+YAJL_CFLAGS = @YAJL_CFLAGS@
+YAJL_LIBS = @YAJL_LIBS@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
+
+all: all-am
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu libnetdata/socket/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu libnetdata/socket/Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(DATA)
+installdirs:
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags-am uninstall uninstall-am
+
+.PRECIOUS: Makefile
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/libnetdata/socket/README.md b/libnetdata/socket/README.md
index e4275607..ee1b97cf 100644
--- a/libnetdata/socket/README.md
+++ b/libnetdata/socket/README.md
@@ -1,2 +1 @@
-
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Flibnetdata%2Fsocket%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Flibnetdata%2Fsocket%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/libnetdata/statistical/Makefile.in b/libnetdata/statistical/Makefile.in
new file mode 100644
index 00000000..280f5db4
--- /dev/null
+++ b/libnetdata/statistical/Makefile.in
@@ -0,0 +1,514 @@
+# Makefile.in generated by automake 1.15.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2017 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+VPATH = @srcdir@
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = libnetdata/statistical
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(dist_noinst_DATA) \
+ $(am__DIST_COMMON)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_noinst_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+am__DIST_COMMON = $(srcdir)/Makefile.in
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CUPSCONFIG = @CUPSCONFIG@
+CXX = @CXX@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CXX_BINARY = @CXX_BINARY@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+JSON_CFLAGS = @JSON_CFLAGS@
+JSON_LIBS = @JSON_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
+LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
+LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
+LIBCURL_LIBS = @LIBCURL_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
+LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
+LIBSSL_LIBS = @LIBSSL_LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
+OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
+OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
+OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
+OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
+OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
+OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
+OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
+OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
+OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
+OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
+OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
+OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
+PROTOBUF_LIBS = @PROTOBUF_LIBS@
+PROTOC = @PROTOC@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
+XENLIGHT_LIBS = @XENLIGHT_LIBS@
+YAJL_CFLAGS = @YAJL_CFLAGS@
+YAJL_LIBS = @YAJL_LIBS@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
+
+all: all-am
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu libnetdata/statistical/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu libnetdata/statistical/Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(DATA)
+installdirs:
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags-am uninstall uninstall-am
+
+.PRECIOUS: Makefile
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/libnetdata/statistical/README.md b/libnetdata/statistical/README.md
index 184f82b4..686299f7 100644
--- a/libnetdata/statistical/README.md
+++ b/libnetdata/statistical/README.md
@@ -1,2 +1 @@
-
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Flibnetdata%2Fstatistical%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Flibnetdata%2Fstatistical%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/libnetdata/storage_number/Makefile.in b/libnetdata/storage_number/Makefile.in
new file mode 100644
index 00000000..35236d6b
--- /dev/null
+++ b/libnetdata/storage_number/Makefile.in
@@ -0,0 +1,514 @@
+# Makefile.in generated by automake 1.15.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2017 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+VPATH = @srcdir@
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = libnetdata/storage_number
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(dist_noinst_DATA) \
+ $(am__DIST_COMMON)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_noinst_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+am__DIST_COMMON = $(srcdir)/Makefile.in
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CUPSCONFIG = @CUPSCONFIG@
+CXX = @CXX@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CXX_BINARY = @CXX_BINARY@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+JSON_CFLAGS = @JSON_CFLAGS@
+JSON_LIBS = @JSON_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
+LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
+LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
+LIBCURL_LIBS = @LIBCURL_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
+LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
+LIBSSL_LIBS = @LIBSSL_LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
+OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
+OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
+OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
+OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
+OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
+OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
+OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
+OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
+OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
+OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
+OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
+OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
+PROTOBUF_LIBS = @PROTOBUF_LIBS@
+PROTOC = @PROTOC@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
+XENLIGHT_LIBS = @XENLIGHT_LIBS@
+YAJL_CFLAGS = @YAJL_CFLAGS@
+YAJL_LIBS = @YAJL_LIBS@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
+
+all: all-am
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu libnetdata/storage_number/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu libnetdata/storage_number/Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(DATA)
+installdirs:
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags-am uninstall uninstall-am
+
+.PRECIOUS: Makefile
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/libnetdata/storage_number/README.md b/libnetdata/storage_number/README.md
index 5c2c0b07..9f6c08e0 100644
--- a/libnetdata/storage_number/README.md
+++ b/libnetdata/storage_number/README.md
@@ -1,4 +1,4 @@
-# netdata storage number
+# Netdata storage number
Although `netdata` does all its calculations using `long double`, it stores all values using
a **custom-made 32-bit number**.
@@ -9,4 +9,4 @@ have less decimal precision) and 3 bits for flags.
This provides an extremely optimized memory footprint with just 0.0001% max accuracy loss.
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Flibnetdata%2Fstorage_number%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Flibnetdata%2Fstorage_number%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
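
For illustration only, here is a toy 32-bit packing in the same spirit: one sign bit, the three flag bits mentioned above, a 4-bit decimal exponent, and a 24-bit mantissa. This is *not* Netdata's actual bit layout (see the sources in `libnetdata/storage_number`); it only demonstrates how trading decimal precision for a fixed 32-bit footprint works:

```c
/* Toy 32-bit packed number — NOT Netdata's real format, just the idea:
 *   bit 31      sign
 *   bits 28-30  flags (3 bits, as mentioned in the README)
 *   bits 24-27  decimal exponent e; value = mantissa * 10^(e - 8)
 *   bits 0-23   24-bit mantissa
 * Overflow saturation is omitted for brevity. Compile with -lm. */
#include <math.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t toy_storage_number;

static toy_storage_number toy_pack(double v, uint32_t flags) {
    uint32_t sign = (v < 0);
    double a = fabs(v);
    int e = 8;                                        /* 10^(e-8) scale */

    /* drop decimal digits until the mantissa fits in 24 bits */
    while (a >= (double)(1 << 24) && e < 15) { a /= 10.0; e++; }
    /* pull small values up to keep as much decimal precision as possible */
    while (a != 0.0 && a * 10.0 < (double)(1 << 24) && e > 0) { a *= 10.0; e--; }

    uint32_t m = (uint32_t)llround(a);
    return (sign << 31) | ((flags & 0x7u) << 28) | ((uint32_t)e << 24) | (m & 0xFFFFFFu);
}

static double toy_unpack(toy_storage_number n) {
    double m = (double)(n & 0xFFFFFFu);
    int e = (int)((n >> 24) & 0xFu);
    double v = m * pow(10.0, e - 8);
    return (n >> 31) ? -v : v;
}

int main(void) {
    double v = 123.456789;
    toy_storage_number n = toy_pack(v, 0);
    printf("%.9f -> 0x%08x -> %.9f\n", v, n, toy_unpack(n));
    return 0;
}
```

A 24-bit mantissa keeps roughly seven decimal digits, which is where the "less decimal precision, tiny maximum accuracy loss" trade-off in the text comes from.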
diff --git a/libnetdata/threads/Makefile.in b/libnetdata/threads/Makefile.in
new file mode 100644
index 00000000..b0ff6d87
--- /dev/null
+++ b/libnetdata/threads/Makefile.in
@@ -0,0 +1,514 @@
+# Makefile.in generated by automake 1.15.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2017 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+VPATH = @srcdir@
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = libnetdata/threads
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(dist_noinst_DATA) \
+ $(am__DIST_COMMON)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_noinst_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+am__DIST_COMMON = $(srcdir)/Makefile.in
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CUPSCONFIG = @CUPSCONFIG@
+CXX = @CXX@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CXX_BINARY = @CXX_BINARY@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+JSON_CFLAGS = @JSON_CFLAGS@
+JSON_LIBS = @JSON_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
+LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
+LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
+LIBCURL_LIBS = @LIBCURL_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
+LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
+LIBSSL_LIBS = @LIBSSL_LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
+OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
+OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
+OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
+OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
+OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
+OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
+OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
+OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
+OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
+OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
+OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
+OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
+PROTOBUF_LIBS = @PROTOBUF_LIBS@
+PROTOC = @PROTOC@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
+XENLIGHT_LIBS = @XENLIGHT_LIBS@
+YAJL_CFLAGS = @YAJL_CFLAGS@
+YAJL_LIBS = @YAJL_LIBS@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
+
+all: all-am
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu libnetdata/threads/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu libnetdata/threads/Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(DATA)
+installdirs:
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+	@echo "This command is intended for maintainers to use;"
+	@echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags-am uninstall uninstall-am
+
+.PRECIOUS: Makefile
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
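
As the header of each of these generated files notes, every Makefile.in in this patch is produced by automake 1.15.1 from the matching Makefile.am; none of it is meant to be hand-edited. After changing a Makefile.am, or any of the m4 macros listed in am__aclocal_m4_deps, the whole set can be regenerated in one step. A minimal sketch, assuming the GNU autotools are installed (this is also what the netdata.spec file later in this patch runs in its %build section):

    # Regenerate aclocal.m4, configure and every Makefile.in from their
    # sources (configure.ac, Makefile.am, build/m4/*.m4):
    autoreconf -ivf
    #   -i  install missing auxiliary files (install-sh, missing, ...)
    #   -v  be verbose
    #   -f  force regeneration even when outputs look newer than inputs
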
diff --git a/libnetdata/threads/README.md b/libnetdata/threads/README.md
index 9f98ba16..478c746e 100644
--- a/libnetdata/threads/README.md
+++ b/libnetdata/threads/README.md
@@ -1,2 +1 @@
-
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Flibnetdata%2Fthreads%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Flibnetdata%2Fthreads%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
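
This README hunk, and the identical one for libnetdata/url further down, make the same one-character-class change: the empty link target `]()` after the analytics badge becomes `](<>)`. This is likely the output of the repository's own remark tooling (see the package.json hunk at the end of this patch, which points fix-md at the whole tree), since remark's stringifier writes an empty link destination in its explicit `<>` form. Illustrated on a hypothetical badge:

    [![badge](https://example.com/badge.svg)]()    <- as written by hand
    [![badge](https://example.com/badge.svg)](<>)  <- after 'remark --output' normalizes it
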
diff --git a/libnetdata/url/Makefile.in b/libnetdata/url/Makefile.in
new file mode 100644
index 00000000..18004b9c
--- /dev/null
+++ b/libnetdata/url/Makefile.in
@@ -0,0 +1,514 @@
+# Makefile.in generated by automake 1.15.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2017 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+VPATH = @srcdir@
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = libnetdata/url
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(dist_noinst_DATA) \
+ $(am__DIST_COMMON)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_noinst_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+am__DIST_COMMON = $(srcdir)/Makefile.in
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CUPSCONFIG = @CUPSCONFIG@
+CXX = @CXX@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CXX_BINARY = @CXX_BINARY@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+JSON_CFLAGS = @JSON_CFLAGS@
+JSON_LIBS = @JSON_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
+LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
+LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
+LIBCURL_LIBS = @LIBCURL_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
+LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
+LIBSSL_LIBS = @LIBSSL_LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
+OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
+OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
+OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
+OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
+OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
+OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
+OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
+OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
+OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
+OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
+OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
+OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
+PROTOBUF_LIBS = @PROTOBUF_LIBS@
+PROTOC = @PROTOC@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
+XENLIGHT_LIBS = @XENLIGHT_LIBS@
+YAJL_CFLAGS = @YAJL_CFLAGS@
+YAJL_LIBS = @YAJL_LIBS@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
+
+all: all-am
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu libnetdata/url/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu libnetdata/url/Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(DATA)
+installdirs:
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+	@echo "This command is intended for maintainers to use;"
+	@echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags-am uninstall uninstall-am
+
+.PRECIOUS: Makefile
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/libnetdata/url/README.md b/libnetdata/url/README.md
index 7562ffb7..2f44d0e9 100644
--- a/libnetdata/url/README.md
+++ b/libnetdata/url/README.md
@@ -1,2 +1 @@
-
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Flibnetdata%2Furl%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Flibnetdata%2Furl%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/missing b/missing
new file mode 100755
index 00000000..f62bbae3
--- /dev/null
+++ b/missing
@@ -0,0 +1,215 @@
+#! /bin/sh
+# Common wrapper for a few potentially missing GNU programs.
+
+scriptversion=2013-10-28.13; # UTC
+
+# Copyright (C) 1996-2014 Free Software Foundation, Inc.
+# Originally written by François Pinard <pinard@iro.umontreal.ca>, 1996.
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2, or (at your option)
+# any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+# As a special exception to the GNU General Public License, if you
+# distribute this file as part of a program that contains a
+# configuration script generated by Autoconf, you may include it under
+# the same distribution terms that you use for the rest of that program.
+
+if test $# -eq 0; then
+ echo 1>&2 "Try '$0 --help' for more information"
+ exit 1
+fi
+
+case $1 in
+
+ --is-lightweight)
+ # Used by our autoconf macros to check whether the available missing
+ # script is modern enough.
+ exit 0
+ ;;
+
+ --run)
+ # Back-compat with the calling convention used by older automake.
+ shift
+ ;;
+
+ -h|--h|--he|--hel|--help)
+ echo "\
+$0 [OPTION]... PROGRAM [ARGUMENT]...
+
+Run 'PROGRAM [ARGUMENT]...', returning proper advice when this fails due
+to PROGRAM being missing or too old.
+
+Options:
+ -h, --help display this help and exit
+ -v, --version output version information and exit
+
+Supported PROGRAM values:
+ aclocal autoconf autoheader autom4te automake makeinfo
+ bison yacc flex lex help2man
+
+Version suffixes to PROGRAM as well as the prefixes 'gnu-', 'gnu', and
+'g' are ignored when checking the name.
+
+Send bug reports to <bug-automake@gnu.org>."
+ exit $?
+ ;;
+
+ -v|--v|--ve|--ver|--vers|--versi|--versio|--version)
+ echo "missing $scriptversion (GNU Automake)"
+ exit $?
+ ;;
+
+ -*)
+ echo 1>&2 "$0: unknown '$1' option"
+ echo 1>&2 "Try '$0 --help' for more information"
+ exit 1
+ ;;
+
+esac
+
+# Run the given program, remember its exit status.
+"$@"; st=$?
+
+# If it succeeded, we are done.
+test $st -eq 0 && exit 0
+
+# Also exit now if it failed (or wasn't found) and '--version' was
+# passed; such an option is passed most likely to detect whether the
+# program is present and works.
+case $2 in --version|--help) exit $st;; esac
+
+# Exit code 63 means version mismatch. This often happens when the user
+# tries to use an ancient version of a tool on a file that requires a
+# minimum version.
+if test $st -eq 63; then
+ msg="probably too old"
+elif test $st -eq 127; then
+ # Program was missing.
+ msg="missing on your system"
+else
+ # Program was found and executed, but failed. Give up.
+ exit $st
+fi
+
+perl_URL=http://www.perl.org/
+flex_URL=http://flex.sourceforge.net/
+gnu_software_URL=http://www.gnu.org/software
+
+program_details ()
+{
+ case $1 in
+ aclocal|automake)
+ echo "The '$1' program is part of the GNU Automake package:"
+ echo "<$gnu_software_URL/automake>"
+ echo "It also requires GNU Autoconf, GNU m4 and Perl in order to run:"
+ echo "<$gnu_software_URL/autoconf>"
+ echo "<$gnu_software_URL/m4/>"
+ echo "<$perl_URL>"
+ ;;
+ autoconf|autom4te|autoheader)
+ echo "The '$1' program is part of the GNU Autoconf package:"
+ echo "<$gnu_software_URL/autoconf/>"
+ echo "It also requires GNU m4 and Perl in order to run:"
+ echo "<$gnu_software_URL/m4/>"
+ echo "<$perl_URL>"
+ ;;
+ esac
+}
+
+give_advice ()
+{
+ # Normalize program name to check for.
+ normalized_program=`echo "$1" | sed '
+ s/^gnu-//; t
+ s/^gnu//; t
+ s/^g//; t'`
+
+ printf '%s\n' "'$1' is $msg."
+
+ configure_deps="'configure.ac' or m4 files included by 'configure.ac'"
+ case $normalized_program in
+ autoconf*)
+ echo "You should only need it if you modified 'configure.ac',"
+ echo "or m4 files included by it."
+ program_details 'autoconf'
+ ;;
+ autoheader*)
+ echo "You should only need it if you modified 'acconfig.h' or"
+ echo "$configure_deps."
+ program_details 'autoheader'
+ ;;
+ automake*)
+ echo "You should only need it if you modified 'Makefile.am' or"
+ echo "$configure_deps."
+ program_details 'automake'
+ ;;
+ aclocal*)
+ echo "You should only need it if you modified 'acinclude.m4' or"
+ echo "$configure_deps."
+ program_details 'aclocal'
+ ;;
+ autom4te*)
+ echo "You might have modified some maintainer files that require"
+ echo "the 'autom4te' program to be rebuilt."
+ program_details 'autom4te'
+ ;;
+ bison*|yacc*)
+ echo "You should only need it if you modified a '.y' file."
+ echo "You may want to install the GNU Bison package:"
+ echo "<$gnu_software_URL/bison/>"
+ ;;
+ lex*|flex*)
+ echo "You should only need it if you modified a '.l' file."
+ echo "You may want to install the Fast Lexical Analyzer package:"
+ echo "<$flex_URL>"
+ ;;
+ help2man*)
+ echo "You should only need it if you modified a dependency" \
+ "of a man page."
+ echo "You may want to install the GNU Help2man package:"
+ echo "<$gnu_software_URL/help2man/>"
+ ;;
+ makeinfo*)
+ echo "You should only need it if you modified a '.texi' file, or"
+ echo "any other file indirectly affecting the aspect of the manual."
+ echo "You might want to install the Texinfo package:"
+ echo "<$gnu_software_URL/texinfo/>"
+ echo "The spurious makeinfo call might also be the consequence of"
+ echo "using a buggy 'make' (AIX, DU, IRIX), in which case you might"
+ echo "want to install GNU make:"
+ echo "<$gnu_software_URL/make/>"
+ ;;
+ *)
+ echo "You might have modified some files without having the proper"
+ echo "tools for further handling them. Check the 'README' file, it"
+ echo "often tells you about the needed prerequisites for installing"
+ echo "this package. You may also peek at any GNU archive site, in"
+ echo "case some other package contains this missing '$1' program."
+ ;;
+ esac
+}
+
+give_advice "$1" | sed -e '1s/^/WARNING: /' \
+ -e '2,$s/^/ /' >&2
+
+# Propagate the correct exit status (expected to be 127 for a program
+# not found, 63 for a program that failed due to version mismatch).
+exit $st
+
+# Local variables:
+# eval: (add-hook 'write-file-hooks 'time-stamp)
+# time-stamp-start: "scriptversion="
+# time-stamp-format: "%:y-%02m-%02d.%02H"
+# time-stamp-time-zone: "UTC"
+# time-stamp-end: "; # UTC"
+# End:
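
The missing script above is automake's stock wrapper for maintainer tools. The generated Makefile.in files earlier in this patch invoke tools through variables such as $(AUTOMAKE) and $(ACLOCAL), and configure points those variables at this wrapper when the real tool is absent, so rebuilding a maintainer file produces advice rather than a bare 'command not found'. A sketch of the two behaviors, assuming it is run from the source tree:

    # Tool present: the wrapper is a pass-through and exits with the
    # tool's own status.
    ./missing automake --gnu libnetdata/url/Makefile

    # Tool absent: the program exits 127, and the wrapper prints advice
    # to stderr before propagating that status to make, e.g.:
    #   WARNING: 'automake' is missing on your system.
    #            You should only need it if you modified 'Makefile.am' or
    #            'configure.ac' or m4 files included by 'configure.ac'.
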
diff --git a/netdata-installer.sh b/netdata-installer.sh
index a79f41c1..781cd9b1 100755
--- a/netdata-installer.sh
+++ b/netdata-installer.sh
@@ -168,6 +168,8 @@ USAGE: ${PROGRAM} [options]
--enable-backend-prometheus-remote-write Enable Prometheus remote write backend. Default: enable it when libprotobuf and
libsnappy are available.
--disable-backend-prometheus-remote-write
+ --enable-backend-mongodb Enable MongoDB backend. Default: enable it when libmongoc is available.
+ --disable-backend-mongodb
--enable-lto Enable Link-Time-Optimization. Default: enabled
--disable-lto
--disable-x86-sse Disable SSE instructions. By default SSE optimizations are enabled.
@@ -218,6 +220,8 @@ while [ -n "${1}" ]; do
"--disable-backend-kinesis") NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS//--disable-backend-kinesis/} --disable-backend-kinesis";;
"--enable-backend-prometheus-remote-write") NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS//--enable-backend-prometheus-remote-write/} --enable-backend-prometheus-remote-write";;
"--disable-backend-prometheus-remote-write") NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS//--disable-backend-prometheus-remote-write/} --disable-backend-prometheus-remote-write";;
+ "--enable-backend-mongodb") NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS//--enable-backend-mongodb/} --enable-backend-mongodb";;
+ "--disable-backend-mongodb") NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS//--disable-backend-mongodb/} --disable-backend-mongodb";;
"--enable-lto") NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS//--enable-lto/} --enable-lto";;
"--disable-lto") NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS//--disable-lto/} --disable-lto";;
"--disable-x86-sse") NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS//--disable-x86-sse/} --disable-x86-sse";;
@@ -396,6 +400,7 @@ run ./configure \
--sysconfdir="${NETDATA_PREFIX}/etc" \
--localstatedir="${NETDATA_PREFIX}/var" \
--libexecdir="${NETDATA_PREFIX}/usr/libexec" \
+ --libdir="${NETDATA_PREFIX}/usr/lib" \
--with-zlib \
--with-math \
--with-user=netdata \
@@ -774,7 +779,7 @@ fi
install_go() {
# When updating this value, ensure correct checksums in packaging/go.d.checksums
- GO_PACKAGE_VERSION="v0.7.0"
+ GO_PACKAGE_VERSION="$(cat packaging/go.d.version)"
ARCH_MAP=(
'i386::386'
'i686::386'
@@ -800,7 +805,7 @@ install_go() {
fi
done
tmp=$(mktemp -d /tmp/netdata-go-XXXXXX)
- GO_PACKAGE_BASENAME="go.d.plugin-${GO_PACKAGE_VERSION}.${OS}-${ARCH}"
+ GO_PACKAGE_BASENAME="go.d.plugin-${GO_PACKAGE_VERSION}.${OS}-${ARCH}.tar.gz"
download_go "https://github.com/netdata/go.d.plugin/releases/download/${GO_PACKAGE_VERSION}/${GO_PACKAGE_BASENAME}" "${tmp}/${GO_PACKAGE_BASENAME}"
@@ -833,11 +838,13 @@ install_go() {
run tar -xf "${tmp}/config.tar.gz" -C "${NETDATA_STOCK_CONFIG_DIR}/"
run chown -R "${ROOT_USER}:${NETDATA_GROUP}" "${NETDATA_STOCK_CONFIG_DIR}"
- run mv "${tmp}/$GO_PACKAGE_BASENAME" "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/go.d.plugin"
+ run tar xf "${tmp}/${GO_PACKAGE_BASENAME}"
+ run mv "${GO_PACKAGE_BASENAME/\.tar\.gz/}" "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/go.d.plugin"
if [ "${UID}" -eq 0 ]; then
run chown "root:${NETDATA_GROUP}" "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/go.d.plugin"
fi
run chmod 0750 "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/go.d.plugin"
+ rm -rf "${tmp}"
fi
return 0
}
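
Two details of this installer hunk are worth unpacking. First, install_go() emulates an associative array with 'key::value' strings plus parameter expansion, which keeps it working on old bash versions that lack declare -A; a standalone sketch of the lookup:

    #!/usr/bin/env bash
    # Minimal sketch of the key::value mapping used by install_go()
    ARCH_MAP=('i386::386' 'x86_64::amd64' 'aarch64::arm64')
    ARCH=$(uname -m)
    for index in "${ARCH_MAP[@]}"; do
      KEY="${index%%::*}"    # strip the longest '::*' suffix: the part before '::'
      VALUE="${index##*::}"  # strip the longest '*::' prefix: the part after '::'
      if [ "$KEY" = "$ARCH" ]; then
        ARCH="${VALUE}"      # e.g. x86_64 -> amd64, matching go.d release names
        break
      fi
    done
    echo "${ARCH}"

Second, GO_PACKAGE_BASENAME now carries the .tar.gz suffix, evidently because the go.d.plugin release artifacts are now tarballs: the script downloads the archive, extracts it, and strips the suffix back off (see the sketch after the netdata.spec.in hunk below) to find the unpacked binary before moving it into plugins.d.
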
diff --git a/netdata.spec b/netdata.spec
new file mode 100644
index 00000000..0236787a
--- /dev/null
+++ b/netdata.spec
@@ -0,0 +1,511 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+%global contentdir %{_datadir}/netdata
+
+
+# TODO: Temporary fix for the build-id error during go.d plugin setup
+%global _missing_build_ids_terminate_build 0
+
+# This is temporary and should eventually be resolved. This bypasses
+# the default rhel __os_install_post which throws a python compile
+# error.
+%global __os_install_post %{nil}
+
+# Mitigate the cross-distro mayhem by strictly defining the libexec destination
+%define _prefix /usr
+%define _sysconfdir /etc
+%define _localstatedir /var
+%define _libexecdir /usr/libexec
+%define _libdir /usr/lib
+
+#
+# Conditional build:
+%bcond_without systemd # systemd
+%bcond_with netns # build with netns support (cgroup-network)
+
+%if 0%{?fedora} || 0%{?rhel} >= 7 || 0%{?suse_version} >= 1140
+%else
+%undefine with_systemd
+%undefine with_netns
+%endif
+
+%if %{with systemd}
+%if 0%{?suse_version}
+%global netdata_initd_buildrequires \
+BuildRequires: systemd-rpm-macros \
+%{nil}
+%global netdata_initd_requires \
+%{?systemd_requires} \
+%{nil}
+%global netdata_init_post %service_add_post netdata.service \
+/sbin/service netdata restart > /dev/null 2>&1 \
+%{nil}
+%global netdata_init_preun %service_del_preun netdata.service \
+/sbin/service netdata stop > /dev/null 2>&1 \
+%{nil}
+%global netdata_init_postun %service_del_postun netdata.service
+%else
+%global netdata_initd_buildrequires \
+BuildRequires: systemd
+%global netdata_initd_requires \
+Requires(preun): systemd-units \
+Requires(postun): systemd-units \
+Requires(post): systemd-units \
+%{nil}
+%global netdata_init_post %systemd_post netdata.service \
+/usr/bin/systemctl enable netdata.service \
+/usr/bin/systemctl daemon-reload \
+/usr/bin/systemctl restart netdata.service \
+%{nil}
+%global netdata_init_preun %systemd_preun netdata.service
+%global netdata_init_postun %systemd_postun_with_restart netdata.service
+%endif
+%else
+%global netdata_initd_buildrequires %{nil}
+%global netdata_initd_requires \
+Requires(post): chkconfig \
+%{nil}
+%global netdata_init_post \
+/sbin/chkconfig --add netdata \
+/sbin/service netdata restart > /dev/null 2>&1 \
+%{nil}
+%global netdata_init_preun %{nil} \
+if [ $1 = 0 ]; then \
+ /sbin/service netdata stop > /dev/null 2>&1 \
+ /sbin/chkconfig --del netdata \
+fi \
+%{nil}
+%global netdata_init_postun %{nil} \
+if [ $1 != 0 ]; then \
+ /sbin/service netdata condrestart > /dev/null 2>&1 \
+fi \
+%{nil}
+%endif
+
+Summary: Real-time performance monitoring, done right!
+Name: netdata
+Version: v1.17.0
+Release: 1%{?dist}
+License: GPLv3+
+Group: Applications/System
+Source0: https://github.com/netdata/%{name}/releases/download/%{version}/%{name}-%{version}.tar.gz
+URL: http://my-netdata.io
+
+# #####################################################################
+# Core build/install/runtime dependencies
+# #####################################################################
+
+# Build dependencies
+#
+BuildRequires: gcc
+BuildRequires: gcc-c++
+BuildRequires: make
+BuildRequires: git
+BuildRequires: autoconf
+%if 0%{?fedora} || 0%{?rhel} >= 7 || 0%{?suse_version} >= 1140
+BuildRequires: autoconf-archive
+BuildRequires: autogen
+%endif
+BuildRequires: automake
+BuildRequires: pkgconfig
+BuildRequires: curl
+BuildRequires: findutils
+BuildRequires: zlib-devel
+BuildRequires: libuuid-devel
+BuildRequires: libuv-devel >= 1
+BuildRequires: openssl-devel
+%if 0%{?suse_version}
+BuildRequires: judy-devel
+BuildRequires: liblz4-devel
+BuildRequires: netcat-openbsd
+BuildRequires: json-glib-devel
+%else
+BuildRequires: Judy-devel
+BuildRequires: lz4-devel
+BuildRequires: nc
+BuildRequires: json-c-devel
+%endif
+
+# Core build requirements for service install
+%{netdata_initd_buildrequires}
+
+# Runtime dependencies
+#
+Requires: python
+Requires: zlib
+%if 0%{?suse_version}
+# for libuv, Requires version >= 1
+Requires: libuv1
+Requires: libJudy1
+Requires: json-glib
+Requires: libuuid1
+%else
+# for libuv, Requires version >= 1
+Requires: libuv >= 1
+Requires: Judy
+Requires: json-c
+Requires: libuuid
+%endif
+Requires: openssl
+Requires: lz4
+
+# Core requirements for the install to succeed
+Requires(pre): /usr/sbin/groupadd
+Requires(pre): /usr/sbin/useradd
+%if 0%{?suse_version} >= 1140
+Requires(post): libcap1
+%else
+Requires(post): libcap
+%endif
+
+%{netdata_initd_requires}
+
+# #####################################################################
+# Functionality-dependent package dependencies
+# #####################################################################
+# Note: some or all of these packages may be found in the EPEL repo,
+# rather than in the standard ones
+
+# nfacct plugin dependencies
+BuildRequires: libmnl-devel
+%if 0%{?fedora} || 0%{?suse_version} >= 1140
+BuildRequires: libnetfilter_acct-devel
+%endif
+
+%if 0%{?suse_version}
+Requires: libmnl0
+%else
+Requires: libmnl
+%endif
+
+%if 0%{?fedora}
+Requires: libnetfilter_acct
+%else
+%if 0%{?suse_version} >= 1140
+Requires: libnetfilter_acct1
+%endif
+%endif
+# end nfacct plugin dependencies
+
+# freeipmi plugin dependencies
+BuildRequires: freeipmi-devel
+Requires: freeipmi
+# end - freeipmi plugin dependencies
+
+# CUPS plugin dependencies
+BuildRequires: cups-devel
+Requires: cups
+# end - cups plugin dependencies
+
+# Prometheus remote write dependencies
+BuildRequires: snappy-devel
+BuildRequires: protobuf-devel
+%if 0%{?suse_version}
+BuildRequires: libprotobuf-c-devel
+%else
+BuildRequires: protobuf-c-devel
+%endif
+
+%if 0%{?suse_version}
+Requires: libsnappy1
+Requires: protobuf-c
+Requires: libprotobuf15
+%else
+Requires: snappy
+Requires: protobuf-c
+Requires: protobuf
+%endif
+# end - prometheus remote write dependencies
+
+# #####################################################################
+# End of dependency management configuration
+# #####################################################################
+
+%description
+ netdata is the fastest way to visualize metrics. It is a
+resource-efficient, highly optimized system for collecting and
+visualizing any type of real-time time-series data: CPU usage, disk
+activity, SQL queries, API calls, website visitors, and more.
+ netdata tries to visualize the truth of now, in its greatest detail,
+so that you can get insights into what is happening now and what just
+happened, on your systems and applications.
+
+%prep
+%setup -q -n %{name}-%{version}
+
+%build
+# Conf step
+autoreconf -ivf
+%configure \
+ --prefix="%{_prefix}" \
+ --sysconfdir="%{_sysconfdir}" \
+ --localstatedir="%{_localstatedir}" \
+ --libexecdir="%{_libexecdir}" \
+ --libdir="%{_libdir}" \
+ --with-zlib \
+ --with-math \
+ --with-user=netdata \
+
+# Build step
+%{__make} %{?_smp_mflags}
+
+%install
+
+# ###########################################################
+# Clear the directory, if already exists and install
+rm -rf "${RPM_BUILD_ROOT}"
+%{__make} %{?_smp_mflags} DESTDIR="${RPM_BUILD_ROOT}" install
+
+find "${RPM_BUILD_ROOT}" -name .keep -delete
+
+install -m 644 -p system/netdata.conf "${RPM_BUILD_ROOT}%{_sysconfdir}/%{name}"
+
+# ###########################################################
+# logrotate settings
+install -m 755 -d "${RPM_BUILD_ROOT}%{_sysconfdir}/logrotate.d"
+install -m 644 -p system/netdata.logrotate "${RPM_BUILD_ROOT}%{_sysconfdir}/logrotate.d/%{name}"
+
+# ###########################################################
+# Install freeipmi
+install -m 4750 -p freeipmi.plugin "${RPM_BUILD_ROOT}%{_libexecdir}/%{name}/plugins.d/freeipmi.plugin"
+
+# ###########################################################
+# Install apps.plugin
+install -m 4750 -p apps.plugin "${RPM_BUILD_ROOT}%{_libexecdir}/%{name}/plugins.d/apps.plugin"
+
+# ###########################################################
+# Install perf.plugin
+install -m 4750 -p perf.plugin "${RPM_BUILD_ROOT}%{_libexecdir}/%{name}/plugins.d/perf.plugin"
+
+# ###########################################################
+# Install registry directory
+install -m 755 -d "${RPM_BUILD_ROOT}%{_localstatedir}/lib/%{name}/registry"
+install -m 755 -d "${RPM_BUILD_ROOT}%{_sysconfdir}/%{name}/custom-plugins.d"
+install -m 755 -d "${RPM_BUILD_ROOT}%{_sysconfdir}/%{name}/go.d"
+install -m 755 -d "${RPM_BUILD_ROOT}%{_sysconfdir}/%{name}/ssl"
+
+# ###########################################################
+# Install netdata service
+%if %{with systemd}
+install -m 755 -d "${RPM_BUILD_ROOT}%{_unitdir}"
+install -m 644 -p system/netdata.service "${RPM_BUILD_ROOT}%{_unitdir}/netdata.service"
+%else
+# install SYSV init stuff
+install -d "${RPM_BUILD_ROOT}/etc/rc.d/init.d"
+install -m 755 system/netdata-init-d \
+ "${RPM_BUILD_ROOT}/etc/rc.d/init.d/netdata"
+%endif
+
+# ############################################################
+# Package Go within netdata (TBD: Package it separately)
+safe_sha256sum() {
+ # Within the context of the installer, we only use the -c option, which is common to both commands.
+ # We will have to reconsider if we start using non-common options.
+ if command -v sha256sum >/dev/null 2>&1; then
+ sha256sum "$@"
+ elif command -v shasum >/dev/null 2>&1; then
+ shasum -a 256 "$@"
+ else
+ fatal "I could not find a suitable checksum binary to use"
+ fi
+}
+
+download_go() {
+ url="${1}"
+ dest="${2}"
+
+ if command -v curl >/dev/null 2>&1; then
+ curl -sSL --connect-timeout 10 --retry 3 "${url}" > "${dest}"
+ elif command -v wget >/dev/null 2>&1; then
+ wget -T 15 -O - "${url}" > "${dest}"
+ else
+ echo >&2
+ echo >&2 "Downloading go.d plugin from '${url}' failed because of missing mandatory packages."
+ echo >&2 "Either add packages or disable it by issuing '--disable-go' in the installer"
+ echo >&2
+ exit 1
+ fi
+}
+
+install_go() {
+ # When updating this value, ensure correct checksums in packaging/go.d.checksums
+ GO_PACKAGE_VERSION="$(cat packaging/go.d.version)"
+ ARCH_MAP=(
+ 'i386::386'
+ 'i686::386'
+ 'x86_64::amd64'
+ 'aarch64::arm64'
+ 'armv64::arm64'
+ 'armv6l::arm'
+ 'armv7l::arm'
+ 'armv5tel::arm'
+ )
+
+ if [ -z "${NETDATA_DISABLE_GO+x}" ]; then
+ echo >&2 "Install go.d.plugin"
+ ARCH=$(uname -m)
+ OS=$(uname -s | tr '[:upper:]' '[:lower:]')
+
+ for index in "${ARCH_MAP[@]}" ; do
+ KEY="${index%%::*}"
+ VALUE="${index##*::}"
+ if [ "$KEY" = "$ARCH" ]; then
+ ARCH="${VALUE}"
+ break
+ fi
+ done
+ tmp=$(mktemp -d /tmp/netdata-go-XXXXXX)
+ GO_PACKAGE_BASENAME="go.d.plugin-${GO_PACKAGE_VERSION}.${OS}-${ARCH}.tar.gz"
+ download_go "https://github.com/netdata/go.d.plugin/releases/download/${GO_PACKAGE_VERSION}/${GO_PACKAGE_BASENAME}" "${tmp}/${GO_PACKAGE_BASENAME}"
+ download_go "https://github.com/netdata/go.d.plugin/releases/download/${GO_PACKAGE_VERSION}/config.tar.gz" "${tmp}/config.tar.gz"
+
+ if [ ! -f "${tmp}/${GO_PACKAGE_BASENAME}" ] || [ ! -f "${tmp}/config.tar.gz" ] || [ ! -s "${tmp}/config.tar.gz" ] || [ ! -s "${tmp}/${GO_PACKAGE_BASENAME}" ]; then
+ echo >&2 "Either check the error or consider disabling it by issuing '--disable-go' in the installer"
+ echo >&2
+ return 1
+ fi
+
+ grep "${GO_PACKAGE_BASENAME}\$" "packaging/go.d.checksums" > "${tmp}/sha256sums.txt" 2>/dev/null
+ grep "config.tar.gz" "packaging/go.d.checksums" >> "${tmp}/sha256sums.txt" 2>/dev/null
+
+ # Checksum validation
+ if ! (cd "${tmp}" && safe_sha256sum -c "sha256sums.txt"); then
+
+ echo >&2 "go.d plugin checksum validation failure."
+ echo >&2 "Either check the error or consider disabling it by issuing '--disable-go' in the installer"
+ echo >&2
+
+ echo "go.d.plugin package files checksum validation failed."
+ exit 1
+ fi
+
+ # Install files
+ tar -xf "${tmp}/config.tar.gz" -C "${RPM_BUILD_ROOT}%{_libdir}/%{name}/conf.d/"
+ tar xf "${tmp}/${GO_PACKAGE_BASENAME}"
+ mv "${GO_PACKAGE_BASENAME/\.tar\.gz/}" "go.d.plugin"
+ rm -rf "${tmp}"
+ fi
+ return 0
+}
+install_go
+install -m 0750 -p go.d.plugin "${RPM_BUILD_ROOT}%{_libexecdir}/%{name}/plugins.d/go.d.plugin"
+
+%pre
+
+# User/Group creations, as needed
+getent group netdata >/dev/null || groupadd -r netdata
+getent group docker >/dev/null || groupadd -r docker
+getent passwd netdata >/dev/null || \
+ useradd -r -g netdata -G docker -s /sbin/nologin \
+ -d %{contentdir} -c "netdata" netdata
+
+%post
+%{netdata_init_post}
+
+%preun
+%{netdata_init_preun}
+
+%postun
+%{netdata_init_postun}
+
+%clean
+rm -rf "${RPM_BUILD_ROOT}"
+
+%files
+%doc README.md
+%defattr(-,root,netdata)
+
+%dir %{_sysconfdir}/%{name}
+%dir %{_libdir}/%{name}
+
+%config(noreplace) %{_sysconfdir}/%{name}/*.conf
+%config(noreplace) %{_sysconfdir}/logrotate.d/%{name}
+
+%{_libdir}/%{name}
+
+%defattr(0755,netdata,netdata,0755)
+%{_libexecdir}/%{name}
+%{_sbindir}/%{name}
+%{_sysconfdir}/%{name}/edit-config
+
+%defattr(4750,root,netdata,0750)
+
+%dir %{_libexecdir}/%{name}/python.d
+%dir %{_libexecdir}/%{name}/charts.d
+%dir %{_libexecdir}/%{name}/plugins.d
+%dir %{_libexecdir}/%{name}/node.d
+
+%caps(cap_dac_read_search,cap_sys_ptrace=ep) %attr(0550,root,netdata) %{_libexecdir}/%{name}/plugins.d/apps.plugin
+
+%if %{with netns}
+# cgroup-network detects the network interfaces of CGROUPs
+# it must be able to use setns() and run cgroup-network-helper.sh as root
+# the helper script reads /proc/PID/fdinfo/* files, runs virsh, etc.
+%caps(cap_setuid=ep) %attr(4550,root,netdata) %{_libexecdir}/%{name}/plugins.d/cgroup-network
+%attr(0550,root,root) %{_libexecdir}/%{name}/plugins.d/cgroup-network-helper.sh
+%endif
+
+# perf plugin
+%caps(cap_setuid=ep) %attr(4750,root,netdata) %{_libexecdir}/%{name}/plugins.d/perf.plugin
+
+
+# freeipmi files
+%caps(cap_setuid=ep) %attr(4550,root,netdata) %{_libexecdir}/%{name}/plugins.d/freeipmi.plugin
+%dir %{_datadir}/%{name}
+
+%defattr(0750,netdata,netdata,0755)
+
+%dir %{_sysconfdir}/%{name}/health.d
+%dir %{_sysconfdir}/%{name}/python.d
+%dir %{_sysconfdir}/%{name}/charts.d
+%dir %{_sysconfdir}/%{name}/custom-plugins.d
+%dir %{_sysconfdir}/%{name}/go.d
+%dir %{_sysconfdir}/%{name}/ssl
+%dir %{_sysconfdir}/%{name}/node.d
+%dir %{_sysconfdir}/%{name}/statsd.d
+%{_libdir}/%{name}/conf.d/
+
+%if %{with systemd}
+%{_unitdir}/netdata.service
+%else
+%{_sysconfdir}/rc.d/init.d/netdata
+%endif
+
+# Enforce 0644 for files and 0755 for directories
+# for the netdata web directory
+%defattr(0644,root,netdata,0755)
+%{_datadir}/%{name}/web
+
+# Enforce 0660 for files and 0770 for directories
+# for the netdata lib, cache and log dirs
+%defattr(0660,root,netdata,0770)
+%attr(0770,netdata,netdata) %dir %{_localstatedir}/cache/%{name}
+%attr(0755,netdata,root) %dir %{_localstatedir}/log/%{name}
+%attr(0770,netdata,netdata) %dir %{_localstatedir}/lib/%{name}
+%attr(0770,netdata,netdata) %dir %{_localstatedir}/lib/%{name}/registry
+
+
+%changelog
+* Fri Jun 28 2019 Pavlos Emm. Katsoulakis <paul@netdata.cloud> - 0.0.0-7
+- Raise the path overrides to the spec file level, not just the configure step.
+- Apply tighter permissions on some folders, based on what we did in our installer
+- Introduce go.d plugin download and install, to include it in the package (temporary; to become a separate package in a later iteration)
+* Tue Jun 25 2019 Pavlos Emm. Katsoulakis <paul@netdata.cloud> - 0.0.0-6
+- Adjust dependency list: some packages are missing on some distros; adapt to build successfully
+* Mon Jun 24 2019 Pavlos Emm. Katsoulakis <paul@netdata.cloud> - 0.0.0-5
+- Another pass on cleaning up pre/post installation steps
+- Sync permission and ownership on files and directories
+* Sun Jun 16 2019 Pavlos Emm. Katsoulakis <paul@netdata.cloud> - 0.0.0-4
+- First draft refactor of the package dependencies section
+- Remove freeipmi/nfacct plugin flags. We auto-detect all plugins by design
+- Start refactor of package dependencies
+- Add missing dependencies, with respect to distro peculiarities
+- Adjust existing dependencies, so that distro-specific package names are applied
+* Wed Jan 02 2019 Pawel Krupa <pkrupa@redhat.com> - 0.0.0-3
+- Temporary set version statically
+- Fix changelog ordering
+- Comment-out node.d configuration directory
+* Wed Jan 02 2019 Pawel Krupa <pkrupa@redhat.com> - 0.0.0-2
+- Fix permissions for log files
+* Sun Nov 15 2015 Alon Bar-Lev <alonbl@redhat.com> - 0.0.0-1
+- Initial add.
+
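With the spec now a concrete file in the tree, a local build is straightforward. A hypothetical invocation, assuming rpmbuild's default ~/rpmbuild layout and a source tarball named to match Source0:

    cp netdata-v1.17.0.tar.gz ~/rpmbuild/SOURCES/
    rpmbuild -ba netdata.spec                    # build SRPM + binary RPM
    rpmbuild -ba --without systemd netdata.spec  # force the SysV init branch
    rpmbuild -ba --with netns netdata.spec       # also package cgroup-network

The last two work because of the %bcond_without systemd and %bcond_with netns declarations near the top of the spec: systemd support defaults to on (and is forced off on platforms that fail the distro version check), while netns support defaults to off.
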
diff --git a/netdata.spec.in b/netdata.spec.in
index d686906f..7563acc7 100644
--- a/netdata.spec.in
+++ b/netdata.spec.in
@@ -1,6 +1,10 @@
# SPDX-License-Identifier: GPL-3.0-or-later
%global contentdir %{_datadir}/netdata
+
+# TODO: Temporary fix for the build-id error during go.d plugin setup
+%global _missing_build_ids_terminate_build 0
+
# This is temporary and should eventually be resolved. This bypasses
# the default rhel __os_install_post which throws a python compile
# error.
@@ -11,6 +15,7 @@
%define _sysconfdir /etc
%define _localstatedir /var
%define _libexecdir /usr/libexec
+%define _libdir /usr/lib
#
# Conditional build:
@@ -235,6 +240,7 @@ autoreconf -ivf
--sysconfdir="%{_sysconfdir}" \
--localstatedir="%{_localstatedir}" \
--libexecdir="%{_libexecdir}" \
+ --libdir="%{_libdir}" \
--with-zlib \
--with-math \
--with-user=netdata \
@@ -322,7 +328,7 @@ download_go() {
install_go() {
# When updating this value, ensure correct checksums in packaging/go.d.checksums
- GO_PACKAGE_VERSION="v0.7.0"
+ GO_PACKAGE_VERSION="$(cat packaging/go.d.version)"
ARCH_MAP=(
'i386::386'
'i686::386'
@@ -348,7 +354,7 @@ install_go() {
fi
done
tmp=$(mktemp -d /tmp/netdata-go-XXXXXX)
- GO_PACKAGE_BASENAME="go.d.plugin-${GO_PACKAGE_VERSION}.${OS}-${ARCH}"
+ GO_PACKAGE_BASENAME="go.d.plugin-${GO_PACKAGE_VERSION}.${OS}-${ARCH}.tar.gz"
download_go "https://github.com/netdata/go.d.plugin/releases/download/${GO_PACKAGE_VERSION}/${GO_PACKAGE_BASENAME}" "${tmp}/${GO_PACKAGE_BASENAME}"
download_go "https://github.com/netdata/go.d.plugin/releases/download/${GO_PACKAGE_VERSION}/config.tar.gz" "${tmp}/config.tar.gz"
@@ -374,11 +380,14 @@ install_go() {
# Install files
tar -xf "${tmp}/config.tar.gz" -C "${RPM_BUILD_ROOT}%{_libdir}/%{name}/conf.d/"
- mv "${tmp}/$GO_PACKAGE_BASENAME" "${RPM_BUILD_ROOT}%{_libexecdir}/%{name}/plugins.d/go.d.plugin"
+ tar xf "${tmp}/${GO_PACKAGE_BASENAME}"
+ mv "${GO_PACKAGE_BASENAME/\.tar\.gz/}" "go.d.plugin"
+ rm -rf "${tmp}"
fi
return 0
}
install_go
+install -m 0750 -p go.d.plugin "${RPM_BUILD_ROOT}%{_libexecdir}/%{name}/plugins.d/go.d.plugin"
%pre
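
Both install_go() variants in this patch (installer and spec) end the same way: extract the tarball in the current directory, then derive the unpacked binary's name by deleting the suffix with bash pattern substitution. A standalone sketch; the version string here is hypothetical, since the real one is read from packaging/go.d.version:

    GO_PACKAGE_BASENAME="go.d.plugin-v0.0.0.linux-amd64.tar.gz"
    # ${var/pattern/} deletes the first match of a glob pattern; the
    # backslashes keep the dots literal, which is defensive since a dot
    # is already literal in glob syntax.
    echo "${GO_PACKAGE_BASENAME/\.tar\.gz/}"
    # -> go.d.plugin-v0.0.0.linux-amd64, the name of the extracted binary
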
diff --git a/package.json b/package.json
index 2bd614ca..20888161 100644
--- a/package.json
+++ b/package.json
@@ -52,6 +52,6 @@
"scripts": {
"lint-md": "remark .",
"lint-md-path": "remark",
- "fix-md": "remark collectors --output"
+ "fix-md": "remark . --output"
}
}
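
This one-line change is what makes fix-md repo-wide instead of limited to collectors/, and it plausibly explains the mechanical README rewrites earlier in this patch. Assuming the remark toolchain from the package's devDependencies is installed, the scripts are used as:

    npm install        # fetch remark and the plugins wired up by the repo's remark config
    npm run lint-md    # 'remark .' - report markdown style issues, change nothing
    npm run fix-md     # 'remark . --output' - rewrite every markdown file in place
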
diff --git a/packaging/DISTRIBUTIONS.md b/packaging/DISTRIBUTIONS.md
deleted file mode 100644
index d180f25f..00000000
--- a/packaging/DISTRIBUTIONS.md
+++ /dev/null
@@ -1,37 +0,0 @@
-# Netdata distribution support matrix
-![](https://raw.githubusercontent.com/netdata/netdata/master/web/gui/images/packaging-beta-tag.svg?sanitize=true)
-
-In the following table we've listed Netdata's officially supported operating systems. We detail the distributions, flavors, and the level of support Netdata is currently capable of providing.
-
-The following table is a work in progress. We have settled on the list of distributions
-that we currently support, and we are working on documenting our current state so that our users
-have complete visibility into the range of support.
-
-Distribution | Family | Architecture | Code health | Installer support | Kickstart support | Binary packaging support | Integrity testing (CI) | Functionality testing (CI) | Community support
-:------------------: | :------------------: | :------------------: | :------------------: | :------------------: | :------------------: | :------------------: | :------------------: | :------------------: | :--------------------
-14.04.6 LTS (Trusty Tahr) | Ubuntu | | | | | | | |
-16.04.6 LTS (Xenial Xerus) | Ubuntu | | | | | | | |
-18.04.2 LTS (Bionic Beaver) | Ubuntu | | | | | | | |
-19.04 (Disco Dingo) Latest | Ubuntu | | | | | | | |
-Debian 7 (Wheezy) | Debian | | | | | | | |
-Debian 8 (Jessie) | Debian | | | | | | | |
-Debian 9 (Stretch) | Debian | | | | | | | |
-Debian 10 (Buster) | Debian | | | | | | | |
-Versions 6.* | RHEL | | | | | | | |
-Versions 7.* | RHEL | | | | | | | |
-Versions 8.* | RHEL | | | | | | | |
-Fedora 28 | Fedora | | | | | | | |
-Fedora 29 | Fedora | | | | | | | |
-Fedora 30 | Fedora | | | | | | | |
-Fedora 31 | Fedora | | | | | | | |
-CentOS 6.* | Cent OS | | | | | | | | |
-CentOS 7.* | Cent OS | | | | | | | | |
-CentOS 8.* | Cent OS | | | | | | | | |
-Open SuSE Leap 15.0 | Open SuSE | | | | | | | |
-Open SuSE Leap 15.1 | Open SuSE | | | | | | | |
-Open SuSE Tumbleweed (latest) | Open SuSE | | | | | | | |
-SuSE Enterprise Linux Server 11 | SLES | | | | | | | |
-SuSE Enterprise Linux Server 12 | SLES | | | | | | | |
-SuSE Enterprise Linux Server 15 | SLES | | | | | | | |
-Arch Linux (latest) | Arch | | | | | | | |
-All other linux | Other | | | | | | | |
diff --git a/packaging/docker/Dockerfile b/packaging/docker/Dockerfile
deleted file mode 100644
index 4be2d93b..00000000
--- a/packaging/docker/Dockerfile
+++ /dev/null
@@ -1,92 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-# author : paulfantom
-
-# Cross-arch building is achieved by specifying ARCH as a build parameter with `--build-arg` option.
-# It is automated in `build.sh` script
-ARG ARCH=amd64
-# This image contains preinstalled dependencies
-FROM netdata/builder:${ARCH} as builder
-
-ENV JUDY_VER 1.0.5
-
-# Copy source
-COPY . /opt/netdata.git
-WORKDIR /opt/netdata.git
-
-# Install from source
-RUN chmod +x netdata-installer.sh && ./netdata-installer.sh --dont-wait --dont-start-it
-
-# Gather installed files into one directory
-RUN mkdir -p /app/usr/sbin/ \
- /app/usr/share \
- /app/usr/libexec \
- /app/usr/lib \
- /app/var/cache \
- /app/var/lib \
- /app/etc && \
- mv /usr/share/netdata /app/usr/share/ && \
- mv /usr/libexec/netdata /app/usr/libexec/ && \
- mv /usr/lib/netdata /app/usr/lib/ && \
- mv /var/cache/netdata /app/var/cache/ && \
- mv /var/lib/netdata /app/var/lib/ && \
- mv /etc/netdata /app/etc/ && \
- mv /usr/sbin/netdata /app/usr/sbin/ && \
- mv /judy-${JUDY_VER} /app/judy-${JUDY_VER} && \
- mv packaging/docker/run.sh /app/usr/sbin/ && \
- chmod +x /app/usr/sbin/run.sh
-
-#####################################################################
-ARG ARCH
-# This image contains preinstalled dependencies
-FROM netdata/base:${ARCH}
-
-# Conditional subscription to Polyverse's Polymorphic Linux repositories
-RUN if [ "$(uname -m)" == "x86_64" ]; then \
- apk update && apk upgrade; \
- curl https://sh.polyverse.io | sh -s install gcxce5byVQbtRz0iwfGkozZwy support+netdata@polyverse.io; \
- if [ $? -eq 0 ]; then \
- apk update && \
- apk upgrade --available --no-cache && \
- sed -in 's/^#//g' /etc/apk/repositories; \
- fi \
- fi
-
-# Copy files over
-RUN mkdir -p /opt/src
-COPY --from=builder /app /
-
-# Configure system
-ARG NETDATA_UID=201
-ARG NETDATA_GID=201
-ENV DOCKER_GRP netdata
-ENV DOCKER_USR netdata
-RUN \
- # provide judy installation to base image
- apk add make alpine-sdk shadow && \
- cd /judy-${JUDY_VER} && make install && cd / && \
- # Clean the source stuff once judy is installed
- rm -rf /judy-${JUDY_VER} && apk del make alpine-sdk && \
-    # fping from the alpine apk is in a different location. Move it.
- mv /usr/sbin/fping /usr/local/bin/fping && \
- chmod 4755 /usr/local/bin/fping && \
- mkdir -p /var/log/netdata && \
- # Add netdata user
- addgroup -g ${NETDATA_GID} -S "${DOCKER_GRP}" && \
-    adduser -S -H -s /usr/sbin/nologin -u ${NETDATA_UID} -h /etc/netdata -G "${DOCKER_GRP}" "${DOCKER_USR}" && \
- # Apply the permissions as described in
- # https://github.com/netdata/netdata/wiki/netdata-security#netdata-directories
- chown -R root:netdata /etc/netdata && \
- chown -R netdata:netdata /var/cache/netdata /var/lib/netdata /usr/share/netdata && \
- chown -R root:netdata /usr/lib/netdata && \
- chown -R root:netdata /usr/libexec/netdata/ && \
- chmod 4750 /usr/libexec/netdata/plugins.d/cgroup-network /usr/libexec/netdata/plugins.d/apps.plugin && \
- chmod 0750 /var/lib/netdata /var/cache/netdata && \
- # Link log files to stdout
- ln -sf /dev/stdout /var/log/netdata/access.log && \
- ln -sf /dev/stdout /var/log/netdata/debug.log && \
- ln -sf /dev/stderr /var/log/netdata/error.log
-
-ENV NETDATA_PORT 19999
-EXPOSE $NETDATA_PORT
-
-ENTRYPOINT ["/usr/sbin/run.sh"]
diff --git a/packaging/docker/README.md b/packaging/docker/README.md
deleted file mode 100644
index 4e21918e..00000000
--- a/packaging/docker/README.md
+++ /dev/null
@@ -1,229 +0,0 @@
-# Install netdata with Docker
-
-> :warning: As of Sep 9th, 2018 we ship [new docker builds](https://github.com/netdata/netdata/pull/3995), running netdata in docker with an [ENTRYPOINT](https://docs.docker.com/engine/reference/builder/#entrypoint) directive, not a COMMAND directive. Please adapt your execution scripts accordingly. More information about ENTRYPOINT vs COMMAND is presented by goinbigdata [here](http://goinbigdata.com/docker-run-vs-cmd-vs-entrypoint/) and by the docker docs [here](https://docs.docker.com/engine/reference/builder/#understand-how-cmd-and-entrypoint-interact).
->
-> Also, the `latest` is now based on alpine, so **`alpine` is not updated any more** and `armv7hf` is now replaced with `armhf` (to comply with https://github.com/multiarch naming), so **`armv7hf` is not updated** either.
-
-## Limitations
-
-Running netdata in a container to monitor the whole host can limit its capabilities. Some data is not accessible, or not as detailed, as when running netdata directly on the host.
-
-## Package scrambling in runtime (x86_64 only)
-
-By default, on the x86_64 architecture, our docker images use Polyverse's Polymorphic Linux package scrambling. For increased security you can enable rescrambling of packages at runtime. To do this, set the environment variable `RESCRAMBLE=true` when starting the netdata docker container.
-
-For more information, see the [Polyverse site](https://polyverse.io/how-it-works/)
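-
-A minimal sketch of enabling it (all other options as in the examples below):
-
-```bash
-docker run -d --name=netdata \
-  -p 19999:19999 \
-  -e RESCRAMBLE=true \
-  netdata/netdata
-```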
-
-## Run netdata with docker command
-
-Quickly start netdata with the docker command line.
-Netdata is then available at http://host:19999
-
-This is good for an internal network or to quickly analyse a host.
-
-```bash
-docker run -d --name=netdata \
- -p 19999:19999 \
- -v /etc/passwd:/host/etc/passwd:ro \
- -v /etc/group:/host/etc/group:ro \
- -v /proc:/host/proc:ro \
- -v /sys:/host/sys:ro \
- --cap-add SYS_PTRACE \
- --security-opt apparmor=unconfined \
- netdata/netdata
-```
-
-The above can be converted to a docker-compose file for easier management:
-
-```yaml
-version: '3'
-services:
- netdata:
- image: netdata/netdata
- hostname: example.com # set to fqdn of host
- ports:
- - 19999:19999
- cap_add:
- - SYS_PTRACE
- security_opt:
- - apparmor:unconfined
- volumes:
- - /etc/passwd:/host/etc/passwd:ro
- - /etc/group:/host/etc/group:ro
- - /proc:/host/proc:ro
- - /sys:/host/sys:ro
-```
-
-If you don't want to use the apps.plugin functionality, you can remove the mounts of `/etc/passwd` and `/etc/group` (they are used to get proper user and group names for the monitored host) to get slightly better security.
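-
-For example, a minimal variant of the run command above without the two user-mapping mounts:
-
-```bash
-docker run -d --name=netdata \
-  -p 19999:19999 \
-  -v /proc:/host/proc:ro \
-  -v /sys:/host/sys:ro \
-  --cap-add SYS_PTRACE \
-  --security-opt apparmor=unconfined \
-  netdata/netdata
-```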
-
-### Docker container names resolution
-
-There are a few options for resolving container names within netdata. Some methods of doing so will allow root access to your machine from within the container. Please read the following carefully.
-
-#### Docker Socket Proxy (Safest Option)
-
-Deploy a Docker socket proxy that accepts and filters requests using something like [HAProxy](https://docs.netdata.cloud/docs/running-behind-haproxy/), restricting connections to read-only access to the CONTAINERS endpoint.
-
-Exposing the socket to a proxy is safer because netdata has a TCP port exposed outside the Docker network, while access to the proxy container is limited to within that network.
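-
-One possible sketch, assuming the third-party `tecnativa/docker-socket-proxy` image (any HAProxy-based read-only proxy works similarly):
-
-```bash
-# expose only read-only access to the containers endpoint
-docker run -d --name docker-socket-proxy \
-  -v /var/run/docker.sock:/var/run/docker.sock:ro \
-  -e CONTAINERS=1 \
-  tecnativa/docker-socket-proxy
-```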
-
-#### Giving group access to Docker Socket (Less safe)
-
-**Important Note**: You should seriously consider whether you need to activate this option,
-as it grants the netdata user access to the privileged Docker service socket, and therefore to your whole machine.
-
-If you want to have your container names resolved by Netdata, make the `netdata` user be part of the group that owns the socket.
-
-To achieve that, just add the environment variable `PGID=[GROUP NUMBER]` to the Netdata container,
-where `[GROUP NUMBER]` is the group id of the group that owns the docker socket on your host.
-
-This group number can be found by running the following (assuming the socket is owned by the `docker` group):
-
-```bash
-grep docker /etc/group | cut -d ':' -f 3
-```
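-
-For example, combining the two (the group id `999` is illustrative; use the output of the command above):
-
-```bash
-docker run -d --name=netdata \
-  -p 19999:19999 \
-  -e PGID=999 \
-  -v /var/run/docker.sock:/var/run/docker.sock:ro \
-  netdata/netdata
-```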
-
-#### Running as root (Unsafe)
-
-**Important Note**: You should seriously consider whether you need to activate this option,
-as it grants the netdata user access to the privileged Docker service socket, and therefore to your whole machine.
-
-```yaml
-version: '3'
-services:
- netdata:
- image: netdata/netdata
- # ... rest of your config ...
- volumes:
- # ... other volumes ...
- - /var/run/docker.sock:/var/run/docker.sock:ro
- environment:
- - DOCKER_USR=root
-```
-
-### Pass command line options to Netdata
-
-Since we use an [ENTRYPOINT](https://docs.docker.com/engine/reference/builder/#entrypoint) directive, you can provide [netdata daemon command line options](https://docs.netdata.cloud/daemon/#command-line-options) such as the IP address netdata will be running on, using the [command instruction](https://docs.docker.com/engine/reference/builder/#cmd).
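-
-For example, to make netdata listen on a specific IP (the address is illustrative), append the daemon options after the image name so they become the container command:
-
-```bash
-docker run -d --name=netdata \
-  -p 19999:19999 \
-  netdata/netdata \
-  -i 10.1.2.3
-```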
-
-## Install Netdata using Docker Compose with SSL/TLS enabled http proxy
-
-For a permanent installation on a public server, you should [secure the netdata instance](../../docs/netdata-security.md). This section contains an example of how to install netdata with an SSL reverse proxy and basic authentication.
-
-You can use the following docker-compose.yml and Caddyfile to run netdata with docker. Replace the domains and the email address for [Letsencrypt](https://letsencrypt.org/) before starting.
-
-### Prerequisites
-* [Docker](https://docs.docker.com/install/#server)
-* [Docker Compose](https://docs.docker.com/compose/install/)
-* A domain configured in DNS, pointing to the host.
-
-### Caddyfile
-
-This file needs to be placed in `/opt` with the name `Caddyfile`. Here you customize your domain, and you need to provide your email address to obtain a Letsencrypt certificate. Certificate renewal happens automatically and is handled internally by the caddy server.
-
-```
-netdata.example.org {
- proxy / netdata:19999
- tls admin@example.org
-}
-```
-
-### docker-compose.yml
-
-After setting up the Caddyfile, run `docker-compose up -d` to get a fully functioning netdata setup behind an SSL/TLS-enabled HTTP reverse proxy.
-
-```yaml
-version: '3'
-volumes:
- caddy:
-
-services:
- caddy:
- image: abiosoft/caddy
- ports:
- - 80:80
- - 443:443
- volumes:
- - /opt/Caddyfile:/etc/Caddyfile
- - caddy:/root/.caddy
- environment:
- ACME_AGREE: 'true'
- netdata:
- restart: always
- hostname: netdata.example.org
- image: netdata/netdata
- cap_add:
- - SYS_PTRACE
- security_opt:
- - apparmor:unconfined
- volumes:
- - /etc/passwd:/host/etc/passwd:ro
- - /etc/group:/host/etc/group:ro
- - /proc:/host/proc:ro
- - /sys:/host/sys:ro
- - /var/run/docker.sock:/var/run/docker.sock:ro
-```
-
-### Restrict access with basic auth
-
-You can restrict access by following the [official caddy guide](https://caddyserver.com/docs/basicauth) and adding the relevant lines to your Caddyfile, as in the sketch below.
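-
-A sketch, assuming caddy v1 `basicauth` syntax (the user and password are placeholders):
-
-```
-netdata.example.org {
-    proxy / netdata:19999
-    tls admin@example.org
-    basicauth / myuser mypassword
-}
-```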
-
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fpackaging%2Fdocker%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
-
-## Publish a test image to your own repository
-
-At netdata we provide multiple ways of testing your docker images using your own repositories.
-You may either use the command line tools available or take advantage of our Travis CI infrastructure.
-
-### Using tools manually from the command line
-
-The script `packaging/docker/build-test.sh` can be used to create an image and upload it to a repository of your choosing.
-
-```
-Usage: packaging/docker/build-test.sh -r <REPOSITORY> -v <VERSION> -u <DOCKER_USERNAME> -p <DOCKER_PWD> [-s]
- -s skip build, just push the image
-Builds an amd64 image and pushes it to the docker hub repository REPOSITORY
-```
-
-This is especially useful when testing a Pull Request for Kubernetes, since you can set `image` to an immutable repository and tag, set the `imagePullPolicy` to `Always` and just keep uploading new images.
-
-Example:
-
-We get a local copy of the Helm chart at https://github.com/netdata/helmchart. We modify `values.yaml` to have the following:
-
-```
-image:
- repository: cakrit/netdata-prs
- tag: PR5576
- pullPolicy: Always
-```
-
-We check out PR5576 and run the following:
-```
-./packaging/docker/build-test.sh -r cakrit/netdata-prs -v PR5576 -u cakrit -p 'XXX'
-```
-
-Then we can run `helm install [path to our helmchart clone]`.
-
-If we make changes to the code, we execute the same `build-test.sh` command, followed by `helm upgrade [name] [path to our helmchart clone]`.
-
-### Inside netdata organization, using Travis CI
-
-To enable Travis CI integration on your own repositories (Docker and Github), you need to be part of the Netdata organization.
-Once you have contacted the netdata owners to set you up on Github and Travis, execute the following steps:
-
-- Preparation
- - Have netdata forked on your personal GITHUB account
- - Get a GITHUB token: Go to Github settings -> Developer Settings -> Personal access tokens, and generate a new token with full access to repo_hook, read-only access to admin:org, public_repo, repo_deployment, repo:status and user:email settings enabled. This will be the GITHUB_TOKEN that is described later in the instructions, so keep it somewhere safe until it is needed.
- - Contact the netdata team to request permissions on https://scan.coverity.com should you require Travis to be able to push your forked code to Coverity for analysis and reporting. Once you are set up, you will have the email you used in Coverity and a token from them. These will be the COVERITY_SCAN_SUBMIT_EMAIL and COVERITY_SCAN_TOKEN that we will refer to later.
- - Have a valid Docker hub account; the credentials from this account will be the DOCKER_USERNAME and DOCKER_PWD mentioned later.
-
- Setting up Travis CI for your own fork (detailed instructions are provided by the Travis team [here](https://docs.travis-ci.com/user/tutorial/))
- - Log in to travis with your own GITHUB credentials (OAuth access is available)
- - Go to your profile settings, under the [repositories](https://travis-ci.com/account/repositories) section, and set up your netdata fork to be built by travis
- - Once the repository has been set up, go to the repository settings within travis (usually under https://travis-ci.com/NETDATA_DEVELOPER/netdata/settings, where "NETDATA_DEVELOPER" is your github handle) and select your desired settings.
-- While in Travis settings, under netdata repository settings in the Environment Variables section, you need to add the following:
- - DOCKER_USERNAME and DOCKER_PWD variables so that Travis can login to your docker hub account and publish docker images there.
- - REPOSITORY variable to "NETDATA_DEVELOPER/netdata" where NETDATA_DEVELOPER is your github handle again.
- - GITHUB_TOKEN variable with the token generated in the preparation step, for travis workflows to function properly
- - COVERITY_SCAN_SUBMIT_EMAIL and COVERITY_SCAN_TOKEN variables to enable Travis to submit your code for analysis to Coverity.
-
-Having followed these instructions, your forked repository should be all set up for Travis integration. Happy testing!
diff --git a/packaging/docker/build-test.sh b/packaging/docker/build-test.sh
deleted file mode 100755
index 3c55e173..00000000
--- a/packaging/docker/build-test.sh
+++ /dev/null
@@ -1,74 +0,0 @@
-#!/bin/bash
-# Docker build wrapper, for testing manually the docker build process
-# TODO: This script should consume build.sh after setting up required parameters
-#
-# Copyright: SPDX-License-Identifier: GPL-3.0-or-later
-#
-# Author : Chris Akritidis (chris@netdata.cloud)
-# Author : Pavlos Emm. Katsoulakis (paul@netdata.cloud)
-
-printhelp() {
- echo "Usage: packaging/docker/build-test.sh -r <REPOSITORY> -v <VERSION> -u <DOCKER_USERNAME> -p <DOCKER_PWD> [-s]
- -s skip build, just push the image
-Builds an amd64 image and pushes it to the docker hub repository REPOSITORY"
-}
-
-set -e
-
-if [ ! -f .gitignore ]; then
- echo "Run as ./packaging/docker/$(basename "$0") from top level directory of git repository"
- exit 1
-fi
-
-DOBUILD=1
-while getopts :r:v:u:p:s option
-do
- case "$option" in
- r)
- REPOSITORY=$OPTARG
- ;;
- v)
- VERSION=$OPTARG
- ;;
- u)
- DOCKER_USERNAME=$OPTARG
- ;;
- p)
- DOCKER_PWD=$OPTARG
- ;;
- s)
- DOBUILD=0
- ;;
- *)
- printhelp
- exit 1
- ;;
- esac
-done
-
-if [ -n "${REPOSITORY}" ]; then
- if [ $DOBUILD -eq 1 ] ; then
- echo "Building ${VERSION:-latest} of ${REPOSITORY} container"
- docker run --rm --privileged multiarch/qemu-user-static:register --reset
-
- # Build images using multi-arch Dockerfile.
- eval docker build --build-arg ARCH="amd64" --tag "${REPOSITORY}:${VERSION:-latest}" --file packaging/docker/Dockerfile ./
-
- # Create temporary docker CLI config with experimental features enabled (manifests v2 need it)
- mkdir -p /tmp/docker
- #echo '{"experimental":"enabled"}' > /tmp/docker/config.json
- fi
-
- if [ -n "${DOCKER_USERNAME}" ] && [ -n "${DOCKER_PWD}" ] ; then
-    # Login to docker hub to allow further operations
- echo "Logging into docker"
- echo "$DOCKER_PWD" | docker --config /tmp/docker login -u "$DOCKER_USERNAME" --password-stdin
-
- echo "Pushing ${REPOSITORY}:${VERSION}"
- docker --config /tmp/docker push "${REPOSITORY}:${VERSION}"
- fi
-else
- echo "Missing parameter. REPOSITORY=${REPOSITORY}"
- printhelp
- exit 1
-fi
diff --git a/packaging/docker/build.sh b/packaging/docker/build.sh
deleted file mode 100755
index f15c7dad..00000000
--- a/packaging/docker/build.sh
+++ /dev/null
@@ -1,72 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Copyright: SPDX-License-Identifier: GPL-3.0-or-later
-#
-# Author : Pawel Krupa (paulfantom)
-# Author : Pavlos Emm. Katsoulakis (paul@netdata.cloud)
-
-set -e
-
-if [ "${BASH_VERSINFO[0]}" -lt "4" ]; then
- echo "This mechanism currently can only run on BASH version 4 and above"
- exit 1
-fi
-
-VERSION="$1"
-declare -A ARCH_MAP
-ARCH_MAP=(["i386"]="386" ["amd64"]="amd64" ["armhf"]="arm" ["aarch64"]="arm64")
-DEVEL_ARCHS=(amd64)
-ARCHS="${!ARCH_MAP[@]}"
-
-if [ -z ${REPOSITORY} ]; then
- REPOSITORY="${TRAVIS_REPO_SLUG}"
- if [ -z ${REPOSITORY} ]; then
- echo "REPOSITORY not set, build cannot proceed"
- exit 1
- else
- echo "REPOSITORY was not detected, attempted to use TRAVIS_REPO_SLUG setting: ${TRAVIS_REPO_SLUG}"
- fi
-fi
-
-# When development mode is set, build on DEVEL_ARCHS
-if [ ! -z ${DEVEL+x} ]; then
- declare -a ARCHS=(${DEVEL_ARCHS[@]})
-fi
-
-# Ensure there is a version, the most appropriate one
-if [ "${VERSION}" == "" ]; then
- VERSION=$(git tag --points-at)
- if [ "${VERSION}" == "" ]; then
- VERSION="latest"
- fi
-fi
-
-# If we are not in netdata git repo, at the top level directory, fail
-TOP_LEVEL=$(basename "$(git rev-parse --show-toplevel)")
-CWD=$(git rev-parse --show-cdup)
-if [ ! -z $CWD ] || [ ! "${TOP_LEVEL}" == "netdata" ]; then
- echo "Run as ./packaging/docker/$(basename "$0") from top level directory of netdata git repository"
- echo "Docker build process aborted"
- exit 1
-fi
-
-echo "Docker image build in progress.."
-echo "Version : ${VERSION}"
-echo "Repository : ${REPOSITORY}"
-echo "Architectures : ${ARCHS[*]}"
-
-docker run --rm --privileged multiarch/qemu-user-static:register --reset
-
-# Build images using multi-arch Dockerfile.
-for ARCH in ${ARCHS[@]}; do
- TAG="${REPOSITORY}:${VERSION}-${ARCH}"
- echo "Building tag ${TAG}.."
- eval docker build --no-cache \
- --build-arg ARCH="${ARCH}" \
- --tag "${TAG}" \
- --file packaging/docker/Dockerfile ./
- echo "..Done!"
-done
-
-echo "Docker build process completed!"
diff --git a/packaging/docker/check_login.sh b/packaging/docker/check_login.sh
deleted file mode 100755
index 7cc8d4e5..00000000
--- a/packaging/docker/check_login.sh
+++ /dev/null
@@ -1,41 +0,0 @@
-#!/usr/bin/env bash
-#
-# This is a credential checker script, to help get early input on docker credentials status
-# If these are wrong, then build/publish has no point running
-#
-# Copyright: SPDX-License-Identifier: GPL-3.0-or-later
-#
-# Author : Pavlos Emm. Katsoulakis (paul@netdata.cloud)
-
-set -e
-
-if [ "${BASH_VERSINFO[0]}" -lt "4" ]; then
- echo "This mechanism currently can only run on BASH version 4 and above"
- exit 1
-fi
-
-DOCKER_CMD="docker "
-
-# There is no reason to continue if we cannot log in to docker hub
-if [ -z ${DOCKER_USERNAME+x} ] || [ -z ${DOCKER_PWD+x} ]; then
- echo "No docker hub username or password found, aborting without publishing"
- exit 1
-fi
-
-# If we are not in netdata git repo, at the top level directory, fail
-TOP_LEVEL=$(basename "$(git rev-parse --show-toplevel)")
-CWD=$(git rev-parse --show-cdup)
-if [ -n "$CWD" ] || [ ! "${TOP_LEVEL}" == "netdata" ]; then
- echo "Run as ./packaging/docker/$(basename "$0") from top level directory of netdata git repository"
- echo "Docker build process aborted"
- exit 1
-fi
-
-# Login to docker hub to allow further operations
-echo "Attempting to login to docker"
-echo "$DOCKER_PWD" | $DOCKER_CMD login -u "$DOCKER_USERNAME" --password-stdin
-
-echo "Docker login successful!"
-$DOCKER_CMD logout
-
-echo "Docker login validation completed"
diff --git a/packaging/docker/publish.sh b/packaging/docker/publish.sh
deleted file mode 100755
index 5a9e67ed..00000000
--- a/packaging/docker/publish.sh
+++ /dev/null
@@ -1,117 +0,0 @@
-#!/usr/bin/env bash
-#
-# Cross-arch docker publish helper script
-# Needs docker in version >18.02 due to usage of manifests
-#
-# Copyright: SPDX-License-Identifier: GPL-3.0-or-later
-#
-# Author : Pavlos Emm. Katsoulakis (paul@netdata.cloud)
-
-set -e
-
-if [ "${BASH_VERSINFO[0]}" -lt "4" ]; then
- echo "This mechanism currently can only run on BASH version 4 and above"
- exit 1
-fi
-
-WORKDIR="$(mktemp -d)" # Temporary folder, removed after script is done
-VERSION="$1"
-declare -A ARCH_MAP
-ARCH_MAP=(["i386"]="386" ["amd64"]="amd64" ["armhf"]="arm" ["aarch64"]="arm64")
-DEVEL_ARCHS=(amd64)
-ARCHS="${!ARCH_MAP[@]}"
-DOCKER_CMD="docker --config ${WORKDIR}"
-GIT_MAIL=${GIT_MAIL:-"bot@netdata.cloud"}
-GIT_USER=${GIT_USER:-"netdatabot"}
-
-if [ -z ${REPOSITORY} ]; then
- REPOSITORY="${TRAVIS_REPO_SLUG}"
- if [ -z ${REPOSITORY} ]; then
- echo "REPOSITORY not set, publish cannot proceed"
- exit 1
- else
- echo "REPOSITORY was not detected, attempted to use TRAVIS_REPO_SLUG setting: ${TRAVIS_REPO_SLUG}"
- fi
-fi
-
-# When development mode is set, build on DEVEL_ARCHS
-if [ ! -z ${DEVEL+x} ]; then
- declare -a ARCHS=(${DEVEL_ARCHS[@]})
-fi
-
-# Ensure there is a version, the most appropriate one
-if [ "${VERSION}" == "" ]; then
- VERSION=$(git tag --points-at)
- if [ "${VERSION}" == "" ]; then
- VERSION="latest"
- fi
-fi
-MANIFEST_LIST="${REPOSITORY}:${VERSION}"
-
-# There is no reason to continue if we cannot log in to docker hub
-if [ -z ${DOCKER_USERNAME+x} ] || [ -z ${DOCKER_PWD+x} ]; then
- echo "No docker hub username or password found, aborting without publishing"
- exit 1
-fi
-
-# If we are not in netdata git repo, at the top level directory, fail
-TOP_LEVEL=$(basename "$(git rev-parse --show-toplevel)")
-CWD=$(git rev-parse --show-cdup)
-if [ ! -z $CWD ] || [ ! "${TOP_LEVEL}" == "netdata" ]; then
- echo "Run as ./packaging/docker/$(basename "$0") from top level directory of netdata git repository"
- echo "Docker build process aborted"
- exit 1
-fi
-
-echo "Docker image publishing in progress.."
-echo "Version : ${VERSION}"
-echo "Repository : ${REPOSITORY}"
-echo "Architectures : ${ARCHS[*]}"
-echo "Manifest list : ${MANIFEST_LIST}"
-
-# Create temporary docker CLI config with experimental features enabled (manifests v2 need it)
-echo '{"experimental":"enabled"}' > "${WORKDIR}"/config.json
-
-# Login to docker hub to allow further operations
-echo "$DOCKER_PWD" | $DOCKER_CMD login -u "$DOCKER_USERNAME" --password-stdin
-
-# Push images to registry
-for ARCH in ${ARCHS[@]}; do
- TAG="${MANIFEST_LIST}-${ARCH}"
- echo "Publishing image ${TAG}.."
- $DOCKER_CMD push "${TAG}" &
- echo "Image ${TAG} published succesfully!"
-done
-
-echo "Waiting for images publishing to complete"
-wait
-
-# Recreate docker manifest list
-echo "Creating manifest list.."
-$DOCKER_CMD manifest create --amend "${MANIFEST_LIST}" \
- "${MANIFEST_LIST}-i386" \
- "${MANIFEST_LIST}-armhf" \
- "${MANIFEST_LIST}-aarch64" \
- "${MANIFEST_LIST}-amd64"
-
-# Annotate manifest with CPU architecture information
-
-echo "Executing manifest annotate.."
-for ARCH in ${ARCHS[@]}; do
- TAG="${MANIFEST_LIST}-${ARCH}"
- echo "Annotating manifest for $ARCH, with TAG: ${TAG} (Manifest list: ${MANIFEST_LIST})"
- $DOCKER_CMD manifest annotate "${MANIFEST_LIST}" "${TAG}" --os linux --arch "${ARCH_MAP[$ARCH]}"
-done
-
-# Push manifest to docker hub
-echo "Pushing manifest list to docker.."
-$DOCKER_CMD manifest push -p "${MANIFEST_LIST}"
-
-# Show current manifest (debugging purpose only)
-echo "Evaluating manifest list entry"
-$DOCKER_CMD manifest inspect "${MANIFEST_LIST}"
-
-# Cleanup
-rm -r "${WORKDIR}"
-
-echo "Docker publishing process completed!"
diff --git a/packaging/docker/run.sh b/packaging/docker/run.sh
deleted file mode 100755
index f4377d45..00000000
--- a/packaging/docker/run.sh
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/usr/bin/env bash
-#
-# Entry point script for netdata
-#
-# Copyright: SPDX-License-Identifier: GPL-3.0-or-later
-#
-# Author : Pavlos Emm. Katsoulakis <paul@netdata.cloud>
-set -e
-
-echo "Netdata entrypoint script starting"
-if [ ${RESCRAMBLE+x} ]; then
- echo "Reinstalling all packages to get the latest Polymorphic Linux scramble"
- apk upgrade --update-cache --available
-fi
-
-if [ -n "${PGID}" ]; then
- echo "Creating docker group ${PGID}"
-    addgroup -g "${PGID}" "docker" || echo >&2 "Could not add group docker with ID ${PGID}, it's probably already there"
- echo "Assign netdata user to docker group ${PGID}"
- usermod -a -G ${PGID} ${DOCKER_USR} || echo >&2 "Could not add netdata user to group docker with ID ${PGID}"
-fi
-
-exec /usr/sbin/netdata -u "${DOCKER_USR}" -D -s /host -p "${NETDATA_PORT}" "$@"
-
-echo "Netdata entrypoint script, completed!"
diff --git a/packaging/go.d.checksums b/packaging/go.d.checksums
index 700bad0a..964a09b7 100644
--- a/packaging/go.d.checksums
+++ b/packaging/go.d.checksums
@@ -1,16 +1,16 @@
-133e138307a52a1c3af5abeec4d368c7bcb27f3398f0f380cfacc23db57b9911 *config.tar.gz
-7795ff9058852e9e03ceecd432e5c462ef141b3dd2e1f8e7c3cb13a6c4b685ce *go.d.plugin-v0.7.0.darwin-386
-a8db5312e803376bd96ab3c4cfd6f2d8288795fde97a2aefca7916cd8743f2a4 *go.d.plugin-v0.7.0.darwin-amd64
-a130a6aa7a98d37b648d41f8c3f0b939bfb8f343d1a3a6c8267a7fe604aae96f *go.d.plugin-v0.7.0.freebsd-386
-078c8a9607aea92ee8346cb2567a73b2a2ac317ea72c6975de07b47fdba2de80 *go.d.plugin-v0.7.0.freebsd-amd64
-bf7bff1f6fa32055242b627534ec5936fa1b8eb2f42edc736bbe041bee11129e *go.d.plugin-v0.7.0.freebsd-arm
-2e15dc67736b29cf736ad7a05271f462467f84e80073fd1a7084dd5e2ac83115 *go.d.plugin-v0.7.0.linux-386
-3b0b5b0faa319201ecac554cb300789546b7f51847d202ff913e29339acca48b *go.d.plugin-v0.7.0.linux-amd64
-1be3860bea67e2ac789a37bf4dae24f8925f93bebe72a57cc2218c9e9a702f19 *go.d.plugin-v0.7.0.linux-arm
-cba7cbfeda2e5146c8229d455aaf61f29f196d24291a509f4bf36ae12a2729e7 *go.d.plugin-v0.7.0.linux-arm64
-5f263cd5a032149618483a50486ce69c6e1a32b7e568c498d42b4d94691167f5 *go.d.plugin-v0.7.0.linux-mips
-9558e7aa633331afea78c682a15fc9e6cf10ed39fb4c26f03034a7b0cbdfcc1a *go.d.plugin-v0.7.0.linux-mips64
-0f93f4cac9b21cdb28ef88b9f1ba42afcc1e913c0227deb266440c205ff9a224 *go.d.plugin-v0.7.0.linux-mips64le
-51c0763f07de48e9f9dd9625a647aacecdd4a1bd39f13298b4f7c123436f4327 *go.d.plugin-v0.7.0.linux-mipsle
-7e7e53fff1852c9756d6117d35d1f061a8bd97135b231b010ad1461e789b1f66 *go.d.plugin-v0.7.0.linux-ppc64
-6d4203f9c4d5778add09ef2679dc025a72914b68dce5fb816e7cc38f4f36945f *go.d.plugin-v0.7.0.linux-ppc64le
+bd5e9f2e2677413eeb1390c750fcafa83875dbe6890de4d501db98d06640a0fe *config.tar.gz
+941e6319798a5eea2ddabb3c9b40e06830e2aee5758708c182c4072de720be4e *go.d.plugin-v0.8.0.darwin-386.tar.gz
+96b0d2c39854c3d4a3636b68a98f7d8d1b05754599499c456c2af0a6c477cea8 *go.d.plugin-v0.8.0.darwin-amd64.tar.gz
+351051a43e5d4059ffb8f91e95a64310f3342420bd058bf120ae84c900ef75c9 *go.d.plugin-v0.8.0.freebsd-386.tar.gz
+5b30abb8373956e3330202259cef7b0058a284f21367e573542901b8d1572a36 *go.d.plugin-v0.8.0.freebsd-amd64.tar.gz
+ba90aeae716f66bb6cd4306134e8151de749fd2259b9fc99d57e4b41fc36c6ef *go.d.plugin-v0.8.0.freebsd-arm.tar.gz
+9fcf0a2e4592eda0b6e7c8e4045ba2995239e41d910ef1ad9e48621e1ac2c98a *go.d.plugin-v0.8.0.linux-386.tar.gz
+f92efd1895fb54289c651955ad45627ad3c8a6c725915ac84c0223726a7fe153 *go.d.plugin-v0.8.0.linux-amd64.tar.gz
+60bb3636db91ba7b188d73fc7eeb97bbfab7a77b3eb8a91c456a173ef5982729 *go.d.plugin-v0.8.0.linux-arm.tar.gz
+062049e378349c98a2f629dc33bb44117e2f7dc60aa47a9d83660193850990bd *go.d.plugin-v0.8.0.linux-arm64.tar.gz
+ef57c14d3ca9ec7dca1b60d4d3bfb5cf04399139b1073f9713bb0536fadf9276 *go.d.plugin-v0.8.0.linux-mips.tar.gz
+735c4b3cfff31f1ffdf60ff35cc617914641e22406e70c92a884270f77c9001e *go.d.plugin-v0.8.0.linux-mips64.tar.gz
+3cafad1d94f7001d0c9aff9d60a384fb477210af2967cc7145572b635e180a7a *go.d.plugin-v0.8.0.linux-mips64le.tar.gz
+f1099a482bedcb92e3ac8857a06adbc55d9e612838cbc79d3c04b616af1d1ca3 *go.d.plugin-v0.8.0.linux-mipsle.tar.gz
+978f31c64215193b228c3ee9fc7c3fdf324ca4b543207fbe8e459730ae6409b2 *go.d.plugin-v0.8.0.linux-ppc64.tar.gz
+b5566fc280e4b9e7b07c3ae1f1296f232530c29eb2635398b7124c2caf14e53a *go.d.plugin-v0.8.0.linux-ppc64le.tar.gz
diff --git a/packaging/go.d.version b/packaging/go.d.version
new file mode 100644
index 00000000..b19b5211
--- /dev/null
+++ b/packaging/go.d.version
@@ -0,0 +1 @@
+v0.8.0
diff --git a/packaging/installer/README.md b/packaging/installer/README.md
index 67a7a912..b860b00f 100644
--- a/packaging/installer/README.md
+++ b/packaging/installer/README.md
@@ -7,15 +7,15 @@ The best way to install Netdata is directly from source. Our **automatic install
!!! warning
You can find Netdata packages distributed by third parties. In many cases, these packages are either too old or broken. So, the suggested ways to install Netdata are the ones on this page.
-1. [Automatic one line installation](#one-line-installation), easy installation from source, **this is the default**
-2. [Install pre-built static binary on any 64bit Linux](#linux-64bit-pre-built-static-binary)
-3. [Run Netdata in a docker container](#run-netdata-in-a-docker-container)
-4. [Manual installation, step by step](#install-netdata-on-linux-manually)
-5. [Install on FreeBSD](#freebsd)
-6. [Install on pfSense](#pfsense)
-7. [Enable on FreeNAS Corral](#freenas)
-8. [Install on macOS (OS X)](#macos)
-9. [Install on a Kubernetes cluster](https://github.com/netdata/helmchart#netdata-helm-chart-for-kubernetes-deployments)
+1. [Automatic one line installation](#one-line-installation), easy installation from source, **this is the default**
+2. [Install pre-built static binary on any 64bit Linux](#linux-64bit-pre-built-static-binary)
+3. [Run Netdata in a docker container](#run-netdata-in-a-docker-container)
+4. [Manual installation, step by step](#install-netdata-on-linux-manually)
+5. [Install on FreeBSD](#freebsd)
+6. [Install on pfSense](#pfsense)
+7. [Enable on FreeNAS Corral](#freenas)
+8. [Install on macOS (OS X)](#macos)
+9. [Install on a Kubernetes cluster](https://github.com/netdata/helmchart#netdata-helm-chart-for-kubernetes-deployments)
10. [Install using binary packages](#binary-packages)
See also the list of Netdata [package maintainers](../maintainers) for ASUSTOR NAS, OpenWRT, ReadyNAS, etc.
@@ -25,44 +25,48 @@ Note: From Netdata v1.12 and above, anonymous usage information is collected by
---
## One-line installation
+
![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.requests_per_url&options=unaligned&dimensions=kickstart&group=sum&after=-3600&label=last+hour&units=installations&value_color=orange&precision=0) ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.requests_per_url&options=unaligned&dimensions=kickstart&group=sum&after=-86400&label=today&units=installations&precision=0)
This method is **fully automatic on all Linux distributions**. FreeBSD and MacOS systems need some preparation before installing Netdata for the first time. Check the [FreeBSD](#freebsd) and the [MacOS](#macos) sections for more information.
To install Netdata from source, and keep it up to date with our **nightly releases** automatically, run the following:
-``` bash
+```bash
$ bash <(curl -Ss https://my-netdata.io/kickstart.sh)
```
!!! note
Do not use `sudo` for the one-line installer—it will escalate privileges itself if needed.
- To learn more about the pros and cons of using *nightly* vs. *stable* releases, see our [notice about the two options](#nightly-vs-stable-releases).
+```
+To learn more about the pros and cons of using *nightly* vs. *stable* releases, see our [notice about the two options](#nightly-vs-stable-releases).
+```
<details markdown="1"><summary>Click here for more information and advanced use of the one-line installation script.</summary>
Verify the integrity of the script with this:
-``` bash
-[ "8a2b054081a108dff915994ce77f2f2d" = "$(curl -Ss https://my-netdata.io/kickstart.sh | md5sum | cut -d ' ' -f 1)" ] && echo "OK, VALID" || echo "FAILED, INVALID"
+```bash
+[ "b6d16c171ccad073b86327246151d875" = "$(curl -Ss https://my-netdata.io/kickstart.sh | md5sum | cut -d ' ' -f 1)" ] && echo "OK, VALID" || echo "FAILED, INVALID"
```
-*It should print `OK, VALID` if the script is the one we ship.*
+
+_It should print `OK, VALID` if the script is the one we ship._
The `kickstart.sh` script:
-- detects the Linux distro and **installs the required system packages** for building Netdata (will ask for confirmation)
-- downloads the latest Netdata source tree to `/usr/src/netdata.git`.
-- installs Netdata by running `./netdata-installer.sh` from the source tree.
-- installs `netdata-updater.sh` to `cron.daily`, so your Netdata installation will be updated daily (you will get a message from cron only if the update fails).
-- For QA purposes, this installation method lets us know if it succeeded or failed.
+- detects the Linux distro and **installs the required system packages** for building Netdata (will ask for confirmation)
+- downloads the latest Netdata source tree to `/usr/src/netdata.git`.
+- installs Netdata by running `./netdata-installer.sh` from the source tree.
+- installs `netdata-updater.sh` to `cron.daily`, so your Netdata installation will be updated daily (you will get a message from cron only if the update fails).
+- For QA purposes, this installation method lets us know if it succeeded or failed.
The `kickstart.sh` script passes all its parameters to `netdata-installer.sh`, so you can add more parameters to customize your installation. Here are a few important parameters:
-- `--dont-wait`: Enable automated installs by not prompting for permission to install any required packages.
-- `--dont-start-it`: Prevent the installer from starting Netdata automatically.
-- `--stable-channel`: Automatically update only on the release of new major versions.
-- `--no-updates`: Prevent automatic updates of any kind.
+- `--dont-wait`: Enable automated installs by not prompting for permission to install any required packages.
+- `--dont-start-it`: Prevent the installer from starting Netdata automatically.
+- `--stable-channel`: Automatically update only on the release of new major versions.
+- `--no-updates`: Prevent automatic updates of any kind.
Example using all the above parameters:
@@ -77,6 +81,7 @@ Once Netdata is installed, see [Getting Started](../../docs/GettingStarted.md).
---
## Linux 64bit pre-built static binary
+
![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.requests_per_url&options=unaligned&dimensions=kickstart64&group=sum&after=-3600&label=last+hour&units=installations&value_color=orange&precision=0) ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.requests_per_url&options=unaligned&dimensions=kickstart64&group=sum&after=-86400&label=today&units=installations&precision=0)
You can install a pre-compiled static binary of Netdata on any Intel/AMD 64bit Linux system (even those that don't have a package manager, like CoreOS, CirrOS, busybox systems, etc). You can also use these packages on systems with broken or unsupported package managers.
@@ -90,38 +95,40 @@ $ bash <(curl -Ss https://my-netdata.io/kickstart-static64.sh)
!!! note
Do not use `sudo` for this installer—it will escalate privileges itself if needed.
- To learn more about the pros and cons of using *nightly* vs. *stable* releases, see our [notice about the two options](README.md#nightly-vs-stable-releases).
+```
+To learn more about the pros and cons of using *nightly* vs. *stable* releases, see our [notice about the two options](README.md#nightly-vs-stable-releases).
- If your system does not have `bash` installed, open the `More information and advanced uses of the kickstart-static64.sh script` dropdown for instructions to run the installer without `bash`.
-
- This script installs Netdata at `/opt/netdata`.
+If your system does not have `bash` installed, open the `More information and advanced uses of the kickstart-static64.sh script` dropdown for instructions to run the installer without `bash`.
+
+This script installs Netdata at `/opt/netdata`.
+```
<details markdown="1"><summary>Click here for more information and advanced use of this command.</summary>
Verify the integrity of the script with this:
```bash
-[ "8779d8717ccaa8dac18d599502eef591" = "$(curl -Ss https://my-netdata.io/kickstart-static64.sh | md5sum | cut -d ' ' -f 1)" ] && echo "OK, VALID" || echo "FAILED, INVALID"
+[ "4415e8c13e529a795abb953a9be14ad5" = "$(curl -Ss https://my-netdata.io/kickstart-static64.sh | md5sum | cut -d ' ' -f 1)" ] && echo "OK, VALID" || echo "FAILED, INVALID"
```
*It should print `OK, VALID` if the script is the one we ship.*
The `kickstart-static64.sh` script passes all its parameters to `netdata-installer.sh`, so you can add more parameters to customize your installation. Here are a few important parameters:
-- `--dont-wait`: Enable automated installs by not prompting for permission to install any required packages.
-- `--dont-start-it`: Prevent the installer from starting Netdata automatically.
-- `--stable-channel`: Automatically update only on the release of new major versions.
-- `--no-updates`: Prevent automatic updates of any kind.
+- `--dont-wait`: Enable automated installs by not prompting for permission to install any required packages.
+- `--dont-start-it`: Prevent the installer from starting Netdata automatically.
+- `--stable-channel`: Automatically update only on the release of new major versions.
+- `--no-updates`: Prevent automatic updates of any kind.
Example using all the above parameters:
-```bash
+```sh
$ bash <(curl -Ss https://my-netdata.io/kickstart-static64.sh) --dont-wait --dont-start-it --no-updates --stable-channel
```
If your shell fails to handle the above one liner, do this:
-```bash
+```sh
# download the script with curl
curl https://my-netdata.io/kickstart-static64.sh >/tmp/kickstart-static64.sh
@@ -132,10 +139,10 @@ wget -O /tmp/kickstart-static64.sh https://my-netdata.io/kickstart-static64.sh
sh /tmp/kickstart-static64.sh
```
-- The static binary files are kept in repo [binary-packages](https://github.com/netdata/binary-packages). You can download any of the `.run` files, and run it. These files are self-extracting shell scripts built with [makeself](https://github.com/megastep/makeself).
-- The target system does **not** need to have bash installed.
-- The same files can be used for updates too.
-- For QA purposes, this installation method lets us know if it succeeded or failed.
+- The static binary files are kept in repo [binary-packages](https://github.com/netdata/binary-packages). You can download any of the `.run` files, and run it. These files are self-extracting shell scripts built with [makeself](https://github.com/megastep/makeself).
+- The target system does **not** need to have bash installed.
+- The same files can be used for updates too.
+- For QA purposes, this installation method lets us know if it succeeded or failed.
</details>
@@ -153,13 +160,13 @@ You can [Install Netdata with Docker](../docker/#install-netdata-with-docker).
To install the latest git version of Netdata, please follow these 2 steps:
-1. [Prepare your system](#prepare-your-system)
+1. [Prepare your system](#prepare-your-system)
- Install the required packages on your system.
+ Install the required packages on your system.
-2. [Install Netdata](#install-netdata)
+2. [Install Netdata](#install-netdata)
- Download and install Netdata. You can also update it the same way.
+ Download and install Netdata. You can also update it the same way.
---
@@ -167,25 +174,25 @@ To install the latest git version of Netdata, please follow these 2 steps:
Try our experimental automatic requirements installer (no need to be root). This will try to find the packages that should be installed on your system to build and run Netdata. It supports most major Linux distributions released after 2010:
-* **Alpine** Linux and its derivatives
- * You have to install `bash` yourself, before using the installer.
+- **Alpine** Linux and its derivatives
+ - You have to install `bash` yourself, before using the installer.
-* **Arch** Linux and its derivatives
- * You need arch/aur for package Judy.
+- **Arch** Linux and its derivatives
+ - You need arch/aur for package Judy.
-* **Gentoo** Linux and its derivatives
+- **Gentoo** Linux and its derivatives
-* **Debian** Linux and its derivatives (including **Ubuntu**, **Mint**)
+- **Debian** Linux and its derivatives (including **Ubuntu**, **Mint**)
-* **Redhat Enterprise Linux** and its derivatives (including **Fedora**, **CentOS**, **Amazon Machine Image**)
- * Please note that for RHEL/CentOS you need
- [EPEL](http://www.tecmint.com/how-to-enable-epel-repository-for-rhel-centos-6-5/).
-   In addition, RHEL/CentOS version 6 also needs
- [OKay](https://okay.com.mx/blog-news/rpm-repositories-for-centos-6-and-7.html) for package libuv version 1.
+- **Redhat Enterprise Linux** and its derivatives (including **Fedora**, **CentOS**, **Amazon Machine Image**)
+ - Please note that for RHEL/CentOS you need
+ [EPEL](http://www.tecmint.com/how-to-enable-epel-repository-for-rhel-centos-6-5/).
+    In addition, RHEL/CentOS version 6 also needs
+ [OKay](https://okay.com.mx/blog-news/rpm-repositories-for-centos-6-and-7.html) for package libuv version 1.
-* **SuSe** Linux and its derivatives (including **openSuSe**)
+- **SuSe** Linux and its derivatives (including **openSuSe**)
-* **SLE12** Must have your system registered with Suse Customer Center or have the DVD. See [#1162](https://github.com/netdata/netdata/issues/1162)
+- **SLE12** Must have your system registered with Suse Customer Center or have the DVD. See [#1162](https://github.com/netdata/netdata/issues/1162)
Install the packages for a **basic Netdata installation** (system monitoring and many applications, without `mysql` / `mariadb`, `postgres`, `named`, hardware sensors and `SNMP`):
@@ -217,48 +224,47 @@ yum install autoconf automake curl gcc git libmnl-devel libuuid-devel openssl-de
# openSUSE
zypper install zlib-devel libuuid-devel libuv-devel liblz4-devel judy-devel libopenssl-devel libmnl-devel gcc make git autoconf autoconf-archive autogen automake pkgconfig curl findutils python
-
```
Once Netdata is compiled, to run it the following packages are required (already installed using the above commands):
-package|description
-:-----:|-----------
-`libuuid`|part of `util-linux` for GUIDs management
-`zlib`|gzip compression for the internal Netdata web server
+| package | description|
+|:-----:|-----------|
+| `libuuid` | part of `util-linux` for GUIDs management|
+| `zlib` | gzip compression for the internal Netdata web server|
*Netdata will fail to start without the above.*
Netdata plugins and various aspects of Netdata can be enabled or benefit when these are installed (they are optional):
-package|description
-:-----:|-----------
-`bash`|for shell plugins and **alarm notifications**
-`curl`|for shell plugins and **alarm notifications**
-`iproute` or `iproute2`|for monitoring **Linux traffic QoS**<br/>use `iproute2` if `iproute` reports as not available or obsolete
-`python`|for most of the external plugins
-`python-yaml`|used for monitoring **beanstalkd**
-`python-beanstalkc`|used for monitoring **beanstalkd**
-`python-dnspython`|used for monitoring DNS query time
-`python-ipaddress`|used for monitoring **DHCPd**<br/>this package is required only if the system has python v2. python v3 has this functionality embedded
-`python-mysqldb`<br/>or<br/>`python-pymysql`|used for monitoring **mysql** or **mariadb** databases<br/>`python-mysqldb` is a lot faster and thus preferred
-`python-psycopg2`|used for monitoring **postgresql** databases
-`python-pymongo`|used for monitoring **mongodb** databases
-`nodejs`|used for `node.js` plugins for monitoring **named** and **SNMP** devices
-`lm-sensors`|for monitoring **hardware sensors**
-`libmnl`|for collecting netfilter metrics
-`netcat`|for shell plugins to collect metrics from remote systems
+| package |description|
+|:-----:|-----------|
+| `bash`|for shell plugins and **alarm notifications**|
+| `curl`|for shell plugins and **alarm notifications**|
+| `iproute` or `iproute2`|for monitoring **Linux traffic QoS**<br/>use `iproute2` if `iproute` reports as not available or obsolete|
+| `python`|for most of the external plugins|
+| `python-yaml`|used for monitoring **beanstalkd**|
+| `python-beanstalkc`|used for monitoring **beanstalkd**|
+| `python-dnspython`|used for monitoring DNS query time|
+| `python-ipaddress`|used for monitoring **DHCPd**<br/>this package is required only if the system has python v2. python v3 has this functionality embedded|
+| `python-mysqldb`<br/>or<br/>`python-pymysql`|used for monitoring **mysql** or **mariadb** databases<br/>`python-mysqldb` is a lot faster and thus preferred|
+| `python-psycopg2`|used for monitoring **postgresql** databases|
+| `python-pymongo`|used for monitoring **mongodb** databases|
+| `nodejs`|used for `node.js` plugins for monitoring **named** and **SNMP** devices|
+| `lm-sensors`|for monitoring **hardware sensors**|
+| `libmnl`|for collecting netfilter metrics|
+| `netcat`|for shell plugins to collect metrics from remote systems|
*Netdata will greatly benefit if you have the above packages installed, but it will still work without them.*
Netdata DB engine can be enabled when these are installed (they are optional):
-|package|description|
+| package | description|
|:-----:|-----------|
-|`libuv`|multi-platform support library with a focus on asynchronous I/O|
-|`liblz4`|Extremely Fast Compression algorithm|
-|`Judy`|General purpose dynamic array|
-|`openssl`|Cryptography and SSL/TLS Toolkit|
+| `libuv` | Multi-platform support library with a focus on asynchronous I/O, version 1 or greater|
+| `liblz4` | Extremely fast compression algorithm, version r129 or greater|
+| `Judy` | General purpose dynamic array|
+| `openssl`| Cryptography and SSL/TLS toolkit|
*Netdata will greatly benefit if you have the above packages installed, but it will still work without them.*
@@ -269,33 +275,32 @@ Netdata DB engine can be enabled when these are installed (they are optional):
Do this to install and run Netdata:
```sh
-
# download it - the directory 'netdata' will be created
git clone https://github.com/netdata/netdata.git --depth=100
cd netdata
# run script with root privileges to build, install, start Netdata
./netdata-installer.sh
-
```
-* If you don't want to run it straight-away, add the `--dont-start-it` option.
+- If you don't want to run it straight-away, add the `--dont-start-it` option.
-* You can also append `--stable-channel` to fetch and install only the official releases from GitHub, instead of the nightly builds.
+- You can also append `--stable-channel` to fetch and install only the official releases from GitHub, instead of the nightly builds.
-* If you don't want to install it on the default directories, you can run the installer like this: `./netdata-installer.sh --install /opt`. This one will install Netdata in `/opt/netdata`.
+- If you don't want to install it on the default directories, you can run the installer like this: `./netdata-installer.sh --install /opt`. This one will install Netdata in `/opt/netdata`.
-* If your server does not have access to the internet and you have manually put the installation directory on your server, you will need to pass the option `--disable-go` to the installer. The option will prevent the installer from attempting to download and install `go.d.plugin`.
+- If your server does not have access to the internet and you have manually put the installation directory on your server, you will need to pass the option `--disable-go` to the installer. The option will prevent the installer from attempting to download and install `go.d.plugin`.
Once the installer completes, the file `/etc/netdata/netdata.conf` will be created (if you changed the installation directory, the configuration will appear in that directory too).
-You can edit this file to set options. One common option to tweak is `history`, which controls the size of the memory database Netdata will use. By default it is `3600` seconds (an hour of data on the charts), which makes Netdata use about 10-15MB of RAM (depending on the number of charts detected on your system). Check **[[Memory Requirements]]**.
+You can edit this file to set options. One common option to tweak is `history`, which controls the size of the memory database Netdata will use. By default it is `3600` seconds (an hour of data on the charts), which makes Netdata use about 10-15MB of RAM (depending on the number of charts detected on your system). Check **\[[Memory Requirements]]**.
To apply the changes you made, you have to restart Netdata.
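
For example, a minimal `netdata.conf` tweak (the value is illustrative) that raises retention to four hours of data:

```
[global]
    history = 14400
```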
---
### Binary Packages
+
![](https://raw.githubusercontent.com/netdata/netdata/master/web/gui/images/packaging-beta-tag.svg?sanitize=true)
We provide our own flavour of binary packages for the most common operating systems, in the .RPM and .DEB packaging formats.
@@ -308,8 +313,8 @@ Netdata is committed to support installation of our solution to all operating sy
We provide two separate repositories, one for our stable releases and one for our nightly releases.
-1. Stable releases: Our stable production releases are hosted in [netdata/netdata](https://packagecloud.io/netdata/netdata) repository of package cloud
-2. Nightly releases: Our latest releases are hosted in [netdata/netdata-edge](https://packagecloud.io/netdata/netdata-edge) repository of package cloud
+1. Stable releases: Our stable production releases are hosted in [netdata/netdata](https://packagecloud.io/netdata/netdata) repository of package cloud
+2. Nightly releases: Our latest releases are hosted in [netdata/netdata-edge](https://packagecloud.io/netdata/netdata-edge) repository of package cloud
Visit the repository pages and follow the quick set-up instructions to get started.
@@ -317,8 +322,6 @@ Visit the repository pages and follow the quick set-up instructions to get start
## Other Systems
-
-
##### FreeBSD
You can install Netdata from ports or packages collection.
@@ -338,41 +341,47 @@ cd netdata
```
##### pfSense
+
To install Netdata on pfSense run the following commands (within a shell or under Diagnostics/Command Prompt within the pfSense web interface).
Change platform (i386/amd64, etc) and FreeBSD versions (10/11, etc) according to your environment, and change the Netdata version (1.10.0 in the example) according to the latest version present in the FreeBSD repository:
Note: the first three packages are downloaded from the pfSense repository to maintain compatibility with pfSense; Netdata is downloaded from the FreeBSD repository.
-```
+
+```sh
pkg install pkgconf
pkg install bash
pkg install e2fsprogs-libuuid
pkg add http://pkg.freebsd.org/FreeBSD:11:amd64/latest/All/python36-3.6.8_2.txz
pkg add http://pkg.freebsd.org/FreeBSD:11:amd64/latest/All/netdata-1.13.0.txz
```
+
To start Netdata manually, run `service netdata onestart`.
To start Netdata automatically at each boot add `service netdata onestart` as a Shellcmd within the pfSense web interface (under **Services/Shellcmd**, which you need to install beforehand under **System/Package Manager/Available Packages**).
Shellcmd Type should be set to `Shellcmd`.
![](https://i.imgur.com/wcKiPe1.png)
-Alternatively, more information on achieving the same via the command line and scripts can be found at https://doc.pfsense.org/index.php/Installing_FreeBSD_Packages.
+Alternatively, more information on achieving the same via the command line and scripts can be found at <https://doc.pfsense.org/index.php/Installing_FreeBSD_Packages>.
-If you experience an issue with `/usr/bin/install` being absent on pfSense 2.3 or earlier, update pfSense or use the workaround from [https://redmine.pfsense.org/issues/6643](https://redmine.pfsense.org/issues/6643)
+If you experience an issue with `/usr/bin/install` being absent on pfSense 2.3 or earlier, update pfSense or use the workaround from <https://redmine.pfsense.org/issues/6643>
**Note:** In pfSense, the Netdata configuration files are located under `/usr/local/etc/netdata`
##### FreeNAS
+
On FreeNAS-Corral-RELEASE (>=10.0.3), Netdata is pre-installed.
To use Netdata, the service will need to be enabled and started from the FreeNAS **[CLI](https://github.com/freenas/cli)**.
To enable the Netdata service:
-```
+
+```sh
service netdata config set enable=true
```
-To start the netdata service:
-```
+To start the Netdata service:
+
+```sh
service netdata start
```
@@ -392,7 +401,9 @@ or from source:
# install Xcode Command Line Tools
xcode-select --install
```
+
click `Install` in the software update popup window, then
+
```sh
# install HomeBrew package manager
/usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
@@ -414,7 +425,7 @@ The installer will also install a startup plist to start Netdata when your Mac b
Execute these commands to install Netdata in Alpine Linux 3.x:
-```
+```sh
# install required packages
apk add alpine-sdk bash curl zlib-dev util-linux-dev libmnl-dev gcc make git autoconf automake pkgconfig python logrotate
@@ -448,13 +459,13 @@ The documentation previously recommended installing the Debian Chroot package fr
The good news is that the 64-bit static installer works fine if your NAS is one that uses the amd64 architecture. It will install the content into `/opt/netdata`, making future removal safe and simple.
-When Netdata is first installed, it will run as _root_. This may or may not be acceptable for you, and since other installations run it as the _netdata_ user, you might wish to do the same. This requires some extra work:
+When Netdata is first installed, it will run as *root*. This may or may not be acceptable for you, and since other installations run it as the *netdata* user, you might wish to do the same. This requires some extra work:
-1. Create a group `netdata` via the Synology group interface. Give it no access to anything.
-2. Create a user `netdata` via the Synology user interface. Give it no access to anything and a random password. Assign the user to the `netdata` group. Netdata will chuid to this user when running.
-3. Change ownership of the following directories, as defined in [Netdata Security](../../docs/netdata-security.md#security-design):
+1. Create a group `netdata` via the Synology group interface. Give it no access to anything.
+2. Create a user `netdata` via the Synology user interface. Give it no access to anything and a random password. Assign the user to the `netdata` group. Netdata will chuid to this user when running.
+3. Change ownership of the following directories, as defined in [Netdata Security](../../docs/netdata-security.md#security-design):
-```
+```sh
$ chown -R root:netdata /opt/netdata/usr/share/netdata
$ chown -R netdata:netdata /opt/netdata/var/lib/netdata /opt/netdata/var/cache/netdata
$ chown -R netdata:root /opt/netdata/var/log/netdata
@@ -462,15 +473,14 @@ $ chown -R netdata:root /opt/netdata/var/log/netdata
Additionally, as of 2018/06/24, the Netdata installer doesn't recognize DSM as an operating system, so no init script is installed. You'll have to do this manually:
-1. Add [this file](https://gist.github.com/oskapt/055d474d7bfef32c49469c1b53e8225f) as `/etc/rc.netdata`. Make it executable with `chmod 0755 /etc/rc.netdata`.
-2. Edit `/etc/rc.local` and add a line calling `/etc/rc.netdata` to have it start on boot:
+1. Add [this file](https://gist.github.com/oskapt/055d474d7bfef32c49469c1b53e8225f) as `/etc/rc.netdata`. Make it executable with `chmod 0755 /etc/rc.netdata`.
+2. Edit `/etc/rc.local` and add a line calling `/etc/rc.netdata` to have it start on boot:
```
# Netdata startup
[ -x /etc/rc.netdata ] && /etc/rc.netdata start
```
-
## Nightly vs. stable releases
The Netdata team maintains two releases of the Netdata agent: **nightly** and **stable**. By default, Netdata's installation scripts will give you **automatic, nightly** updates, as that is our recommended configuration.
@@ -481,16 +491,15 @@ The Netdata team maintains two releases of the Netdata agent: **nightly** and **
**Pros of using nightly releases:**
- - Get the latest features and bugfixes as soon as they're available
- - Receive security-related fixes immediately
- - Use stable, fully-tested code that's always improving
- - Leverage the same Netdata experience our community is using
+- Get the latest features and bugfixes as soon as they're available
+- Receive security-related fixes immediately
+- Use stable, fully-tested code that's always improving
+- Leverage the same Netdata experience our community is using
**Pros of using stable releases:**
- - Protect yourself from the rare instance when major bugs slip through our testing and negatively affect a Netdata installation
- - Retain more control over the Netdata version you use
-
+- Protect yourself from the rare instance when major bugs slip through our testing and negatively affect a Netdata installation
+- Retain more control over the Netdata version you use
## Automatic updates
@@ -504,4 +513,4 @@ bash <(curl -Ss https://my-netdata.io/kickstart.sh) --no-updates
With automatic updates disabled, you can choose exactly when and how you [update Netdata](UPDATE.md).
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Finstaller%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Finstaller%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/packaging/installer/UNINSTALL.md b/packaging/installer/UNINSTALL.md
index 43d03b00..b8e5bd9e 100644
--- a/packaging/installer/UNINSTALL.md
+++ b/packaging/installer/UNINSTALL.md
@@ -1,6 +1,7 @@
-# Uninstalling netdata
+# Uninstalling Netdata
+
+Our self-contained uninstaller is able to remove Netdata installations created with the shell installer. It doesn't need any other Netdata repository files to run. All it needs is a `.environment` file, which is created during installation (by the shell installer) and placed in `${NETDATA_USER_CONFIG_DIR}/.environment` (by default `/etc/netdata/.environment`). That file contains some parameters which are passed to our installer and which are needed during the uninstallation process. Mainly two parameters are needed:
-Our self-contained uninstaller is able to remove netdata installations created with shell installer. It doesn't need any other netdata repository files to be run. All it needs is an .environment file, which is created during installation (with shell installer) and put in ${NETDATA_USER_CONFIG_DIR}/.environment (by default /etc/netdata/.environment). That file contains some parameters which are passed to our installer and which are needed during uninstallation process. Mainly two parameters are needed:
```
NETDATA_PREFIX
NETDATA_ADDED_TO_GROUPS
@@ -8,20 +9,24 @@ NETDATA_ADDED_TO_GROUPS
A workflow for uninstallation looks like this:
-1. Find your `.environment` file, which is usually `/etc/netdata/.environment` in a default installation.
-2. If you cannot find that file and would like to uninstall netdata, then create new file with following content:
+1. Find your `.environment` file, which is usually `/etc/netdata/.environment` in a default installation.
+2. If you cannot find that file and would like to uninstall Netdata, then create a new file with the following content:
+
```
NETDATA_PREFIX="<installation prefix>" # put what you passed to the shell installer's `--install` flag. Otherwise it should be empty
-NETDATA_ADDED_TO_GROUPS="<additional groups>" # Additional groups for a user running netdata process
+NETDATA_ADDED_TO_GROUPS="<additional groups>" # Additional groups for a user running the Netdata process
```
-3. Run `netdata-uninstaller.sh` as follows
+
+3. Run `netdata-uninstaller.sh` as follows:
+
```
${NETDATA_PREFIX}/usr/libexec/netdata/netdata-uninstaller.sh --yes --env <environment_file>
```
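For example, on a default installation (empty `NETDATA_PREFIX`, so the script lives under `/usr/libexec/netdata`), the invocation would look like this; this is illustrative, so adjust the paths to your own setup:

```sh
/usr/libexec/netdata/netdata-uninstaller.sh --yes --env /etc/netdata/.environment
```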
Note: Existing installations may still need to download the file if it's not present.
To uninstall in that case, run the following commands:
-```
+
+```sh
wget https://raw.githubusercontent.com/netdata/netdata/master/packaging/installer/netdata-uninstaller.sh
chmod +x ./netdata-uninstaller.sh
./netdata-uninstaller.sh --yes --env <environment_file>
@@ -29,6 +34,6 @@ chmod +x ./netdata-uninstaller.sh
The default `environment_file` is `/etc/netdata/.environment`.
-Note: This uninstallation method assumes previous installation with netdata-installer.sh or kickstart script. Currently using it when netdata was installed by a package manager can work or cause unexpected results.
+Note: This uninstallation method assumes a previous installation with `netdata-installer.sh` or the kickstart script. Using it when Netdata was installed by a package manager may work, but it can also cause unexpected results.
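If Netdata did come from a distribution package, it is generally safer to remove it with the same package manager. A sketch, assuming common package names (your distribution's tool and package name may differ):

```sh
# Debian/Ubuntu
sudo apt-get remove netdata

# RHEL/CentOS/Fedora
sudo yum remove netdata
```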
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Finstaller%2FUNINSTALL&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Finstaller%2FUNINSTALL&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/packaging/installer/UPDATE.md b/packaging/installer/UPDATE.md
index 0903ddb5..083d8da3 100644
--- a/packaging/installer/UPDATE.md
+++ b/packaging/installer/UPDATE.md
@@ -1,9 +1,8 @@
-# Updating netdata after its installation
+# Updating Netdata after its installation
![image8](https://cloud.githubusercontent.com/assets/2662304/14253735/536f4580-fa95-11e5-9f7b-99112b31a5d7.gif)
-
-We suggest to keep your netdata updated. We are actively developing it and you should always update to the latest version.
+We suggest keeping your Netdata updated. We are actively developing it, and you should always update to the latest version.
The update procedure depends on how you installed it:
@@ -11,7 +10,7 @@ The update procedure depends on how you installed it:
### Manual update to get the latest git commit
-netdata versions older than `v1.12.0-rc2-52` had a `netdata-updater.sh` script in the root directory of the source code, which has now been deprecated. The manual process that works for all versions to get the latest commit in git is to use the `netdata-installer.sh`. The installer preserves your custom configuration and updates the information of the installation in the `.environment` file under the user configuration directory.
+Netdata versions older than `v1.12.0-rc2-52` had a `netdata-updater.sh` script in the root directory of the source code, which has now been deprecated. The manual process that works for all versions to get the latest commit in git is to use `netdata-installer.sh`. The installer preserves your custom configuration and updates the information of the installation in the `.environment` file under the user configuration directory.
```sh
# go to the git downloaded directory
@@ -20,18 +19,19 @@ cd /path/to/git/downloaded/netdata
# update your local copy
git pull
-# run the netdata installer
+# run the Netdata installer
sudo ./netdata-installer.sh
```
_Netdata will be restarted with the new version._
-Keep in mind, netdata may now have new features, or certain old features may now behave differently. So pay some attention to it after updating.
+Keep in mind that Netdata may now have new features, or certain old features may behave differently, so pay some attention to it after updating.
### Manual update to get the latest nightly build
The `kickstart.sh` one-liner will do a one-time update to the latest nightly build, if executed as follows:
-```
+
+```sh
bash <(curl -Ss https://my-netdata.io/kickstart.sh) --no-updates
```
@@ -40,16 +40,14 @@ bash <(curl -Ss https://my-netdata.io/kickstart.sh) --no-updates
_Please consider the risks of running an auto-update. Something can always go wrong. Keep an eye on your installation, and run a manual update if something ever fails._
Calling the `netdata-installer.sh` with the `--auto-update` or `-u` option will create the `netdata-updater` script under
-either `/etc/cron.daily/`, or `/etc/periodic/daily/`. Whenever the `netdata-updater` is executed, it checks if a newer nightly build is available and then handles the download, installation and netdata restart.
-
-Note that after Jan 2019, the `kickstart.sh` one-liner `bash <(curl -Ss https://my-netdata.io/kickstart.sh)` calls the `netdata-installer.sh` with the auto-update option. So if you just run the one-liner without options once, your netdata will be kept auto-updated.
+either `/etc/cron.daily/` or `/etc/periodic/daily/`. Whenever the `netdata-updater` is executed, it checks if a newer nightly build is available and then handles the download, installation, and Netdata restart.
+Note that after Jan 2019, the `kickstart.sh` one-liner `bash <(curl -Ss https://my-netdata.io/kickstart.sh)` calls `netdata-installer.sh` with the auto-update option. So if you just run the one-liner once without options, your Netdata will be kept auto-updated.
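To check whether automatic updates are active on a machine, look for the updater script in either of the locations mentioned above (a minimal check, assuming the default paths):

```sh
ls -l /etc/cron.daily/netdata-updater /etc/periodic/daily/netdata-updater 2>/dev/null
```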
## You downloaded a binary package
If you installed it from a binary package, the best way is to **obtain a newer copy** from the source you got it from in the first place. This includes the static binary installation via `kickstart-base64.sh`, which would need to be executed again.
-If a newer version of netdata is not available from the source you got it, we suggest to uninstall the version you have and follow the [installation](README.md) instructions for installing a fresh version of netdata.
-
+If a newer version of Netdata is not available from the source you got it from, we suggest uninstalling the version you have and following the [installation](README.md) instructions to install a fresh version of Netdata.
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Finstaller%2FUPDATE&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Finstaller%2FUPDATE&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/packaging/installer/functions.sh b/packaging/installer/functions.sh
index 6f999690..2e304434 100644
--- a/packaging/installer/functions.sh
+++ b/packaging/installer/functions.sh
@@ -380,10 +380,17 @@ install_netdata_service() {
fi
if [ "${SYSTEMD_DIRECTORY}x" != "x" ]; then
+ ENABLE_NETDATA_IF_PREVIOUSLY_ENABLED="run systemctl enable netdata"
+ IS_NETDATA_ENABLED="$(systemctl is-enabled netdata 2> /dev/null || echo "Netdata not there")"
+ if [ "${IS_NETDATA_ENABLED}" == "disabled" ]; then
+ echo >&2 "Netdata was there and disabled, make sure we don't re-enable it ourselves"
+ ENABLE_NETDATA_IF_PREVIOUSLY_ENABLED="true"
+ fi
+
echo >&2 "Installing systemd service..."
run cp system/netdata.service "${SYSTEMD_DIRECTORY}/netdata.service" &&
run systemctl daemon-reload &&
- run systemctl enable netdata &&
+ ${ENABLE_NETDATA_IF_PREVIOUSLY_ENABLED} &&
return 0
else
echo >&2 "no systemd directory; cannot install netdata.service"
diff --git a/packaging/installer/kickstart-static64.sh b/packaging/installer/kickstart-static64.sh
index a9f11238..34ca116d 100755
--- a/packaging/installer/kickstart-static64.sh
+++ b/packaging/installer/kickstart-static64.sh
@@ -189,7 +189,7 @@ done
# ---------------------------------------------------------------------------------------------------------------------
TMPDIR=$(create_tmp_directory)
-cd "${TMPDIR}" || :
+cd "${TMPDIR}"
set_tarball_urls "${RELEASE_CHANNEL}"
progress "Downloading static netdata binary: ${NETDATA_TARBALL_URL}"
@@ -202,12 +202,14 @@ fi
# ---------------------------------------------------------------------------------------------------------------------
progress "Installing netdata"
-
run ${sudo} sh "${TMPDIR}/netdata-latest.gz.run" ${opts} ${inner_opts}
#shellcheck disable=SC2181
if [ $? -eq 0 ]; then
- rm "${TMPDIR}/netdata-latest.gz.run"
+ run ${sudo} rm "${TMPDIR}/netdata-latest.gz.run"
+ if [ ! "${TMPDIR}" = "/" ] && [ -d "${TMPDIR}" ]; then
+ run ${sudo} rm -rf "${TMPDIR}"
+ fi
else
echo >&2 "NOTE: did not remove: ${TMPDIR}/netdata-latest.gz.run"
fi
diff --git a/packaging/installer/kickstart.sh b/packaging/installer/kickstart.sh
index d396f139..bab95fda 100755
--- a/packaging/installer/kickstart.sh
+++ b/packaging/installer/kickstart.sh
@@ -304,7 +304,7 @@ if [ "${INTERACTIVE}" = "0" ]; then
fi
TMPDIR=$(create_tmp_directory)
-cd ${TMPDIR} || :
+cd "${TMPDIR}"
dependencies
@@ -328,7 +328,9 @@ cd netdata-* || fatal "Cannot cd to netdata source tree"
if [ -x netdata-installer.sh ]; then
progress "Installing netdata..."
run ${sudo} ./netdata-installer.sh ${NETDATA_UPDATES} ${NETDATA_INSTALLER_OPTIONS} "${@}" || fatal "netdata-installer.sh exited with error"
- rm -rf "${TMPDIR}" >/dev/null 2>&1
+ if [ -d "${TMPDIR}" ] && [ ! "${TMPDIR}" = "/" ]; then
+ run ${sudo} rm -rf "${TMPDIR}" >/dev/null 2>&1
+ fi
else
fatal "Cannot install netdata from source (the source directory does not include netdata-installer.sh). Leaving all files in ${TMPDIR}"
fi
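Both kickstart scripts now apply the same defensive pattern before cleaning up: never `rm -rf` a path that could be empty, `/`, or not a directory. As a standalone sketch of that guard:

```sh
# minimal sketch of the cleanup guard used in both scripts above
cleanup_tmpdir() {
  if [ -n "${TMPDIR}" ] && [ "${TMPDIR}" != "/" ] && [ -d "${TMPDIR}" ]; then
    rm -rf "${TMPDIR}"
  fi
}
```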
diff --git a/packaging/maintainers/README.md b/packaging/maintainers/README.md
deleted file mode 100644
index 9fb36e77..00000000
--- a/packaging/maintainers/README.md
+++ /dev/null
@@ -1,75 +0,0 @@
-# Package Maintainers
-
-This page tracks the package maintainers for netdata, for various operating systems and versions.
-
-> Feel free to update it, so that it reflects the current status.
-
-
----
-
-## Official Linux Distributions
-
-| Linux Distribution | Netdata Version | Maintainer | Related URL |
-| :-: | :-: | :-: | :-- |
-| Arch Linux | Release | @svenstaro | [netdata @ Arch Linux](https://www.archlinux.org/packages/community/x86_64/netdata/) |
-| Arch Linux AUR | Git | @sanskritfritz | [netdata @ AUR](https://aur.archlinux.org/packages/netdata-git/) |
-| Gentoo Linux | Release + Git | @candrews | [netdata @ gentoo](https://github.com/gentoo/gentoo/tree/master/net-analyzer/netdata) |
-| Debian | Release | @lhw @FedericoCeratto | [netdata @ debian](http://salsa.debian.org/debian/netdata) |
-| Slackware | Release | @willysr | [netdata @ slackbuilds](https://slackbuilds.org/repository/14.2/system/netdata/) |
-| Ubuntu | | | |
-| Red Hat / Fedora / Centos | | | |
-| SUSE SLE / openSUSE Tumbleweed & Leap | | | [netdata @ SUSE OpenBuildService](https://software.opensuse.org/package/netdata) |
-
----
-## FreeBSD
-
-| System | Initial PR | Core Developer | Package Maintainer
-|:-:|:-:|:-:|:-:|
-FreeBSD|#1321|@vlvkobal|@mmokhi
-
----
-## MacOS
-
-| System | URL | Core Developer | Package Maintainer
-|:-:|:-:|:-:|:-:|
-MacOS Homebrew Formula|[link](https://github.com/Homebrew/homebrew-core/blob/master/Formula/netdata.rb)|@vlvkobal|@rickard-von-essen
-
----
-## Unofficial Linux Packages
-
-| Linux Distribution | Netdata Version | Maintainer | Related URL |
-| :-: | :-: | :-: | :-- |
-| Ubuntu | Release | @gslin | [netdata @ gslin ppa](https://launchpad.net/~gslin/+archive/ubuntu/netdata) https://github.com/netdata/netdata/issues/69#issuecomment-217458543 |
-
----
-## Embedded Linux
-
-| Embedded Linux | Netdata Version | Maintainer | Related URL |
-| :-: | :-: | :-: | :-- |
-| ASUSTOR NAS | ? | William Lin | https://www.asustor.com/apps/app_detail?id=532 |
-| OpenWRT | Release | @nitroshift | [openwrt package](https://github.com/openwrt/packages/tree/master/admin/netdata) |
-| ReadyNAS | Release | @NAStools | https://github.com/nastools/netdata |
-| QNAP | Release | QNAP_Stephane | https://forum.qnap.com/viewtopic.php?t=121518 |
-| DietPi | Release | @Fourdee | https://github.com/Fourdee/DietPi |
-
----
-## Linux Containers
-
-| Containers | Netdata Version | Maintainer | Related URL |
-| :-: | :-: | :-: | :-- |
-| Docker | Git | @titpetric | https://github.com/titpetric/netdata |
-
----
-## Automation Systems
-
-| Automation Systems | Netdata Version | Maintainer | Related URL |
-| :-: | :-: | :-: | :-- |
-| Ansible | git | @jffz | https://galaxy.ansible.com/jffz/netdata/ |
-| Chef | ? | @sergiopena | https://github.com/sergiopena/netdata-cookbook |
-
----
-## Packages summary from repology.org
-
-[![Packaging status](https://repology.org/badge/vertical-allrepos/netdata.svg)](https://repology.org/metapackage/netdata/versions)
-
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fpackaging%2Fmaintainers%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/packaging/makeself/README.md b/packaging/makeself/README.md
deleted file mode 100644
index eb4c380b..00000000
--- a/packaging/makeself/README.md
+++ /dev/null
@@ -1,48 +0,0 @@
-# netdata static binary build
-
-To build the static binary 64-bit distribution package, run:
-
-```bash
-$ cd /path/to/netdata.git
-$ ./packaging/makeself/build-x86_64-static.sh
-```
-
-The program will:
-
-1. setup a new docker container with Alpine Linux
-2. install the required alpine packages (the build environment, needed libraries, etc)
-3. download and compile third party apps that are packaged with netdata (`bash`, `curl`, etc)
-4. compile netdata
-
-Once finished, a file named `netdata-vX.X.X-gGITHASH-x86_64-DATE-TIME.run` will be created in the current directory. This is the netdata binary package that can be run to install netdata on any other computer.
-
----
-
-## building binaries with debug info
-
-To build netdata binaries with debugging / tracing information in them, use:
-
-```bash
-$ cd /path/to/netdata.git
-$ ./packaging/makeself/build-x86_64-static.sh debug
-```
-
-These binaries are not optimized (they are a bit slower), they have certain features disables (like log flood protection), other features enables (like `debug flags`) and are not stripped (the binary files are bigger, since they now include source code tracing information).
-
-#### debugging netdata binaries
-
-Once you have installed a binary package with debugging info, you will need to install `valgrind` and run this command to start netdata:
-
-```bash
-PATH="/opt/netdata/bin:${PATH}" valgrind --undef-value-errors=no /opt/netdata/bin/srv/netdata -D
-```
-
-The above command, will run netdata under `valgrind`. While netdata runs under `valgrind` it will be 10x slower and use a lot more memory.
-
-If netdata crashes, `valgrind` will print a stack trace of the issue. Open a github issue to let us know.
-
-To stop netdata while it runs under `valgrind`, press Control-C on the console.
-
-> If you omit the parameter `--undef-value-errors=no` to valgrind, you will get hundreds of errors about conditional jumps that depend on uninitialized values. This is normal. Valgrind has heuristics to prevent it from printing such errors for system libraries, but for the static netdata binary, all the required libraries are built into netdata. So, valgrind cannot appply its heuristics and prints them.
-
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fmakeself%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/packaging/makeself/build-x86_64-static.sh b/packaging/makeself/build-x86_64-static.sh
deleted file mode 100755
index 69ddf2bf..00000000
--- a/packaging/makeself/build-x86_64-static.sh
+++ /dev/null
@@ -1,42 +0,0 @@
-#!/usr/bin/env bash
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-. $(dirname "$0")/../installer/functions.sh || exit 1
-
-set -e
-
-DOCKER_CONTAINER_NAME="netdata-package-x86_64-static-alpine37"
-
-if ! sudo docker inspect "${DOCKER_CONTAINER_NAME}" >/dev/null 2>&1
-then
- # To run interactively:
- # sudo docker run -it netdata-package-x86_64-static /bin/sh
- # (add -v host-dir:guest-dir:rw arguments to mount volumes)
- #
- # To remove images in order to re-create:
- # sudo docker rm -v $(sudo docker ps -a -q -f status=exited)
- # sudo docker rmi netdata-package-x86_64-static
- #
- # This command maps the current directory to
- # /usr/src/netdata.git
- # inside the container and runs the script install-alpine-packages.sh
- # (also inside the container)
- #
- run sudo docker run -v $(pwd):/usr/src/netdata.git:rw alpine:3.7 \
- /bin/sh /usr/src/netdata.git/packaging/makeself/install-alpine-packages.sh
-
- # save the changes made permanently
- id=$(sudo docker ps -l -q)
- run sudo docker commit ${id} "${DOCKER_CONTAINER_NAME}"
-fi
-
-# Run the build script inside the container
-run sudo docker run -a stdin -a stdout -a stderr -i -t -v \
- $(pwd):/usr/src/netdata.git:rw \
- "${DOCKER_CONTAINER_NAME}" \
- /bin/sh /usr/src/netdata.git/packaging/makeself/build.sh "${@}"
-
-if [ "${USER}" ]
- then
- sudo chown -R "${USER}" .
-fi
diff --git a/packaging/makeself/build.sh b/packaging/makeself/build.sh
deleted file mode 100755
index e5804c52..00000000
--- a/packaging/makeself/build.sh
+++ /dev/null
@@ -1,61 +0,0 @@
-#!/usr/bin/env sh
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# -----------------------------------------------------------------------------
-# parse command line arguments
-
-export NETDATA_BUILD_WITH_DEBUG=0
-
-while [ ! -z "${1}" ]
-do
- case "${1}" in
- debug)
- export NETDATA_BUILD_WITH_DEBUG=1
- ;;
-
- *)
- ;;
- esac
-
- shift
-done
-
-
-# -----------------------------------------------------------------------------
-
-# First run install-alpine-packages.sh under alpine linux to install
-# the required packages. build-x86_64-static.sh will do this for you
-# using docker.
-
-cd $(dirname "$0") || exit 1
-
-# if we don't run inside the netdata repo
-# download it and run from it
-if [ ! -f ../../netdata-installer.sh ]
-then
- git clone https://github.com/netdata/netdata.git netdata.git || exit 1
- cd netdata.git/makeself || exit 1
- ./build.sh "$@"
- exit $?
-fi
-
-cat >&2 <<EOF
-
-This program will create a self-extracting shell package containing
-a statically linked netdata, able to run on any 64bit Linux system,
-without any dependencies from the target system.
-
-It can be used to have netdata running in no-time, or in cases the
-target Linux system cannot compile netdata.
-
-EOF
-
-# read -p "Press ENTER to continue > "
-
-if [ ! -d tmp ]
- then
- mkdir tmp || exit 1
-fi
-
-./run-all-jobs.sh "$@"
-exit $?
diff --git a/packaging/makeself/functions.sh b/packaging/makeself/functions.sh
deleted file mode 100755
index a0b72223..00000000
--- a/packaging/makeself/functions.sh
+++ /dev/null
@@ -1,62 +0,0 @@
-#!/usr/bin/env bash
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# -----------------------------------------------------------------------------
-
-# allow running the jobs by hand
-[ -z "${NETDATA_BUILD_WITH_DEBUG}" ] && export NETDATA_BUILD_WITH_DEBUG=0
-[ -z "${NETDATA_INSTALL_PATH}" ] && export NETDATA_INSTALL_PATH="${1-/opt/netdata}"
-[ -z "${NETDATA_MAKESELF_PATH}" ] && export NETDATA_MAKESELF_PATH="$(dirname "${0}")/../.."
-[ "${NETDATA_MAKESELF_PATH:0:1}" != "/" ] && export NETDATA_MAKESELF_PATH="$(pwd)/${NETDATA_MAKESELF_PATH}"
-[ -z "${NETDATA_SOURCE_PATH}" ] && export NETDATA_SOURCE_PATH="${NETDATA_MAKESELF_PATH}/../.."
-export NULL=
-
-# make sure the path does not end with /
-if [ "${NETDATA_INSTALL_PATH:$(( ${#NETDATA_INSTALL_PATH} - 1)):1}" = "/" ]
- then
- export NETDATA_INSTALL_PATH="${NETDATA_INSTALL_PATH:0:$(( ${#NETDATA_INSTALL_PATH} - 1))}"
-fi
-
-# find the parent directory
-export NETDATA_INSTALL_PARENT="$(dirname "${NETDATA_INSTALL_PATH}")"
-
-# -----------------------------------------------------------------------------
-
-# bash strict mode
-set -euo pipefail
-
-# -----------------------------------------------------------------------------
-
-fetch() {
- local dir="${1}" url="${2}"
- local tar="${dir}.tar.gz"
-
- if [ ! -f "${NETDATA_MAKESELF_PATH}/tmp/${tar}" ]
- then
- run wget -O "${NETDATA_MAKESELF_PATH}/tmp/${tar}" "${url}"
- fi
-
- if [ ! -d "${NETDATA_MAKESELF_PATH}/tmp/${dir}" ]
- then
- cd "${NETDATA_MAKESELF_PATH}/tmp"
- run tar -zxpf "${tar}"
- cd -
- fi
-
- run cd "${NETDATA_MAKESELF_PATH}/tmp/${dir}"
-}
-
-# -----------------------------------------------------------------------------
-
-# load the functions of the netdata-installer.sh
-. "${NETDATA_SOURCE_PATH}/packaging/installer/functions.sh"
-
-# -----------------------------------------------------------------------------
-
-# debug
-echo "ME=${0}"
-echo "NETDATA_INSTALL_PARENT=${NETDATA_INSTALL_PARENT}"
-echo "NETDATA_INSTALL_PATH=${NETDATA_INSTALL_PATH}"
-echo "NETDATA_MAKESELF_PATH=${NETDATA_MAKESELF_PATH}"
-echo "NETDATA_SOURCE_PATH=${NETDATA_SOURCE_PATH}"
-echo "PROCESSORS=$(find_processors)"
diff --git a/packaging/makeself/install-alpine-packages.sh b/packaging/makeself/install-alpine-packages.sh
deleted file mode 100755
index bcb971f8..00000000
--- a/packaging/makeself/install-alpine-packages.sh
+++ /dev/null
@@ -1,48 +0,0 @@
-#!/usr/bin/env sh
-#
-# Installation script for the alpine host
-# to prepare the static binary
-#
-# Copyright: SPDX-License-Identifier: GPL-3.0-or-later
-#
-# Author: Paul Emm. Katsoulakis <paul@netdata.cloud>
-
-# Packaging update
-apk update
-
-# Add required APK packages
-apk add --no-cache \
- bash \
- wget \
- curl \
- ncurses \
- git \
- netcat-openbsd \
- alpine-sdk \
- autoconf \
- automake \
- gcc \
- make \
- libtool \
- pkgconfig \
- util-linux-dev \
- openssl-dev \
- gnutls-dev \
- zlib-dev \
- libmnl-dev \
- libnetfilter_acct-dev \
- libuv-dev \
- lz4-dev \
- openssl-dev \
- || exit 1
-
-# Judy doesnt seem to be available on the repositories, download manually and install it
-export JUDY_VER="1.0.5"
-wget -O /judy.tar.gz http://downloads.sourceforge.net/project/judy/judy/Judy-${JUDY_VER}/Judy-${JUDY_VER}.tar.gz
-cd /
-tar -xf judy.tar.gz
-rm judy.tar.gz
-cd /judy-${JUDY_VER}
-CFLAGS="-O2 -s" CXXFLAGS="-O2 -s" ./configure
-make
-make install;
diff --git a/packaging/makeself/install-or-update.sh b/packaging/makeself/install-or-update.sh
deleted file mode 100755
index 165e7920..00000000
--- a/packaging/makeself/install-or-update.sh
+++ /dev/null
@@ -1,234 +0,0 @@
-#!/usr/bin/env bash
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-. $(dirname "${0}")/functions.sh
-
-export LC_ALL=C
-umask 002
-
-# Be nice on production environments
-renice 19 $$ >/dev/null 2>/dev/null
-
-# -----------------------------------------------------------------------------
-
-STARTIT=1
-
-while [ ! -z "${1}" ]
-do
- if [ "${1}" = "--dont-start-it" ]
- then
- STARTIT=0
- else
- echo >&2 "Unknown option '${1}'. Ignoring it."
- fi
- shift
-done
-
-deleted_stock_configs=0
-if [ ! -f "etc/netdata/.installer-cleanup-of-stock-configs-done" ]
-then
-
- # -----------------------------------------------------------------------------
- progress "Deleting stock configuration files from user configuration directory"
-
- declare -A configs_signatures=()
- source "system/configs.signatures"
-
- if [ ! -d etc/netdata ]
- then
- run mkdir -p etc/netdata
- fi
-
- md5sum="$(which md5sum 2>/dev/null || command -v md5sum 2>/dev/null || command -v md5 2>/dev/null)"
- for x in $(find etc -type f)
- do
- # find it relative filename
- f="${x/etc\/netdata\//}"
-
- # find the stock filename
- t="${f/.conf.old/.conf}"
- t="${t/.conf.orig/.conf}"
-
- if [ ! -z "${md5sum}" ]
- then
- # find the checksum of the existing file
- md5="$( ${md5sum} <"${x}" | cut -d ' ' -f 1)"
- #echo >&2 "md5: ${md5}"
-
- # check if it matches
- if [ "${configs_signatures[${md5}]}" = "${t}" ]
- then
- # it matches the default
- run rm -f "${x}"
- deleted_stock_configs=$(( deleted_stock_configs + 1 ))
- fi
- fi
- done
-
- touch "etc/netdata/.installer-cleanup-of-stock-configs-done"
-fi
-
-# -----------------------------------------------------------------------------
-progress "Attempt to create user/group netdata/netadata"
-
-NETDATA_WANTED_GROUPS="docker nginx varnish haproxy adm nsd proxy squid ceph nobody"
-NETDATA_ADDED_TO_GROUPS=""
-# Default user/group
-NETDATA_USER="root"
-NETDATA_GROUP="root"
-
-if portable_add_group netdata; then
- if portable_add_user netdata "/opt/netdata"; then
- progress "Add user netdata to required user groups"
- for g in ${NETDATA_WANTED_GROUPS}; do
- # shellcheck disable=SC2086
- portable_add_user_to_group ${g} netdata && NETDATA_ADDED_TO_GROUPS="${NETDATA_ADDED_TO_GROUPS} ${g}" || run_failed "Failed to add netdata user to secondary groups"
- done
- NETDATA_USER="netdata"
- NETDATA_GROUP="netdata"
- else
- run_failed "I could not add user netdata, will be using root"
- fi
-else
- run_failed "I could not add group netdata, so no user netdata will be created as well. Netdata run as root:root"
-fi
-
-# -----------------------------------------------------------------------------
-progress "Check SSL certificates paths"
-
-if [ ! -f "/etc/ssl/certs/ca-certificates.crt" ]
-then
- if [ ! -f /opt/netdata/.curlrc ]
- then
- cacert=
-
- # CentOS
- [ -f "/etc/ssl/certs/ca-bundle.crt" ] && cacert="/etc/ssl/certs/ca-bundle.crt"
-
- if [ ! -z "${cacert}" ]
- then
- echo "Creating /opt/netdata/.curlrc with cacert=${cacert}"
- echo >/opt/netdata/.curlrc "cacert=${cacert}"
- else
- run_failed "Failed to find /etc/ssl/certs/ca-certificates.crt"
- fi
- fi
-fi
-
-
-# -----------------------------------------------------------------------------
-progress "Install logrotate configuration for netdata"
-
-install_netdata_logrotate || run_failed "Cannot install logrotate file for netdata."
-
-
-# -----------------------------------------------------------------------------
-progress "Install netdata at system init"
-
-install_netdata_service || run_failed "Cannot install netdata init service."
-
-
-# -----------------------------------------------------------------------------
-progress "creating quick links"
-
-dir_should_be_link() {
- local p="${1}" t="${2}" d="${3}" old
-
- old="${PWD}"
- cd "${p}" || return 0
-
- if [ -e "${d}" ]
- then
- if [ -h "${d}" ]
- then
- run rm "${d}"
- else
- run mv -f "${d}" "${d}.old.$$"
- fi
- fi
-
- run ln -s "${t}" "${d}"
- cd "${old}"
-}
-
-dir_should_be_link . bin sbin
-dir_should_be_link usr ../bin bin
-dir_should_be_link usr ../bin sbin
-dir_should_be_link usr . local
-
-dir_should_be_link . etc/netdata netdata-configs
-dir_should_be_link . usr/share/netdata/web netdata-web-files
-dir_should_be_link . usr/libexec/netdata netdata-plugins
-dir_should_be_link . var/lib/netdata netdata-dbs
-dir_should_be_link . var/cache/netdata netdata-metrics
-dir_should_be_link . var/log/netdata netdata-logs
-
-dir_should_be_link etc/netdata ../../usr/lib/netdata/conf.d orig
-
-if [ ${deleted_stock_configs} -gt 0 ]
-then
- dir_should_be_link etc/netdata ../../usr/lib/netdata/conf.d "000.-.USE.THE.orig.LINK.TO.COPY.AND.EDIT.STOCK.CONFIG.FILES"
-fi
-
-
-# -----------------------------------------------------------------------------
-
-progress "create user config directories"
-
-for x in "python.d" "charts.d" "node.d" "health.d" "statsd.d" "custom-plugins.d" "ssl"
-do
- if [ ! -d "etc/netdata/${x}" ]
- then
- run mkdir -p "etc/netdata/${x}" || exit 1
- fi
-done
-
-
-# -----------------------------------------------------------------------------
-progress "fix permissions"
-
-run chmod g+rx,o+rx /opt
-run chown -R ${NETDATA_USER}:${NETDATA_GROUP} /opt/netdata
-
-
-# -----------------------------------------------------------------------------
-
-progress "fix plugin permissions"
-
-for x in apps.plugin freeipmi.plugin ioping cgroup-network
-do
- f="usr/libexec/netdata/plugins.d/${x}"
-
- if [ -f "${f}" ]
- then
- run chown root:${NETDATA_GROUP} "${f}"
- run chmod 4750 "${f}"
- fi
-done
-
-# fix the fping binary
-if [ -f bin/fping ]
-then
- run chown root:${NETDATA_GROUP} bin/fping
- run chmod 4750 bin/fping
-fi
-
-
-# -----------------------------------------------------------------------------
-
-if [ ${STARTIT} -eq 0 ]; then
- create_netdata_conf "/opt/netdata/etc/netdata/netdata.conf"
- netdata_banner "is installed now!"
-else
- progress "starting netdata"
-
- if ! restart_netdata "/opt/netdata/bin/netdata"; then
- create_netdata_conf "/opt/netdata/etc/netdata/netdata.conf"
- netdata_banner "is installed and running now!"
- else
- create_netdata_conf "/opt/netdata/etc/netdata/netdata.conf" "http://localhost:19999/netdata.conf"
- netdata_banner "is installed now!"
- fi
-fi
-run chown "${NETDATA_USER}:${NETDATA_GROUP}" "/opt/netdata/etc/netdata/netdata.conf"
-run chmod 0664 "/opt/netdata/etc/netdata/netdata.conf"
diff --git a/packaging/makeself/jobs/10-prepare-destination.install.sh b/packaging/makeself/jobs/10-prepare-destination.install.sh
deleted file mode 100755
index 06dc82f2..00000000
--- a/packaging/makeself/jobs/10-prepare-destination.install.sh
+++ /dev/null
@@ -1,16 +0,0 @@
-#!/usr/bin/env bash
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-. $(dirname "${0}")/../functions.sh "${@}" || exit 1
-
-[ -d "${NETDATA_INSTALL_PATH}.old" ] && run rm -rf "${NETDATA_INSTALL_PATH}.old"
-[ -d "${NETDATA_INSTALL_PATH}" ] && run mv -f "${NETDATA_INSTALL_PATH}" "${NETDATA_INSTALL_PATH}.old"
-
-run mkdir -p "${NETDATA_INSTALL_PATH}/bin"
-run mkdir -p "${NETDATA_INSTALL_PATH}/usr"
-run cd "${NETDATA_INSTALL_PATH}"
-run ln -s bin sbin
-run cd "${NETDATA_INSTALL_PATH}/usr"
-run ln -s ../bin bin
-run ln -s ../sbin sbin
-run ln -s . local
diff --git a/packaging/makeself/jobs/50-bash-4.4.18.install.sh b/packaging/makeself/jobs/50-bash-4.4.18.install.sh
deleted file mode 100755
index a762d37a..00000000
--- a/packaging/makeself/jobs/50-bash-4.4.18.install.sh
+++ /dev/null
@@ -1,54 +0,0 @@
-#!/usr/bin/env bash
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-. $(dirname "${0}")/../functions.sh "${@}" || exit 1
-
-fetch "bash-4.4.18" "http://ftp.gnu.org/gnu/bash/bash-4.4.18.tar.gz"
-
-run ./configure \
- --prefix=${NETDATA_INSTALL_PATH} \
- --without-bash-malloc \
- --enable-static-link \
- --enable-net-redirections \
- --enable-array-variables \
- --disable-profiling \
- --disable-nls \
-# --disable-rpath \
-# --enable-alias \
-# --enable-arith-for-command \
-# --enable-array-variables \
-# --enable-brace-expansion \
-# --enable-casemod-attributes \
-# --enable-casemod-expansions \
-# --enable-command-timing \
-# --enable-cond-command \
-# --enable-cond-regexp \
-# --enable-directory-stack \
-# --enable-dparen-arithmetic \
-# --enable-function-import \
-# --enable-glob-asciiranges-default \
-# --enable-help-builtin \
-# --enable-job-control \
-# --enable-net-redirections \
-# --enable-process-substitution \
-# --enable-progcomp \
-# --enable-prompt-string-decoding \
-# --enable-readline \
-# --enable-select \
-
-
-run make clean
-run make -j$(find_processors)
-
-cat >examples/loadables/Makefile <<EOF
-all:
-clean:
-install:
-EOF
-
-run make install
-
-if [ ${NETDATA_BUILD_WITH_DEBUG} -eq 0 ]
-then
- run strip ${NETDATA_INSTALL_PATH}/bin/bash
-fi
diff --git a/packaging/makeself/jobs/50-curl-7.60.0.install.sh b/packaging/makeself/jobs/50-curl-7.60.0.install.sh
deleted file mode 100755
index c9159825..00000000
--- a/packaging/makeself/jobs/50-curl-7.60.0.install.sh
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/usr/bin/env bash
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-. $(dirname "${0}")/../functions.sh "${@}" || exit 1
-
-fetch "curl-curl-7_60_0" "https://github.com/curl/curl/archive/curl-7_60_0.tar.gz"
-
-export LDFLAGS="-static"
-export PKG_CONFIG="pkg-config --static"
-
-run ./buildconf
-
-run ./configure \
- --prefix=${NETDATA_INSTALL_PATH} \
- --enable-optimize \
- --disable-shared \
- --enable-static \
- --enable-http \
- --enable-proxy \
- --enable-ipv6 \
- --enable-cookies \
- ${NULL}
-
-# Curl autoconf does not honour the curl_LDFLAGS environment variable
-run sed -i -e "s/curl_LDFLAGS =/curl_LDFLAGS = -all-static/" src/Makefile
-
-run make clean
-run make -j$(find_processors)
-run make install
-
-if [ ${NETDATA_BUILD_WITH_DEBUG} -eq 0 ]
-then
- run strip ${NETDATA_INSTALL_PATH}/bin/curl
-fi
diff --git a/packaging/makeself/jobs/50-fping-4.2.install.sh b/packaging/makeself/jobs/50-fping-4.2.install.sh
deleted file mode 100755
index a137753d..00000000
--- a/packaging/makeself/jobs/50-fping-4.2.install.sh
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/usr/bin/env bash
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-. $(dirname "${0}")/../functions.sh "${@}" || exit 1
-
-fetch "fping-4.2" "https://github.com/schweikert/fping/releases/download/v4.2/fping-4.2.tar.gz"
-
-export CFLAGS="-static"
-
-run ./configure \
- --prefix=${NETDATA_INSTALL_PATH} \
- --enable-ipv4 \
- --enable-ipv6 \
- ${NULL}
-
-cat >doc/Makefile <<EOF
-all:
-clean:
-install:
-EOF
-
-run make clean
-run make -j$(find_processors)
-run make install
-
-if [ ${NETDATA_BUILD_WITH_DEBUG} -eq 0 ]
-then
- run strip ${NETDATA_INSTALL_PATH}/bin/fping
-fi
diff --git a/packaging/makeself/jobs/50-ioping-1.1.install.sh b/packaging/makeself/jobs/50-ioping-1.1.install.sh
deleted file mode 100755
index 83c778c1..00000000
--- a/packaging/makeself/jobs/50-ioping-1.1.install.sh
+++ /dev/null
@@ -1,18 +0,0 @@
-#!/usr/bin/env bash
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-. $(dirname "${0}")/../functions.sh "${@}" || exit 1
-
-fetch "netdata-ioping-43d15a5" "https://github.com/netdata/ioping/tarball/master"
-
-export CFLAGS="-static"
-
-run make clean
-run make -j$(find_processors)
-run mkdir -p ${NETDATA_INSTALL_PATH}/usr/libexec/netdata/plugins.d/
-run install -o root -g root -m 4750 ioping ${NETDATA_INSTALL_PATH}/usr/libexec/netdata/plugins.d/
-
-if [ ${NETDATA_BUILD_WITH_DEBUG} -eq 0 ]
-then
- run strip ${NETDATA_INSTALL_PATH}/usr/libexec/netdata/plugins.d/ioping
-fi
diff --git a/packaging/makeself/jobs/70-netdata-git.install.sh b/packaging/makeself/jobs/70-netdata-git.install.sh
deleted file mode 100755
index 80fba315..00000000
--- a/packaging/makeself/jobs/70-netdata-git.install.sh
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/usr/bin/env bash
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-. ${NETDATA_MAKESELF_PATH}/functions.sh "${@}" || exit 1
-
-cd "${NETDATA_SOURCE_PATH}" || exit 1
-
-if [ ${NETDATA_BUILD_WITH_DEBUG} -eq 0 ]
-then
- export CFLAGS="-static -O3"
-else
- export CFLAGS="-static -O1 -ggdb -Wall -Wextra -Wformat-signedness -fstack-protector-all -D_FORTIFY_SOURCE=2 -DNETDATA_INTERNAL_CHECKS=1"
-# export CFLAGS="-static -O1 -ggdb -Wall -Wextra -Wformat-signedness"
-fi
-
-# We export this to 'yes', installer sets this to .environment.
-# The updater consumes this one, so that it can tell whether it should update a static install or a non-static one
-export IS_NETDATA_STATIC_BINARY="yes"
-
-run ./netdata-installer.sh --install "${NETDATA_INSTALL_PARENT}" \
- --dont-wait \
- --dont-start-it \
- ${NULL}
-
-if [ ${NETDATA_BUILD_WITH_DEBUG} -eq 0 ]
-then
- run strip ${NETDATA_INSTALL_PATH}/bin/netdata
- run strip ${NETDATA_INSTALL_PATH}/usr/libexec/netdata/plugins.d/apps.plugin
- run strip ${NETDATA_INSTALL_PATH}/usr/libexec/netdata/plugins.d/cgroup-network
-fi
diff --git a/packaging/makeself/jobs/99-makeself.install.sh b/packaging/makeself/jobs/99-makeself.install.sh
deleted file mode 100755
index f3056e6a..00000000
--- a/packaging/makeself/jobs/99-makeself.install.sh
+++ /dev/null
@@ -1,99 +0,0 @@
-#!/usr/bin/env bash
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-. $(dirname "${0}")/../functions.sh "${@}" || exit 1
-
-run cd "${NETDATA_SOURCE_PATH}" || exit 1
-
-# -----------------------------------------------------------------------------
-# find the netdata version
-
-VERSION="$(git describe 2>/dev/null)"
-if [ -z "${VERSION}" ]; then
- VERSION=$(cat packaging/version)
-fi
-
-if [ "${VERSION}" == "" ]; then
- echo >&2 "Cannot find version number. Create makeself executable from source code with git tree structure."
- exit 1
-fi
-
-# -----------------------------------------------------------------------------
-# copy the files needed by makeself installation
-
-run mkdir -p "${NETDATA_INSTALL_PATH}/system"
-
-run cp \
- packaging/makeself/post-installer.sh \
- packaging/makeself/install-or-update.sh \
- packaging/installer/functions.sh \
- configs.signatures \
- system/netdata-init-d \
- system/netdata-lsb \
- system/netdata-openrc \
- system/netdata.logrotate \
- system/netdata.service \
- "${NETDATA_INSTALL_PATH}/system/"
-
-
-# -----------------------------------------------------------------------------
-# create a wrapper to start our netdata with a modified path
-
-run mkdir -p "${NETDATA_INSTALL_PATH}/bin/srv"
-
-run mv "${NETDATA_INSTALL_PATH}/bin/netdata" \
- "${NETDATA_INSTALL_PATH}/bin/srv/netdata" || exit 1
-
-cat >"${NETDATA_INSTALL_PATH}/bin/netdata" <<EOF
-#!${NETDATA_INSTALL_PATH}/bin/bash
-export NETDATA_BASH_LOADABLES="DISABLE"
-export PATH="${NETDATA_INSTALL_PATH}/bin:\${PATH}"
-exec "${NETDATA_INSTALL_PATH}/bin/srv/netdata" "\${@}"
-EOF
-run chmod 755 "${NETDATA_INSTALL_PATH}/bin/netdata"
-
-
-# -----------------------------------------------------------------------------
-# remove the links to allow untaring the archive
-
-run rm "${NETDATA_INSTALL_PATH}/sbin" \
- "${NETDATA_INSTALL_PATH}/usr/bin" \
- "${NETDATA_INSTALL_PATH}/usr/sbin" \
- "${NETDATA_INSTALL_PATH}/usr/local"
-
-
-# -----------------------------------------------------------------------------
-# create the makeself archive
-
-run sed "s|NETDATA_VERSION|${VERSION}|g" <"${NETDATA_MAKESELF_PATH}/makeself.lsm" >"${NETDATA_MAKESELF_PATH}/makeself.lsm.tmp"
-
-run "${NETDATA_MAKESELF_PATH}/makeself.sh" \
- --gzip \
- --complevel 9 \
- --notemp \
- --needroot \
- --target "${NETDATA_INSTALL_PATH}" \
- --header "${NETDATA_MAKESELF_PATH}/makeself-header.sh" \
- --lsm "${NETDATA_MAKESELF_PATH}/makeself.lsm.tmp" \
- --license "${NETDATA_MAKESELF_PATH}/makeself-license.txt" \
- --help-header "${NETDATA_MAKESELF_PATH}/makeself-help-header.txt" \
- "${NETDATA_INSTALL_PATH}" \
- "${NETDATA_INSTALL_PATH}.gz.run" \
- "netdata, the real-time performance and health monitoring system" \
- ./system/post-installer.sh \
- ${NULL}
-
-run rm "${NETDATA_MAKESELF_PATH}/makeself.lsm.tmp"
-
-# -----------------------------------------------------------------------------
-# copy it to the netdata build dir
-
-FILE="netdata-${VERSION}.gz.run"
-
-run mkdir -p artifacts
-run mv "${NETDATA_INSTALL_PATH}.gz.run" "artifacts/${FILE}"
-
-[ -f netdata-latest.gz.run ] && rm netdata-latest.gz.run
-run ln -s "artifacts/${FILE}" netdata-latest.gz.run
-
-echo >&2 "Self-extracting installer moved to 'artifacts/${FILE}'"
diff --git a/packaging/makeself/makeself-header.sh b/packaging/makeself/makeself-header.sh
deleted file mode 100755
index d77e1717..00000000
--- a/packaging/makeself/makeself-header.sh
+++ /dev/null
@@ -1,554 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-cat << EOF > "$archname"
-#!/bin/sh
-# This script was generated using Makeself $MS_VERSION
-
-ORIG_UMASK=\`umask\`
-if test "$KEEP_UMASK" = n; then
- umask 077
-fi
-
-CRCsum="$CRCsum"
-MD5="$MD5sum"
-TMPROOT=\${TMPDIR:=/tmp}
-USER_PWD="\$PWD"; export USER_PWD
-
-label="$LABEL"
-script="$SCRIPT"
-scriptargs="$SCRIPTARGS"
-licensetxt="$LICENSE"
-helpheader='$HELPHEADER'
-targetdir="$archdirname"
-filesizes="$filesizes"
-keep="$KEEP"
-nooverwrite="$NOOVERWRITE"
-quiet="n"
-accept="n"
-nodiskspace="n"
-export_conf="$EXPORT_CONF"
-
-print_cmd_arg=""
-if type printf > /dev/null; then
- print_cmd="printf"
-elif test -x /usr/ucb/echo; then
- print_cmd="/usr/ucb/echo"
-else
- print_cmd="echo"
-fi
-
-if test -d /usr/xpg4/bin; then
- PATH=/usr/xpg4/bin:\$PATH
- export PATH
-fi
-
-unset CDPATH
-
-MS_Printf()
-{
- \$print_cmd \$print_cmd_arg "\$1"
-}
-
-MS_PrintLicense()
-{
- if test x"\$licensetxt" != x; then
- echo "\$licensetxt"
- if test x"\$accept" != xy; then
- while true
- do
- MS_Printf "Please type y to accept, n otherwise: "
- read yn
- if test x"\$yn" = xn; then
- keep=n
- eval \$finish; exit 1
- break;
- elif test x"\$yn" = xy; then
- break;
- fi
- done
- fi
- fi
-}
-
-MS_diskspace()
-{
- (
- df -kP "\$1" | tail -1 | awk '{ if (\$4 ~ /%/) {print \$3} else {print \$4} }'
- )
-}
-
-MS_dd()
-{
- blocks=\`expr \$3 / 1024\`
- bytes=\`expr \$3 % 1024\`
- dd if="\$1" ibs=\$2 skip=1 obs=1024 conv=sync 2> /dev/null | \\
- { test \$blocks -gt 0 && dd ibs=1024 obs=1024 count=\$blocks ; \\
- test \$bytes -gt 0 && dd ibs=1 obs=1024 count=\$bytes ; } 2> /dev/null
-}
-
-MS_dd_Progress()
-{
- if test x"\$noprogress" = xy; then
- MS_dd \$@
- return \$?
- fi
- file="\$1"
- offset=\$2
- length=\$3
- pos=0
- bsize=4194304
- while test \$bsize -gt \$length; do
- bsize=\`expr \$bsize / 4\`
- done
- blocks=\`expr \$length / \$bsize\`
- bytes=\`expr \$length % \$bsize\`
- (
- dd ibs=\$offset skip=1 2>/dev/null
- pos=\`expr \$pos \+ \$bsize\`
- MS_Printf " 0%% " 1>&2
- if test \$blocks -gt 0; then
- while test \$pos -le \$length; do
- dd bs=\$bsize count=1 2>/dev/null
- pcent=\`expr \$length / 100\`
- pcent=\`expr \$pos / \$pcent\`
- if test \$pcent -lt 100; then
- MS_Printf "\b\b\b\b\b\b\b" 1>&2
- if test \$pcent -lt 10; then
- MS_Printf " \$pcent%% " 1>&2
- else
- MS_Printf " \$pcent%% " 1>&2
- fi
- fi
- pos=\`expr \$pos \+ \$bsize\`
- done
- fi
- if test \$bytes -gt 0; then
- dd bs=\$bytes count=1 2>/dev/null
- fi
- MS_Printf "\b\b\b\b\b\b\b" 1>&2
- MS_Printf " 100%% " 1>&2
- ) < "\$file"
-}
-
-MS_Help()
-{
- cat << EOH >&2
-\${helpheader}Makeself version $MS_VERSION
- 1) Getting help or info about \$0 :
- \$0 --help Print this message
- \$0 --info Print embedded info : title, default target directory, embedded script ...
- \$0 --lsm Print embedded lsm entry (or no LSM)
- \$0 --list Print the list of files in the archive
- \$0 --check Checks integrity of the archive
-
- 2) Running \$0 :
- \$0 [options] [--] [additional arguments to embedded script]
- with following options (in that order)
- --confirm Ask before running embedded script
- --quiet Do not print anything except error messages
- --accept Accept the license
- --noexec Do not run embedded script
- --keep Do not erase target directory after running
- the embedded script
- --noprogress Do not show the progress during the decompression
- --nox11 Do not spawn an xterm
- --nochown Do not give the extracted files to the current user
- --nodiskspace Do not check for available disk space
- --target dir Extract directly to a target directory
- directory path can be either absolute or relative
- --tar arg1 [arg2 ...] Access the contents of the archive through the tar command
- -- Following arguments will be passed to the embedded script
-EOH
-}
-
-MS_Check()
-{
- OLD_PATH="\$PATH"
- PATH=\${GUESS_MD5_PATH:-"\$OLD_PATH:/bin:/usr/bin:/sbin:/usr/local/ssl/bin:/usr/local/bin:/opt/openssl/bin"}
- MD5_ARG=""
- MD5_PATH=\`exec <&- 2>&-; which md5sum || command -v md5sum || type md5sum\`
- test -x "\$MD5_PATH" || MD5_PATH=\`exec <&- 2>&-; which md5 || command -v md5 || type md5\`
- test -x "\$MD5_PATH" || MD5_PATH=\`exec <&- 2>&-; which digest || command -v digest || type digest\`
- PATH="\$OLD_PATH"
-
- if test x"\$quiet" = xn; then
- MS_Printf "Verifying archive integrity..."
- fi
- offset=\`head -n $SKIP "\$1" | wc -c | tr -d " "\`
- verb=\$2
- i=1
- for s in \$filesizes
- do
- crc=\`echo \$CRCsum | cut -d" " -f\$i\`
- if test -x "\$MD5_PATH"; then
- if test x"\`basename \$MD5_PATH\`" = xdigest; then
- MD5_ARG="-a md5"
- fi
- md5=\`echo \$MD5 | cut -d" " -f\$i\`
- if test x"\$md5" = x00000000000000000000000000000000; then
- test x"\$verb" = xy && echo " \$1 does not contain an embedded MD5 checksum." >&2
- else
- md5sum=\`MS_dd_Progress "\$1" \$offset \$s | eval "\$MD5_PATH \$MD5_ARG" | cut -b-32\`;
- if test x"\$md5sum" != x"\$md5"; then
- echo "Error in MD5 checksums: \$md5sum is different from \$md5" >&2
- exit 2
- else
- test x"\$verb" = xy && MS_Printf " MD5 checksums are OK." >&2
- fi
- crc="0000000000"; verb=n
- fi
- fi
- if test x"\$crc" = x0000000000; then
- test x"\$verb" = xy && echo " \$1 does not contain a CRC checksum." >&2
- else
- sum1=\`MS_dd_Progress "\$1" \$offset \$s | CMD_ENV=xpg4 cksum | awk '{print \$1}'\`
- if test x"\$sum1" = x"\$crc"; then
- test x"\$verb" = xy && MS_Printf " CRC checksums are OK." >&2
- else
- echo "Error in checksums: \$sum1 is different from \$crc" >&2
- exit 2;
- fi
- fi
- i=\`expr \$i + 1\`
- offset=\`expr \$offset + \$s\`
- done
- if test x"\$quiet" = xn; then
- echo " All good."
- fi
-}
-
-UnTAR()
-{
- if test x"\$quiet" = xn; then
- tar \$1vf - $UNTAR_EXTRA 2>&1 || { echo " ... Extraction failed." > /dev/tty; kill -15 \$$; }
- else
- tar \$1f - $UNTAR_EXTRA 2>&1 || { echo Extraction failed. > /dev/tty; kill -15 \$$; }
- fi
-}
-
-finish=true
-xterm_loop=
-noprogress=$NOPROGRESS
-nox11=$NOX11
-copy=$COPY
-ownership=y
-verbose=n
-
-initargs="\$@"
-
-while true
-do
- case "\$1" in
- -h | --help)
- MS_Help
- exit 0
- ;;
- -q | --quiet)
- quiet=y
- noprogress=y
- shift
- ;;
- --accept)
- accept=y
- shift
- ;;
- --info)
- echo Identification: "\$label"
- echo Target directory: "\$targetdir"
- echo Uncompressed size: $USIZE KB
- echo Compression: $COMPRESS
- echo Date of packaging: $DATE
- echo Built with Makeself version $MS_VERSION on $OSTYPE
- echo Build command was: "$MS_COMMAND"
- if test x"\$script" != x; then
- echo Script run after extraction:
- echo " " \$script \$scriptargs
- fi
- if test x"$copy" = xcopy; then
- echo "Archive will copy itself to a temporary location"
- fi
- if test x"$NEED_ROOT" = xy; then
- echo "Root permissions required for extraction"
- fi
- if test x"$KEEP" = xy; then
- echo "directory \$targetdir is permanent"
- else
- echo "\$targetdir will be removed after extraction"
- fi
- exit 0
- ;;
- --dumpconf)
- echo LABEL=\"\$label\"
- echo SCRIPT=\"\$script\"
- echo SCRIPTARGS=\"\$scriptargs\"
- echo archdirname=\"$archdirname\"
- echo KEEP=$KEEP
- echo NOOVERWRITE=$NOOVERWRITE
- echo COMPRESS=$COMPRESS
- echo filesizes=\"\$filesizes\"
- echo CRCsum=\"\$CRCsum\"
- echo MD5sum=\"\$MD5\"
- echo OLDUSIZE=$USIZE
- echo OLDSKIP=`expr $SKIP + 1`
- exit 0
- ;;
- --lsm)
-cat << EOLSM
-EOF
-eval "$LSM_CMD"
-cat << EOF >> "$archname"
-EOLSM
- exit 0
- ;;
- --list)
- echo Target directory: \$targetdir
- offset=\`head -n $SKIP "\$0" | wc -c | tr -d " "\`
- for s in \$filesizes
- do
- MS_dd "\$0" \$offset \$s | eval "$GUNZIP_CMD" | UnTAR t
- offset=\`expr \$offset + \$s\`
- done
- exit 0
- ;;
- --tar)
- offset=\`head -n $SKIP "\$0" | wc -c | tr -d " "\`
- arg1="\$2"
- if ! shift 2; then MS_Help; exit 1; fi
- for s in \$filesizes
- do
- MS_dd "\$0" \$offset \$s | eval "$GUNZIP_CMD" | tar "\$arg1" - "\$@"
- offset=\`expr \$offset + \$s\`
- done
- exit 0
- ;;
- --check)
- MS_Check "\$0" y
- exit 0
- ;;
- --confirm)
- verbose=y
- shift
- ;;
- --noexec)
- script=""
- shift
- ;;
- --keep)
- keep=y
- shift
- ;;
- --target)
- keep=y
- targetdir=\${2:-.}
- if ! shift 2; then MS_Help; exit 1; fi
- ;;
- --noprogress)
- noprogress=y
- shift
- ;;
- --nox11)
- nox11=y
- shift
- ;;
- --nochown)
- ownership=n
- shift
- ;;
- --nodiskspace)
- nodiskspace=y
- shift
- ;;
- --xwin)
- if test "$NOWAIT" = n; then
- finish="echo Press Return to close this window...; read junk"
- fi
- xterm_loop=1
- shift
- ;;
- --phase2)
- copy=phase2
- shift
- ;;
- --)
- shift
- break ;;
- -*)
- echo Unrecognized flag : "\$1" >&2
- MS_Help
- exit 1
- ;;
- *)
- break ;;
- esac
-done
-
-if test x"\$quiet" = xy -a x"\$verbose" = xy; then
- echo Cannot be verbose and quiet at the same time. >&2
- exit 1
-fi
-
-if test x"$NEED_ROOT" = xy -a \`id -u\` -ne 0; then
- echo "Administrative privileges required for this archive (use su or sudo)" >&2
- exit 1
-fi
-
-if test x"\$copy" \!= xphase2; then
- MS_PrintLicense
-fi
-
-case "\$copy" in
-copy)
- tmpdir=\$TMPROOT/makeself.\$RANDOM.\`date +"%y%m%d%H%M%S"\`.\$\$
- mkdir "\$tmpdir" || {
- echo "Could not create temporary directory \$tmpdir" >&2
- exit 1
- }
- SCRIPT_COPY="\$tmpdir/makeself"
- echo "Copying to a temporary location..." >&2
- cp "\$0" "\$SCRIPT_COPY"
- chmod +x "\$SCRIPT_COPY"
- cd "\$TMPROOT"
- exec "\$SCRIPT_COPY" --phase2 -- \$initargs
- ;;
-phase2)
- finish="\$finish ; rm -rf \`dirname \$0\`"
- ;;
-esac
-
-if test x"\$nox11" = xn; then
- if tty -s; then # Do we have a terminal?
- :
- else
- if test x"\$DISPLAY" != x -a x"\$xterm_loop" = x; then # No, but do we have X?
- if xset q > /dev/null 2>&1; then # Check for valid DISPLAY variable
- GUESS_XTERMS="xterm gnome-terminal rxvt dtterm eterm Eterm xfce4-terminal lxterminal kvt konsole aterm terminology"
- for a in \$GUESS_XTERMS; do
- if type \$a >/dev/null 2>&1; then
- XTERM=\$a
- break
- fi
- done
- chmod a+x \$0 || echo Please add execution rights on \$0
- if test \`echo "\$0" | cut -c1\` = "/"; then # Spawn a terminal!
- exec \$XTERM -title "\$label" -e "\$0" --xwin "\$initargs"
- else
- exec \$XTERM -title "\$label" -e "./\$0" --xwin "\$initargs"
- fi
- fi
- fi
- fi
-fi
-
-if test x"\$targetdir" = x.; then
- tmpdir="."
-else
- if test x"\$keep" = xy; then
- if test x"\$nooverwrite" = xy && test -d "\$targetdir"; then
- echo "Target directory \$targetdir already exists, aborting." >&2
- exit 1
- fi
- if test x"\$quiet" = xn; then
- echo "Creating directory \$targetdir" >&2
- fi
- tmpdir="\$targetdir"
- dashp="-p"
- else
- tmpdir="\$TMPROOT/selfgz\$\$\$RANDOM"
- dashp=""
- fi
- mkdir \$dashp \$tmpdir || {
- echo 'Cannot create target directory' \$tmpdir >&2
- echo 'You should try option --target dir' >&2
- eval \$finish
- exit 1
- }
-fi
-
-location="\`pwd\`"
-if test x"\$SETUP_NOCHECK" != x1; then
- MS_Check "\$0"
-fi
-offset=\`head -n $SKIP "\$0" | wc -c | tr -d " "\`
-
-if test x"\$verbose" = xy; then
- MS_Printf "About to extract $USIZE KB in \$tmpdir ... Proceed ? [Y/n] "
- read yn
- if test x"\$yn" = xn; then
- eval \$finish; exit 1
- fi
-fi
-
-if test x"\$quiet" = xn; then
- MS_Printf "Uncompressing \$label"
-fi
-res=3
-if test x"\$keep" = xn; then
- trap 'echo Signal caught, cleaning up >&2; cd \$TMPROOT; /bin/rm -rf \$tmpdir; eval \$finish; exit 15' 1 2 3 15
-fi
-
-if test x"\$nodiskspace" = xn; then
- leftspace=\`MS_diskspace \$tmpdir\`
- if test -n "\$leftspace"; then
- if test "\$leftspace" -lt $USIZE; then
- echo
- echo "Not enough space left in "\`dirname \$tmpdir\`" (\$leftspace KB) to decompress \$0 ($USIZE KB)" >&2
- echo "Use --nodiskspace option to skip this check and proceed anyway" >&2
- if test x"\$keep" = xn; then
- echo "Consider setting TMPDIR to a directory with more free space."
- fi
- eval \$finish; exit 1
- fi
- fi
-fi
-
-for s in \$filesizes
-do
- if MS_dd_Progress "\$0" \$offset \$s | eval "$GUNZIP_CMD" | ( cd "\$tmpdir"; umask \$ORIG_UMASK ; UnTAR xp ) 1>/dev/null; then
- if test x"\$ownership" = xy; then
- (cd "\$tmpdir"; chown -R \`id -u\` .; chgrp -R \`id -g\` .)
- fi
- else
- echo >&2
- echo "Unable to decompress \$0" >&2
- eval \$finish; exit 1
- fi
- offset=\`expr \$offset + \$s\`
-done
-if test x"\$quiet" = xn; then
- echo
-fi
-
-cd "\$tmpdir"
-res=0
-if test x"\$script" != x; then
- if test x"\$export_conf" = x"y"; then
- MS_BUNDLE="\$0"
- MS_LABEL="\$label"
- MS_SCRIPT="\$script"
- MS_SCRIPTARGS="\$scriptargs"
- MS_ARCHDIRNAME="\$archdirname"
- MS_KEEP="\$KEEP"
- MS_NOOVERWRITE="\$NOOVERWRITE"
- MS_COMPRESS="\$COMPRESS"
- export MS_BUNDLE MS_LABEL MS_SCRIPT MS_SCRIPTARGS
- export MS_ARCHDIRNAME MS_KEEP MS_NOOVERWRITE MS_COMPRESS
- fi
-
- if test x"\$verbose" = x"y"; then
- MS_Printf "OK to execute: \$script \$scriptargs \$* ? [Y/n] "
- read yn
- if test x"\$yn" = x -o x"\$yn" = xy -o x"\$yn" = xY; then
- eval "\"\$script\" \$scriptargs \"\\\$@\""; res=\$?;
- fi
- else
- eval "\"\$script\" \$scriptargs \"\\\$@\""; res=\$?
- fi
- if test "\$res" -ne 0; then
- test x"\$verbose" = xy && echo "The program '\$script' returned an error code (\$res)" >&2
- fi
-fi
-if test x"\$keep" = xn; then
- cd \$TMPROOT
- /bin/rm -rf \$tmpdir
-fi
-eval \$finish; exit \$res
-EOF
diff --git a/packaging/makeself/makeself-help-header.txt b/packaging/makeself/makeself-help-header.txt
deleted file mode 100644
index bf482c46..00000000
--- a/packaging/makeself/makeself-help-header.txt
+++ /dev/null
@@ -1,44 +0,0 @@
-
- ^
- |.-. .-. .-. .-. . netdata
- | '-' '-' '-' '-' real-time performance monitoring, done right!
- +----+-----+-----+-----+-----+-----+-----+-----+-----+-----+-----+-----+--->
-
- (C) Copyright 2017, Costa Tsaousis
- All rights reserved
- Released under GPL v3+
-
- You are about to install netdata to this system.
- netdata will be installed at:
-
- /opt/netdata
-
- The following changes will be made to your system:
-
- # USERS / GROUPS
- User 'netdata' and group 'netdata' will be added, if not present.
-
- # LOGROTATE
- This file will be installed if logrotate is present.
-
- - /etc/logrotate.d/netdata
-
- # SYSTEM INIT
- This file will be installed if this system runs with systemd:
-
- - /lib/systemd/system/netdata.service
-
- or, for older CentOS, Debian/Ubuntu or OpenRC Gentoo:
-
- - /etc/init.d/netdata will be created
-
-
- This package can also update a netdata installation that has been
- created with another version of it.
-
- Your netdata configuration will be retained.
- After installation, netdata will be (re-)started.
-
- netdata re-distributes a lot of open source software components.
- Check its full license at:
- https://github.com/netdata/netdata/blob/master/LICENSE.md
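
This banner is what makeself shows at the top of the archive's `--help` output. Assuming the static bundle is named `netdata-latest.gz.run` (the filename is illustrative, not taken from this patch), the text above would surface like this:

```sh
# Print the embedded help header together with makeself's standard options
sh netdata-latest.gz.run --help

# Unpack the bundle for inspection without running the installer
sh netdata-latest.gz.run --noexec --target /tmp/netdata-kit
```
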
diff --git a/packaging/makeself/makeself-license.txt b/packaging/makeself/makeself-license.txt
deleted file mode 100644
index bf482c46..00000000
--- a/packaging/makeself/makeself-license.txt
+++ /dev/null
@@ -1,44 +0,0 @@
-
- ^
- |.-. .-. .-. .-. . netdata
- | '-' '-' '-' '-' real-time performance monitoring, done right!
- +----+-----+-----+-----+-----+-----+-----+-----+-----+-----+-----+-----+--->
-
- (C) Copyright 2017, Costa Tsaousis
- All rights reserved
- Released under GPL v3+
-
- You are about to install netdata to this system.
- netdata will be installed at:
-
- /opt/netdata
-
- The following changes will be made to your system:
-
- # USERS / GROUPS
- User 'netdata' and group 'netdata' will be added, if not present.
-
- # LOGROTATE
- This file will be installed if logrotate is present.
-
- - /etc/logrotate.d/netdata
-
- # SYSTEM INIT
- This file will be installed if this system runs with systemd:
-
- - /lib/systemd/system/netdata.service
-
- or, for older CentOS, Debian/Ubuntu or OpenRC Gentoo:
-
- - /etc/init.d/netdata will be created
-
-
- This package can also update a netdata installation that has been
- created with another version of it.
-
- Your netdata configuration will be retained.
- After installation, netdata will be (re-)started.
-
- netdata re-distributes a lot of open source software components.
- Check its full license at:
- https://github.com/netdata/netdata/blob/master/LICENSE.md
diff --git a/packaging/makeself/makeself.lsm b/packaging/makeself/makeself.lsm
deleted file mode 100644
index 6bd4703d..00000000
--- a/packaging/makeself/makeself.lsm
+++ /dev/null
@@ -1,16 +0,0 @@
-Begin3
-Title: netdata
-Version: NETDATA_VERSION
-Description: netdata is a system for distributed real-time performance and health monitoring.
- It provides unparalleled insights, in real-time, of everything happening on the
- system it runs (including applications such as web and database servers), using
- modern interactive web dashboards. netdata is fast and efficient, designed to
- permanently run on all systems (physical & virtual servers, containers, IoT
- devices), without disrupting their core function.
-Keywords: real-time performance and health monitoring
-Author: Costa Tsaousis (costa@tsaousis.gr)
-Maintained-by: Costa Tsaousis (costa@tsaousis.gr)
-Original-site: https://my-netdata.io/
-Platform: Unix
-Copying-policy: GPL
-End
diff --git a/packaging/makeself/makeself.sh b/packaging/makeself/makeself.sh
deleted file mode 100755
index f3cb6997..00000000
--- a/packaging/makeself/makeself.sh
+++ /dev/null
@@ -1,621 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-3.0-or-later
-#
-# Makeself version 2.3.x
-# by Stephane Peter <megastep@megastep.org>
-#
-# Utility to create self-extracting tar.gz archives.
-# The resulting archive is a file holding the tar.gz archive with
-# a small Shell script stub that uncompresses the archive to a temporary
-# directory and then executes a given script from within that directory.
-#
-# Makeself home page: http://makeself.io/
-#
-# Version 2.0 is a rewrite of version 1.0 to make the code easier to read and maintain.
-#
-# Version history :
-# - 1.0 : Initial public release
-# - 1.1 : The archive can be passed parameters that will be passed on to
-# the embedded script, thanks to John C. Quillan
-# - 1.2 : Package distribution, bzip2 compression, more command line options,
-# support for non-temporary archives. Ideas thanks to Francois Petitjean
-# - 1.3 : More patches from Bjarni R. Einarsson and Francois Petitjean:
-# Support for no compression (--nocomp), script is no longer mandatory,
-# automatic launch in an xterm, optional verbose output, and -target
-# archive option to indicate where to extract the files.
-# - 1.4 : Improved UNIX compatibility (Francois Petitjean)
-# Automatic integrity checking, support of LSM files (Francois Petitjean)
-# - 1.5 : Many bugfixes. Optionally disable xterm spawning.
-# - 1.5.1 : More bugfixes, added archive options -list and -check.
-# - 1.5.2 : Cosmetic changes to inform the user of what's going on with big
-# archives (Quake III demo)
-# - 1.5.3 : Check for validity of the DISPLAY variable before launching an xterm.
-# More verbosity in xterms and check for embedded command's return value.
-# Bugfix for Debian 2.0 systems that have a different "print" command.
-# - 1.5.4 : Many bugfixes. Print out a message if the extraction failed.
-# - 1.5.5 : More bugfixes. Added support for SETUP_NOCHECK environment variable to
-# bypass checksum verification of archives.
-# - 1.6.0 : Compute MD5 checksums with the md5sum command (patch from Ryan Gordon)
-# - 2.0 : Brand new rewrite, cleaner architecture, separated header and UNIX ports.
-# - 2.0.1 : Added --copy
-# - 2.1.0 : Allow multiple tarballs to be stored in one archive, and incremental updates.
-# Added --nochown for archives
-# Stopped doing redundant checksums when not necessary
-# - 2.1.1 : Work around insane behavior from certain Linux distros with no 'uncompress' command
-# Cleaned up the code to handle error codes from compress. Simplified the extraction code.
-# - 2.1.2 : Some bug fixes. Use head -n to avoid problems.
-# - 2.1.3 : Bug fixes with command line when spawning terminals.
-# Added --tar for archives, allowing to give arbitrary arguments to tar on the contents of the archive.
-# Added --noexec to prevent execution of embedded scripts.
-# Added --nomd5 and --nocrc to avoid creating checksums in archives.
-# Added command used to create the archive in --info output.
-# Run the embedded script through eval.
-# - 2.1.4 : Fixed --info output.
-# Generate random directory name when extracting files to . to avoid problems. (Jason Trent)
-# Better handling of errors with wrong permissions for the directory containing the files. (Jason Trent)
-# Avoid some race conditions (Ludwig Nussel)
-# Unset the $CDPATH variable to avoid problems if it is set. (Debian)
-# Better handling of dot files in the archive directory.
-# - 2.1.5 : Made the md5sum detection consistent with the header code.
-# Check for the presence of the archive directory
-# Added --encrypt for symmetric encryption through gpg (Eric Windisch)
-# Added support for the digest command on Solaris 10 for MD5 checksums
-# Check for available disk space before extracting to the target directory (Andreas Schweitzer)
-# Allow extraction to run asynchronously (patch by Peter Hatch)
-# Use file descriptors internally to avoid error messages (patch by Kay Tiong Khoo)
-# - 2.1.6 : Replaced one dot per file progress with a realtime progress percentage and a spinning cursor (Guy Baconniere)
-# Added --noprogress to prevent showing the progress during the decompression (Guy Baconniere)
-# Added --target dir to allow extracting directly to a target directory (Guy Baconniere)
-# - 2.2.0 : Many bugfixes, updates and contributions from users. Check out the project page on Github for the details.
-# - 2.3.0 : Option to specify packaging date to enable byte-for-byte reproducibility. (Marc Pawlowsky)
-#
-# (C) 1998-2017 by Stephane Peter <megastep@megastep.org>
-#
-# This software is released under the terms of the GNU GPL version 2 and above
-# Please read the license at http://www.gnu.org/copyleft/gpl.html
-#
-
-MS_VERSION=2.3.1
-MS_COMMAND="$0"
-unset CDPATH
-
-for f in "${1+"$@"}"; do
- MS_COMMAND="$MS_COMMAND \\\\
- \\\"$f\\\""
-done
-
-# For Solaris systems
-if test -d /usr/xpg4/bin; then
- PATH=/usr/xpg4/bin:$PATH
- export PATH
-fi
-
-# Procedures
-
-MS_Usage()
-{
- echo "Usage: $0 [params] archive_dir file_name label startup_script [args]"
- echo "params can be one or more of the following :"
- echo " --version | -v : Print out Makeself version number and exit"
- echo " --help | -h : Print out this help message"
- echo " --tar-quietly : Suppress verbose output from the tar command"
- echo " --quiet | -q : Do not print any messages other than errors."
- echo " --gzip : Compress using gzip (default if detected)"
- echo " --pigz : Compress with pigz"
- echo " --bzip2 : Compress using bzip2 instead of gzip"
- echo " --pbzip2 : Compress using pbzip2 instead of gzip"
- echo " --xz : Compress using xz instead of gzip"
- echo " --lzo : Compress using lzop instead of gzip"
- echo " --lz4 : Compress using lz4 instead of gzip"
- echo " --compress : Compress using the UNIX 'compress' command"
- echo " --complevel lvl : Compression level for gzip pigz xz lzo lz4 bzip2 and pbzip2 (default 9)"
- echo " --base64 : Instead of compressing, encode the data using base64"
- echo " --gpg-encrypt : Instead of compressing, encrypt the data using GPG"
- echo " --gpg-asymmetric-encrypt-sign"
- echo " : Instead of compressing, asymmetrically encrypt and sign the data using GPG"
- echo " --gpg-extra opt : Append more options to the gpg command line"
- echo " --ssl-encrypt : Instead of compressing, encrypt the data using OpenSSL"
- echo " --nocomp : Do not compress the data"
- echo " --notemp : The archive will create archive_dir in the"
- echo " current directory and uncompress in ./archive_dir"
- echo " --needroot : Check that the root user is extracting the archive before proceeding"
- echo " --copy : Upon extraction, the archive will first copy itself to"
- echo " a temporary directory"
- echo " --append : Append more files to an existing Makeself archive"
- echo " The label and startup scripts will then be ignored"
- echo " --target dir : Extract directly to a target directory"
- echo " directory path can be either absolute or relative"
- echo " --nooverwrite : Do not extract the archive if the specified target directory exists"
- echo " --current : Files will be extracted to the current directory"
- echo " Both --current and --target imply --notemp"
- echo " --tar-extra opt : Append more options to the tar command line"
- echo " --untar-extra opt : Append more options to the during the extraction of the tar archive"
- echo " --nomd5 : Don't calculate an MD5 for archive"
- echo " --nocrc : Don't calculate a CRC for archive"
- echo " --header file : Specify location of the header script"
- echo " --follow : Follow the symlinks in the archive"
- echo " --noprogress : Do not show the progress during the decompression"
- echo " --nox11 : Disable automatic spawn of a xterm"
- echo " --nowait : Do not wait for user input after executing embedded"
- echo " program from an xterm"
- echo " --lsm file : LSM file describing the package"
- echo " --license file : Append a license file"
- echo " --help-header file : Add a header to the archive's --help output"
- echo " --packaging-date date"
- echo " : Use provided string as the packaging date"
- echo " instead of the current date."
- echo
- echo " --keep-umask : Keep the umask set to shell default, rather than overriding when executing self-extracting archive."
- echo " --export-conf : Export configuration variables to startup_script"
- echo
- echo "Do not forget to give a fully qualified startup script name"
- echo "(i.e. with a ./ prefix if inside the archive)."
- exit 1
-}
-
-# Default settings
-if type gzip 2>&1 > /dev/null; then
- COMPRESS=gzip
-else
- COMPRESS=Unix
-fi
-COMPRESS_LEVEL=9
-KEEP=n
-CURRENT=n
-NOX11=n
-NOWAIT=n
-APPEND=n
-TAR_QUIETLY=n
-KEEP_UMASK=n
-QUIET=n
-NOPROGRESS=n
-COPY=none
-NEED_ROOT=n
-TAR_ARGS=cvf
-TAR_EXTRA=""
-GPG_EXTRA=""
-DU_ARGS=-ks
-HEADER=`dirname "$0"`/makeself-header.sh
-TARGETDIR=""
-NOOVERWRITE=n
-DATE=`LC_ALL=C date`
-EXPORT_CONF=n
-
-# LSM file stuff
-LSM_CMD="echo No LSM. >> \"\$archname\""
-
-while true
-do
- case "$1" in
- --version | -v)
- echo Makeself version $MS_VERSION
- exit 0
- ;;
- --pbzip2)
- COMPRESS=pbzip2
- shift
- ;;
- --bzip2)
- COMPRESS=bzip2
- shift
- ;;
- --gzip)
- COMPRESS=gzip
- shift
- ;;
- --pigz)
- COMPRESS=pigz
- shift
- ;;
- --xz)
- COMPRESS=xz
- shift
- ;;
- --lzo)
- COMPRESS=lzo
- shift
- ;;
- --lz4)
- COMPRESS=lz4
- shift
- ;;
- --compress)
- COMPRESS=Unix
- shift
- ;;
- --base64)
- COMPRESS=base64
- shift
- ;;
- --gpg-encrypt)
- COMPRESS=gpg
- shift
- ;;
- --gpg-asymmetric-encrypt-sign)
- COMPRESS=gpg-asymmetric
- shift
- ;;
- --gpg-extra)
- GPG_EXTRA="$2"
- if ! shift 2; then MS_Help; exit 1; fi
- ;;
- --ssl-encrypt)
- COMPRESS=openssl
- shift
- ;;
- --nocomp)
- COMPRESS=none
- shift
- ;;
- --complevel)
- COMPRESS_LEVEL="$2"
- if ! shift 2; then MS_Help; exit 1; fi
- ;;
- --notemp)
- KEEP=y
- shift
- ;;
- --copy)
- COPY=copy
- shift
- ;;
- --current)
- CURRENT=y
- KEEP=y
- shift
- ;;
- --tar-extra)
- TAR_EXTRA="$2"
- if ! shift 2; then MS_Help; exit 1; fi
- ;;
- --untar-extra)
- UNTAR_EXTRA="$2"
- if ! shift 2; then MS_Help; exit 1; fi
- ;;
- --target)
- TARGETDIR="$2"
- KEEP=y
- if ! shift 2; then MS_Help; exit 1; fi
- ;;
- --nooverwrite)
- NOOVERWRITE=y
- shift
- ;;
- --needroot)
- NEED_ROOT=y
- shift
- ;;
- --header)
- HEADER="$2"
- if ! shift 2; then MS_Help; exit 1; fi
- ;;
- --license)
- LICENSE=`cat $2`
- if ! shift 2; then MS_Help; exit 1; fi
- ;;
- --follow)
- TAR_ARGS=cvhf
- DU_ARGS=-ksL
- shift
- ;;
- --noprogress)
- NOPROGRESS=y
- shift
- ;;
- --nox11)
- NOX11=y
- shift
- ;;
- --nowait)
- NOWAIT=y
- shift
- ;;
- --nomd5)
- NOMD5=y
- shift
- ;;
- --nocrc)
- NOCRC=y
- shift
- ;;
- --append)
- APPEND=y
- shift
- ;;
- --lsm)
- LSM_CMD="cat \"$2\" >> \"\$archname\""
- if ! shift 2; then MS_Help; exit 1; fi
- ;;
- --packaging-date)
- DATE="$2"
- if ! shift 2; then MS_Help; exit 1; fi
- ;;
- --help-header)
- HELPHEADER=`sed -e "s/'/'\\\\\''/g" $2`
- if ! shift 2; then MS_Help; exit 1; fi
- [ -n "$HELPHEADER" ] && HELPHEADER="$HELPHEADER
-"
- ;;
- --tar-quietly)
- TAR_QUIETLY=y
- shift
- ;;
- --keep-umask)
- KEEP_UMASK=y
- shift
- ;;
- --export-conf)
- EXPORT_CONF=y
- shift
- ;;
- -q | --quiet)
- QUIET=y
- shift
- ;;
- -h | --help)
- MS_Usage
- ;;
- -*)
- echo Unrecognized flag : "$1"
- MS_Usage
- ;;
- *)
- break
- ;;
- esac
-done
-
-if test $# -lt 1; then
- MS_Usage
-else
- if test -d "$1"; then
- archdir="$1"
- else
- echo "Directory $1 does not exist." >&2
- exit 1
- fi
-fi
-archname="$2"
-
-if test "$QUIET" = "y" || test "$TAR_QUIETLY" = "y"; then
- if test "$TAR_ARGS" = "cvf"; then
- TAR_ARGS="cf"
- elif test "$TAR_ARGS" = "cvhf";then
- TAR_ARGS="chf"
- fi
-fi
-
-if test "$APPEND" = y; then
- if test $# -lt 2; then
- MS_Usage
- fi
-
- # Gather the info from the original archive
- OLDENV=`sh "$archname" --dumpconf`
- if test $? -ne 0; then
- echo "Unable to update archive: $archname" >&2
- exit 1
- else
- eval "$OLDENV"
- fi
-else
- if test "$KEEP" = n -a $# = 3; then
- echo "ERROR: Making a temporary archive with no embedded command does not make sense!" >&2
- echo >&2
- MS_Usage
- fi
- # We don't want to create an absolute directory unless a target directory is defined
- if test "$CURRENT" = y; then
- archdirname="."
- elif test x$TARGETDIR != x; then
- archdirname="$TARGETDIR"
- else
- archdirname=`basename "$1"`
- fi
-
- if test $# -lt 3; then
- MS_Usage
- fi
-
- LABEL="$3"
- SCRIPT="$4"
- test "x$SCRIPT" = x || shift 1
- shift 3
- SCRIPTARGS="$*"
-fi
-
-if test "$KEEP" = n -a "$CURRENT" = y; then
- echo "ERROR: It is A VERY DANGEROUS IDEA to try to combine --notemp and --current." >&2
- exit 1
-fi
-
-case $COMPRESS in
-gzip)
- GZIP_CMD="gzip -c$COMPRESS_LEVEL"
- GUNZIP_CMD="gzip -cd"
- ;;
-pigz)
- GZIP_CMD="pigz -$COMPRESS_LEVEL"
- GUNZIP_CMD="gzip -cd"
- ;;
-pbzip2)
- GZIP_CMD="pbzip2 -c$COMPRESS_LEVEL"
- GUNZIP_CMD="bzip2 -d"
- ;;
-bzip2)
- GZIP_CMD="bzip2 -$COMPRESS_LEVEL"
- GUNZIP_CMD="bzip2 -d"
- ;;
-xz)
- GZIP_CMD="xz -c$COMPRESS_LEVEL"
- GUNZIP_CMD="xz -d"
- ;;
-lzo)
- GZIP_CMD="lzop -c$COMPRESS_LEVEL"
- GUNZIP_CMD="lzop -d"
- ;;
-lz4)
- GZIP_CMD="lz4 -c$COMPRESS_LEVEL"
- GUNZIP_CMD="lz4 -d"
- ;;
-base64)
- GZIP_CMD="base64"
- GUNZIP_CMD="base64 -d -i"
- ;;
-gpg)
- GZIP_CMD="gpg $GPG_EXTRA -ac -z$COMPRESS_LEVEL"
- GUNZIP_CMD="gpg -d"
- ;;
-gpg-asymmetric)
- GZIP_CMD="gpg $GPG_EXTRA -z$COMPRESS_LEVEL -es"
- GUNZIP_CMD="gpg --yes -d"
- ;;
-openssl)
- GZIP_CMD="openssl aes-256-cbc -a -salt -md sha256"
- GUNZIP_CMD="openssl aes-256-cbc -d -a -md sha256"
- ;;
-Unix)
- GZIP_CMD="compress -cf"
- GUNZIP_CMD="exec 2>&-; uncompress -c || test \\\$? -eq 2 || gzip -cd"
- ;;
-none)
- GZIP_CMD="cat"
- GUNZIP_CMD="cat"
- ;;
-esac
-
-tmpfile="${TMPDIR:=/tmp}/mkself$$"
-
-if test -f "$HEADER"; then
- oldarchname="$archname"
- archname="$tmpfile"
- # Generate a fake header to count its lines
- SKIP=0
- . "$HEADER"
- SKIP=`cat "$tmpfile" |wc -l`
- # Get rid of any spaces
- SKIP=`expr $SKIP`
- rm -f "$tmpfile"
- if test "$QUIET" = "n";then
- echo Header is $SKIP lines long >&2
- fi
-
- archname="$oldarchname"
-else
- echo "Unable to open header file: $HEADER" >&2
- exit 1
-fi
-
-if test "$QUIET" = "n";then
- echo
-fi
-
-if test "$APPEND" = n; then
- if test -f "$archname"; then
- echo "WARNING: Overwriting existing file: $archname" >&2
- fi
-fi
-
-USIZE=`du $DU_ARGS "$archdir" | awk '{print $1}'`
-
-if test "." = "$archdirname"; then
- if test "$KEEP" = n; then
- archdirname="makeself-$$-`date +%Y%m%d%H%M%S`"
- fi
-fi
-
-test -d "$archdir" || { echo "Error: $archdir does not exist."; rm -f "$tmpfile"; exit 1; }
-if test "$QUIET" = "n";then
- echo About to compress $USIZE KB of data...
- echo Adding files to archive named \"$archname\"...
-fi
-exec 3<> "$tmpfile"
-( cd "$archdir" && ( tar $TAR_EXTRA -$TAR_ARGS - . | eval "$GZIP_CMD" >&3 ) ) || \
- { echo Aborting: archive directory not found or temporary file: "$tmpfile" could not be created.; exec 3>&-; rm -f "$tmpfile"; exit 1; }
-exec 3>&- # try to close the archive
-
-fsize=`cat "$tmpfile" | wc -c | tr -d " "`
-
-# Compute the checksums
-
-md5sum=00000000000000000000000000000000
-crcsum=0000000000
-
-if test "$NOCRC" = y; then
- if test "$QUIET" = "n";then
- echo "skipping crc at user request"
- fi
-else
- crcsum=`cat "$tmpfile" | CMD_ENV=xpg4 cksum | sed -e 's/ /Z/' -e 's/ /Z/' | cut -dZ -f1`
- if test "$QUIET" = "n";then
- echo "CRC: $crcsum"
- fi
-fi
-
-if test "$NOMD5" = y; then
- if test "$QUIET" = "n";then
- echo "skipping md5sum at user request"
- fi
-else
- # Try to locate a MD5 binary
- OLD_PATH=$PATH
- PATH=${GUESS_MD5_PATH:-"$OLD_PATH:/bin:/usr/bin:/sbin:/usr/local/ssl/bin:/usr/local/bin:/opt/openssl/bin"}
- MD5_ARG=""
- MD5_PATH=`exec <&- 2>&-; which md5sum || command -v md5sum || type md5sum`
- test -x "$MD5_PATH" || MD5_PATH=`exec <&- 2>&-; which md5 || command -v md5 || type md5`
- test -x "$MD5_PATH" || MD5_PATH=`exec <&- 2>&-; which digest || command -v digest || type digest`
- PATH=$OLD_PATH
- if test -x "$MD5_PATH"; then
- if test `basename ${MD5_PATH}`x = digestx; then
- MD5_ARG="-a md5"
- fi
- md5sum=`cat "$tmpfile" | eval "$MD5_PATH $MD5_ARG" | cut -b-32`;
- if test "$QUIET" = "n";then
- echo "MD5: $md5sum"
- fi
- else
- if test "$QUIET" = "n";then
- echo "MD5: none, MD5 command not found"
- fi
- fi
-fi
-
-if test "$APPEND" = y; then
- mv "$archname" "$archname".bak || exit
-
- # Prepare entry for new archive
- filesizes="$filesizes $fsize"
- CRCsum="$CRCsum $crcsum"
- MD5sum="$MD5sum $md5sum"
- USIZE=`expr $USIZE + $OLDUSIZE`
- # Generate the header
- . "$HEADER"
- # Append the original data
- tail -n +$OLDSKIP "$archname".bak >> "$archname"
- # Append the new data
- cat "$tmpfile" >> "$archname"
-
- chmod +x "$archname"
- rm -f "$archname".bak
- if test "$QUIET" = "n";then
- echo Self-extractable archive \"$archname\" successfully updated.
- fi
-else
- filesizes="$fsize"
- CRCsum="$crcsum"
- MD5sum="$md5sum"
-
- # Generate the header
- . "$HEADER"
-
- # Append the compressed tar data after the stub
- if test "$QUIET" = "n";then
- echo
- fi
- cat "$tmpfile" >> "$archname"
- chmod +x "$archname"
- if test "$QUIET" = "n";then
- echo Self-extractable archive \"$archname\" successfully created.
- fi
-fi
-rm -f "$tmpfile"
diff --git a/packaging/makeself/post-installer.sh b/packaging/makeself/post-installer.sh
deleted file mode 100755
index 38cc41ef..00000000
--- a/packaging/makeself/post-installer.sh
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# This script is started using the shell of the system
-# and executes our 'install-or-update.sh' script
-# using the netdata supplied, statically linked BASH
-#
-# so, at 'install-or-update.sh' we are always sure
-# we run under BASH v4.
-
-./bin/bash system/install-or-update.sh "${@}"
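
This wrapper is the kind of `startup_script` handed to `makeself.sh`: it sticks to plain `/bin/sh` syntax so any system shell can start it, then re-executes the real installer under the bundled, statically linked bash. Run from the root of an extracted bundle it behaves the same way (the extraction path is an example):

```sh
# From inside an extracted bundle, e.g. after --noexec --target
cd /tmp/netdata-kit
sh ./post-installer.sh   # runs system/install-or-update.sh under ./bin/bash
```
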
diff --git a/packaging/makeself/run-all-jobs.sh b/packaging/makeself/run-all-jobs.sh
deleted file mode 100755
index f7507c2d..00000000
--- a/packaging/makeself/run-all-jobs.sh
+++ /dev/null
@@ -1,42 +0,0 @@
-#!/usr/bin/env bash
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-LC_ALL=C
-umask 002
-
-# be nice
-renice 19 $$ >/dev/null 2>/dev/null
-
-# -----------------------------------------------------------------------------
-# prepare the environment for the jobs
-
-# installation directory
-export NETDATA_INSTALL_PATH="${1-/opt/netdata}"
-
-# our source directory
-export NETDATA_MAKESELF_PATH="$(dirname "${0}")"
-if [ "${NETDATA_MAKESELF_PATH:0:1}" != "/" ]
- then
- export NETDATA_MAKESELF_PATH="$(pwd)/${NETDATA_MAKESELF_PATH}"
-fi
-
-# netdata source directory
-export NETDATA_SOURCE_PATH="${NETDATA_MAKESELF_PATH}/../.."
-
-# make sure ${NULL} is empty
-export NULL=
-
-# -----------------------------------------------------------------------------
-
-cd "${NETDATA_MAKESELF_PATH}" || exit 1
-
-. ./functions.sh "${@}" || exit 1
-
-for x in jobs/*.install.sh
-do
- progress "running ${x}"
- "${x}" "${NETDATA_INSTALL_PATH}"
-done
-
-echo >&2 "All jobs for static packaging done successfully."
-exit 0
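
The loop above runs every `jobs/*.install.sh` in lexical order, passing each one the install prefix as its only argument, and it does not abort if a job fails. A minimal job matching that contract might look like this (the file name and body are illustrative):

```sh
#!/usr/bin/env bash
# jobs/99-example.install.sh - skeleton job for run-all-jobs.sh
# Receives the installation prefix (default /opt/netdata) as $1.

set -e
NETDATA_INSTALL_PATH="${1}"

# Each job performs one self-contained step of the static install.
mkdir -p "${NETDATA_INSTALL_PATH}/etc/netdata"
echo "example job finished for ${NETDATA_INSTALL_PATH}" >&2
```
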
diff --git a/packaging/manual_nightly_deployment.sh b/packaging/manual_nightly_deployment.sh
deleted file mode 100755
index a0999bb1..00000000
--- a/packaging/manual_nightly_deployment.sh
+++ /dev/null
@@ -1,127 +0,0 @@
-#!/usr/bin/env bash
-#
-# This tool allows netdata team to manually deploy nightlies
-# It emulates the nightly operations required for a new version to be published for our users
-#
-# Copyright: SPDX-License-Identifier: GPL-3.0-or-later
-#
-# Author : Pavlos Emm. Katsoulakis <paul@netdata.cloud>
-#
-set -e
-
-# If we are not in netdata git repo, at the top level directory, fail
-TOP_LEVEL=$(basename "$(git rev-parse --show-toplevel)")
-CWD=$(git rev-parse --show-cdup || echo "")
-if [ -n "${CWD}" ] || [ ! "${TOP_LEVEL}" == "netdata" ]; then
- echo "Run as .travis/$(basename "$0") from top level directory of netdata git repository"
- echo "Changelog generation process aborted"
- exit 1
-fi
-
-if [ $# -lt 1 ] || [ $# -gt 2 ]; then
- echo "Run as ./$(basename "$0") [docker|gcs]|all] from the top level directory of netdata GIT repository"
- exit 1
-fi
-
-GSUTIL_BINARY=$(command -v gsutil 2> /dev/null)
-if [ -z "${GSUTIL_BINARY}" ]; then
- echo "No gsutil utility available, you need gsutil deployed to manually deploy to GCS"
- exit 1
-fi;
-
-# Function declarations
-publish_docker() {
-
- # Ensure REPOSITORY present
- if [ -z "${REPOSITORY}" ]; then
- echo "Please provide the repository to deploy the containers:"
- read -r REPOSITORY
- export REPOSITORY
- else
- echo "Docker publishing to ${REPOSITORY}"
- fi
-
- # Ensure DOCKER_USERNAME present
- if [ -z "${DOCKER_USERNAME}" ]; then
- echo "For repository ${REPOSITORY}, Please provide the docker USERNAME to use:"
- read -r DOCKER_USERNAME
- export DOCKER_USERNAME
- else
- echo "Using docker username ${DOCKER_USERNAME}"
- fi
-
- # Ensure DOCKER_PASS present
- if [ -z "${DOCKER_PASS}" ]; then
- echo "Username ${DOCKER_USERNAME} received, now give me the password:"
- read -r -s DOCKER_PASS
- export DOCKER_PASS
- else
- echo "Docker password has already been set to env, using that"
- fi
-
- echo "Building Docker images.."
- packaging/docker/build.sh
-
- echo "Publishing Docker images.."
- packaging/docker/publish.sh
-}
-
-publish_nightly_binaries() {
- echo "Publishing nightly binaries to GCS"
-
- echo "Please select the bucket to sync, from the ones available to you:"
- bucket_list=$(${GSUTIL_BINARY} list | tr '\n' ' ')
- declare -A buckets
- idx=0
- for abucket in ${bucket_list}; do
- echo "${idx}. ${abucket}"
- buckets["${idx}"]=${abucket}
- ((idx=idx+1))
- done
- read -p"Selection>" -r -n 1 selected_bucket
-
- echo "Ok!"
- echo "Syncing artifacts directory contents with GCS bucket: ${buckets[${selected_bucket}]}"
- if [ -d artifacts ]; then
- ${GSUTIL_BINARY} -m rsync -r artifacts "${buckets["${selected_bucket}"]}"
- echo "GCS Sync complete!"
- else
- echo "Directory artifacts does not exist, nothing to do on GCS"
- fi
-}
-
-prepare_and_publish_gcs() {
- # Prepare the artifacts directory
- echo "Preparing artifacts directory contents"
- .travis/create_artifacts.sh
-
- # Publish it to GCS
- publish_nightly_binaries
-
- # Clean up
- echo "Cleaning up repository"
- make clean || echo "Nothing to clean"
- make distclean || echo "Nothing to distclean"
- rm -rf artifacts
-}
-
-# Mandatory variable declarations
-export TRAVIS_REPO_SLUG="netdata/netdata"
-
-echo "Manual nightly deployment procedure started"
-case "$1" in
- "docker")
- publish_docker
- ;;
- "gcs")
- prepare_and_publish_gcs
- ;;
- "all")
- publish_docker
- prepare_and_publish_gcs
- ;;
- *)
- echo "ERROR: Invalid request parameter $1. Valid values are: docker, gcs, all"
- ;;
-esac
-echo "Manual nightly deployment completed!"
diff --git a/packaging/version b/packaging/version
index e17963c5..07c3efad 100644
--- a/packaging/version
+++ b/packaging/version
@@ -1 +1 @@
-v1.16.1
+v1.17.0
diff --git a/registry/Makefile.in b/registry/Makefile.in
new file mode 100644
index 00000000..95412b41
--- /dev/null
+++ b/registry/Makefile.in
@@ -0,0 +1,514 @@
+# Makefile.in generated by automake 1.15.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2017 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+VPATH = @srcdir@
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = registry
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(dist_noinst_DATA) \
+ $(am__DIST_COMMON)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_noinst_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+am__DIST_COMMON = $(srcdir)/Makefile.in
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CUPSCONFIG = @CUPSCONFIG@
+CXX = @CXX@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CXX_BINARY = @CXX_BINARY@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+JSON_CFLAGS = @JSON_CFLAGS@
+JSON_LIBS = @JSON_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
+LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
+LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
+LIBCURL_LIBS = @LIBCURL_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
+LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
+LIBSSL_LIBS = @LIBSSL_LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
+OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
+OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
+OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
+OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
+OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
+OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
+OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
+OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
+OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
+OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
+OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
+OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
+PROTOBUF_LIBS = @PROTOBUF_LIBS@
+PROTOC = @PROTOC@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
+XENLIGHT_LIBS = @XENLIGHT_LIBS@
+YAJL_CFLAGS = @YAJL_CFLAGS@
+YAJL_LIBS = @YAJL_LIBS@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
+
+all: all-am
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu registry/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu registry/Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(DATA)
+installdirs:
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags-am uninstall uninstall-am
+
+.PRECIOUS: Makefile
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/registry/README.md b/registry/README.md
index 73890807..f852809b 100644
--- a/registry/README.md
+++ b/registry/README.md
@@ -1,7 +1,7 @@
# Registry
-The Netdata registry implements the node menu on the top left corner of the netdata dashboards and enables the Netdata cloud features, such as the node view.
-The node menu lists the netdata servers you have visited. The node view offers a lot of additional features on top of the menu,
+The Netdata registry implements the node menu on the top left corner of the Netdata dashboards and enables the Netdata cloud features, such as the node view.
+The node menu lists the Netdata servers you have visited. The node view offers a lot of additional features on top of the menu,
[with many more to come](https://blog.netdata.cloud/posts/netdata-cloud-announcement/).
To enable the global Netdata registry and the cloud features, you need to Sign In to Netdata cloud. By signing in, you opt in to let the registry receive and store
the information described [here](#what-data-does-the-registry-store).
@@ -11,26 +11,26 @@ You can still get the node menu, but not the cloud features, if you [run your ow
Netdata provides distributed monitoring.
-Traditional monitoring solutions centralize all the data to provide unified dashboards across all servers. Before netdata, this was the standard practice. However it has a few issues:
+Traditional monitoring solutions centralize all the data to provide unified dashboards across all servers. Before Netdata, this was the standard practice. However, it has a few issues:
-1. due to the resources required, the number of metrics collected is limited.
-1. for the same reason, the data collection frequency is not that high, at best it will be once every 10 or 15 seconds, at worst every 5 or 10 mins.
-1. the central monitoring solution needs dedicated resources, thus becoming "another bottleneck" in the whole ecosystem. It also requires maintenance, administration, etc.
-1. most centralized monitoring solutions are usually only good for presenting *statistics of past performance* (i.e. cannot be used for real-time performance troubleshooting).
+1. due to the resources required, the number of metrics collected is limited.
+2. for the same reason, the data collection frequency is not that high, at best it will be once every 10 or 15 seconds, at worst every 5 or 10 mins.
+3. the central monitoring solution needs dedicated resources, thus becoming "another bottleneck" in the whole ecosystem. It also requires maintenance, administration, etc.
+4. most centralized monitoring solutions are usually only good for presenting _statistics of past performance_ (i.e. cannot be used for real-time performance troubleshooting).
Netdata follows a different approach:
-1. data collection happens per second
-1. thousands of metrics per server are collected
-1. data do not leave the server where they are collected
-1. netdata servers do not talk to each other
-1. your browser connects all the netdata servers
+1. data collection happens per second
+2. thousands of metrics per server are collected
+3. data do not leave the server where they are collected
+4. Netdata servers do not talk to each other
+5. your browser connects all the Netdata servers
-Using netdata, your monitoring infrastructure is embedded on each server, limiting significantly the need of additional resources. Netdata is blazingly fast, very resource efficient and utilizes server resources that already exist and are spare (on each server). This allows **scaling out** the monitoring infrastructure.
+Using Netdata, your monitoring infrastructure is embedded on each server, significantly limiting the need for additional resources. Netdata is blazingly fast, very resource efficient and utilizes server resources that already exist and are spare (on each server). This allows **scaling out** the monitoring infrastructure.
-However, the netdata approach introduces a few new issues that need to be addressed, one being **the list of netdata we have installed**, i.e. the URLs our netdata servers are listening.
+However, the Netdata approach introduces a few new issues that need to be addressed, one being **the list of Netdata we have installed**, i.e. the URLs on which our Netdata servers are listening.
-To solve this, netdata utilizes a **central registry**. This registry, together with certain browser features, allow netdata to provide unified cross-server dashboards.
+To solve this, Netdata utilizes a **central registry**. This registry, together with certain browser features, allows Netdata to provide unified cross-server dashboards.
For example, when you jump from server to server using the node menu, several session settings (like the currently viewed charts, the current zoom and pan operations on the charts, etc.) are propagated to the new server, so that the new dashboard will come with exactly the same view.
Netdata cloud has a roadmap to [offer many more features](https://blog.netdata.cloud/posts/netdata-cloud-announcement/) over and above the simple node menu.
@@ -38,38 +38,37 @@ Netdata cloud has a roadmap to [offer many more features](https://blog.netdata.c
The registry keeps track of 4 entities:
-1. **machines**: i.e. the netdata installations (a random GUID generated by each netdata the first time it starts; we call this **machine_guid**)
+1. **machines**: i.e. the Netdata installations (a random GUID generated by each Netdata the first time it starts; we call this **machine_guid**)
- For each netdata installation (each `machine_guid`) the registry keeps track of the different URLs it is accessed.
+ For each Netdata installation (each `machine_guid`) the registry keeps track of the different URLs it is accessed.
-2. **persons**: i.e. the web browsers accessing the netdata installations (a random GUID generated by the registry the first time it sees a new web browser; we call this **person_guid**)
+2. **persons**: i.e. the web browsers accessing the Netdata installations (a random GUID generated by the registry the first time it sees a new web browser; we call this **person_guid**)
- For each person, the registry keeps track of the netdata installations it has accessed and their URLs.
+ For each person, the registry keeps track of the Netdata installations it has accessed and their URLs.
-3. **URLs** of netdata installations (as seen by the web browsers)
+3. **URLs** of Netdata installations (as seen by the web browsers)
- For each URL, the registry keeps the URL and nothing more. Each URL is linked to *persons* and *machines*. The only way to find a URL is to know its **machine_guid** or have a **person_guid** it is linked to it.
+ For each URL, the registry keeps the URL and nothing more. Each URL is linked to _persons_ and _machines_. The only way to find a URL is to know its **machine_guid** or to have a **person_guid** that is linked to it.
-4. **accounts**: i.e. the information used to sign-in via one of the available sign-in methods. Depending on the method, this may include an email, an email and a profile picture.
+4. **accounts**: i.e. the information used to sign in via one of the available sign-in methods. Depending on the method, this may include an email, or an email and a profile picture.
-For *persons*/*accounts* and *machines*, the registry keeps links to *URLs*, each link with 2 timestamps (first time seen, last time seen) and a counter (number of times it has been seen).
-*machines*, *persons* and timestamps are stored in the netdata registry regardless of whether you sign in or not.
+For _persons_/_accounts_ and _machines_, the registry keeps links to _URLs_, each link with 2 timestamps (first time seen, last time seen) and a counter (number of times it has been seen).
+_machines_, _persons_ and timestamps are stored in the Netdata registry regardless of whether you sign in or not.
## Who talks to the registry?
Your web browser **only**! If sending this information is against your policies, you can [run your own registry](#run-your-own-registry)
-Your netdata servers do not talk to the registry. This is a UML diagram of its operation:
+Your Netdata servers do not talk to the registry. This is a UML diagram of its operation:
![registry](https://cloud.githubusercontent.com/assets/2662304/19448565/11a70632-94ab-11e6-9d80-f410b4acb797.png)
-
## Which is the default registry?
`https://registry.my-netdata.io`, which is currently served by `https://london.my-netdata.io`. This registry listens to both HTTP and HTTPS requests but the default is HTTPS.
`https://netdata.cloud` is the additional registry endpoint, that enables [the cloud features](https://blog.netdata.cloud/posts/netdata-cloud-announcement/). It only accepts HTTPS.
-### Can this registry handle the global load of netdata installations?
+### Can this registry handle the global load of Netdata installations?
Yeap! The registry can handle 50,000 - 100,000 requests **per second per core** (depending on the type of CPU, the computer's memory bandwidth, etc.). The 50,000 figure was measured on a J1900 (Celeron, 2GHz).
@@ -77,9 +76,9 @@ We believe, it can do it...
## Run your own registry
-**Every netdata can be a registry**. Just pick one and configure it.
+**Every Netdata server can be a registry**. Just pick one and configure it.
-**To turn any netdata into a registry**, edit `/etc/netdata/netdata.conf` and set:
+**To turn any Netdata server into a registry**, edit `/etc/netdata/netdata.conf` and set:
```
[registry]
@@ -87,9 +86,9 @@ We believe, it can do it...
registry to announce = http://your.registry:19999
```
-Restart your netdata to activate it.
+Restart your Netdata to activate it.
-Then, you need to tell **all your other netdata servers to advertise your registry**, instead of the default. To do this, on each of your netdata servers, edit `/etc/netdata/netdata.conf` and set:
+Then, you need to tell **all your other Netdata servers to advertise your registry**, instead of the default. To do this, on each of your Netdata servers, edit `/etc/netdata/netdata.conf` and set:
```
[registry]
@@ -97,11 +96,11 @@ Then, you need to tell **all your other netdata servers to advertise your regist
registry to announce = http://your.registry:19999
```
-Note that we have not enabled the registry on the other servers. Only one netdata (the registry) needs `[registry].enabled = yes`.
+Note that we have not enabled the registry on the other servers. Only one Netdata (the registry) needs `[registry].enabled = yes`.
This is it. You have your registry now.
-You may also want to give your server different names under the node menu (i.e. to have them sorted / grouped). You can change its registry name, by setting on each netdata server:
+You may also want to give your servers different names under the node menu (i.e. to have them sorted / grouped). You can change a server's registry name by setting, on each Netdata server:
```
[registry]
@@ -112,15 +111,16 @@ So this server will appear in the node menu as `Group1 - Master DB`. The max nam
### Limiting access to the registry
-netdata v1.9+ support limiting access to the registry from given IPs, like this:
+Netdata v1.9+ supports limiting access to the registry from given IPs, like this:
+
```
[registry]
allow from = *
```
-`allow from` settings are [netdata simple patterns](../libnetdata/simple_pattern/): string matches that use `*` as wildcard (any number of times) and a `!` prefix for a negative match. So: `allow from = !10.1.2.3 10.*` will allow all IPs in `10.*` except `10.1.2.3`. The order is important: left to right, the first positive or negative match is used.
+`allow from` settings are [Netdata simple patterns](../libnetdata/simple_pattern/): string matches that use `*` as wildcard (any number of times) and a `!` prefix for a negative match. So: `allow from = !10.1.2.3 10.*` will allow all IPs in `10.*` except `10.1.2.3`. The order is important: left to right, the first positive or negative match is used.
-Keep in mind that connections to netdata API ports are filtered by `[web].allow connections from`. So, IPs allowed by `[registry].allow from` should also be allowed by `[web].allow connection from`.
+Keep in mind that connections to Netdata API ports are filtered by `[web].allow connections from`. So, IPs allowed by `[registry].allow from` should also be allowed by `[web].allow connections from`.
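
In other words, the two allow lists must agree before registry requests can get through. A sketch of a `netdata.conf` fragment that restricts both layers to the same network (the addresses are examples):

```
[web]
    allow connections from = localhost 10.*

[registry]
    enabled = yes
    allow from = !10.1.2.3 10.*
```
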
### Where is the registry database stored?
@@ -128,25 +128,25 @@ Keep in mind that connections to netdata API ports are filtered by `[web].allow
There can be up to 2 files:
-- `registry-log.db`, the transaction log
+- `registry-log.db`, the transaction log
- all incoming requests that affect the registry are saved in this file in real-time.
+ all incoming requests that affect the registry are saved in this file in real-time.
-- `registry.db`, the database
+- `registry.db`, the database
- every `[registry].registry save db every new entries` entries in `registry-log.db`, netdata will save its database to `registry.db` and empty `registry-log.db`.
+ every `[registry].registry save db every new entries` entries in `registry-log.db`, Netdata will save its database to `registry.db` and empty `registry-log.db`.
Both files are machine readable text files.
## The future
-The registry opens a whole world of new possibilities for netdata. Check here what we think: https://github.com/netdata/netdata/issues/416
+The registry opens a whole world of new possibilities for Netdata. Check here what we think: <https://github.com/netdata/netdata/issues/416>
## Troubleshooting the registry
-The registry URL should be set to the URL of a netdata dashboard. This server has to have `[registry].enabled = yes`. So, accessing the registry URL directly with your web browser, should present the dashboard of the netdata operating the registry.
+The registry URL should be set to the URL of a Netdata dashboard. This server has to have `[registry].enabled = yes`. So, accessing the registry URL directly with your web browser should present the dashboard of the Netdata server operating the registry.
-To use the registry, your web browser needs to support **third party cookies**, since the cookies are set by the registry while you are browsing the dashboard of another netdata server. The registry, the first time it sees a new web browser it tries to figure if the web browser has cookies enabled or not. It does this by setting a cookie and redirecting the browser back to itself hoping that it will receive the cookie. If it does not receive the cookie, the registry will keep redirecting your web browser back to itself, which after a few redirects will fail with an error like this:
+To use the registry, your web browser needs to support **third party cookies**, since the cookies are set by the registry while you are browsing the dashboard of another Netdata server. The first time the registry sees a new web browser, it tries to figure out whether cookies are enabled. It does this by setting a cookie and redirecting the browser back to itself, hoping to receive the cookie back. If it does not receive the cookie, the registry will keep redirecting your web browser back to itself, which after a few redirects will fail with an error like this:
```
ERROR 409: Cannot ACCESS netdata registry: https://registry.my-netdata.io responded with: {"status":"redirect","registry":"https://registry.my-netdata.io"}
@@ -154,4 +154,4 @@ ERROR 409: Cannot ACCESS netdata registry: https://registry.my-netdata.io respon
This error is printed on your web browser console (press F12 on your browser to see it).
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fregistry%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fregistry%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/streaming/Makefile.in b/streaming/Makefile.in
new file mode 100644
index 00000000..86ec1a16
--- /dev/null
+++ b/streaming/Makefile.in
@@ -0,0 +1,571 @@
+# Makefile.in generated by automake 1.15.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2017 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+VPATH = @srcdir@
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = streaming
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(dist_libconfig_DATA) \
+ $(dist_noinst_DATA) $(am__DIST_COMMON)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
+am__vpath_adj = case $$p in \
+ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
+ *) f=$$p;; \
+ esac;
+am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
+am__install_max = 40
+am__nobase_strip_setup = \
+ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
+am__nobase_strip = \
+ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
+am__nobase_list = $(am__nobase_strip_setup); \
+ for p in $$list; do echo "$$p $$p"; done | \
+ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
+ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
+ if (++n[$$2] == $(am__install_max)) \
+ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
+ END { for (dir in files) print dir, files[dir] }'
+am__base_list = \
+ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
+ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
+am__uninstall_files_from_dir = { \
+ test -z "$$files" \
+ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \
+ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \
+ $(am__cd) "$$dir" && rm -f $$files; }; \
+ }
+am__installdirs = "$(DESTDIR)$(libconfigdir)"
+DATA = $(dist_libconfig_DATA) $(dist_noinst_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+am__DIST_COMMON = $(srcdir)/Makefile.in
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CUPSCONFIG = @CUPSCONFIG@
+CXX = @CXX@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CXX_BINARY = @CXX_BINARY@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+JSON_CFLAGS = @JSON_CFLAGS@
+JSON_LIBS = @JSON_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
+LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
+LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
+LIBCURL_LIBS = @LIBCURL_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
+LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
+LIBSSL_LIBS = @LIBSSL_LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
+OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
+OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
+OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
+OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
+OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
+OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
+OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
+OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
+OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
+OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
+OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
+OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
+PROTOBUF_LIBS = @PROTOBUF_LIBS@
+PROTOC = @PROTOC@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
+XENLIGHT_LIBS = @XENLIGHT_LIBS@
+YAJL_CFLAGS = @YAJL_CFLAGS@
+YAJL_LIBS = @YAJL_LIBS@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+dist_libconfig_DATA = \
+ stream.conf \
+ $(NULL)
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
+
+all: all-am
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu streaming/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu streaming/Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+install-dist_libconfigDATA: $(dist_libconfig_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_libconfig_DATA)'; test -n "$(libconfigdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(libconfigdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(libconfigdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(libconfigdir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(libconfigdir)" || exit $$?; \
+ done
+
+uninstall-dist_libconfigDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_libconfig_DATA)'; test -n "$(libconfigdir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(libconfigdir)'; $(am__uninstall_files_from_dir)
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(DATA)
+installdirs:
+ for dir in "$(DESTDIR)$(libconfigdir)"; do \
+ test -z "$$dir" || $(MKDIR_P) "$$dir"; \
+ done
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am: install-dist_libconfigDATA
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am: uninstall-dist_libconfigDATA
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dist_libconfigDATA install-dvi \
+ install-dvi-am install-exec install-exec-am install-html \
+ install-html-am install-info install-info-am install-man \
+ install-pdf install-pdf-am install-ps install-ps-am \
+ install-strip installcheck installcheck-am installdirs \
+ maintainer-clean maintainer-clean-generic mostlyclean \
+ mostlyclean-generic pdf pdf-am ps ps-am tags-am uninstall \
+ uninstall-am uninstall-dist_libconfigDATA
+
+.PRECIOUS: Makefile
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/streaming/README.md b/streaming/README.md
index 1bfbb236..7fba3552 100644
--- a/streaming/README.md
+++ b/streaming/README.md
@@ -1,40 +1,39 @@
# Streaming and replication
-Each netdata is able to replicate/mirror its database to another netdata, by streaming collected
+Each Netdata is able to replicate/mirror its database to another Netdata, by streaming collected
metrics, in real-time to it. This is quite different to [data archiving to third party time-series
databases](../backends).
-When a netdata streams metrics to another netdata, the receiving one is able to perform everything
-a netdata performs:
+When Netdata streams metrics to another Netdata, the receiving one is able to perform everything a Netdata instance is capable of:
-- visualize them with a dashboard
-- run health checks that trigger alarms and send alarm notifications
-- archive metrics to a backend time-series database
+- visualize them with a dashboard
+- run health checks that trigger alarms and send alarm notifications
+- archive metrics to a backend time-series database
## Supported configurations
-### netdata without a database or web API (headless collector)
+### Netdata without a database or web API (headless collector)
-Local netdata (`slave`), **without any database or alarms**, collects metrics and sends them to
-another netdata (`master`).
+Local Netdata (`slave`), **without any database or alarms**, collects metrics and sends them to
+another Netdata (`master`).
-The node menu shows a list of all "databases streamed to" the master. Clicking one of those links allows the user to view the full dashboard of the `slave` netdata. The URL has the form http://master-host:master-port/host/slave-host/.
+The node menu shows a list of all "databases streamed to" the master. Clicking one of those links allows the user to view the full dashboard of the `slave` Netdata. The URL has the form <http://master-host:master-port/host/slave-host/>.
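As a quick sketch (host names and port are illustrative), the mirrored dashboard can be fetched through the master like this:

```
# fetch the slave's dashboard through the master
curl "http://master-host:19999/host/slave-host/"
```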
Alarms for the `slave` are served by the `master`.
In this mode the `slave` is just a plain data collector. It spawns all external plugins, but instead
of maintaining a local database and accepting dashboard requests, it streams all metrics to the
-`master`. The memory footprint is reduced significantly, to between 6 MiB and 40 MiB, depending on the enabled plugins. To reduce the memory usage as much as possible, refer to [running netdata in embedded devices](../docs/Performance.md#running-netdata-in-embedded-devices).
+`master`. The memory footprint is reduced significantly, to between 6 MiB and 40 MiB, depending on the enabled plugins. To reduce the memory usage as much as possible, refer to [running Netdata in embedded devices](../docs/Performance.md#running-netdata-in-embedded-devices).
The same `master` can collect data for any number of `slaves`.
### database replication
-Local netdata (`slave`), **with a local database (and possibly alarms)**, collects metrics and
-sends them to another netdata (`master`).
+Local Netdata (`slave`), **with a local database (and possibly alarms)**, collects metrics and
+sends them to another Netdata (`master`).
-The user can use all the functions **at both** http://slave-ip:slave-port/ and
-http://master-host:master-port/host/slave-host/.
+The user can use all the functions **at both** <http://slave-ip:slave-port/> and
+<http://master-host:master-port/host/slave-host/>.
The `slave` and the `master` may have different data retention policies for the same metrics.
@@ -43,15 +42,15 @@ each can have different alarms configurations or have alarms disabled).
Note that custom chart names configured on the `slave` should be in the form `type.name` to work correctly. The `master` will truncate the `type` part and substitute the original chart `type` to store the name in the database.
-### netdata proxies
+### Netdata proxies
-Local netdata (`slave`), with or without a database, collects metrics and sends them to another
-netdata (`proxy`), which may or may not maintain a database, which forwards them to another
-netdata (`master`).
+Local Netdata (`slave`), with or without a database, collects metrics and sends them to another
+Netdata (`proxy`), which may or may not maintain a database, which forwards them to another
+Netdata (`master`).
Alarms for the slave can be triggered by any of the involved hosts that maintain a database.
-Any number of daisy chaining netdata servers are supported, each with or without a database and
+Any number of daisy-chained Netdata servers are supported, each with or without a database and
with or without alarms for the `slave` metrics.
### mix and match with backends
@@ -61,17 +60,17 @@ This allows quite complex setups.
Example:
-1. netdata `A`, `B` do not maintain a database and stream metrics to netdata `C`(live streaming functionality, i.e. this PR)
-2. netdata `C` maintains a database for `A`, `B`, `C` and archives all metrics to `graphite` with 10 second detail (backends functionality)
-3. netdata `C` also streams data for `A`, `B`, `C` to netdata `D`, which also collects data from `E`, `F` and `G` from another DMZ (live streaming functionality, i.e. this PR)
-4. netdata `D` is just a proxy, without a database, that streams all data to a remote site at netdata `H`
-5. netdata `H` maintains a database for `A`, `B`, `C`, `D`, `E`, `F`, `G`, `H` and sends all data to `opentsdb` with 5 seconds detail (backends functionality)
-6. alarms are triggered by `H` for all hosts
-7. users can use all the netdata that maintain a database to view metrics (i.e. at `H` all hosts can be viewed).
+1. Netdata `A`, `B` do not maintain a database and stream metrics to Netdata `C` (live streaming functionality, i.e. this PR)
+2. Netdata `C` maintains a database for `A`, `B`, `C` and archives all metrics to `graphite` with 10 second detail (backends functionality)
+3. Netdata `C` also streams data for `A`, `B`, `C` to Netdata `D`, which also collects data from `E`, `F` and `G` from another DMZ (live streaming functionality, i.e. this PR)
+4. Netdata `D` is just a proxy, without a database, that streams all data to a remote site at Netdata `H`
+5. Netdata `H` maintains a database for `A`, `B`, `C`, `D`, `E`, `F`, `G`, `H` and sends all data to `opentsdb` with 5 seconds detail (backends functionality)
+6. alarms are triggered by `H` for all hosts
+7. users can use all the Netdata that maintain a database to view metrics (i.e. at `H` all hosts can be viewed).
## Configuration
-These are options that affect the operation of netdata in this area:
+These are options that affect the operation of Netdata in this area:
```
[global]
@@ -87,7 +86,7 @@ monitoring (there cannot be health monitoring without a database).
accept a streaming request every seconds = 0
```
-`[web].mode = none` disables the API (netdata will not listen to any ports).
+`[web].mode = none` disables the API (Netdata will not listen to any ports).
This also disables the registry (there cannot be a registry without an API).
`accept a streaming request every seconds` can be used to set a limit on how often a master Netdata server will accept streaming requests from the slaves. 0 sets no limit; 1 allows at most one request per second. If this is set, you may see error log entries "... too busy to accept new streaming request. Will be allowed in X secs".
@@ -107,18 +106,18 @@ this host).
A new file is introduced: [stream.conf](stream.conf) (to edit it on your system run
`/etc/netdata/edit-config stream.conf`). This file holds streaming configuration for both the
-sending and the receiving netdata.
+sending and the receiving Netdata.
-API keys are used to authorize the communication of a pair of sending-receiving netdata.
-Once the communication is authorized, the sending netdata can push metrics for any number of hosts.
+API keys are used to authorize the communication of a pair of sending-receiving Netdata.
+Once the communication is authorized, the sending Netdata can push metrics for any number of hosts.
You can generate an API key with the command `uuidgen`. API keys are just random GUIDs.
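For example (the output is random; the GUID shown here is only illustrative):

```
# generate a GUID to use as a streaming API key
uuidgen
# 11111111-2222-3333-4444-555555555555
```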
-You can use the same API key on all your netdata, or use a different API key for any pair of
-sending-receiving netdata.
+You can use the same API key on all your Netdata, or use a different API key for any pair of
+sending-receiving Netdata.
##### options for the sending node
-This is the section for the sending netdata. On the receiving node, `[stream].enabled` can be `no`.
+This is the section for the sending Netdata. On the receiving node, `[stream].enabled` can be `no`.
If it is `yes`, the receiving node will also stream the metrics to another node (i.e. it will be
a `proxy`).
@@ -131,12 +130,12 @@ a `proxy`).
This is an overview of how these options can be combined:
-target | memory<br/>mode | web<br/>mode | stream<br/>enabled | backend | alarms | dashboard
--------|:-----------:|:---:|:------:|:-------:|:---------:|:----:
-headless collector|`none`|`none`|`yes`|only for `data source = as collected`|not possible|no
-headless proxy|`none`|not `none`|`yes`|only for `data source = as collected`|not possible|no
-proxy with db|not `none`|not `none`|`yes`|possible|possible|yes
-central netdata|not `none`|not `none`|`no`|possible|possible|yes
+| target|memory<br/>mode|web<br/>mode|stream<br/>enabled|backend|alarms|dashboard|
+|------|:-------------:|:----------:|:----------------:|:-----:|:----:|:-------:|
+| headless collector|`none`|`none`|`yes`|only for `data source = as collected`|not possible|no|
+| headless proxy|`none`|not `none`|`yes`|only for `data source = as collected`|not possible|no|
+| proxy with db|not `none`|not `none`|`yes`|possible|possible|yes|
+| central netdata|not `none`|not `none`|`no`|possible|possible|yes|
For the options to encrypt the data stream between the slave and the master, refer to [securing the communication](#securing-streaming-communications).
@@ -170,22 +169,22 @@ You can also add sections like this:
```
The above is the configuration of a single host, at the receiver end. `MACHINE_GUID` is
-the unique id the netdata generating the metrics (i.e. the netdata that originally collects
-them `/var/lib/netdata/registry/netdata.unique.id`). So, metrics for netdata `A` that pass through
-any number of other netdata, will have the same `MACHINE_GUID`.
+the unique id of the Netdata instance generating the metrics (i.e. the Netdata that originally collects
+them; see `/var/lib/netdata/registry/netdata.unique.id`). So, metrics for Netdata `A` that pass through
+any number of other Netdata instances will have the same `MACHINE_GUID`.
You can also use `default memory mode = dbengine` for an API key or `memory mode = dbengine` for
a single host. The additional `page cache size` and `dbengine disk space` configuration options
- are inherited from the global netdata configuration.
+ are inherited from the global Netdata configuration.
##### allow from
-`allow from` settings are [netdata simple patterns](../libnetdata/simple_pattern): string matches
+`allow from` settings are [Netdata simple patterns](../libnetdata/simple_pattern): string matches
that use `*` as wildcard (any number of times) and a `!` prefix for a negative match.
So: `allow from = !10.1.2.3 10.*` will allow all IPs in `10.*` except `10.1.2.3`. The order is
important: left to right, the first positive or negative match is used.
-`allow from` is available in netdata v1.9+
+`allow from` is available in Netdata v1.9+
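Putting it together, a minimal sketch of a receiving section in `stream.conf` using the pattern above (the API key is illustrative):

```
[11111111-2222-3333-4444-555555555555]
    enabled = yes
    # allow all IPs in 10.* except 10.1.2.3
    # (left to right, the first positive or negative match is used)
    allow from = !10.1.2.3 10.*
```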
##### tracing
@@ -211,7 +210,7 @@ The receiving end (`proxy` or `master`) logs entries like these:
2017-02-25 01:58:14: netdata: INFO : STREAM costa-pc [receive from [10.11.12.11]:33554]: receiving metrics...
```
-For netdata v1.9+, streaming can also be monitored via `access.log`.
+For Netdata v1.9+, streaming can also be monitored via `access.log`.
### Securing streaming communications
@@ -287,29 +286,28 @@ With this configuration, the `CApath` option tells Netdata to search for trusted
With the introduction of TLS/SSL, the master-slave communication behaves as shown in the table below, depending on the following configurations:
-- **Master TLS (Yes/No)**: Whether the `[web]` section in `netdata.conf` has `ssl key` and `ssl certificate`.
-- **Master port TLS (-/force/optional)**: Depends on whether the `[web]` section `bind to` contains a `^SSL=force` or `^SSL=optional` directive on the port(s) used for streaming.
-- **Slave TLS (Yes/No)**: Whether the destination in the slave's `stream.conf` has `:SSL` at the end.
-- **Slave TLS Verification (yes/no)**: Value of the slave's `stream.conf` `ssl skip certificate verification` parameter (default is no).
+- **Master TLS (Yes/No)**: Whether the `[web]` section in `netdata.conf` has `ssl key` and `ssl certificate`.
+- **Master port TLS (-/force/optional)**: Depends on whether the `[web]` section `bind to` contains a `^SSL=force` or `^SSL=optional` directive on the port(s) used for streaming.
+- **Slave TLS (Yes/No)**: Whether the destination in the slave's `stream.conf` has `:SSL` at the end.
+- **Slave TLS Verification (yes/no)**: Value of the slave's `stream.conf` `ssl skip certificate verification` parameter (default is no).
- Master TLS enabled | Master port SSL | Slave TLS | Slave SSL Ver. | Behavior
-:------:|:-----:|:-----:|:-----:|:--------
-No | - | No | no | Legacy behavior. The master-slave stream is unencrypted.
-Yes | force | No | no | The master rejects the slave connection.
-Yes | -/optional | No | no | The master-slave stream is unencrypted (expected situation for legacy slaves and newer masters)
-Yes | -/force/optional | Yes | no | The master-slave stream is encrypted, provided that the master has a valid TLS/SSL certificate. Otherwise, the slave refuses to connect.
-Yes | -/force/optional | Yes | yes | The master-slave stream is encrypted.
+| Master TLS enabled|Master port SSL|Slave TLS|Slave SSL Ver.|Behavior|
+|:----------------:|:-------------:|:-------:|:------------:|:-------|
+| No|-|No|no|Legacy behavior. The master-slave stream is unencrypted.|
+| Yes|force|No|no|The master rejects the slave connection.|
+| Yes|-/optional|No|no|The master-slave stream is unencrypted (expected situation for legacy slaves and newer masters)|
+| Yes|-/force/optional|Yes|no|The master-slave stream is encrypted, provided that the master has a valid TLS/SSL certificate. Otherwise, the slave refuses to connect.|
+| Yes|-/force/optional|Yes|yes|The master-slave stream is encrypted.|
## Viewing remote host dashboards, using mirrored databases
-On any receiving netdata, that maintains remote databases and has its web server enabled,
+On any receiving Netdata that maintains remote databases and has its web server enabled,
the node menu will include a list of the mirrored databases.
![image](https://cloud.githubusercontent.com/assets/2662304/24080824/24cd2d3c-0caf-11e7-909d-a8dd1dbb95d7.png)
Selecting any of these, the server will offer a dashboard using the mirrored metrics.
-
## Monitoring ephemeral nodes
Auto-scaling is probably the most trendy service deployment strategy these days.
@@ -326,32 +324,32 @@ In auto-scaling, all servers are ephemeral, they live for just a few hours. Ever
So, how can we monitor them? How can we be sure that everything is working as expected on all of them?
-### The netdata way
+### The Netdata way
-We recently made a significant improvement at the core of netdata to support monitoring such setups.
+We recently made a significant improvement at the core of Netdata to support monitoring such setups.
-Following the netdata way of monitoring, we wanted:
+Following the Netdata way of monitoring, we wanted:
-1. **real-time performance monitoring**, collecting **_thousands of metrics per server per second_**, visualized in interactive, automatically created dashboards.
-2. **real-time alarms**, for all nodes.
-3. **zero configuration**, all ephemeral servers should have exactly the same configuration, and nothing should be configured at any system for each of the ephemeral nodes. We shouldn't care if 10 or 100 servers are spawned to handle the load.
-4. **self-cleanup**, so that nothing needs to be done for cleaning up the monitoring infrastructure from the hundreds of nodes that may have been monitored through time.
+1. **real-time performance monitoring**, collecting ***thousands of metrics per server per second***, visualized in interactive, automatically created dashboards.
+2. **real-time alarms**, for all nodes.
+3. **zero configuration**, all ephemeral servers should have exactly the same configuration, and nothing should be configured at any system for each of the ephemeral nodes. We shouldn't care if 10 or 100 servers are spawned to handle the load.
+4. **self-cleanup**, so that nothing needs to be done for cleaning up the monitoring infrastructure from the hundreds of nodes that may have been monitored through time.
### How it works
-All monitoring solutions, including netdata, work like this:
+All monitoring solutions, including Netdata, work like this:
-1. `collect metrics`, from the system and the running applications
-2. `store metrics`, in a time-series database
-3. `examine metrics` periodically, for triggering alarms and sending alarm notifications
-4. `visualize metrics`, so that users can see what exactly is happening
+1. `collect metrics`, from the system and the running applications
+2. `store metrics`, in a time-series database
+3. `examine metrics` periodically, for triggering alarms and sending alarm notifications
+4. `visualize metrics`, so that users can see what exactly is happening
-netdata used to be self-contained, so that all these functions were handled entirely by each server. The changes we made, allow each netdata to be configured independently for each function. So, each netdata can now act as:
+Netdata used to be self-contained, so that all these functions were handled entirely by each server. The changes we made allow each Netdata to be configured independently for each function. So, each Netdata can now act as:
-- a `self contained system`, much like it used to be.
-- a `data collector`, that collects metrics from a host and pushes them to another netdata (with or without a local database and alarms).
-- a `proxy`, that receives metrics from other hosts and pushes them immediately to other netdata servers. netdata proxies can also be `store and forward proxies` meaning that they are able to maintain a local database for all metrics passing through them (with or without alarms).
-- a `time-series database` node, where data are kept, alarms are run and queries are served to visualise the metrics.
+- a `self contained system`, much like it used to be.
+- a `data collector`, that collects metrics from a host and pushes them to another Netdata (with or without a local database and alarms).
+- a `proxy`, that receives metrics from other hosts and pushes them immediately to other Netdata servers. Netdata proxies can also be `store and forward proxies` meaning that they are able to maintain a local database for all metrics passing through them (with or without alarms).
+- a `time-series database` node, where data are kept, alarms are run and queries are served to visualise the metrics.
### Configuring an auto-scaling setup
@@ -359,7 +357,7 @@ netdata used to be self-contained, so that all these functions were handled enti
<img src="https://cloud.githubusercontent.com/assets/2662304/23627468/96daf7ba-02b9-11e7-95ac-1f767dd8dab8.png"/>
</p>
-You need a netdata `master`. This node should not be ephemeral. It will be the node where all ephemeral nodes (let's call them `slaves`) will be sending their metrics.
+You need a Netdata `master`. This node should not be ephemeral. It will be the node where all ephemeral nodes (let's call them `slaves`) will be sending their metrics.
The master will need to authorize the slaves before accepting their metrics. This is done with an API key.
@@ -389,15 +387,16 @@ On the master, edit `/etc/netdata/stream.conf` (to edit it on your system run `/
# alarms checks, only while the slave is connected
health enabled by default = auto
```
-*`stream.conf` on master, to enable receiving metrics from slaves using the API key.*
+
+_`stream.conf` on master, to enable receiving metrics from slaves using the API key._
If you use multiple API keys, you can add one such section for each API key.
-When done, restart netdata on the `master` node. It is now ready to receive metrics.
+When done, restart Netdata on the `master` node. It is now ready to receive metrics.
-Note that `health enabled by default = auto` will still trigger `last_collected` alarms, if a connected slave does not exit gracefully. If the netdata running on the slave is
+Note that `health enabled by default = auto` will still trigger `last_collected` alarms if a connected slave does not exit gracefully. If the `netdata` process running on the slave is
stopped, it will close the connection to the master, ensuring that no `last_collected` alarms are triggered. For example, a proper container restart would first terminate
-the netdata process, but a system power issue would leave the connection open on the master side. In the second case, you will still receive alarms.
+the `netdata` process, but a system power issue would leave the connection open on the master side. In the second case, you will still receive alarms.
#### Configuring the `slaves`
@@ -405,7 +404,7 @@ On each of the slaves, edit `/etc/netdata/stream.conf` (to edit it on your syste
```bash
[stream]
- # stream metrics to another netdata
+ # stream metrics to another Netdata
enabled = yes
# the IP and PORT of the master
@@ -414,9 +413,10 @@ On each of the slaves, edit `/etc/netdata/stream.conf` (to edit it on your syste
# the API key to use
api key = 11111111-2222-3333-4444-555555555555
```
-*`stream.conf` on slaves, to enable pushing metrics to master at `10.11.12.13:19999`.*
-Using just the above configuration, the `slaves` will be pushing their metrics to the `master` netdata, but they will still maintain a local database of the metrics and run health checks. To disable them, edit `/etc/netdata/netdata.conf` and set:
+_`stream.conf` on slaves, to enable pushing metrics to master at `10.11.12.13:19999`._
+
+Using just the above configuration, the `slaves` will be pushing their metrics to the `master` Netdata, but they will still maintain a local database of the metrics and run health checks. To disable both, edit `/etc/netdata/netdata.conf` and set:
```bash
[global]
@@ -427,13 +427,14 @@ Using just the above configuration, the `slaves` will be pushing their metrics t
# disable health checks
enabled = no
```
-*`netdata.conf` configuration on slaves, to disable the local database and health checks.*
+
+_`netdata.conf` configuration on slaves, to disable the local database and health checks._
Keep in mind that setting `memory mode = none` will also force `[health].enabled = no` (health checks require access to a local database). But you can keep the database and disable health checks if you need to. In any case, you are sending all the metrics to the master server, which can handle the health checking (`[health].enabled = yes`).
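A minimal sketch of that variant for a slave's `netdata.conf` (keeping a local database while the master handles health checking; `memory mode = save` is just one possible non-`none` mode):

```
[global]
    # keep a local database
    memory mode = save

[health]
    # let the master handle health checking
    enabled = no
```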
-#### netdata unique id
+#### Netdata unique id
-The file `/var/lib/netdata/registry/netdata.public.unique.id` contains a random GUID that **uniquely identifies each netdata**. This file is automatically generated, by netdata, the first time it is started and remains unaltered forever.
+The file `/var/lib/netdata/registry/netdata.public.unique.id` contains a random GUID that **uniquely identifies each Netdata**. This file is automatically generated, by Netdata, the first time it is started and remains unaltered forever.
> If you are building an image to be used for automated provisioning of autoscaled VMs, it is important to delete that file from the image, so that each instance of your image will generate its own.
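A one-line sketch for such an image build step, using the path documented above:

```
rm -f /var/lib/netdata/registry/netdata.public.unique.id
```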
@@ -441,7 +442,6 @@ The file `/var/lib/netdata/registry/netdata.public.unique.id` contains a random
Both the sender and the receiver of metrics log information at `/var/log/netdata/error.log`.
-
On both master and slave do this:
```
@@ -469,15 +469,15 @@ and something like this on the slave:
### Archiving to a time-series database
-The `master` netdata node can also archive metrics, for all `slaves`, to a time-series database. At the time of this writing, netdata supports:
+The `master` Netdata node can also archive metrics, for all `slaves`, to a time-series database. At the time of this writing, Netdata supports:
-- graphite
-- opentsdb
-- prometheus
-- json document DBs
-- all the compatibles to the above (e.g. kairosdb, influxdb, etc)
+- graphite
+- opentsdb
+- prometheus
+- json document DBs
+- anything compatible with the above (e.g. kairosdb, influxdb, etc.)
-Check the netdata [backends documentation](../backends) for configuring this.
+Check the Netdata [backends documentation](../backends) for configuring this.
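As a hint of what that looks like, a minimal sketch of the `[backend]` section in the master's `netdata.conf` (the destination is illustrative; see the backends documentation for all options):

```
[backend]
    enabled = yes
    type = graphite
    destination = localhost:2003
```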
This is how such a solution will work:
@@ -487,7 +487,7 @@ This is how such a solution will work:
### An advanced setup
-netdata also supports `proxies` with and without a local database, and data retention can be different between all nodes.
+Netdata also supports `proxies` with and without a local database, and data retention can be different between all nodes.
This means a setup like the following is also possible:
@@ -495,21 +495,20 @@ This means a setup like the following is also possible:
<img src="https://cloud.githubusercontent.com/assets/2662304/23629551/bb1fd9c2-02c0-11e7-90f5-cab5a3ed4c53.png"/>
</p>
-
## Proxies
-A proxy is a netdata that is receiving metrics from a netdata, and streams them to another netdata.
+A proxy is a Netdata instance that receives metrics from one Netdata and streams them to another Netdata.
-netdata proxies may or may not maintain a database for the metrics passing through them.
+Netdata proxies may or may not maintain a database for the metrics passing through them.
When they maintain a database, they can also run health checks (alarms and notifications)
for the remote host that is streaming the metrics.
-To configure a proxy, configure it as a receiving and a sending netdata at the same time,
+To configure a proxy, configure it as a receiving and a sending Netdata at the same time,
using [stream.conf](stream.conf).
-The sending side of a netdata proxy, connects and disconnects to the final destination of the
+The sending side of a Netdata proxy connects and disconnects to the final destination of the
metrics, following the same pattern as the receiving side.
For a practical example see [Monitoring ephemeral nodes](#monitoring-ephemeral-nodes).
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fstreaming%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fstreaming%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/system/Makefile.in b/system/Makefile.in
new file mode 100644
index 00000000..7915017c
--- /dev/null
+++ b/system/Makefile.in
@@ -0,0 +1,638 @@
+# Makefile.in generated by automake 1.15.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2017 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+
+VPATH = @srcdir@
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = system
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(dist_config_SCRIPTS) \
+ $(dist_noinst_DATA) $(am__DIST_COMMON)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
+am__vpath_adj = case $$p in \
+ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
+ *) f=$$p;; \
+ esac;
+am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
+am__install_max = 40
+am__nobase_strip_setup = \
+ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
+am__nobase_strip = \
+ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
+am__nobase_list = $(am__nobase_strip_setup); \
+ for p in $$list; do echo "$$p $$p"; done | \
+ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
+ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
+ if (++n[$$2] == $(am__install_max)) \
+ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
+ END { for (dir in files) print dir, files[dir] }'
+am__base_list = \
+ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
+ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
+am__uninstall_files_from_dir = { \
+ test -z "$$files" \
+ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \
+ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \
+ $(am__cd) "$$dir" && rm -f $$files; }; \
+ }
+am__installdirs = "$(DESTDIR)$(configdir)"
+SCRIPTS = $(dist_config_SCRIPTS)
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_noinst_DATA) $(nodist_noinst_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+am__DIST_COMMON = $(srcdir)/Makefile.in $(top_srcdir)/build/subst.inc
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CUPSCONFIG = @CUPSCONFIG@
+CXX = @CXX@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CXX_BINARY = @CXX_BINARY@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+JSON_CFLAGS = @JSON_CFLAGS@
+JSON_LIBS = @JSON_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
+LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
+LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
+LIBCURL_LIBS = @LIBCURL_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
+LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
+LIBSSL_LIBS = @LIBSSL_LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
+OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
+OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
+OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
+OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
+OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
+OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
+OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
+OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
+OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
+OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
+OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
+OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
+PROTOBUF_LIBS = @PROTOBUF_LIBS@
+PROTOC = @PROTOC@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
+XENLIGHT_LIBS = @XENLIGHT_LIBS@
+YAJL_CFLAGS = @YAJL_CFLAGS@
+YAJL_LIBS = @YAJL_LIBS@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+
+#
+# Copyright (C) 2015 Alon Bar-Lev <alon.barlev@gmail.com>
+# SPDX-License-Identifier: GPL-3.0-or-later
+#
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+CLEANFILES = \
+ edit-config \
+ netdata-openrc \
+ netdata.logrotate \
+ netdata.service \
+ netdata-init-d \
+ netdata-lsb \
+ netdata-freebsd \
+ netdata.plist \
+ $(NULL)
+
+SUFFIXES = .in
+dist_config_SCRIPTS = \
+ edit-config \
+ $(NULL)
+
+nodist_noinst_DATA = \
+ netdata-openrc \
+ netdata.logrotate \
+ netdata.service \
+ netdata-init-d \
+ netdata-lsb \
+ netdata-freebsd \
+ netdata.plist \
+ $(NULL)
+
+dist_noinst_DATA = \
+ edit-config.in \
+ netdata-openrc.in \
+ netdata.logrotate.in \
+ netdata.service.in \
+ netdata-init-d.in \
+ netdata-lsb.in \
+ netdata-freebsd.in \
+ netdata.plist.in \
+ netdata.conf \
+ $(NULL)
+
+all: all-am
+
+.SUFFIXES:
+.SUFFIXES: .in
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(top_srcdir)/build/subst.inc $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu system/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu system/Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+$(top_srcdir)/build/subst.inc $(am__empty):
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+install-dist_configSCRIPTS: $(dist_config_SCRIPTS)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_config_SCRIPTS)'; test -n "$(configdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(configdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(configdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \
+ done | \
+ sed -e 'p;s,.*/,,;n' \
+ -e 'h;s|.*|.|' \
+ -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \
+ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \
+ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \
+ if ($$2 == $$4) { files[d] = files[d] " " $$1; \
+ if (++n[d] == $(am__install_max)) { \
+ print "f", d, files[d]; n[d] = 0; files[d] = "" } } \
+ else { print "f", d "/" $$4, $$1 } } \
+ END { for (d in files) print "f", d, files[d] }' | \
+ while read type dir files; do \
+ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \
+ test -z "$$files" || { \
+ echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(configdir)$$dir'"; \
+ $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(configdir)$$dir" || exit $$?; \
+ } \
+ ; done
+
+uninstall-dist_configSCRIPTS:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_config_SCRIPTS)'; test -n "$(configdir)" || exit 0; \
+ files=`for p in $$list; do echo "$$p"; done | \
+ sed -e 's,.*/,,;$(transform)'`; \
+ dir='$(DESTDIR)$(configdir)'; $(am__uninstall_files_from_dir)
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(SCRIPTS) $(DATA)
+installdirs:
+ for dir in "$(DESTDIR)$(configdir)"; do \
+ test -z "$$dir" || $(MKDIR_P) "$$dir"; \
+ done
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+ -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES)
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am: install-dist_configSCRIPTS
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am: uninstall-dist_configSCRIPTS
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dist_configSCRIPTS install-dvi \
+ install-dvi-am install-exec install-exec-am install-html \
+ install-html-am install-info install-info-am install-man \
+ install-pdf install-pdf-am install-ps install-ps-am \
+ install-strip installcheck installcheck-am installdirs \
+ maintainer-clean maintainer-clean-generic mostlyclean \
+ mostlyclean-generic pdf pdf-am ps ps-am tags-am uninstall \
+ uninstall-am uninstall-dist_configSCRIPTS
+
+.PRECIOUS: Makefile
+
+.in:
+ if sed \
+ -e 's#[@]localstatedir_POST@#$(localstatedir)#g' \
+ -e 's#[@]sbindir_POST@#$(sbindir)#g' \
+ -e 's#[@]pluginsdir_POST@#$(pluginsdir)#g' \
+ -e 's#[@]configdir_POST@#$(configdir)#g' \
+ -e 's#[@]libconfigdir_POST@#$(libconfigdir)#g' \
+ -e 's#[@]cachedir_POST@#$(cachedir)#g' \
+ -e 's#[@]registrydir_POST@#$(registrydir)#g' \
+ -e 's#[@]varlibdir_POST@#$(varlibdir)#g' \
+ $< > $@.tmp; then \
+ mv "$@.tmp" "$@"; \
+ else \
+ rm -f "$@.tmp"; \
+ false; \
+ fi
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
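The `.in:` suffix rule above is how the shipped templates (`edit-config.in`, `netdata.service.in`, and friends) become their installable counterparts: sed replaces each `@..._POST@` placeholder with the directory chosen at configure time, writing to a `.tmp` file first so a failed substitution never leaves a truncated target behind. A minimal sketch of one substitution, assuming `/etc/netdata` as the configured configdir (the real rule applies the same pattern for all eight placeholders at once):

```sh
# hypothetical one-line template; the rule processes whole *.in files
echo 'script_dir="@configdir_POST@"' \
  | sed -e 's#[@]configdir_POST@#/etc/netdata#g'
# prints: script_dir="/etc/netdata"
```

The `[@]` bracket trick keeps the sed pattern itself from matching an autoconf `@var@` substitution, so config.status does not rewrite the rule when it regenerates the Makefile.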
diff --git a/system/edit-config b/system/edit-config
new file mode 100644
index 00000000..7c785a95
--- /dev/null
+++ b/system/edit-config
@@ -0,0 +1,101 @@
+#!/usr/bin/env sh
+
+[ -f /etc/profile ] && . /etc/profile
+
+file="${1}"
+
+if [ "$(command -v editor)" ] ; then
+ EDITOR="${EDITOR-editor}"
+else
+ EDITOR="${EDITOR-vi}"
+fi
+
+[ -z "${NETDATA_USER_CONFIG_DIR}" ] && NETDATA_USER_CONFIG_DIR="/etc/netdata"
+[ -z "${NETDATA_STOCK_CONFIG_DIR}" ] && NETDATA_STOCK_CONFIG_DIR="/usr/lib/netdata/conf.d"
+
+if [ -z "${file}" ]
+then
+ cat <<USAGE
+
+USAGE:
+ ${0} FILENAME
+
+ Copy and edit the stock config file named: FILENAME
+ If FILENAME has already been copied, it is edited as-is.
+
+ The EDITOR environment variable selects the editor to use.
+
+ Stock config files at: '${NETDATA_STOCK_CONFIG_DIR}'
+ User config files at: '${NETDATA_USER_CONFIG_DIR}'
+
+ Available files in '${NETDATA_STOCK_CONFIG_DIR}' to copy and edit:
+
+USAGE
+
+ cd "${NETDATA_STOCK_CONFIG_DIR}" || exit 1
+ ls >&2 -R *.conf */*.conf
+ exit 1
+
+fi
+
+file_is_in_path() {
+ local file path real
+ file="${1}"
+ path="${2}"
+
+ real="$(readlink -f "${file}")"
+
+ # readlink is not available or failed; skip the check
+ [ -z "${real}" ] && return 0
+
+ if [ -z "$(echo "${real}" | grep -E "^${path}.*$")" ]
+ then
+ echo >&2 "File '${file}' is physically at '${real}', which is not in '${path}'. Aborting."
+ exit 1
+ fi
+
+ return 0
+}
+
+edit() {
+ echo >&2 "Editing '${1}' ..."
+
+ # check we can edit
+ file_is_in_path "${1}" "${NETDATA_USER_CONFIG_DIR}" || exit 1
+
+ "${EDITOR}" "${1}"
+ exit $?
+}
+
+copy_and_edit() {
+ # check we can copy
+ file_is_in_path "${NETDATA_STOCK_CONFIG_DIR}/${1}" "${NETDATA_STOCK_CONFIG_DIR}" || exit 1
+
+ if [ ! -f "${NETDATA_USER_CONFIG_DIR}/${1}" ]
+ then
+ echo >&2 "Copying '${NETDATA_STOCK_CONFIG_DIR}/${1}' to '${NETDATA_USER_CONFIG_DIR}/${1}' ... "
+ cp -p "${NETDATA_STOCK_CONFIG_DIR}/${1}" "${NETDATA_USER_CONFIG_DIR}/${1}" || exit 1
+ fi
+
+ edit "${NETDATA_USER_CONFIG_DIR}/${1}"
+}
+
+# make sure the filename is not absolute and does not start with a dot
+c1="$(echo "${file}" | cut -b 1)"
+if [ "${c1}" = "/" ] || [ "${c1}" = "." ]
+then
+ echo >&2 "Please don't use filenames starting with '/' or '.'"
+ exit 1
+fi
+
+# the file already exists in the user config dir; edit it in place
+if [ -f "${NETDATA_USER_CONFIG_DIR}/${file}" ]
+then
+ edit "${NETDATA_USER_CONFIG_DIR}/${file}"
+fi
+
+[ -f "${NETDATA_STOCK_CONFIG_DIR}/${file}" ] && copy_and_edit "${file}"
+
+echo >&2 "File '${file}' is not found in '${NETDATA_STOCK_CONFIG_DIR}'"
+exit 1
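Taken together, `edit-config` refuses absolute or dot-prefixed names, edits the file directly if it already exists under the user config directory, and otherwise copies the stock file there before opening it. A hedged usage sketch (the paths are the defaults baked into the script; `health_alarm_notify.conf` is just an illustrative file name):

```sh
# list the stock files available to copy and edit
/etc/netdata/edit-config

# copy the stock file to /etc/netdata (if not already there) and open it
EDITOR=nano /etc/netdata/edit-config health_alarm_notify.conf
```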
diff --git a/tests/Makefile.am b/tests/Makefile.am
index 92e6db0f..0aa5af24 100644
--- a/tests/Makefile.am
+++ b/tests/Makefile.am
@@ -5,11 +5,8 @@ MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
CLEANFILES = \
health_mgmtapi/health-cmdapi-test.sh \
-<<<<<<< HEAD
acls/acl.sh \
-=======
urls/request.sh \
->>>>>>> 63a4cadd346df71255d2350128eebcf317e81d0f
$(NULL)
include $(top_srcdir)/build/subst.inc
diff --git a/tests/Makefile.in b/tests/Makefile.in
new file mode 100644
index 00000000..a539b06c
--- /dev/null
+++ b/tests/Makefile.in
@@ -0,0 +1,631 @@
+# Makefile.in generated by automake 1.15.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2017 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+
+VPATH = @srcdir@
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = tests
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(dist_noinst_SCRIPTS) \
+ $(dist_plugins_SCRIPTS) $(dist_noinst_DATA) $(am__DIST_COMMON)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
+am__vpath_adj = case $$p in \
+ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
+ *) f=$$p;; \
+ esac;
+am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
+am__install_max = 40
+am__nobase_strip_setup = \
+ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
+am__nobase_strip = \
+ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
+am__nobase_list = $(am__nobase_strip_setup); \
+ for p in $$list; do echo "$$p $$p"; done | \
+ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
+ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
+ if (++n[$$2] == $(am__install_max)) \
+ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
+ END { for (dir in files) print dir, files[dir] }'
+am__base_list = \
+ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
+ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
+am__uninstall_files_from_dir = { \
+ test -z "$$files" \
+ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \
+ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \
+ $(am__cd) "$$dir" && rm -f $$files; }; \
+ }
+am__installdirs = "$(DESTDIR)$(pluginsdir)"
+SCRIPTS = $(dist_noinst_SCRIPTS) $(dist_plugins_SCRIPTS)
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_noinst_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+am__DIST_COMMON = $(srcdir)/Makefile.in $(top_srcdir)/build/subst.inc
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CUPSCONFIG = @CUPSCONFIG@
+CXX = @CXX@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CXX_BINARY = @CXX_BINARY@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+JSON_CFLAGS = @JSON_CFLAGS@
+JSON_LIBS = @JSON_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
+LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
+LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
+LIBCURL_LIBS = @LIBCURL_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
+LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
+LIBSSL_LIBS = @LIBSSL_LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
+OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
+OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
+OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
+OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
+OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
+OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
+OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
+OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
+OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
+OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
+OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
+OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
+PROTOBUF_LIBS = @PROTOBUF_LIBS@
+PROTOC = @PROTOC@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
+XENLIGHT_LIBS = @XENLIGHT_LIBS@
+YAJL_CFLAGS = @YAJL_CFLAGS@
+YAJL_LIBS = @YAJL_LIBS@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+CLEANFILES = \
+ health_mgmtapi/health-cmdapi-test.sh \
+ acls/acl.sh \
+ urls/request.sh \
+ $(NULL)
+
+SUFFIXES = .in
+dist_noinst_DATA = \
+ README.md \
+ web/lib/jasmine-jquery.js \
+ web/easypiechart.chart.spec.js \
+ web/easypiechart.percentage.spec.js \
+ web/karma.conf.js \
+ web/fixtures/easypiechart.chart.fixture1.html \
+ node.d/fronius.chart.spec.js \
+ node.d/fronius.parse.spec.js \
+ node.d/fronius.process.spec.js \
+ node.d/fronius.validation.spec.js \
+ health_mgmtapi/health-cmdapi-test.sh.in \
+ acls/acl.sh.in \
+ urls/request.sh.in \
+ $(NULL)
+
+dist_plugins_SCRIPTS = \
+ health_mgmtapi/health-cmdapi-test.sh \
+ acls/acl.sh \
+ urls/request.sh \
+ $(NULL)
+
+dist_noinst_SCRIPTS = \
+ stress.sh \
+ $(NULL)
+
+all: all-am
+
+.SUFFIXES:
+.SUFFIXES: .in
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(top_srcdir)/build/subst.inc $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu tests/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu tests/Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+$(top_srcdir)/build/subst.inc $(am__empty):
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+install-dist_pluginsSCRIPTS: $(dist_plugins_SCRIPTS)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(pluginsdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(pluginsdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \
+ done | \
+ sed -e 'p;s,.*/,,;n' \
+ -e 'h;s|.*|.|' \
+ -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \
+ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \
+ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \
+ if ($$2 == $$4) { files[d] = files[d] " " $$1; \
+ if (++n[d] == $(am__install_max)) { \
+ print "f", d, files[d]; n[d] = 0; files[d] = "" } } \
+ else { print "f", d "/" $$4, $$1 } } \
+ END { for (d in files) print "f", d, files[d] }' | \
+ while read type dir files; do \
+ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \
+ test -z "$$files" || { \
+ echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pluginsdir)$$dir'"; \
+ $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pluginsdir)$$dir" || exit $$?; \
+ } \
+ ; done
+
+uninstall-dist_pluginsSCRIPTS:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || exit 0; \
+ files=`for p in $$list; do echo "$$p"; done | \
+ sed -e 's,.*/,,;$(transform)'`; \
+ dir='$(DESTDIR)$(pluginsdir)'; $(am__uninstall_files_from_dir)
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(SCRIPTS) $(DATA)
+installdirs:
+ for dir in "$(DESTDIR)$(pluginsdir)"; do \
+ test -z "$$dir" || $(MKDIR_P) "$$dir"; \
+ done
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+ -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES)
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am: install-dist_pluginsSCRIPTS
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am: uninstall-dist_pluginsSCRIPTS
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dist_pluginsSCRIPTS install-dvi \
+ install-dvi-am install-exec install-exec-am install-html \
+ install-html-am install-info install-info-am install-man \
+ install-pdf install-pdf-am install-ps install-ps-am \
+ install-strip installcheck installcheck-am installdirs \
+ maintainer-clean maintainer-clean-generic mostlyclean \
+ mostlyclean-generic pdf pdf-am ps ps-am tags-am uninstall \
+ uninstall-am uninstall-dist_pluginsSCRIPTS
+
+.PRECIOUS: Makefile
+
+.in:
+ if sed \
+ -e 's#[@]localstatedir_POST@#$(localstatedir)#g' \
+ -e 's#[@]sbindir_POST@#$(sbindir)#g' \
+ -e 's#[@]pluginsdir_POST@#$(pluginsdir)#g' \
+ -e 's#[@]configdir_POST@#$(configdir)#g' \
+ -e 's#[@]libconfigdir_POST@#$(libconfigdir)#g' \
+ -e 's#[@]cachedir_POST@#$(cachedir)#g' \
+ -e 's#[@]registrydir_POST@#$(registrydir)#g' \
+ -e 's#[@]varlibdir_POST@#$(varlibdir)#g' \
+ $< > $@.tmp; then \
+ mv "$@.tmp" "$@"; \
+ else \
+ rm -f "$@.tmp"; \
+ false; \
+ fi
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/tests/README.md b/tests/README.md
index 4ac3f210..fe8b8b20 100644
--- a/tests/README.md
+++ b/tests/README.md
@@ -1,4 +1,5 @@
# Testing
+
This README is a manual on how to get started with unit testing of the JavaScript and Node.js code.
Original author: BrainDoctor (github), July 2017
@@ -24,10 +25,10 @@ Other browsers work too (Chrome, Firefox). However, only the Chromium Browser 59
The commands above leave me with the following versions (July 2017):
- - nodejs: v4.2.6
- - npm: 3.5.2
- - chromium-browser: 59.0.3071.109
- - WebStorm (optional): 2017.1.4
+- nodejs: v4.2.6
+- npm: 3.5.2
+- chromium-browser: 59.0.3071.109
+- WebStorm (optional): 2017.1.4
## Configuration
@@ -46,22 +47,24 @@ If you use the JetBrains WebStorm IDE, you can integrate the karma runtime.
#### for Karma (Client side testing)
Headless Chromium:
-1. Run > Edit Configurations
-2. "+" > Karma
-3. - Name: Karma Headless Chromium
- - Configuration file: /path/to/your/netdata/tests/web/karma.conf.js
- - Browsers to start: ChromiumHeadless
- - Node interpreter: /usr/bin/nodejs (MUST be absolute, NVM works too)
- - Karma package: /path/to/your/netdata/node_modules/karma
+
+1. Run > Edit Configurations
+2. "+" > Karma
+3. - Name: Karma Headless Chromium
+ - Configuration file: /path/to/your/netdata/tests/web/karma.conf.js
+ - Browsers to start: ChromiumHeadless
+ - Node interpreter: /usr/bin/nodejs (MUST be absolute, NVM works too)
+ - Karma package: /path/to/your/netdata/node_modules/karma
GUI Chromium is similar:
-1. Run > Edit Configurations
-2. "+" > Karma
-3. - Name: Karma Chromium
- - Configuration file: /path/to/your/netdata/tests/web/karma.conf.js
- - Browsers to start: Chromium
- - Node interpreter: /usr/bin/nodejs (MUST be absolute, NVM works too)
- - Karma package: /path/to/your/netdata/node_modules/karma
+
+1. Run > Edit Configurations
+2. "+" > Karma
+3. - Name: Karma Chromium
+ - Configuration file: /path/to/your/netdata/tests/web/karma.conf.js
+ - Browsers to start: Chromium
+ - Node interpreter: /usr/bin/nodejs (MUST be absolute, NVM works too)
+ - Karma package: /path/to/your/netdata/node_modules/karma
You may add other browsers too (comma separated). With the "Browsers to start" field you can override any settings in karma.conf.js.
@@ -69,18 +72,19 @@ Also it is recommended to install WebStorm IDE Extension/Addon to Chrome/Chromiu
#### for node.d plugins (nodejs)
-1. Run > Edit Configurations
-2. "+" > Node.js
-3. - Name: Node.d plugins
- - Node interpreter: /usr/bin/nodejs (MUST be absolute, NVM works too)
- - JavaScript file: node_modules/jasmine-node/bin/jasmine-node
- - Application parameters: --captureExceptions tests/node.d
+1. Run > Edit Configurations
+2. "+" > Node.js
+3. - Name: Node.d plugins
+ - Node interpreter: /usr/bin/nodejs (MUST be absolute, NVM works too)
+ - JavaScript file: node_modules/jasmine-node/bin/jasmine-node
+ - Application parameters: --captureExceptions tests/node.d
## Running
### In WebStorm
#### Karma
+
Just launch the run configurations created above and they produce nice test trees:
![karma_run_2](https://user-images.githubusercontent.com/12159026/28277789-559149f6-6b1b-11e7-9cc7-a81d81d12c35.png)
@@ -99,6 +103,7 @@ cd /path/to/your/netdata
nodejs ./node_modules/karma/bin/karma start tests/web/karma.conf.js --single-run=true --browsers=ChromiumHeadless
```
+
will start the karma server, start chromium in headless mode and exit.
If a test fails, it produces even a stack trace:
@@ -135,5 +140,4 @@ The karma and node.d runners can be integrated in Travis (AFAIK), but that is ou
Note: Karma is for browser testing. On a build server no GUI or browser may be available, unless the browsers support headless mode.
-
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Ftests%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Ftests%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
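The WebStorm Node.js run configuration described in the README maps directly onto a shell invocation. Assuming the npm dependencies were installed as the README describes, the node.d specs can also be run headlessly from the repository root (a sketch of the equivalent command line, not a documented entry point):

```sh
cd /path/to/your/netdata
nodejs node_modules/jasmine-node/bin/jasmine-node --captureExceptions tests/node.d
```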
diff --git a/tests/acls/acl.sh b/tests/acls/acl.sh
new file mode 100644
index 00000000..772d6640
--- /dev/null
+++ b/tests/acls/acl.sh
@@ -0,0 +1,119 @@
+#!/bin/bash -x
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+BASICURL="http://127.0.0.1"
+BASICURLS="https://127.0.0.1"
+
+NETDATA_VARLIB_DIR="/var/lib/netdata"
+RED='\033[0;31m'
+GREEN='\033[0;32m'
+YELLOW='\033[0;43m'
+
+# replace the bind line of the base ACL config file with a new one
+# and store the result in a new file
+change_file(){
+ sed "s/$1/$2/g" netdata.cfg > "$4"
+}
+
+change_ssl_file(){
+ KEYROW="ssl key = $3/key.pem"
+ CERTROW="ssl certificate = $3/cert.pem"
+ sed "s@ssl key =@$KEYROW@g" netdata.ssl.cfg > tmp
+ sed "s@ssl certificate =@$CERTROW@g" tmp > tmp2
+ sed "s/$1/$2/g" tmp2 > "$4"
+}
+
+run_acl_tests() {
+ # give netdata some time to start properly
+ sleep 2
+
+ curl -v -k --tls-max 1.2 --create-dirs -o index.html "$2" 2> log_index.txt
+ curl -v -k --tls-max 1.2 --create-dirs -o netdata.txt "$2/netdata.conf" 2> log_nc.txt
+ curl -v -k --tls-max 1.2 --create-dirs -o badge.csv "$2/api/v1/badge.svg?chart=cpu.cpu0_interrupts" 2> log_badge.txt
+ curl -v -k --tls-max 1.2 --create-dirs -o info.txt "$2/api/v1/info" 2> log_info.txt
+ curl -H "X-Auth-Token: $1" -v -k --tls-max 1.2 --create-dirs -o health.csv "$2/api/v1/manage/health?cmd=LIST" 2> log_health.txt
+
+ TOT=$(grep -c "HTTP/1.1 301" log_*.txt | cut -d: -f2| grep -c 1)
+ if [ "$TOT" -ne "$4" ]; then
+ echo -e "${RED}I got a wrong number of redirects($TOT) when SSL is activated, It was expected $4"
+ rm log_* netdata.conf.test* netdata.txt health.csv index.html badge.csv tmp* key.pem cert.pem info.txt
+ killall netdata
+ exit 1
+ elif [ "$TOT" -eq "$4" ] && [ "$4" -ne "0" ]; then
+ echo -e "${YELLOW}I got the correct number of redirects($4) when SSL is activated and I try to access with HTTP."
+ return
+ fi
+
+ TOT=$(grep -c "HTTP/1.1 200 OK" log_* | cut -d: -f2| grep -c 1)
+ if [ "$TOT" -ne "$3" ]; then
+ echo -e "${RED}I got a wrong number of \"200 OK\" from the queries, it was expected $3."
+ killall netdata
+ rm log_* netdata.conf.test* netdata.txt health.csv index.html badge.csv tmp* key.pem cert.pem info.txt
+ exit 1
+ fi
+
+ echo -e "${GREEN}ACLs were applied correctly"
+}
+
+CONF=$(grep "bind" netdata.cfg)
+MUSER=$(grep run netdata.cfg | cut -d= -f2|sed 's/^[ \t]*//')
+
+openssl req -new -newkey rsa:2048 -days 365 -nodes -x509 -sha512 -subj "/C=US/ST=Denied/L=Somewhere/O=Dis/CN=www.example.com" -keyout key.pem -out cert.pem
+chown "$MUSER" key.pem cert.pem
+CWD=$(pwd)
+
+if [ -f "${NETDATA_VARLIB_DIR}/netdata.api.key" ] ;then
+ read -r TOKEN < "${NETDATA_VARLIB_DIR}/netdata.api.key"
+else
+ TOKEN="NULL"
+fi
+
+change_file "$CONF" " bind to = *" "$CWD" "netdata.conf.test0"
+netdata -c "netdata.conf.test0"
+run_acl_tests $TOKEN "$BASICURL:19999" 5 0
+killall netdata
+
+change_ssl_file "$CONF" " bind to = *=dashboard|registry|badges|management|netdata.conf *:20000=dashboard|registry|badges|management *:20001=dashboard|registry|netdata.conf^SSL=optional *:20002=dashboard|registry" "$CWD" "netdata.conf.test1"
+netdata -c "netdata.conf.test1"
+run_acl_tests $TOKEN "$BASICURL:19999" 5 5
+run_acl_tests $TOKEN "$BASICURLS:19999" 5 0
+
+run_acl_tests $TOKEN "$BASICURL:20000" 4 5
+run_acl_tests $TOKEN "$BASICURLS:20000" 4 0
+
+run_acl_tests $TOKEN "$BASICURL:20001" 4 0
+run_acl_tests $TOKEN "$BASICURLS:20001" 4 0
+
+run_acl_tests $TOKEN "$BASICURL:20002" 3 5
+run_acl_tests $TOKEN "$BASICURLS:20002" 3 0
+killall netdata
+
+change_ssl_file "$CONF" " bind to = *=dashboard|registry|badges|management|netdata.conf *:20000=dashboard|registry|badges|management *:20001=dashboard|registry|netdata.conf^SSL=force *:20002=dashboard|registry" "$CWD" "netdata.conf.test2"
+netdata -c "netdata.conf.test2"
+run_acl_tests $TOKEN "$BASICURL:19999" 5 5
+run_acl_tests $TOKEN "$BASICURLS:19999" 5 0
+
+run_acl_tests $TOKEN "$BASICURL:20000" 4 5
+run_acl_tests $TOKEN "$BASICURLS:20000" 4 0
+
+run_acl_tests $TOKEN "$BASICURL:20001" 4 5
+run_acl_tests $TOKEN "$BASICURLS:20001" 4 0
+
+run_acl_tests $TOKEN "$BASICURL:20002" 3 5
+run_acl_tests $TOKEN "$BASICURLS:20002" 3 0
+killall netdata
+
+change_ssl_file "$CONF" " bind to = *=dashboard|registry|badges|management|netdata.conf *:20000=dashboard|registry|badges|management^SSL=optional *:20001=dashboard|registry|netdata.conf^SSL=force" "$CWD" "netdata.conf.test3"
+netdata -c "netdata.conf.test3"
+run_acl_tests $TOKEN "$BASICURL:19999" 5 5
+run_acl_tests $TOKEN "$BASICURLS:19999" 5 0
+
+run_acl_tests $TOKEN "$BASICURL:20000" 4 0
+run_acl_tests $TOKEN "$BASICURLS:20000" 4 0
+
+run_acl_tests $TOKEN "$BASICURL:20001" 4 5
+run_acl_tests $TOKEN "$BASICURLS:20001" 4 0
+killall netdata
+
+rm log_* netdata.conf.test* netdata.txt health.csv index.html badge.csv tmp* key.pem cert.pem info.txt
+echo "All the tests were successful"
diff --git a/tests/acls/netdata.cfg b/tests/acls/netdata.cfg
deleted file mode 100644
index 1dcb4a5c..00000000
--- a/tests/acls/netdata.cfg
+++ /dev/null
@@ -1,20 +0,0 @@
-# netdata configuration
-#
-# You can download the latest version of this file, using:
-#
-# wget -O /etc/netdata/netdata.conf http://localhost:19999/netdata.conf
-# or
-# curl -o /etc/netdata/netdata.conf http://localhost:19999/netdata.conf
-#
-# You can uncomment and change any of the options below.
-# The value shown in the commented settings, is the default value.
-#
-
-[global]
- run as user = netdata
-
- # the default database size - 1 hour
- history = 3600
-
- # by default do not expose the netdata port
- bind to = localhost
diff --git a/tests/acls/netdata.ssl.cfg b/tests/acls/netdata.ssl.cfg
deleted file mode 100644
index 28e0030d..00000000
--- a/tests/acls/netdata.ssl.cfg
+++ /dev/null
@@ -1,24 +0,0 @@
-# netdata configuration
-#
-# You can download the latest version of this file, using:
-#
-# wget -O /etc/netdata/netdata.conf http://localhost:19999/netdata.conf
-# or
-# curl -o /etc/netdata/netdata.conf http://localhost:19999/netdata.conf
-#
-# You can uncomment and change any of the options below.
-# The value shown in the commented settings, is the default value.
-#
-
-[global]
- run as user = netdata
-
- # the default database size - 1 hour
- history = 3600
-
- # by default do not expose the netdata port
- bind to = localhost
-
-[web]
- ssl key =
- ssl certificate =
diff --git a/tests/backends/prometheus-avg-oldunits.txt b/tests/backends/prometheus-avg-oldunits.txt
deleted file mode 100644
index 53ee8ffa..00000000
--- a/tests/backends/prometheus-avg-oldunits.txt
+++ /dev/null
@@ -1,148 +0,0 @@
-nd_apps_cpu_percent_average
-nd_apps_cpu_system_percent_average
-nd_apps_cpu_user_percent_average
-nd_apps_files_open_files_average
-nd_apps_lreads_kilobytes_persec_average
-nd_apps_lwrites_kilobytes_persec_average
-nd_apps_major_faults_page_faults_persec_average
-nd_apps_mem_MB_average
-nd_apps_minor_faults_page_faults_persec_average
-nd_apps_pipes_open_pipes_average
-nd_apps_preads_kilobytes_persec_average
-nd_apps_processes_processes_average
-nd_apps_pwrites_kilobytes_persec_average
-nd_apps_sockets_open_sockets_average
-nd_apps_swap_MB_average
-nd_apps_threads_threads_average
-nd_apps_vmem_MB_average
-nd_cpu_core_throttling_events_persec_average
-nd_cpu_cpu_percent_average
-nd_cpu_interrupts_interrupts_persec_average
-nd_cpu_softirqs_softirqs_persec_average
-nd_cpu_softnet_stat_events_persec_average
-nd_disk_avgsz_kilobytes_per_operation_average
-nd_disk_await_ms_per_operation_average
-nd_disk_backlog_milliseconds_average
-nd_disk_inodes_Inodes_average
-nd_disk_io_kilobytes_persec_average
-nd_disk_iotime_milliseconds_persec_average
-nd_disk_mops_merged_operations_persec_average
-nd_disk_ops_operations_persec_average
-nd_disk_space_GB_average
-nd_disk_svctm_ms_per_operation_average
-nd_disk_util___of_time_working_average
-nd_ip_bcast_kilobits_persec_average
-nd_ip_bcastpkts_packets_persec_average
-nd_ip_ecnpkts_packets_persec_average
-nd_ip_inerrors_packets_persec_average
-nd_ip_mcast_kilobits_persec_average
-nd_ip_mcastpkts_packets_persec_average
-nd_ip_tcp_accept_queue_packets_persec_average
-nd_ip_tcpconnaborts_connections_persec_average
-nd_ip_tcpofo_packets_persec_average
-nd_ip_tcpreorders_packets_persec_average
-nd_ipv4_errors_packets_persec_average
-nd_ipv4_icmp_errors_packets_persec_average
-nd_ipv4_icmpmsg_packets_persec_average
-nd_ipv4_icmp_packets_persec_average
-nd_ipv4_packets_packets_persec_average
-nd_ipv4_sockstat_sockets_sockets_average
-nd_ipv4_sockstat_tcp_mem_KB_average
-nd_ipv4_sockstat_tcp_sockets_sockets_average
-nd_ipv4_sockstat_udp_mem_KB_average
-nd_ipv4_sockstat_udp_sockets_sockets_average
-nd_ipv4_tcperrors_packets_persec_average
-nd_ipv4_tcphandshake_events_persec_average
-nd_ipv4_tcpopens_connections_persec_average
-nd_ipv4_tcppackets_packets_persec_average
-nd_ipv4_tcpsock_active_connections_average
-nd_ipv4_udperrors_events_persec_average
-nd_ipv4_udppackets_packets_persec_average
-nd_ipv6_ect_packets_persec_average
-nd_ipv6_errors_packets_persec_average
-nd_ipv6_icmperrors_errors_persec_average
-nd_ipv6_icmp_messages_persec_average
-nd_ipv6_icmpmldv2_reports_persec_average
-nd_ipv6_icmpneighbor_messages_persec_average
-nd_ipv6_icmprouter_messages_persec_average
-nd_ipv6_icmptypes_messages_persec_average
-nd_ipv6_mcast_kilobits_persec_average
-nd_ipv6_mcastpkts_packets_persec_average
-nd_ipv6_packets_packets_persec_average
-nd_ipv6_sockstat6_raw_sockets_sockets_average
-nd_ipv6_sockstat6_tcp_sockets_sockets_average
-nd_ipv6_sockstat6_udp_sockets_sockets_average
-nd_ipv6_udperrors_events_persec_average
-nd_ipv6_udppackets_packets_persec_average
-nd_mem_available_MB_average
-nd_mem_committed_MB_average
-nd_mem_kernel_MB_average
-nd_mem_pgfaults_page_faults_persec_average
-nd_mem_slab_MB_average
-nd_mem_transparent_hugepages_MB_average
-nd_mem_writeback_MB_average
-nd_netdata_apps_children_fix_percent_average
-nd_netdata_apps_cpu_milliseconds_persec_average
-nd_netdata_apps_fix_percent_average
-nd_netdata_apps_sizes_files_persec_average
-nd_netdata_clients_connected_clients_average
-nd_netdata_compression_ratio_percent_average
-nd_netdata_go_plugin_execution_time_ms_average
-nd_netdata_net_kilobits_persec_average
-nd_netdata_plugin_cgroups_cpu_milliseconds_persec_average
-nd_netdata_plugin_diskspace_dt_milliseconds_run_average
-nd_netdata_plugin_diskspace_milliseconds_persec_average
-nd_netdata_plugin_proc_cpu_milliseconds_persec_average
-nd_netdata_plugin_proc_modules_milliseconds_run_average
-nd_netdata_plugin_tc_cpu_milliseconds_persec_average
-nd_netdata_plugin_tc_time_milliseconds_run_average
-nd_netdata_private_charts_charts_average
-nd_netdata_pythond_runtime_ms_average
-nd_netdata_requests_requests_persec_average
-nd_netdata_response_time_milliseconds_request_average
-nd_netdata_server_cpu_milliseconds_persec_average
-nd_netdata_statsd_bytes_kilobits_persec_average
-nd_netdata_statsd_cpu_milliseconds_persec_average
-nd_netdata_statsd_events_events_persec_average
-nd_netdata_statsd_metrics_metrics_average
-nd_netdata_statsd_packets_packets_persec_average
-nd_netdata_statsd_reads_reads_persec_average
-nd_netdata_statsd_useful_metrics_metrics_average
-nd_netdata_tcp_connected_sockets_average
-nd_netdata_tcp_connects_events_average
-nd_netdata_web_cpu_milliseconds_persec_average
-nd_net_drops_drops_persec_average
-nd_net_net_kilobits_persec_average
-nd_net_packets_packets_persec_average
-nd_services_cpu_percent_average
-nd_services_mem_usage_MB_average
-nd_services_swap_usage_MB_average
-nd_services_throttle_io_ops_read_operations_persec_average
-nd_services_throttle_io_ops_write_operations_persec_average
-nd_services_throttle_io_read_kilobytes_persec_average
-nd_services_throttle_io_write_kilobytes_persec_average
-nd_system_active_processes_processes_average
-nd_system_cpu_percent_average
-nd_system_ctxt_context_switches_persec_average
-nd_system_entropy_entropy_average
-nd_system_forks_processes_persec_average
-nd_system_idlejitter_microseconds_lost_persec_average
-nd_system_interrupts_interrupts_persec_average
-nd_system_intr_interrupts_persec_average
-nd_system_io_kilobytes_persec_average
-nd_system_ipc_semaphore_arrays_arrays_average
-nd_system_ipc_semaphores_semaphores_average
-nd_system_ip_kilobits_persec_average
-nd_system_ipv6_kilobits_persec_average
-nd_system_load_load_average
-nd_system_net_kilobits_persec_average
-nd_system_pgpgio_kilobytes_persec_average
-nd_system_processes_processes_average
-nd_system_ram_MB_average
-nd_system_shared_memory_bytes_bytes_average
-nd_system_shared_memory_segments_segments_average
-nd_system_softirqs_softirqs_persec_average
-nd_system_softnet_stat_events_persec_average
-nd_system_swapio_kilobytes_persec_average
-nd_system_swap_MB_average
-nd_system_uptime_seconds_average
diff --git a/tests/backends/prometheus-avg.txt b/tests/backends/prometheus-avg.txt
deleted file mode 100644
index 1aedff2b..00000000
--- a/tests/backends/prometheus-avg.txt
+++ /dev/null
@@ -1,148 +0,0 @@
-nd_apps_cpu_percentage_average
-nd_apps_cpu_system_percentage_average
-nd_apps_cpu_user_percentage_average
-nd_apps_files_open_files_average
-nd_apps_lreads_KiB_persec_average
-nd_apps_lwrites_KiB_persec_average
-nd_apps_major_faults_page_faults_persec_average
-nd_apps_mem_MiB_average
-nd_apps_minor_faults_page_faults_persec_average
-nd_apps_pipes_open_pipes_average
-nd_apps_preads_KiB_persec_average
-nd_apps_processes_processes_average
-nd_apps_pwrites_KiB_persec_average
-nd_apps_sockets_open_sockets_average
-nd_apps_swap_MiB_average
-nd_apps_threads_threads_average
-nd_apps_vmem_MiB_average
-nd_cpu_core_throttling_events_persec_average
-nd_cpu_cpu_percentage_average
-nd_cpu_interrupts_interrupts_persec_average
-nd_cpu_softirqs_softirqs_persec_average
-nd_cpu_softnet_stat_events_persec_average
-nd_disk_avgsz_KiB_operation_average
-nd_disk_await_milliseconds_operation_average
-nd_disk_backlog_milliseconds_average
-nd_disk_inodes_inodes_average
-nd_disk_io_KiB_persec_average
-nd_disk_iotime_milliseconds_persec_average
-nd_disk_mops_merged_operations_persec_average
-nd_disk_ops_operations_persec_average
-nd_disk_space_GiB_average
-nd_disk_svctm_milliseconds_operation_average
-nd_disk_util___of_time_working_average
-nd_ip_bcast_kilobits_persec_average
-nd_ip_bcastpkts_packets_persec_average
-nd_ip_ecnpkts_packets_persec_average
-nd_ip_inerrors_packets_persec_average
-nd_ip_mcast_kilobits_persec_average
-nd_ip_mcastpkts_packets_persec_average
-nd_ip_tcp_accept_queue_packets_persec_average
-nd_ip_tcpconnaborts_connections_persec_average
-nd_ip_tcpofo_packets_persec_average
-nd_ip_tcpreorders_packets_persec_average
-nd_ipv4_errors_packets_persec_average
-nd_ipv4_icmp_errors_packets_persec_average
-nd_ipv4_icmpmsg_packets_persec_average
-nd_ipv4_icmp_packets_persec_average
-nd_ipv4_packets_packets_persec_average
-nd_ipv4_sockstat_sockets_sockets_average
-nd_ipv4_sockstat_tcp_mem_KiB_average
-nd_ipv4_sockstat_tcp_sockets_sockets_average
-nd_ipv4_sockstat_udp_mem_KiB_average
-nd_ipv4_sockstat_udp_sockets_sockets_average
-nd_ipv4_tcperrors_packets_persec_average
-nd_ipv4_tcphandshake_events_persec_average
-nd_ipv4_tcpopens_connections_persec_average
-nd_ipv4_tcppackets_packets_persec_average
-nd_ipv4_tcpsock_active_connections_average
-nd_ipv4_udperrors_events_persec_average
-nd_ipv4_udppackets_packets_persec_average
-nd_ipv6_ect_packets_persec_average
-nd_ipv6_errors_packets_persec_average
-nd_ipv6_icmperrors_errors_persec_average
-nd_ipv6_icmp_messages_persec_average
-nd_ipv6_icmpmldv2_reports_persec_average
-nd_ipv6_icmpneighbor_messages_persec_average
-nd_ipv6_icmprouter_messages_persec_average
-nd_ipv6_icmptypes_messages_persec_average
-nd_ipv6_mcast_kilobits_persec_average
-nd_ipv6_mcastpkts_packets_persec_average
-nd_ipv6_packets_packets_persec_average
-nd_ipv6_sockstat6_raw_sockets_sockets_average
-nd_ipv6_sockstat6_tcp_sockets_sockets_average
-nd_ipv6_sockstat6_udp_sockets_sockets_average
-nd_ipv6_udperrors_events_persec_average
-nd_ipv6_udppackets_packets_persec_average
-nd_mem_available_MiB_average
-nd_mem_committed_MiB_average
-nd_mem_kernel_MiB_average
-nd_mem_pgfaults_faults_persec_average
-nd_mem_slab_MiB_average
-nd_mem_transparent_hugepages_MiB_average
-nd_mem_writeback_MiB_average
-nd_netdata_apps_children_fix_percentage_average
-nd_netdata_apps_cpu_milliseconds_persec_average
-nd_netdata_apps_fix_percentage_average
-nd_netdata_apps_sizes_files_persec_average
-nd_netdata_clients_connected_clients_average
-nd_netdata_compression_ratio_percentage_average
-nd_netdata_go_plugin_execution_time_ms_average
-nd_netdata_net_kilobits_persec_average
-nd_netdata_plugin_cgroups_cpu_milliseconds_persec_average
-nd_netdata_plugin_diskspace_dt_milliseconds_run_average
-nd_netdata_plugin_diskspace_milliseconds_persec_average
-nd_netdata_plugin_proc_cpu_milliseconds_persec_average
-nd_netdata_plugin_proc_modules_milliseconds_run_average
-nd_netdata_plugin_tc_cpu_milliseconds_persec_average
-nd_netdata_plugin_tc_time_milliseconds_run_average
-nd_netdata_private_charts_charts_average
-nd_netdata_pythond_runtime_ms_average
-nd_netdata_requests_requests_persec_average
-nd_netdata_response_time_milliseconds_request_average
-nd_netdata_server_cpu_milliseconds_persec_average
-nd_netdata_statsd_bytes_kilobits_persec_average
-nd_netdata_statsd_cpu_milliseconds_persec_average
-nd_netdata_statsd_events_events_persec_average
-nd_netdata_statsd_metrics_metrics_average
-nd_netdata_statsd_packets_packets_persec_average
-nd_netdata_statsd_reads_reads_persec_average
-nd_netdata_statsd_useful_metrics_metrics_average
-nd_netdata_tcp_connected_sockets_average
-nd_netdata_tcp_connects_events_average
-nd_netdata_web_cpu_milliseconds_persec_average
-nd_net_drops_drops_persec_average
-nd_net_net_kilobits_persec_average
-nd_net_packets_packets_persec_average
-nd_services_cpu_percentage_average
-nd_services_mem_usage_MiB_average
-nd_services_swap_usage_MiB_average
-nd_services_throttle_io_ops_read_operations_persec_average
-nd_services_throttle_io_ops_write_operations_persec_average
-nd_services_throttle_io_read_KiB_persec_average
-nd_services_throttle_io_write_KiB_persec_average
-nd_system_active_processes_processes_average
-nd_system_cpu_percentage_average
-nd_system_ctxt_context_switches_persec_average
-nd_system_entropy_entropy_average
-nd_system_forks_processes_persec_average
-nd_system_idlejitter_microseconds_lost_persec_average
-nd_system_interrupts_interrupts_persec_average
-nd_system_intr_interrupts_persec_average
-nd_system_io_KiB_persec_average
-nd_system_ipc_semaphore_arrays_arrays_average
-nd_system_ipc_semaphores_semaphores_average
-nd_system_ip_kilobits_persec_average
-nd_system_ipv6_kilobits_persec_average
-nd_system_load_load_average
-nd_system_net_kilobits_persec_average
-nd_system_pgpgio_KiB_persec_average
-nd_system_processes_processes_average
-nd_system_ram_MiB_average
-nd_system_shared_memory_bytes_bytes_average
-nd_system_shared_memory_segments_segments_average
-nd_system_softirqs_softirqs_persec_average
-nd_system_softnet_stat_events_persec_average
-nd_system_swapio_KiB_persec_average
-nd_system_swap_MiB_average
-nd_system_uptime_seconds_average
diff --git a/tests/backends/prometheus-raw.txt b/tests/backends/prometheus-raw.txt
deleted file mode 100644
index 2ac4c2c7..00000000
--- a/tests/backends/prometheus-raw.txt
+++ /dev/null
@@ -1,156 +0,0 @@
-nd_apps_cpu
-nd_apps_cpu_system
-nd_apps_cpu_user
-nd_apps_files
-nd_apps_lreads
-nd_apps_lwrites
-nd_apps_major_faults
-nd_apps_mem
-nd_apps_minor_faults
-nd_apps_pipes
-nd_apps_preads
-nd_apps_processes
-nd_apps_pwrites
-nd_apps_sockets
-nd_apps_swap
-nd_apps_threads
-nd_apps_vmem
-nd_cpu_core_throttling_total
-nd_cpu_cpu_total
-nd_cpu_interrupts_total
-nd_cpu_softirqs_total
-nd_cpu_softnet_stat_total
-nd_disk_avgsz
-nd_disk_await
-nd_disk_backlog_total
-nd_disk_inodes
-nd_disk_iotime_total
-nd_disk_io_total
-nd_disk_mops_total
-nd_disk_ops_total
-nd_disk_space
-nd_disk_svctm
-nd_disk_util_total
-nd_ip_bcastpkts_total
-nd_ip_bcast_total
-nd_ip_ecnpkts_total
-nd_ip_inerrors_total
-nd_ip_mcastpkts_total
-nd_ip_mcast_total
-nd_ip_tcp_accept_queue_total
-nd_ip_tcpconnaborts_total
-nd_ip_tcpofo_total
-nd_ip_tcpreorders_total
-nd_ipv4_errors_total
-nd_ipv4_icmp_errors_total
-nd_ipv4_icmpmsg_total
-nd_ipv4_icmp_total
-nd_ipv4_packets_total
-nd_ipv4_sockstat_sockets
-nd_ipv4_sockstat_tcp_mem
-nd_ipv4_sockstat_tcp_sockets
-nd_ipv4_sockstat_udp_mem
-nd_ipv4_sockstat_udp_sockets
-nd_ipv4_tcperrors_total
-nd_ipv4_tcphandshake_total
-nd_ipv4_tcpopens_total
-nd_ipv4_tcppackets_total
-nd_ipv4_tcpsock
-nd_ipv4_udperrors_total
-nd_ipv4_udppackets_total
-nd_ipv6_ect_total
-nd_ipv6_errors_total
-nd_ipv6_icmperrors_total
-nd_ipv6_icmpmldv2_total
-nd_ipv6_icmpneighbor_total
-nd_ipv6_icmprouter_total
-nd_ipv6_icmp_total
-nd_ipv6_icmptypes_total
-nd_ipv6_mcastpkts_total
-nd_ipv6_mcast_total
-nd_ipv6_packets_total
-nd_ipv6_sockstat6_raw_sockets
-nd_ipv6_sockstat6_tcp_sockets
-nd_ipv6_sockstat6_udp_sockets
-nd_ipv6_udperrors_total
-nd_ipv6_udppackets_total
-nd_mem_available
-nd_mem_committed
-nd_mem_kernel
-nd_mem_pgfaults_total
-nd_mem_slab
-nd_mem_transparent_hugepages
-nd_mem_writeback
-nd_netdata_apps_children_fix
-nd_netdata_apps_cpu_total
-nd_netdata_apps_fix
-nd_netdata_apps_sizes_calls_total
-nd_netdata_apps_sizes_fds
-nd_netdata_apps_sizes_filenames_total
-nd_netdata_apps_sizes_files_total
-nd_netdata_apps_sizes_inode_changes_total
-nd_netdata_apps_sizes_link_changes_total
-nd_netdata_apps_sizes_new_pids_total
-nd_netdata_apps_sizes_pids
-nd_netdata_apps_sizes_targets
-nd_netdata_clients
-nd_netdata_compression_ratio
-nd_netdata_go_plugin_execution_time
-nd_netdata_net_total
-nd_netdata_plugin_cgroups_cpu_total
-nd_netdata_plugin_diskspace_dt
-nd_netdata_plugin_diskspace_total
-nd_netdata_plugin_proc_cpu_total
-nd_netdata_plugin_proc_modules
-nd_netdata_plugin_tc_cpu_total
-nd_netdata_plugin_tc_time
-nd_netdata_private_charts
-nd_netdata_pythond_runtime
-nd_netdata_requests_total
-nd_netdata_response_time
-nd_netdata_server_cpu_total
-nd_netdata_statsd_bytes_total
-nd_netdata_statsd_cpu_total
-nd_netdata_statsd_events_total
-nd_netdata_statsd_metrics
-nd_netdata_statsd_packets_total
-nd_netdata_statsd_reads_total
-nd_netdata_statsd_useful_metrics
-nd_netdata_tcp_connected
-nd_netdata_tcp_connects_total
-nd_netdata_web_cpu_total
-nd_net_drops_total
-nd_net_net_total
-nd_net_packets_total
-nd_services_cpu_total
-nd_services_mem_usage
-nd_services_swap_usage
-nd_services_throttle_io_ops_read_total
-nd_services_throttle_io_ops_write_total
-nd_services_throttle_io_read_total
-nd_services_throttle_io_write_total
-nd_system_active_processes
-nd_system_cpu_total
-nd_system_ctxt_total
-nd_system_entropy
-nd_system_forks_total
-nd_system_idlejitter
-nd_system_interrupts_total
-nd_system_intr_total
-nd_system_io_total
-nd_system_ipc_semaphore_arrays
-nd_system_ipc_semaphores
-nd_system_ip_total
-nd_system_ipv6_total
-nd_system_load
-nd_system_net_total
-nd_system_pgpgio_total
-nd_system_processes
-nd_system_ram
-nd_system_shared_memory_bytes
-nd_system_shared_memory_segments
-nd_system_softirqs_total
-nd_system_softnet_stat_total
-nd_system_swap
-nd_system_swapio_total
-nd_system_uptime
diff --git a/tests/backends/prometheus.bats b/tests/backends/prometheus.bats
deleted file mode 100755
index d52f39d5..00000000
--- a/tests/backends/prometheus.bats
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/usr/bin/env bats
-
-validate_metrics() {
- fname="${1}"
- params="${2}"
-
- curl -sS "http://localhost:19999/api/v1/allmetrics?format=prometheus&prefix=nd&timestamps=no${params}" |
- grep -E 'nd_system_|nd_cpu_|nd_system_|nd_net_|nd_disk_|nd_ip_|nd_ipv4_|nd_ipv6_|nd_mem_|nd_netdata_|nd_apps_|nd_services_' |
- sed -ne 's/{.*//p' | sort | uniq > tests/backends/new-${fname}
- diff tests/backends/${fname} tests/backends/new-${fname}
- rm tests/backends/new-${fname}
-}
-
-
-if [ ! -f .gitignore ]; then
- echo "Need to run as ./tests/backends/$(basename "$0") from top level directory of git repository" >&2
- exit 1
-fi
-
-
-@test "prometheus raw" {
- validate_metrics prometheus-raw.txt "&data=raw"
-}
-
-@test "prometheus avg" {
- validate_metrics prometheus-avg.txt ""
-}
-
-@test "prometheus avg oldunits" {
- validate_metrics prometheus-avg-oldunits.txt "&oldunits=yes"
-}
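These bats checks compared live agent output against the fixture lists above; the same comparison can be reproduced by hand. A minimal sketch, assuming an agent is listening on localhost:19999 (the output path is illustrative):

```bash
# Fetch Prometheus-format metrics the way prometheus.bats did, drop the
# label sets, and keep a sorted, de-duplicated list of metric names.
curl -sS "http://localhost:19999/api/v1/allmetrics?format=prometheus&prefix=nd&timestamps=no&data=raw" |
    sed -ne 's/{.*//p' |
    sort -u > /tmp/nd-metric-names.txt
```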
diff --git a/tests/health_mgmtapi/README.md b/tests/health_mgmtapi/README.md
deleted file mode 100644
index 8473b35e..00000000
--- a/tests/health_mgmtapi/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
-# Health command API tester
-
-The directory `tests/health_mgmtapi` contains the test script `health-cmdapi-test.sh` for the [health command API](../../web/api/health).
-
-The script can be executed with options to prepare the system for the tests, run them and restore the system to its previous state.
-
-It depends on the management API being accessible on localhost:19999 and on functional responses to `api/v1/alarms?all` requests.
-It also requires read access to the management API key, which is usually stored under `/var/lib/netdata/netdata.api.key` (`@varlibdir_POST@/netdata.api.key`).
-
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Ftests%2Fhealth_mgmtapi%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
-
-
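Beyond running the script, the health management API it exercises can be driven directly. A minimal sketch, assuming a local agent and a readable key file (the alarm name is illustrative):

```bash
# Authenticate with the management API key, silence one alarm, then
# restore the default state. Every call needs the token header.
TOKEN="$(cat /var/lib/netdata/netdata.api.key)"
curl -s "http://localhost:19999/api/v1/manage/health?cmd=SILENCE&alarm=load_trigger" \
    -H "X-Auth-Token: ${TOKEN}"
curl -s "http://localhost:19999/api/v1/manage/health?cmd=RESET" \
    -H "X-Auth-Token: ${TOKEN}"
```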
diff --git a/tests/health_mgmtapi/expected_list/ALARM_CPU_IOWAIT-list.json b/tests/health_mgmtapi/expected_list/ALARM_CPU_IOWAIT-list.json
deleted file mode 100644
index 9f05efe7..00000000
--- a/tests/health_mgmtapi/expected_list/ALARM_CPU_IOWAIT-list.json
+++ /dev/null
@@ -1 +0,0 @@
-{ "all": false, "type": "SILENCE", "silencers": [ { "alarm": "*10min_cpu_iowait" }, { "alarm": "*10min_cpu_usage *load_trigger" } ] }
diff --git a/tests/health_mgmtapi/expected_list/ALARM_CPU_USAGE-list.json b/tests/health_mgmtapi/expected_list/ALARM_CPU_USAGE-list.json
deleted file mode 100644
index dbf87992..00000000
--- a/tests/health_mgmtapi/expected_list/ALARM_CPU_USAGE-list.json
+++ /dev/null
@@ -1 +0,0 @@
-{ "all": false, "type": "SILENCE", "silencers": [ { "alarm": "*10min_cpu_usage *load_trigger", "context": "system.cpu" }, { "alarm": "*10min_cpu_usage *load_trigger", "chart": "system.load" } ] }
diff --git a/tests/health_mgmtapi/expected_list/CONTEXT_SYSTEM_CPU-list.json b/tests/health_mgmtapi/expected_list/CONTEXT_SYSTEM_CPU-list.json
deleted file mode 100644
index a267cfd6..00000000
--- a/tests/health_mgmtapi/expected_list/CONTEXT_SYSTEM_CPU-list.json
+++ /dev/null
@@ -1 +0,0 @@
-{ "all": false, "type": "DISABLE", "silencers": [ { "context": "system.cpu" }, { "chart": "system.load" } ] }
diff --git a/tests/health_mgmtapi/expected_list/DISABLE-list.json b/tests/health_mgmtapi/expected_list/DISABLE-list.json
deleted file mode 100644
index c2c77810..00000000
--- a/tests/health_mgmtapi/expected_list/DISABLE-list.json
+++ /dev/null
@@ -1 +0,0 @@
-{ "all": false, "type": "DISABLE", "silencers": [ { "alarm": "*10min_cpu_usage *load_trigger" } ] }
diff --git a/tests/health_mgmtapi/expected_list/DISABLE_ALL-list.json b/tests/health_mgmtapi/expected_list/DISABLE_ALL-list.json
deleted file mode 100644
index bbc3f4f0..00000000
--- a/tests/health_mgmtapi/expected_list/DISABLE_ALL-list.json
+++ /dev/null
@@ -1 +0,0 @@
-{ "all": true, "type": "DISABLE", "silencers": [] }
diff --git a/tests/health_mgmtapi/expected_list/DISABLE_ALL_ERROR-list.json b/tests/health_mgmtapi/expected_list/DISABLE_ALL_ERROR-list.json
deleted file mode 100644
index e8aee179..00000000
--- a/tests/health_mgmtapi/expected_list/DISABLE_ALL_ERROR-list.json
+++ /dev/null
@@ -1 +0,0 @@
-Auth Error
diff --git a/tests/health_mgmtapi/expected_list/DISABLE_SYSTEM_LOAD-list.json b/tests/health_mgmtapi/expected_list/DISABLE_SYSTEM_LOAD-list.json
deleted file mode 100644
index a7fc1cb8..00000000
--- a/tests/health_mgmtapi/expected_list/DISABLE_SYSTEM_LOAD-list.json
+++ /dev/null
@@ -1 +0,0 @@
-{ "all": false, "type": "DISABLE", "silencers": [ { "chart": "system.load" } ] }
diff --git a/tests/health_mgmtapi/expected_list/FAMILIES_LOAD-list.json b/tests/health_mgmtapi/expected_list/FAMILIES_LOAD-list.json
deleted file mode 100644
index 50119f79..00000000
--- a/tests/health_mgmtapi/expected_list/FAMILIES_LOAD-list.json
+++ /dev/null
@@ -1 +0,0 @@
-{ "all": false, "type": "None", "silencers": [ { "families": "load" } ] }
diff --git a/tests/health_mgmtapi/expected_list/HOSTS-list.json b/tests/health_mgmtapi/expected_list/HOSTS-list.json
deleted file mode 100644
index 9db21b6c..00000000
--- a/tests/health_mgmtapi/expected_list/HOSTS-list.json
+++ /dev/null
@@ -1 +0,0 @@
-{ "all": false, "type": "SILENCE", "silencers": [ { "hosts": "*" } ] }
diff --git a/tests/health_mgmtapi/expected_list/RESET-list.json b/tests/health_mgmtapi/expected_list/RESET-list.json
deleted file mode 100644
index 2d3f09d6..00000000
--- a/tests/health_mgmtapi/expected_list/RESET-list.json
+++ /dev/null
@@ -1 +0,0 @@
-{ "all": false, "type": "None", "silencers": [] }
diff --git a/tests/health_mgmtapi/expected_list/SILENCE-list.json b/tests/health_mgmtapi/expected_list/SILENCE-list.json
deleted file mode 100644
index d157f2d3..00000000
--- a/tests/health_mgmtapi/expected_list/SILENCE-list.json
+++ /dev/null
@@ -1 +0,0 @@
-{ "all": false, "type": "SILENCE", "silencers": [ { "alarm": "*10min_cpu_usage *load_trigger" } ] }
diff --git a/tests/health_mgmtapi/expected_list/SILENCE_2-list.json b/tests/health_mgmtapi/expected_list/SILENCE_2-list.json
deleted file mode 100644
index d5e6fa2d..00000000
--- a/tests/health_mgmtapi/expected_list/SILENCE_2-list.json
+++ /dev/null
@@ -1 +0,0 @@
-{ "all": false, "type": "SILENCE", "silencers": [ { "families": "load" } ] }
diff --git a/tests/health_mgmtapi/expected_list/SILENCE_3-list.json b/tests/health_mgmtapi/expected_list/SILENCE_3-list.json
deleted file mode 100644
index 69e98cc1..00000000
--- a/tests/health_mgmtapi/expected_list/SILENCE_3-list.json
+++ /dev/null
@@ -1 +0,0 @@
-{ "all": false, "type": "SILENCE", "silencers": [] } WARNING: SILENCE or DISABLE command is ineffective without defining any alarm selectors.
diff --git a/tests/health_mgmtapi/expected_list/SILENCE_ALARM_CPU_USAGE-list.json b/tests/health_mgmtapi/expected_list/SILENCE_ALARM_CPU_USAGE-list.json
deleted file mode 100644
index dd789cd3..00000000
--- a/tests/health_mgmtapi/expected_list/SILENCE_ALARM_CPU_USAGE-list.json
+++ /dev/null
@@ -1 +0,0 @@
-{ "all": false, "type": "SILENCE", "silencers": [ { "alarm": "*10min_cpu_usage *load_trigger", "chart": "system.load" } ] }
diff --git a/tests/health_mgmtapi/expected_list/SILENCE_ALARM_CPU_USAGE_LOAD_TRIGGER-list.json b/tests/health_mgmtapi/expected_list/SILENCE_ALARM_CPU_USAGE_LOAD_TRIGGER-list.json
deleted file mode 100644
index d157f2d3..00000000
--- a/tests/health_mgmtapi/expected_list/SILENCE_ALARM_CPU_USAGE_LOAD_TRIGGER-list.json
+++ /dev/null
@@ -1 +0,0 @@
-{ "all": false, "type": "SILENCE", "silencers": [ { "alarm": "*10min_cpu_usage *load_trigger" } ] }
diff --git a/tests/health_mgmtapi/expected_list/SILENCE_ALL-list.json b/tests/health_mgmtapi/expected_list/SILENCE_ALL-list.json
deleted file mode 100644
index c88ef9fd..00000000
--- a/tests/health_mgmtapi/expected_list/SILENCE_ALL-list.json
+++ /dev/null
@@ -1 +0,0 @@
-{ "all": true, "type": "SILENCE", "silencers": [] }
diff --git a/tests/health_mgmtapi/health-cmdapi-test.sh b/tests/health_mgmtapi/health-cmdapi-test.sh
new file mode 100644
index 00000000..da981b14
--- /dev/null
+++ b/tests/health_mgmtapi/health-cmdapi-test.sh
@@ -0,0 +1,226 @@
+#!/usr/bin/env bash
+# shellcheck disable=SC1117,SC2034,SC2059,SC2086,SC2181
+
+NETDATA_VARLIB_DIR="/var/lib/netdata"
+
+check () {
+ sec=1
+ echo -e " ${GRAY}Check: '${1}' in $sec sec"
+ sleep $sec
+ number=$RANDOM
+ resp=$(curl -s "http://$URL/api/v1/alarms?all&$number")
+ r=$(echo "${resp}" | \
+ python3 -c "import sys, json; d=json.load(sys.stdin); \
+ print(\
+ d['alarms']['system.cpu.10min_cpu_usage']['disabled'], \
+ d['alarms']['system.cpu.10min_cpu_usage']['silenced'] , \
+ d['alarms']['system.cpu.10min_cpu_iowait']['disabled'], \
+ d['alarms']['system.cpu.10min_cpu_iowait']['silenced'], \
+ d['alarms']['system.load.load_trigger']['disabled'], \
+ d['alarms']['system.load.load_trigger']['silenced'], \
+ );" 2>&1)
+ if [ $? -ne 0 ] ; then
+ echo -e " ${RED}ERROR: Unexpected response stored in /tmp/resp-$number.json"
+ echo "$resp" > /tmp/resp-$number.json
+ err=$((err+1))
+ iter=0
+ elif [ "${r}" != "${2}" ] ; then
+		echo -e "  ${GRAY}WARNING: Got '${r}'. Expected '${2}'"
+ iter=$((iter+1))
+ if [ $iter -lt 10 ] ; then
+ echo -e " ${GRAY}Repeating test "
+ check "$1" "$2"
+ else
+			echo -e "  ${RED}ERROR: Got '${r}'. Expected '${2}'"
+ iter=0
+ err=$((err+1))
+ fi
+ else
+ echo -e " ${GREEN}Success"
+ iter=0
+ fi
+}
+
+cmd () {
+ echo -e "${WHITE}Cmd '${1}'"
+ echo -en " ${GRAY}Expecting '${2}' : "
+ RESPONSE=$(curl -s "http://$URL/api/v1/manage/health?${1}" -H "X-Auth-Token: $TOKEN" 2>&1)
+ if [ "${RESPONSE}" != "${2}" ] ; then
+ echo -e "${RED}ERROR: Response '${RESPONSE}'"
+ err=$((err+1))
+ else
+ echo -e "${GREEN}Success"
+ fi
+}
+
+check_list() {
+ RESPONSE=$(curl -s "http://$URL/api/v1/manage/health?cmd=LIST" -H "X-Auth-Token: $TOKEN" 2>&1)
+
+ NAME="$1-list.json"
+ echo $RESPONSE > $NAME
+ diff $NAME expected_list/$NAME 1>/dev/null 2>&1
+ if [ $? -eq 0 ]; then
+ echo -e "${GREEN}Success: The list command got the correct answer for $NAME!"
+ else
+		echo -e "${RED}ERROR: the files $NAME and expected_list/$NAME do not match."
+ exit 1
+ fi
+}
+
+WHITE='\033[0;37m'
+RED='\033[0;31m'
+GREEN='\033[0;32m'
+GRAY='\033[0;37m'
+
+SETUP=0
+RESTART=0
+CLEANUP=0
+TEST=0
+URL="localhost:19999"
+
+err=0
+
+
+ HEALTH_CMDAPI_MSG_AUTHERROR="Auth Error"
+ HEALTH_CMDAPI_MSG_SILENCEALL="All alarm notifications are silenced"
+ HEALTH_CMDAPI_MSG_DISABLEALL="All health checks are disabled"
+ HEALTH_CMDAPI_MSG_RESET="All health checks and notifications are enabled"
+ HEALTH_CMDAPI_MSG_DISABLE="Health checks disabled for alarms matching the selectors"
+ HEALTH_CMDAPI_MSG_SILENCE="Alarm notifications silenced for alarms matching the selectors"
+ HEALTH_CMDAPI_MSG_ADDED="Alarm selector added"
+ HEALTH_CMDAPI_MSG_INVALID_KEY="Invalid key. Ignoring it."
+ HEALTH_CMDAPI_MSG_STYPEWARNING="WARNING: Added alarm selector to silence/disable alarms without a SILENCE or DISABLE command."
+ HEALTH_CMDAPI_MSG_NOSELECTORWARNING="WARNING: SILENCE or DISABLE command is ineffective without defining any alarm selectors."
+
+ if [ -f "${NETDATA_VARLIB_DIR}/netdata.api.key" ] ;then
+ read -r CORRECT_TOKEN < "${NETDATA_VARLIB_DIR}/netdata.api.key"
+ else
+ echo "${NETDATA_VARLIB_DIR}/netdata.api.key not found"
+ exit 1
+ fi
+ # Set correct token
+ TOKEN="${CORRECT_TOKEN}"
+
+ # Test default state
+ cmd "cmd=RESET" "$HEALTH_CMDAPI_MSG_RESET"
+ check "Default State" "False False False False False False"
+ check_list "RESET"
+
+ # Test auth failure
+ TOKEN="Wrong token"
+ cmd "cmd=DISABLE ALL" "$HEALTH_CMDAPI_MSG_AUTHERROR"
+ check "Default State" "False False False False False False"
+ check_list "DISABLE_ALL_ERROR"
+
+ # Set correct token
+ TOKEN="${CORRECT_TOKEN}"
+
+ # Test disable
+ cmd "cmd=DISABLE ALL" "$HEALTH_CMDAPI_MSG_DISABLEALL"
+ check "All disabled" "True False True False True False"
+ check_list "DISABLE_ALL"
+
+ # Reset
+ cmd "cmd=RESET" "$HEALTH_CMDAPI_MSG_RESET"
+ check "Default State" "False False False False False False"
+ check_list "RESET"
+
+ # Test silence
+ cmd "cmd=SILENCE ALL" "$HEALTH_CMDAPI_MSG_SILENCEALL"
+ check "All silenced" "False True False True False True"
+ check_list "SILENCE_ALL"
+
+ # Reset
+ cmd "cmd=RESET" "$HEALTH_CMDAPI_MSG_RESET"
+ check "Default State" "False False False False False False"
+ check_list "RESET"
+
+ # Add silencer by name
+ printf -v resp "$HEALTH_CMDAPI_MSG_SILENCE\n$HEALTH_CMDAPI_MSG_ADDED"
+ cmd "cmd=SILENCE&alarm=*10min_cpu_usage *load_trigger" "${resp}"
+ check "Silence notifications for alarm1 and load_trigger" "False True False False False True"
+ check_list "SILENCE_ALARM_CPU_USAGE_LOAD_TRIGGER"
+
+ # Convert to disable health checks
+ cmd "cmd=DISABLE" "$HEALTH_CMDAPI_MSG_DISABLE"
+ check "Disable notifications for alarm1 and load_trigger" "True False False False True False"
+ check_list "DISABLE"
+
+ # Convert back to silence notifications
+ cmd "cmd=SILENCE" "$HEALTH_CMDAPI_MSG_SILENCE"
+ check "Silence notifications for alarm1 and load_trigger" "False True False False False True"
+ check_list "SILENCE"
+
+ # Add second silencer by name
+ cmd "alarm=*10min_cpu_iowait" "$HEALTH_CMDAPI_MSG_ADDED"
+ check "Silence notifications for alarm1,alarm2 and load_trigger" "False True False True False True"
+ check_list "ALARM_CPU_IOWAIT"
+
+ # Reset
+ cmd "cmd=RESET" "$HEALTH_CMDAPI_MSG_RESET"
+ check_list "RESET"
+
+ # Add silencer by chart
+ printf -v resp "$HEALTH_CMDAPI_MSG_DISABLE\n$HEALTH_CMDAPI_MSG_ADDED"
+ cmd "cmd=DISABLE&chart=system.load" "${resp}"
+ check "Disabled system.load" "False False False False True False"
+ check_list "DISABLE_SYSTEM_LOAD"
+
+ # Add silencer by context
+ cmd "context=system.cpu" "$HEALTH_CMDAPI_MSG_ADDED"
+ check "Disabled system.cpu and system.load" "True False True False True False"
+ check_list "CONTEXT_SYSTEM_CPU"
+
+ # Reset
+ cmd "cmd=RESET" "$HEALTH_CMDAPI_MSG_RESET"
+ check_list "RESET"
+
+ # Add second condition to a selector (AND)
+ printf -v resp "$HEALTH_CMDAPI_MSG_SILENCE\n$HEALTH_CMDAPI_MSG_ADDED"
+ cmd "cmd=SILENCE&alarm=*10min_cpu_usage *load_trigger&chart=system.load" "${resp}"
+ check "Silence notifications load_trigger" "False False False False False True"
+ check_list "SILENCE_ALARM_CPU_USAGE"
+
+ # Add second selector with two conditions
+ cmd "alarm=*10min_cpu_usage *load_trigger&context=system.cpu" "$HEALTH_CMDAPI_MSG_ADDED"
+ check "Silence notifications load_trigger" "False True False False False True"
+ check_list "ALARM_CPU_USAGE"
+
+ # Reset
+ cmd "cmd=RESET" "$HEALTH_CMDAPI_MSG_RESET"
+ check_list "RESET"
+
+ # Add silencer without a command to disable or silence alarms
+ printf -v resp "$HEALTH_CMDAPI_MSG_ADDED\n$HEALTH_CMDAPI_MSG_STYPEWARNING"
+ cmd "families=load" "${resp}"
+ check "Family selector with no command" "False False False False False False"
+ check_list "FAMILIES_LOAD"
+
+ # Add silence command
+ cmd "cmd=SILENCE" "$HEALTH_CMDAPI_MSG_SILENCE"
+ check "Silence family load" "False False False False False True"
+ check_list "SILENCE_2"
+
+ # Reset
+ cmd "cmd=RESET" "$HEALTH_CMDAPI_MSG_RESET"
+ check_list "RESET"
+
+ # Add command without silencers
+ printf -v resp "$HEALTH_CMDAPI_MSG_SILENCE\n$HEALTH_CMDAPI_MSG_NOSELECTORWARNING"
+ cmd "cmd=SILENCE" "${resp}"
+ check "Command with no selector" "False False False False False False"
+ check_list "SILENCE_3"
+
+ # Add hosts silencer
+ cmd "hosts=*" "$HEALTH_CMDAPI_MSG_ADDED"
+ check "Silence all hosts" "False True False True False True"
+ check_list "HOSTS"
+
+ # Reset
+ cmd "cmd=RESET" "$HEALTH_CMDAPI_MSG_RESET"
+ check_list "RESET"
+
+if [ $err -gt 0 ] ; then
+ echo "$err error(s) found"
+ exit 1
+fi
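A sketch of a typical invocation of the new script, assuming a running agent on its default port and read access to the API key; it must be started from `tests/health_mgmtapi` so the `expected_list/*.json` fixtures resolve:

```bash
# Run the full silencer test sequence; the script exits non-zero
# when any cmd/check/check_list step fails.
cd tests/health_mgmtapi
bash health-cmdapi-test.sh; echo "exit code: $?"
```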
diff --git a/tests/installer/checksums.sh b/tests/installer/checksums.sh
deleted file mode 100755
index b2b0b2a2..00000000
--- a/tests/installer/checksums.sh
+++ /dev/null
@@ -1,51 +0,0 @@
-#!/bin/bash
-#
-# Mechanism to validate kickstart files integrity status
-#
-# Copyright: SPDX-License-Identifier: GPL-3.0-or-later
-#
-# Author : Pawel Krupa (pawel@netdata.cloud)
-# Author : Pavlos Emm. Katsoulakis (paul@netdata.cloud)
-set -e
-
-# If we are not in netdata git repo, at the top level directory, fail
-TOP_LEVEL=$(basename "$(git rev-parse --show-toplevel 2> /dev/null || echo "")")
-CWD="$(git rev-parse --show-cdup 2> /dev/null || echo "")"
-if [ -n "$CWD" ] || [ ! "${TOP_LEVEL}" == "netdata" ]; then
- echo "Run as ./tests/installer/$(basename "$0") from top level directory of netdata git repository"
- echo "Kickstart validation process aborted"
- exit 1
-fi
-
-README_DOC="packaging/installer/README.md"
-source ./tests/installer/slack.sh
-
-for file in kickstart.sh kickstart-static64.sh; do
- README_MD5=$(grep "$file" $README_DOC | grep md5sum | cut -d '"' -f2)
- KICKSTART_URL="https://my-netdata.io/$file"
- KICKSTART="packaging/installer/$file"
- KICKSTART_MD5="$(md5sum "${KICKSTART}" | cut -d' ' -f1)"
- CALCULATED_MD5="$(curl -Ss ${KICKSTART_URL} | md5sum | cut -d ' ' -f 1)"
-
- # Conditionally run the website validation
- if [ -z "${LOCAL_ONLY}" ]; then
-		echo "Validating ${KICKSTART_URL} against local file ${KICKSTART} with MD5 ${KICKSTART_MD5}..."
- if [ "$KICKSTART_MD5" == "$CALCULATED_MD5" ]; then
- echo "${KICKSTART_URL} looks fine"
- else
-			post_message "TRAVIS_MESSAGE" "Attention <!here>, ${KICKSTART_URL} md5sum does not match the local file, it needs to be updated"
- fi
- fi
-
- echo "Validating documentation for $file"
- if [ "$KICKSTART_MD5" != "$README_MD5" ]; then
- echo "Invalid checksum for $file in $README_DOC."
- echo "checksum in docs: $README_MD5"
- echo "current checksum: $KICKSTART_MD5"
- exit 2
- else
- echo "$file MD5Sum is well documented"
- fi
-
-done
-echo "No problems found, exiting succesfully!"
diff --git a/tests/installer/slack.sh b/tests/installer/slack.sh
deleted file mode 100755
index 83cb5fa7..00000000
--- a/tests/installer/slack.sh
+++ /dev/null
@@ -1,65 +0,0 @@
-# No shebang necessary
-# BASH Lib: Simple incoming webhook for slack integration.
-#
-# The script expects the following parameters to be defined by the upper layer:
-# SLACK_NOTIFY_WEBHOOK_URL
-# SLACK_BOT_NAME
-# SLACK_CHANNEL
-#
-# Copyright:
-#
-# Author: Pavlos Emm. Katsoulakis <paul@netdata.cloud>
-
-post_message() {
- TYPE="$1"
- MESSAGE="$2"
- CUSTOM_CHANNEL="$3"
-
- case "$TYPE" in
- "PLAIN_MESSAGE")
- curl -X POST --data-urlencode "payload={\"channel\": \"${SLACK_CHANNEL}\", \"username\": \"${SLACK_BOT_NAME}\", \"text\": \"${MESSAGE}\", \"icon_emoji\": \":space_invader:\"}" ${SLACK_NOTIFY_WEBHOOK_URL}
- ;;
- "TRAVIS_MESSAGE")
- EVENT_LINE="${TRAVIS_JOB_NUMBER}: Event type '${TRAVIS_EVENT_TYPE}', on '${TRAVIS_OS_NAME}'"
-		if [ "${TRAVIS_EVENT_TYPE}" == "pull_request" ]; then
- EVENT_LINE="${TRAVIS_JOB_NUMBER}: Event type '${TRAVIS_EVENT_TYPE}' #${TRAVIS_PULL_REQUEST}, on '${TRAVIS_OS_NAME}' "
- fi
-
- if [ -n "${CUSTOM_CHANNEL}" ]; then
- echo "Sending travis message to custom channel ${CUSTOM_CHANNEL}"
- OPTIONAL_CHANNEL_INFO="\"channel\": \"${CUSTOM_CHANNEL}\","
- fi
-
- POST_MESSAGE="{
- ${OPTIONAL_CHANNEL_INFO}
- \"text\": \"${TRAVIS_REPO_SLUG}, ${MESSAGE}\",
- \"attachments\": [{
-			\"text\": \"${EVENT_LINE}\",
- \"fallback\": \"I could not determine the build\",
- \"callback_id\": \"\",
- \"color\": \"#3AA3E3\",
- \"attachment_type\": \"default\",
- \"actions\": [
- {
- \"name\": \"${TRAVIS_BUILD_NUMBER}\",
- \"text\": \"Build #${TRAVIS_BUILD_NUMBER}\",
- \"type\": \"button\",
- \"url\": \"${TRAVIS_BUILD_WEB_URL}\"
- },
- {
- \"name\": \"${TRAVIS_JOB_NUMBER}\",
- \"text\": \"Job #${TRAVIS_JOB_NUMBER}\",
- \"type\": \"button\",
- \"url\": \"${TRAVIS_JOB_WEB_URL}\"
- }]
- }]
- }"
- echo "Sending ${POST_MESSAGE}"
- curl -X POST --data-urlencode "payload=${POST_MESSAGE}" "${SLACK_NOTIFY_WEBHOOK_URL}"
- ;;
- *)
- echo "Unrecognized message type \"$TYPE\" was given"
- return 1
- ;;
- esac
-}
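Callers were expected to export the three variables named in the header before sourcing the library. A sketch with hypothetical values:

```bash
# Hypothetical webhook and channel; substitute real CI secrets.
export SLACK_NOTIFY_WEBHOOK_URL="https://hooks.slack.com/services/T000/B000/XXXX"
export SLACK_BOT_NAME="netdata-ci"
export SLACK_CHANNEL="#builds"

source ./tests/installer/slack.sh
post_message "PLAIN_MESSAGE" "kickstart checksum validation finished"
```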
diff --git a/tests/k6/data.js b/tests/k6/data.js
deleted file mode 100644
index fb4e087e..00000000
--- a/tests/k6/data.js
+++ /dev/null
@@ -1,67 +0,0 @@
-import http from "k6/http";
-import { log, check, group, sleep } from "k6";
-import { Rate } from "k6/metrics";
-
-// A custom metric to track failure rates
-var failureRate = new Rate("check_failure_rate");
-
-// Options
-export let options = {
- stages: [
- // Linearly ramp up from 1 to 20 VUs during first 30s
- { target: 20, duration: "30s" },
-        // Hold at 20 VUs for the next 1 minute
- { target: 20, duration: "1m" },
-        // Linearly ramp down from 20 to 0 VUs over the last 10 seconds
- { target: 0, duration: "10s" }
- ],
- thresholds: {
- // We want the 95th percentile of all HTTP request durations to be less than 500ms
- "http_req_duration": ["p(95)<500"],
- // Requests with the fast tag should finish even faster
- "http_req_duration{fast:yes}": ["p(99)<250"],
- // Thresholds based on the custom metric we defined and use to track application failures
- "check_failure_rate": [
- // Global failure rate should be less than 1%
- "rate<0.01",
- // Abort the test early if it climbs over 5%
- { threshold: "rate<=0.05", abortOnFail: true },
- ],
- },
-};
-
-function rnd(min, max) {
- min = Math.ceil(min);
- max = Math.floor(max);
-    return Math.floor(Math.random() * (max - min)) + min; // The maximum is exclusive and the minimum is inclusive
-}
-
-// Main function
-export default function () {
- // Control what the data request asks for
- let charts = [ "example.random" ]
- let chartmin = 0;
- let chartmax = charts.length - 1;
- let aftermin = 60;
- let aftermax = 3600;
- let beforemin = 3503600;
- let beforemax = 3590000;
- let pointsmin = 300;
- let pointsmax = 3600;
-
- group("Requests", function () {
-    // Execute multiple requests in parallel like a browser, to fetch data for the charts. The one taking the longest is the data request.
- let resps = http.batch([
- ["GET", "http://localhost:19999/api/v1/info", { tags: { fast: "yes" } }],
- ["GET", "http://localhost:19999/api/v1/charts", { tags: { fast: "yes" } }],
- ["GET", "http://localhost:19999/api/v1/data?chart="+charts[rnd(chartmin,chartmax)]+"&before=-"+rnd(beforemin,beforemax)+"&after=-"+rnd(aftermin,aftermax)+"&points="+rnd(pointsmin,pointsmax)+"&format=json&group=average&gtime=0&options=ms%7Cflip%7Cjsonwrap%7Cnonzero&_="+rnd(1,1000000000000), { }],
- ["GET", "http://localhost:19999/api/v1/alarms", { tags: { fast: "yes" } }]
- ]);
- // Combine check() call with failure tracking
- failureRate.add(!check(resps, {
-            "status is 200": (r) => r[0].status === 200 && r[1].status === 200 && r[2].status === 200 && r[3].status === 200
- }));
- });
-
- sleep(Math.random() * 2 + 1); // Random sleep between 1s and 3s
-}
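For reference, the removed scenario was driven by the k6 CLI. A sketch, assuming k6 is installed and an agent is serving localhost:19999:

```bash
# Run the staged load test; k6 enforces the thresholds above and
# aborts early once check_failure_rate climbs over 5%.
k6 run tests/k6/data.js
```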
diff --git a/tests/lifecycle.bats b/tests/lifecycle.bats
deleted file mode 100755
index 4d18115b..00000000
--- a/tests/lifecycle.bats
+++ /dev/null
@@ -1,61 +0,0 @@
-#!/usr/bin/env bats
-#
-# Netdata installation lifecycle testing script.
-# This is to validate the install, update and uninstall of netdata
-#
-# Copyright: SPDX-License-Identifier: GPL-3.0-or-later
-#
-# Author : Pavlos Emm. Katsoulakis <paul@netdata.cloud>
-#
-
-INSTALLATION="$BATS_TMPDIR/installation"
-ENV="${INSTALLATION}/netdata/etc/netdata/.environment"
-# list of files which need to be checked. Path cannot start from '/'
-FILES="usr/libexec/netdata/plugins.d/go.d.plugin
- usr/libexec/netdata/plugins.d/charts.d.plugin
- usr/libexec/netdata/plugins.d/python.d.plugin
- usr/libexec/netdata/plugins.d/node.d.plugin"
-
-DIRS="usr/sbin/netdata
- etc/netdata
- usr/share/netdata
- usr/libexec/netdata
- var/cache/netdata
- var/lib/netdata
- var/log/netdata"
-
-setup() {
- # If we are not in netdata git repo, at the top level directory, fail
- TOP_LEVEL=$(basename "$(git rev-parse --show-toplevel)")
- CWD=$(git rev-parse --show-cdup || echo "")
- if [ -n "${CWD}" ] || [ ! "${TOP_LEVEL}" == "netdata" ]; then
-		echo "Run as ./tests/$(basename "$0") from top level directory of git repository"
- exit 1
- fi
-}
-
-@test "install netdata" {
- ./netdata-installer.sh --dont-wait --dont-start-it --auto-update --install "${INSTALLATION}"
-
- # Validate particular files
- for file in $FILES; do
- [ ! -f "$BATS_TMPDIR/$file" ]
- done
-
- # Validate particular directories
- for a_dir in $DIRS; do
- [ ! -d "$BATS_TMPDIR/$a_dir" ]
- done
-}
-
-@test "update netdata" {
- export ENVIRONMENT_FILE="${ENV}"
- /etc/cron.daily/netdata-updater
- ! grep "new_installation" "${ENV}"
-}
-
-@test "uninstall netdata" {
- ./packaging/installer/netdata-uninstaller.sh --yes --force --env "${ENV}"
- [ ! -f "${INSTALLATION}/netdata/usr/sbin/netdata" ]
- [ ! -f "/etc/cron.daily/netdata-updater" ]
-}
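The suite was meant to be driven by the bats runner from the repository root. A sketch, assuming bats is installed:

```bash
# Install, update, and uninstall netdata inside $BATS_TMPDIR.
bats tests/lifecycle.bats
```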
diff --git a/tests/profile/Makefile b/tests/profile/Makefile
deleted file mode 100644
index 5f4e8b52..00000000
--- a/tests/profile/Makefile
+++ /dev/null
@@ -1,53 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-COMMON_CFLAGS = -I ../../ -DTARGET_OS=1 -Wall -Wextra
-PROFILE_CFLAGS = -O1 -ggdb $(COMMON_CFLAGS)
-PERFORMANCE_CFLAGS = -O2 $(COMMON_CFLAGS)
-
-CFLAGS = $(PERFORMANCE_CFLAGS)
-
-LIBNETDATA_FILES = \
- ../../libnetdata/popen/popen.o \
- ../../libnetdata/storage_number/storage_number.o \
- ../../libnetdata/avl/avl.o \
- ../../libnetdata/socket/socket.o \
- ../../libnetdata/os.o \
- ../../libnetdata/clocks/clocks.o \
- ../../libnetdata/procfile/procfile.o \
- ../../libnetdata/statistical/statistical.o \
- ../../libnetdata/eval/eval.o \
- ../../libnetdata/threads/threads.o \
- ../../libnetdata/dictionary/dictionary.o \
- ../../libnetdata/simple_pattern/simple_pattern.o \
- ../../libnetdata/url/url.o \
- ../../libnetdata/config/appconfig.o \
- ../../libnetdata/libnetdata.o \
- ../../libnetdata/buffer/buffer.o \
- ../../libnetdata/adaptive_resortable_list/adaptive_resortable_list.o \
- ../../libnetdata/locks/locks.o \
- ../../libnetdata/log/log.o \
- $(NULL)
-
-COMMON_LDFLAGS = $(LIBNETDATA_FILES) -pthread -lm
-
-all: statsd-stress benchmark-procfile-parser test-eval benchmark-dictionary benchmark-value-pairs
-
-benchmark-procfile-parser: benchmark-procfile-parser.c
- gcc ${CFLAGS} -o $@ $^ ${COMMON_LDFLAGS}
-
-benchmark-dictionary: benchmark-dictionary.c
- gcc ${CFLAGS} -o $@ $^ ${COMMON_LDFLAGS}
-
-benchmark-value-pairs: benchmark-value-pairs.c
- gcc ${CFLAGS} -o $@ $^ ${COMMON_LDFLAGS}
-
-statsd-stress: statsd-stress.c
- gcc ${CFLAGS} -o $@ $^ ${COMMON_LDFLAGS}
-
-test-eval: test-eval.c
- gcc ${CFLAGS} -o $@ $^ ${COMMON_LDFLAGS}
-
-
-clean:
- rm -f benchmark-procfile-parser statsd-stress test-eval benchmark-dictionary benchmark-value-pairs
-
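The benchmark targets link object files from a prior in-tree build. A sketch of driving them, assuming netdata has already been compiled so the listed `.o` files exist under `libnetdata/`:

```bash
# Build and run one of the profiling helpers against the existing
# libnetdata objects referenced by the Makefile.
make -C tests/profile benchmark-dictionary
./tests/profile/benchmark-dictionary
```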
diff --git a/tests/profile/benchmark-dictionary.c b/tests/profile/benchmark-dictionary.c
deleted file mode 100644
index 30c098d5..00000000
--- a/tests/profile/benchmark-dictionary.c
+++ /dev/null
@@ -1,130 +0,0 @@
-/* SPDX-License-Identifier: GPL-3.0-or-later */
-/*
- * 1. build netdata (as normally)
- * 2. cd tests/profile/
- * 3. compile with:
- *    make benchmark-dictionary   (the Makefile in this directory links the required libnetdata objects; the old ../../src/ paths no longer exist)
- *
- */
-
-#include "config.h"
-#include "libnetdata/libnetdata.h"
-
-struct myvalue {
- int i;
-};
-
-void netdata_cleanup_and_exit(int ret) { exit(ret); }
-
-int main(int argc, char **argv) {
- if(argc || argv) {;}
-
-// DICTIONARY *dict = dictionary_create(DICTIONARY_FLAG_SINGLE_THREADED|DICTIONARY_FLAG_WITH_STATISTICS);
- DICTIONARY *dict = dictionary_create(DICTIONARY_FLAG_WITH_STATISTICS);
- if(!dict) fatal("Cannot create dictionary.");
-
- struct rusage start, end;
- unsigned long long dt;
- char buf[100 + 1];
- struct myvalue value, *v;
- int i, max = 30000000, max2;
-
- // ------------------------------------------------------------------------
-
- getrusage(RUSAGE_SELF, &start);
- dict->stats->inserts = dict->stats->deletes = dict->stats->searches = 0ULL;
- fprintf(stderr, "Inserting %d entries in the dictionary\n", max);
- for(i = 0; i < max; i++) {
- value.i = i;
- snprintf(buf, 100, "%d", i);
-
- dictionary_set(dict, buf, &value, sizeof(struct myvalue));
- }
- getrusage(RUSAGE_SELF, &end);
- dt = (end.ru_utime.tv_sec * 1000000ULL + end.ru_utime.tv_usec) - (start.ru_utime.tv_sec * 1000000ULL + start.ru_utime.tv_usec);
-	fprintf(stderr, "Added %d entries in %llu microseconds: %llu inserts per second\n", max, dt, max * 1000000ULL / dt);
- fprintf(stderr, " > Dictionary: %llu inserts, %llu deletes, %llu searches\n\n", dict->stats->inserts, dict->stats->deletes, dict->stats->searches);
-
- // ------------------------------------------------------------------------
-
- getrusage(RUSAGE_SELF, &start);
- dict->stats->inserts = dict->stats->deletes = dict->stats->searches = 0ULL;
- fprintf(stderr, "Retrieving %d entries from the dictionary\n", max);
- for(i = 0; i < max; i++) {
- value.i = i;
- snprintf(buf, 100, "%d", i);
-
- v = dictionary_get(dict, buf);
- if(!v)
- fprintf(stderr, "ERROR: cannot get value %d from the dictionary\n", i);
- else if(v->i != i)
- fprintf(stderr, "ERROR: expected %d but got %d\n", i, v->i);
- }
- getrusage(RUSAGE_SELF, &end);
- dt = (end.ru_utime.tv_sec * 1000000ULL + end.ru_utime.tv_usec) - (start.ru_utime.tv_sec * 1000000ULL + start.ru_utime.tv_usec);
-	fprintf(stderr, "Read %d entries in %llu microseconds: %llu searches per second\n", max, dt, max * 1000000ULL / dt);
- fprintf(stderr, " > Dictionary: %llu inserts, %llu deletes, %llu searches\n\n", dict->stats->inserts, dict->stats->deletes, dict->stats->searches);
-
- // ------------------------------------------------------------------------
-
- getrusage(RUSAGE_SELF, &start);
- dict->stats->inserts = dict->stats->deletes = dict->stats->searches = 0ULL;
- fprintf(stderr, "Resetting %d entries in the dictionary\n", max);
- for(i = 0; i < max; i++) {
- value.i = i;
- snprintf(buf, 100, "%d", i);
-
- dictionary_set(dict, buf, &value, sizeof(struct myvalue));
- }
- getrusage(RUSAGE_SELF, &end);
- dt = (end.ru_utime.tv_sec * 1000000ULL + end.ru_utime.tv_usec) - (start.ru_utime.tv_sec * 1000000ULL + start.ru_utime.tv_usec);
-	fprintf(stderr, "Reset %d entries in %llu microseconds: %llu resets per second\n", max, dt, max * 1000000ULL / dt);
- fprintf(stderr, " > Dictionary: %llu inserts, %llu deletes, %llu searches\n\n", dict->stats->inserts, dict->stats->deletes, dict->stats->searches);
-
- // ------------------------------------------------------------------------
-
- getrusage(RUSAGE_SELF, &start);
- dict->stats->inserts = dict->stats->deletes = dict->stats->searches = 0ULL;
- fprintf(stderr, "Searching %d non-existing entries in the dictionary\n", max);
- max2 = max * 2;
- for(i = max; i < max2; i++) {
- value.i = i;
- snprintf(buf, 100, "%d", i);
-
- v = dictionary_get(dict, buf);
- if(v)
-			fprintf(stderr, "ERROR: got non-existing value %d from the dictionary\n", i);
- }
- getrusage(RUSAGE_SELF, &end);
- dt = (end.ru_utime.tv_sec * 1000000ULL + end.ru_utime.tv_usec) - (start.ru_utime.tv_sec * 1000000ULL + start.ru_utime.tv_usec);
-	fprintf(stderr, "Searched %d non-existing entries in %llu microseconds: %llu not found searches per second\n", max, dt, max * 1000000ULL / dt);
- fprintf(stderr, " > Dictionary: %llu inserts, %llu deletes, %llu searches\n\n", dict->stats->inserts, dict->stats->deletes, dict->stats->searches);
-
- // ------------------------------------------------------------------------
-
- getrusage(RUSAGE_SELF, &start);
- dict->stats->inserts = dict->stats->deletes = dict->stats->searches = 0ULL;
- fprintf(stderr, "Deleting %d entries from the dictionary\n", max);
- for(i = 0; i < max; i++) {
- value.i = i;
- snprintf(buf, 100, "%d", i);
-
- dictionary_del(dict, buf);
- }
- getrusage(RUSAGE_SELF, &end);
- dt = (end.ru_utime.tv_sec * 1000000ULL + end.ru_utime.tv_usec) - (start.ru_utime.tv_sec * 1000000ULL + start.ru_utime.tv_usec);
-	fprintf(stderr, "Deleted %d entries in %llu microseconds: %llu deletes per second\n", max, dt, max * 1000000ULL / dt);
- fprintf(stderr, " > Dictionary: %llu inserts, %llu deletes, %llu searches\n\n", dict->stats->inserts, dict->stats->deletes, dict->stats->searches);
-
- // ------------------------------------------------------------------------
-
- getrusage(RUSAGE_SELF, &start);
- dict->stats->inserts = dict->stats->deletes = dict->stats->searches = 0ULL;
- fprintf(stderr, "Destroying dictionary\n");
- dictionary_destroy(dict);
- getrusage(RUSAGE_SELF, &end);
- dt = (end.ru_utime.tv_sec * 1000000ULL + end.ru_utime.tv_usec) - (start.ru_utime.tv_sec * 1000000ULL + start.ru_utime.tv_usec);
-	fprintf(stderr, "Destroyed in %llu microseconds\n", dt);
-
- return 0;
-}
diff --git a/tests/profile/benchmark-line-parsing.c b/tests/profile/benchmark-line-parsing.c
deleted file mode 100644
index c07d1d85..00000000
--- a/tests/profile/benchmark-line-parsing.c
+++ /dev/null
@@ -1,707 +0,0 @@
-/* SPDX-License-Identifier: GPL-3.0-or-later */
-#include <stdio.h>
-#include <inttypes.h>
-#include <string.h>
-#include <stdlib.h>
-#include <ctype.h>
-#include <sys/time.h>
-
-#define likely(x) __builtin_expect(!!(x), 1)
-#define unlikely(x) __builtin_expect(!!(x), 0)
-
-#define simple_hash(name) ({ \
- register unsigned char *__hash_source = (unsigned char *)(name); \
- register uint32_t __hash_value = 0x811c9dc5; \
- while (*__hash_source) { \
- __hash_value *= 16777619; \
- __hash_value ^= (uint32_t) *__hash_source++; \
- } \
- __hash_value; \
-})
-
-static inline uint32_t simple_hash2(const char *name) {
- register unsigned char *s = (unsigned char *)name;
- register uint32_t hval = 0x811c9dc5;
- while (*s) {
- hval *= 16777619;
- hval ^= (uint32_t) *s++;
- }
- return hval;
-}
-
-static inline unsigned long long fast_strtoull(const char *s) {
- register unsigned long long n = 0;
- register char c;
- for(c = *s; c >= '0' && c <= '9' ; c = *(++s)) {
- n *= 10;
- n += c - '0';
- // n = (n << 1) + (n << 3) + (c - '0');
- }
- return n;
-}
-
-static uint32_t cache_hash = 0;
-static uint32_t rss_hash = 0;
-static uint32_t rss_huge_hash = 0;
-static uint32_t mapped_file_hash = 0;
-static uint32_t writeback_hash = 0;
-static uint32_t dirty_hash = 0;
-static uint32_t swap_hash = 0;
-static uint32_t pgpgin_hash = 0;
-static uint32_t pgpgout_hash = 0;
-static uint32_t pgfault_hash = 0;
-static uint32_t pgmajfault_hash = 0;
-static uint32_t inactive_anon_hash = 0;
-static uint32_t active_anon_hash = 0;
-static uint32_t inactive_file_hash = 0;
-static uint32_t active_file_hash = 0;
-static uint32_t unevictable_hash = 0;
-static uint32_t hierarchical_memory_limit_hash = 0;
-static uint32_t total_cache_hash = 0;
-static uint32_t total_rss_hash = 0;
-static uint32_t total_rss_huge_hash = 0;
-static uint32_t total_mapped_file_hash = 0;
-static uint32_t total_writeback_hash = 0;
-static uint32_t total_dirty_hash = 0;
-static uint32_t total_swap_hash = 0;
-static uint32_t total_pgpgin_hash = 0;
-static uint32_t total_pgpgout_hash = 0;
-static uint32_t total_pgfault_hash = 0;
-static uint32_t total_pgmajfault_hash = 0;
-static uint32_t total_inactive_anon_hash = 0;
-static uint32_t total_active_anon_hash = 0;
-static uint32_t total_inactive_file_hash = 0;
-static uint32_t total_active_file_hash = 0;
-static uint32_t total_unevictable_hash = 0;
-
-char *strings[] = {
- "cache",
- "rss",
- "rss_huge",
- "mapped_file",
- "writeback",
- "dirty",
- "swap",
- "pgpgin",
- "pgpgout",
- "pgfault",
- "pgmajfault",
- "inactive_anon",
- "active_anon",
- "inactive_file",
- "active_file",
- "unevictable",
- "hierarchical_memory_limit",
- "total_cache",
- "total_rss",
- "total_rss_huge",
- "total_mapped_file",
- "total_writeback",
- "total_dirty",
- "total_swap",
- "total_pgpgin",
- "total_pgpgout",
- "total_pgfault",
- "total_pgmajfault",
- "total_inactive_anon",
- "total_active_anon",
- "total_inactive_file",
- "total_active_file",
- "total_unevictable",
- NULL
-};
-
-unsigned long long values1[12] = { 0 };
-unsigned long long values2[12] = { 0 };
-unsigned long long values3[12] = { 0 };
-unsigned long long values4[12] = { 0 };
-unsigned long long values5[12] = { 0 };
-unsigned long long values6[12] = { 0 };
-
-#define NUMBER1 "12345678901234"
-#define NUMBER2 "23456789012345"
-#define NUMBER3 "34567890123456"
-#define NUMBER4 "45678901234567"
-#define NUMBER5 "56789012345678"
-#define NUMBER6 "67890123456789"
-#define NUMBER7 "78901234567890"
-#define NUMBER8 "89012345678901"
-#define NUMBER9 "90123456789012"
-#define NUMBER10 "12345678901234"
-#define NUMBER11 "23456789012345"
-
-// simple system strcmp()
-void test1() {
- int i;
- for(i = 0; strings[i] ; i++) {
- char *s = strings[i];
-
- if(unlikely(!strcmp(s, "cache")))
- values1[i] = strtoull(NUMBER1, NULL, 10);
-
- else if(unlikely(!strcmp(s, "rss")))
- values1[i] = strtoull(NUMBER2, NULL, 10);
-
- else if(unlikely(!strcmp(s, "rss_huge")))
- values1[i] = strtoull(NUMBER3, NULL, 10);
-
- else if(unlikely(!strcmp(s, "mapped_file")))
- values1[i] = strtoull(NUMBER4, NULL, 10);
-
- else if(unlikely(!strcmp(s, "writeback")))
- values1[i] = strtoull(NUMBER5, NULL, 10);
-
- else if(unlikely(!strcmp(s, "dirty")))
- values1[i] = strtoull(NUMBER6, NULL, 10);
-
- else if(unlikely(!strcmp(s, "swap")))
- values1[i] = strtoull(NUMBER7, NULL, 10);
-
- else if(unlikely(!strcmp(s, "pgpgin")))
- values1[i] = strtoull(NUMBER8, NULL, 10);
-
- else if(unlikely(!strcmp(s, "pgpgout")))
- values1[i] = strtoull(NUMBER9, NULL, 10);
-
- else if(unlikely(!strcmp(s, "pgfault")))
- values1[i] = strtoull(NUMBER10, NULL, 10);
-
- else if(unlikely(!strcmp(s, "pgmajfault")))
- values1[i] = strtoull(NUMBER11, NULL, 10);
- }
-}
-
-// inline simple_hash() with system strtoull()
-void test2() {
- int i;
- for(i = 0; strings[i] ; i++) {
- char *s = strings[i];
- uint32_t hash = simple_hash2(s);
-
- if(unlikely(hash == cache_hash && !strcmp(s, "cache")))
- values2[i] = strtoull(NUMBER1, NULL, 10);
-
- else if(unlikely(hash == rss_hash && !strcmp(s, "rss")))
- values2[i] = strtoull(NUMBER2, NULL, 10);
-
- else if(unlikely(hash == rss_huge_hash && !strcmp(s, "rss_huge")))
- values2[i] = strtoull(NUMBER3, NULL, 10);
-
- else if(unlikely(hash == mapped_file_hash && !strcmp(s, "mapped_file")))
- values2[i] = strtoull(NUMBER4, NULL, 10);
-
- else if(unlikely(hash == writeback_hash && !strcmp(s, "writeback")))
- values2[i] = strtoull(NUMBER5, NULL, 10);
-
- else if(unlikely(hash == dirty_hash && !strcmp(s, "dirty")))
- values2[i] = strtoull(NUMBER6, NULL, 10);
-
- else if(unlikely(hash == swap_hash && !strcmp(s, "swap")))
- values2[i] = strtoull(NUMBER7, NULL, 10);
-
- else if(unlikely(hash == pgpgin_hash && !strcmp(s, "pgpgin")))
- values2[i] = strtoull(NUMBER8, NULL, 10);
-
- else if(unlikely(hash == pgpgout_hash && !strcmp(s, "pgpgout")))
- values2[i] = strtoull(NUMBER9, NULL, 10);
-
- else if(unlikely(hash == pgfault_hash && !strcmp(s, "pgfault")))
- values2[i] = strtoull(NUMBER10, NULL, 10);
-
- else if(unlikely(hash == pgmajfault_hash && !strcmp(s, "pgmajfault")))
- values2[i] = strtoull(NUMBER11, NULL, 10);
- }
-}
-
-// statement expression simple_hash(), system strtoull()
-void test3() {
- int i;
- for(i = 0; strings[i] ; i++) {
- char *s = strings[i];
- uint32_t hash = simple_hash(s);
-
- if(unlikely(hash == cache_hash && !strcmp(s, "cache")))
- values3[i] = strtoull(NUMBER1, NULL, 10);
-
- else if(unlikely(hash == rss_hash && !strcmp(s, "rss")))
- values3[i] = strtoull(NUMBER2, NULL, 10);
-
- else if(unlikely(hash == rss_huge_hash && !strcmp(s, "rss_huge")))
- values3[i] = strtoull(NUMBER3, NULL, 10);
-
- else if(unlikely(hash == mapped_file_hash && !strcmp(s, "mapped_file")))
- values3[i] = strtoull(NUMBER4, NULL, 10);
-
- else if(unlikely(hash == writeback_hash && !strcmp(s, "writeback")))
- values3[i] = strtoull(NUMBER5, NULL, 10);
-
- else if(unlikely(hash == dirty_hash && !strcmp(s, "dirty")))
- values3[i] = strtoull(NUMBER6, NULL, 10);
-
- else if(unlikely(hash == swap_hash && !strcmp(s, "swap")))
- values3[i] = strtoull(NUMBER7, NULL, 10);
-
- else if(unlikely(hash == pgpgin_hash && !strcmp(s, "pgpgin")))
- values3[i] = strtoull(NUMBER8, NULL, 10);
-
- else if(unlikely(hash == pgpgout_hash && !strcmp(s, "pgpgout")))
- values3[i] = strtoull(NUMBER9, NULL, 10);
-
- else if(unlikely(hash == pgfault_hash && !strcmp(s, "pgfault")))
- values3[i] = strtoull(NUMBER10, NULL, 10);
-
- else if(unlikely(hash == pgmajfault_hash && !strcmp(s, "pgmajfault")))
- values3[i] = strtoull(NUMBER11, NULL, 10);
- }
-}
-
-
-// inline simple_hash(), if-continue checks
-void test4() {
- int i;
- for(i = 0; strings[i] ; i++) {
- char *s = strings[i];
- uint32_t hash = simple_hash2(s);
-
- if(unlikely(hash == cache_hash && !strcmp(s, "cache"))) {
- values4[i] = strtoull(NUMBER1, NULL, 0);
- continue;
- }
-
- if(unlikely(hash == rss_hash && !strcmp(s, "rss"))) {
- values4[i] = strtoull(NUMBER2, NULL, 0);
- continue;
- }
-
- if(unlikely(hash == rss_huge_hash && !strcmp(s, "rss_huge"))) {
- values4[i] = strtoull(NUMBER3, NULL, 0);
- continue;
- }
-
- if(unlikely(hash == mapped_file_hash && !strcmp(s, "mapped_file"))) {
- values4[i] = strtoull(NUMBER4, NULL, 0);
- continue;
- }
-
- if(unlikely(hash == writeback_hash && !strcmp(s, "writeback"))) {
- values4[i] = strtoull(NUMBER5, NULL, 0);
- continue;
- }
-
- if(unlikely(hash == dirty_hash && !strcmp(s, "dirty"))) {
- values4[i] = strtoull(NUMBER6, NULL, 0);
- continue;
- }
-
- if(unlikely(hash == swap_hash && !strcmp(s, "swap"))) {
- values4[i] = strtoull(NUMBER7, NULL, 0);
- continue;
- }
-
- if(unlikely(hash == pgpgin_hash && !strcmp(s, "pgpgin"))) {
- values4[i] = strtoull(NUMBER8, NULL, 0);
- continue;
- }
-
- if(unlikely(hash == pgpgout_hash && !strcmp(s, "pgpgout"))) {
- values4[i] = strtoull(NUMBER9, NULL, 0);
- continue;
- }
-
- if(unlikely(hash == pgfault_hash && !strcmp(s, "pgfault"))) {
- values4[i] = strtoull(NUMBER10, NULL, 0);
- continue;
- }
-
- if(unlikely(hash == pgmajfault_hash && !strcmp(s, "pgmajfault"))) {
- values4[i] = strtoull(NUMBER11, NULL, 0);
- continue;
- }
- }
-}
-
-// inline simple_hash(), if-else-if-else-if (netdata default)
-void test5() {
- int i;
- for(i = 0; strings[i] ; i++) {
- char *s = strings[i];
- uint32_t hash = simple_hash2(s);
-
- if(unlikely(hash == cache_hash && !strcmp(s, "cache")))
- values5[i] = fast_strtoull(NUMBER1);
-
- else if(unlikely(hash == rss_hash && !strcmp(s, "rss")))
- values5[i] = fast_strtoull(NUMBER2);
-
- else if(unlikely(hash == rss_huge_hash && !strcmp(s, "rss_huge")))
- values5[i] = fast_strtoull(NUMBER3);
-
- else if(unlikely(hash == mapped_file_hash && !strcmp(s, "mapped_file")))
- values5[i] = fast_strtoull(NUMBER4);
-
- else if(unlikely(hash == writeback_hash && !strcmp(s, "writeback")))
- values5[i] = fast_strtoull(NUMBER5);
-
- else if(unlikely(hash == dirty_hash && !strcmp(s, "dirty")))
- values5[i] = fast_strtoull(NUMBER6);
-
- else if(unlikely(hash == swap_hash && !strcmp(s, "swap")))
- values5[i] = fast_strtoull(NUMBER7);
-
- else if(unlikely(hash == pgpgin_hash && !strcmp(s, "pgpgin")))
- values5[i] = fast_strtoull(NUMBER8);
-
- else if(unlikely(hash == pgpgout_hash && !strcmp(s, "pgpgout")))
- values5[i] = fast_strtoull(NUMBER9);
-
- else if(unlikely(hash == pgfault_hash && !strcmp(s, "pgfault")))
- values5[i] = fast_strtoull(NUMBER10);
-
- else if(unlikely(hash == pgmajfault_hash && !strcmp(s, "pgmajfault")))
- values5[i] = fast_strtoull(NUMBER11);
- }
-}
-
-// ----------------------------------------------------------------------------
-
-struct entry {
- char *name;
- uint32_t hash;
- int found;
- void (*func)(void *data1, void *data2);
- void *data1;
- void *data2;
- struct entry *prev, *next;
-};
-
-struct base {
- int iteration;
- int registered;
- int wanted;
- int found;
- struct entry *entries, *last;
-};
-
-static inline void callback(void *data1, void *data2) {
- char *string = data1;
- unsigned long long *value = data2;
- *value = fast_strtoull(string);
-}
-
-static inline void callback_system_strtoull(void *data1, void *data2) {
- char *string = data1;
- unsigned long long *value = data2;
- *value = strtoull(string, NULL, 10);
-}
-
-
-static inline struct base *entry(struct base *base, const char *name, void *data1, void *data2, void (*func)(void *, void *)) {
- if(!base)
- base = calloc(1, sizeof(struct base));
-
- struct entry *e = malloc(sizeof(struct entry));
- e->name = strdup(name);
- e->hash = simple_hash2(e->name);
- e->data1 = data1;
- e->data2 = data2;
- e->func = func;
- e->prev = NULL;
- e->next = base->entries;
-
- if(base->entries) base->entries->prev = e;
- else base->last = e;
-
- base->entries = e;
- base->registered++;
- base->wanted = base->registered;
-
- return base;
-}
-
-static inline int check(struct base *base, const char *s) {
- uint32_t hash = simple_hash2(s);
-
- if(likely(!strcmp(s, base->last->name))) {
- base->last->found = 1;
- base->found++;
- if(base->last->func) base->last->func(base->last->data1, base->last->data2);
- base->last = base->last->next;
-
- if(!base->last)
- base->last = base->entries;
-
- if(base->found == base->registered)
- return 1;
-
- return 0;
- }
-
- // find it
- struct entry *e;
- for(e = base->entries; e ; e = e->next)
- if(e->hash == hash && !strcmp(e->name, s))
- break;
-
- if(e == base->last) {
- printf("ERROR\n");
- exit(1);
- }
-
- if(e) {
- // found
-
- // run it
- if(e->func) e->func(e->data1, e->data2);
-
- // unlink it
- if(e->next) e->next->prev = e->prev;
- if(e->prev) e->prev->next = e->next;
-
- if(base->entries == e)
- base->entries = e->next;
- }
- else {
- // not found
-
- // create it
- e = calloc(1, sizeof(struct entry));
- e->name = strdup(s);
- e->hash = hash;
- }
-
- // link it here
- e->next = base->last;
- if(base->last) {
- e->prev = base->last->prev;
- base->last->prev = e;
-
- if(base->entries == base->last)
- base->entries = e;
- }
- else
- e->prev = NULL;
-
- if(e->prev)
- e->prev->next = e;
-
- base->last = e->next;
- if(!base->last)
- base->last = base->entries;
-
- e->found = 1;
- base->found++;
-
- if(base->found == base->registered)
- return 1;
-
- printf("relinked '%s' after '%s' and before '%s': ", e->name, e->prev?e->prev->name:"NONE", e->next?e->next->name:"NONE");
- for(e = base->entries; e ; e = e->next) printf("%s ", e->name);
- printf("\n");
-
- return 0;
-}
-
-static inline void begin(struct base *base) {
-
-	if(unlikely((base->iteration % 60) == 1)) {
- base->wanted = 0;
- struct entry *e;
- for(e = base->entries; e ; e = e->next)
- if(e->found) base->wanted++;
- }
-
- base->iteration++;
- base->last = base->entries;
- base->found = 0;
-}
-
-void test6() {
-
- static struct base *base = NULL;
-
- if(unlikely(!base)) {
- base = entry(base, "cache", NUMBER1, &values6[0], callback_system_strtoull);
- base = entry(base, "rss", NUMBER2, &values6[1], callback_system_strtoull);
- base = entry(base, "rss_huge", NUMBER3, &values6[2], callback_system_strtoull);
- base = entry(base, "mapped_file", NUMBER4, &values6[3], callback_system_strtoull);
- base = entry(base, "writeback", NUMBER5, &values6[4], callback_system_strtoull);
- base = entry(base, "dirty", NUMBER6, &values6[5], callback_system_strtoull);
- base = entry(base, "swap", NUMBER7, &values6[6], callback_system_strtoull);
- base = entry(base, "pgpgin", NUMBER8, &values6[7], callback_system_strtoull);
- base = entry(base, "pgpgout", NUMBER9, &values6[8], callback_system_strtoull);
- base = entry(base, "pgfault", NUMBER10, &values6[9], callback_system_strtoull);
- base = entry(base, "pgmajfault", NUMBER11, &values6[10], callback_system_strtoull);
- }
-
- begin(base);
-
- int i;
- for(i = 0; strings[i] ; i++) {
- if(check(base, strings[i]))
- break;
- }
-}
-
-void test7() {
-
- static struct base *base = NULL;
-
- if(unlikely(!base)) {
- base = entry(base, "cache", NUMBER1, &values6[0], callback);
- base = entry(base, "rss", NUMBER2, &values6[1], callback);
- base = entry(base, "rss_huge", NUMBER3, &values6[2], callback);
- base = entry(base, "mapped_file", NUMBER4, &values6[3], callback);
- base = entry(base, "writeback", NUMBER5, &values6[4], callback);
- base = entry(base, "dirty", NUMBER6, &values6[5], callback);
- base = entry(base, "swap", NUMBER7, &values6[6], callback);
- base = entry(base, "pgpgin", NUMBER8, &values6[7], callback);
- base = entry(base, "pgpgout", NUMBER9, &values6[8], callback);
- base = entry(base, "pgfault", NUMBER10, &values6[9], callback);
- base = entry(base, "pgmajfault", NUMBER11, &values6[10], callback);
- }
-
- begin(base);
-
- int i;
- for(i = 0; strings[i] ; i++) {
- if(check(base, strings[i]))
- break;
- }
-}
-
-// ----------------------------------------------------------------------------
-
-
-// ==============
-// --- Poor man cycle counting.
-static unsigned long tsc;
-
-static void begin_tsc(void)
-{
- unsigned long a, d;
- asm volatile ("cpuid\nrdtsc" : "=a" (a), "=d" (d) : "0" (0) : "ebx", "ecx");
- tsc = ((unsigned long)d << 32) | (unsigned long)a;
-}
-
-static unsigned long end_tsc(void)
-{
- unsigned long a, d;
- asm volatile ("rdtscp" : "=a" (a), "=d" (d) : : "ecx");
- return (((unsigned long)d << 32) | (unsigned long)a) - tsc;
-}
-// ===============
-
-static unsigned long long clk;
-
-static void begin_clock() {
- struct timeval tv;
- if(unlikely(gettimeofday(&tv, NULL) == -1))
- return;
- clk = tv.tv_sec * 1000000 + tv.tv_usec;
-}
-
-static unsigned long long end_clock() {
- struct timeval tv;
- if(unlikely(gettimeofday(&tv, NULL) == -1))
- return -1;
- return clk = tv.tv_sec * 1000000 + tv.tv_usec - clk;
-}
-
-int main(void)
-{
- cache_hash = simple_hash("cache");
- rss_hash = simple_hash("rss");
- rss_huge_hash = simple_hash("rss_huge");
- mapped_file_hash = simple_hash("mapped_file");
- writeback_hash = simple_hash("writeback");
- dirty_hash = simple_hash("dirty");
- swap_hash = simple_hash("swap");
- pgpgin_hash = simple_hash("pgpgin");
- pgpgout_hash = simple_hash("pgpgout");
- pgfault_hash = simple_hash("pgfault");
- pgmajfault_hash = simple_hash("pgmajfault");
- inactive_anon_hash = simple_hash("inactive_anon");
- active_anon_hash = simple_hash("active_anon");
- inactive_file_hash = simple_hash("inactive_file");
- active_file_hash = simple_hash("active_file");
- unevictable_hash = simple_hash("unevictable");
- hierarchical_memory_limit_hash = simple_hash("hierarchical_memory_limit");
- total_cache_hash = simple_hash("total_cache");
- total_rss_hash = simple_hash("total_rss");
- total_rss_huge_hash = simple_hash("total_rss_huge");
- total_mapped_file_hash = simple_hash("total_mapped_file");
- total_writeback_hash = simple_hash("total_writeback");
- total_dirty_hash = simple_hash("total_dirty");
- total_swap_hash = simple_hash("total_swap");
- total_pgpgin_hash = simple_hash("total_pgpgin");
- total_pgpgout_hash = simple_hash("total_pgpgout");
- total_pgfault_hash = simple_hash("total_pgfault");
- total_pgmajfault_hash = simple_hash("total_pgmajfault");
- total_inactive_anon_hash = simple_hash("total_inactive_anon");
- total_active_anon_hash = simple_hash("total_active_anon");
- total_inactive_file_hash = simple_hash("total_inactive_file");
- total_active_file_hash = simple_hash("total_active_file");
- total_unevictable_hash = simple_hash("total_unevictable");
-
- // cache functions
- (void)simple_hash2("hello world");
- (void)strcmp("1", "2");
- (void)strtoull("123", NULL, 0);
-
- unsigned long i, c1 = 0, c2 = 0, c3 = 0, c4 = 0, c5 = 0, c6 = 0, c7;
- unsigned long max = 1000000;
-
- // let the processor get up to speed
- begin_clock();
- for(i = 0; i <= max ;i++) test1();
- c1 = end_clock();
-
- begin_clock();
- for(i = 0; i <= max ;i++) test1();
- c1 = end_clock();
-
- begin_clock();
- for(i = 0; i <= max ;i++) test2();
- c2 = end_clock();
-
- begin_clock();
- for(i = 0; i <= max ;i++) test3();
- c3 = end_clock();
-
- begin_clock();
- for(i = 0; i <= max ;i++) test4();
- c4 = end_clock();
-
- begin_clock();
- for(i = 0; i <= max ;i++) test5();
- c5 = end_clock();
-
- begin_clock();
- for(i = 0; i <= max ;i++) test6();
- c6 = end_clock();
-
- begin_clock();
- for(i = 0; i <= max ;i++) test7();
- c7 = end_clock();
-
- for(i = 0; i < 11 ; i++)
- printf("value %lu: %llu %llu %llu %llu %llu %llu\n", i, values1[i], values2[i], values3[i], values4[i], values5[i], values6[i]);
-
- printf("\n\nRESULTS\n");
- printf("test1() in %lu usecs: if-else-if-else-if, simple strcmp() with system strtoull().\n"
- "test2() in %lu usecs: inline simple_hash() if-else-if-else-if, with system strtoull().\n"
- "test3() in %lu usecs: statement expression simple_hash(), system strtoull().\n"
- "test4() in %lu usecs: inline simple_hash(), if-continue checks, system strtoull().\n"
- "test5() in %lu usecs: inline simple_hash(), if-else-if-else-if, custom strtoull() (netdata default prior to ARL).\n"
- "test6() in %lu usecs: adaptive re-sortable list, system strtoull() (wow!)\n"
- "test7() in %lu usecs: adaptive re-sortable list, custom strtoull() (wow!)\n"
- , c1
- , c2
- , c3
- , c4
- , c5
- , c6
- , c7
- );
-
-	return 0;
-}
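Unlike the dictionary benchmark, this file has no libnetdata dependencies. A sketch of building it standalone, assuming an x86-64 host (the rdtsc/rdtscp inline assembly is not portable):

```bash
# Compile and run the line-parsing micro-benchmark on its own.
gcc -O2 -o benchmark-line-parsing tests/profile/benchmark-line-parsing.c
./benchmark-line-parsing
```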
diff --git a/tests/profile/benchmark-procfile-parser.c b/tests/profile/benchmark-procfile-parser.c
deleted file mode 100644
index 991e2dfc..00000000
--- a/tests/profile/benchmark-procfile-parser.c
+++ /dev/null
@@ -1,329 +0,0 @@
-/* SPDX-License-Identifier: GPL-3.0-or-later */
-
-#include "config.h"
-#include "libnetdata/libnetdata.h"
-
-void netdata_cleanup_and_exit(int ret) {
- exit(ret);
-}
-
-#define PF_PREFIX "PROCFILE"
-#define PFWORDS_INCREASE_STEP 200
-#define PFLINES_INCREASE_STEP 10
-#define PROCFILE_INCREMENT_BUFFER 512
-extern size_t procfile_max_lines;
-extern size_t procfile_max_words;
-extern size_t procfile_max_allocation;
-
-
-static inline void pflines_reset(pflines *fl) {
-	// debug(D_PROCFILE, PF_PREFIX ": resetting lines");
-
- fl->len = 0;
-}
-
-static inline void pflines_free(pflines *fl) {
- // debug(D_PROCFILE, PF_PREFIX ": freeing lines");
-
- freez(fl);
-}
-
-static inline void pfwords_reset(pfwords *fw) {
-	// debug(D_PROCFILE, PF_PREFIX ": resetting words");
- fw->len = 0;
-}
-
-
-static inline void pfwords_add(procfile *ff, char *str) {
- // debug(D_PROCFILE, PF_PREFIX ": adding word No %d: '%s'", fw->len, str);
-
- pfwords *fw = ff->words;
- if(unlikely(fw->len == fw->size)) {
- // debug(D_PROCFILE, PF_PREFIX ": expanding words");
-
- ff->words = fw = reallocz(fw, sizeof(pfwords) + (fw->size + PFWORDS_INCREASE_STEP) * sizeof(char *));
- fw->size += PFWORDS_INCREASE_STEP;
- }
-
- fw->words[fw->len++] = str;
-}
-
-NEVERNULL
-static inline size_t *pflines_add(procfile *ff) {
- // debug(D_PROCFILE, PF_PREFIX ": adding line %d at word %d", fl->len, first_word);
-
- pflines *fl = ff->lines;
- if(unlikely(fl->len == fl->size)) {
- // debug(D_PROCFILE, PF_PREFIX ": expanding lines");
-
- ff->lines = fl = reallocz(fl, sizeof(pflines) + (fl->size + PFLINES_INCREASE_STEP) * sizeof(ffline));
- fl->size += PFLINES_INCREASE_STEP;
- }
-
- ffline *ffl = &fl->lines[fl->len++];
- ffl->words = 0;
- ffl->first = ff->words->len;
-
- return &ffl->words;
-}
-
-
-NOINLINE
-static void procfile_parser(procfile *ff) {
- // debug(D_PROCFILE, PF_PREFIX ": Parsing file '%s'", ff->filename);
-
- char *s = ff->data // our current position
- , *e = &ff->data[ff->len] // the terminating null
- , *t = ff->data; // the first character of a word (or quoted / parenthesized string)
-
- // the look up array to find our type of character
- PF_CHAR_TYPE *separators = ff->separators;
-
- char quote = 0; // the quote character - only when in quoted string
- size_t opened = 0; // counts the number of open parenthesis
-
- size_t *line_words = pflines_add(ff);
-
- while(s < e) {
- PF_CHAR_TYPE ct = separators[(unsigned char)(*s)];
-
- // this is faster than a switch()
- // read more here: http://lazarenko.me/switch/
- switch(ct) {
- case PF_CHAR_IS_SEPARATOR:
- if(!quote && !opened) {
- if (s != t) {
- // separator, but we have word before it
- *s = '\0';
- pfwords_add(ff, t);
- (*line_words)++;
- }
- t = s + 1;
- }
- // fallthrough
-
- case PF_CHAR_IS_WORD:
- s++;
- break;
-
-
- case PF_CHAR_IS_NEWLINE:
- // end of line
-
- *s = '\0';
- pfwords_add(ff, t);
- (*line_words)++;
- t = ++s;
-
- // debug(D_PROCFILE, PF_PREFIX ": ended line %d with %d words", l, ff->lines->lines[l].words);
-
- line_words = pflines_add(ff);
- break;
-
- case PF_CHAR_IS_QUOTE:
- if(unlikely(!quote && s == t)) {
- // quote opened at the beginning
- quote = *s;
- t = ++s;
- }
- else if(unlikely(quote && quote == *s)) {
- // quote closed
- quote = 0;
-
- *s = '\0';
- pfwords_add(ff, t);
- (*line_words)++;
- t = ++s;
- }
- else
- s++;
- break;
-
- case PF_CHAR_IS_OPEN:
- if(s == t) {
- opened++;
- t = ++s;
- }
- else if(opened) {
- opened++;
- s++;
- }
- else
- s++;
- break;
-
- case PF_CHAR_IS_CLOSE:
- if(opened) {
- opened--;
-
- if(!opened) {
- *s = '\0';
- pfwords_add(ff, t);
- (*line_words)++;
- t = ++s;
- }
- else
- s++;
- }
- else
- s++;
- break;
-
- default:
- fatal("Internal Error: procfile_readall() does not handle all the cases.");
- }
- }
-
- if(likely(s > t && t < e)) {
- // the last word
- if(unlikely(ff->len >= ff->size)) {
- // we are going to lose the last byte
- s = &ff->data[ff->size - 1];
- }
-
- *s = '\0';
- pfwords_add(ff, t);
- (*line_words)++;
- // t = ++s;
- }
-}
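-
-// In short: procfile_parser() is a single-pass, in-place tokenizer driven by
-// the per-character lookup table ff->separators. It terminates words by
-// writing '\0' directly into ff->data, while 'quote' remembers an open quote
-// character and 'opened' counts nested parentheses, so separators inside
-// quoted or parenthesized spans do not split words.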
-
-
-procfile *procfile_readall1(procfile *ff) {
- // debug(D_PROCFILE, PF_PREFIX ": Reading file '%s'.", ff->filename);
-
- ff->len = 0; // zero the used size
- ssize_t r = 1; // read at least once
- while(r > 0) {
- ssize_t s = ff->len;
- ssize_t x = ff->size - s;
-
- if(unlikely(!x)) {
- debug(D_PROCFILE, PF_PREFIX ": Expanding data buffer for file '%s'.", procfile_filename(ff));
- ff = reallocz(ff, sizeof(procfile) + ff->size + PROCFILE_INCREMENT_BUFFER);
- ff->size += PROCFILE_INCREMENT_BUFFER;
- }
-
- debug(D_PROCFILE, "Reading file '%s', from position %zd with length %zd", procfile_filename(ff), s, (ssize_t)(ff->size - s));
- r = read(ff->fd, &ff->data[s], ff->size - s);
- if(unlikely(r == -1)) {
- if(unlikely(!(ff->flags & PROCFILE_FLAG_NO_ERROR_ON_FILE_IO))) error(PF_PREFIX ": Cannot read from file '%s' on fd %d", procfile_filename(ff), ff->fd);
- procfile_close(ff);
- return NULL;
- }
-
- ff->len += r;
- }
-
- // debug(D_PROCFILE, "Rewinding file '%s'", ff->filename);
- if(unlikely(lseek(ff->fd, 0, SEEK_SET) == -1)) {
- if(unlikely(!(ff->flags & PROCFILE_FLAG_NO_ERROR_ON_FILE_IO))) error(PF_PREFIX ": Cannot rewind on file '%s'.", procfile_filename(ff));
- procfile_close(ff);
- return NULL;
- }
-
- pflines_reset(ff->lines);
- pfwords_reset(ff->words);
- procfile_parser(ff);
-
- if(unlikely(procfile_adaptive_initial_allocation)) {
- if(unlikely(ff->len > procfile_max_allocation)) procfile_max_allocation = ff->len;
- if(unlikely(ff->lines->len > procfile_max_lines)) procfile_max_lines = ff->lines->len;
- if(unlikely(ff->words->len > procfile_max_words)) procfile_max_words = ff->words->len;
- }
-
- // debug(D_PROCFILE, "File '%s' updated.", ff->filename);
- return ff;
-}
-
-
-
-
-
-
-
-
-// ==============
-// --- Poor man's cycle counting.
-static unsigned long tsc;
-
-void begin_tsc(void)
-{
- unsigned long a, d;
- asm volatile ("cpuid\nrdtsc" : "=a" (a), "=d" (d) : "0" (0) : "ebx", "ecx");
- tsc = ((unsigned long)d << 32) | (unsigned long)a;
-}
-
-unsigned long end_tsc(void)
-{
- unsigned long a, d;
- asm volatile ("rdtscp" : "=a" (a), "=d" (d) : : "ecx");
- return (((unsigned long)d << 32) | (unsigned long)a) - tsc;
-}
-// ==============
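-
-// Presumably the cpuid before rdtsc serializes the pipeline so earlier
-// instructions cannot drift into the measured region, and rdtscp at the end
-// waits for preceding instructions to retire before sampling the counter -
-// the usual rdtsc benchmarking pattern. It assumes an x86_64 CPU with an
-// invariant TSC.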
-
-
-unsigned long test_netdata_internal(void) {
- static procfile *ff = NULL;
-
- ff = procfile_reopen(ff, "/proc/self/status", " \t:,-()/", PROCFILE_FLAG_NO_ERROR_ON_FILE_IO);
- if(!ff) {
- fprintf(stderr, "Failed to open filename\n");
- exit(1);
- }
-
- begin_tsc();
- ff = procfile_readall(ff);
- unsigned long c = end_tsc();
-
- if(!ff) {
- fprintf(stderr, "Failed to read filename\n");
- exit(1);
- }
-
- return c;
-}
-
-unsigned long test_method1(void) {
- static procfile *ff = NULL;
-
- ff = procfile_reopen(ff, "/proc/self/status", " \t:,-()/", PROCFILE_FLAG_NO_ERROR_ON_FILE_IO);
- if(!ff) {
- fprintf(stderr, "Failed to open filename\n");
- exit(1);
- }
-
- begin_tsc();
- ff = procfile_readall1(ff);
- unsigned long c = end_tsc();
-
- if(!ff) {
- fprintf(stderr, "Failed to read filename\n");
- exit(1);
- }
-
- return c;
-}
-
-//--- Test
-int main(int argc, char **argv)
-{
- (void)argc; (void)argv;
-
- int i, max = 1000000;
-
- unsigned long c1 = 0;
- test_netdata_internal();
- for(i = 0; i < max ; i++)
- c1 += test_netdata_internal();
-
- unsigned long c2 = 0;
- test_method1();
- for(i = 0; i < max ; i++)
- c2 += test_method1();
-
- printf("netdata internal: completed in %lu cycles, %lu cycles per read, %0.2f %%.\n", c1, c1 / max, (float)c1 * 100.0 / (float)c1);
- printf("method1 : completed in %lu cycles, %lu cycles per read, %0.2f %%.\n", c2, c2 / max, (float)c2 * 100.0 / (float)c1);
-
- return 0;
-}
diff --git a/tests/profile/benchmark-registry.c b/tests/profile/benchmark-registry.c
deleted file mode 100644
index cfed6d7c..00000000
--- a/tests/profile/benchmark-registry.c
+++ /dev/null
@@ -1,227 +0,0 @@
-/* SPDX-License-Identifier: GPL-3.0-or-later */
-
-/*
- * compile with
- * gcc -O1 -ggdb -Wall -Wextra -I ../src/ -I ../ -o benchmark-registry benchmark-registry.c ../src/dictionary.o ../src/log.o ../src/avl.o ../src/common.o ../src/appconfig.o ../src/web_buffer.o ../src/storage_number.o ../src/rrd.o ../src/health.o -pthread -luuid -lm -DHAVE_CONFIG_H -DVARLIB_DIR="\"/tmp\""
- */
-
-char *hostname = "me";
-
-#include "../src/registry.c"
-
-void netdata_cleanup_and_exit(int ret) { exit(ret); }
-
-// ----------------------------------------------------------------------------
-// TESTS
-
-int test1(int argc, char **argv) {
-
- void print_stats(uint32_t requests, unsigned long long start, unsigned long long end) {
- fprintf(stderr, " > SPEED: %u requests served in %0.2f seconds ( >>> %llu per second <<< )\n",
- requests, (end-start) / 1000000.0, (unsigned long long)requests * 1000000ULL / (end-start));
-
- fprintf(stderr, " > DB : persons %llu, machines %llu, unique URLs %llu, accesses %llu, URLs: for persons %llu, for machines %llu\n",
- registry.persons_count, registry.machines_count, registry.urls_count, registry.usages_count,
- registry.persons_urls_count, registry.machines_urls_count);
- }
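-
- // Note: print_stats() above is a function defined inside test1(); this
- // relies on the GCC nested-functions extension, so the file is expected
- // to build with gcc only (see the compile line at the top of the file).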
-
- (void) argc;
- (void) argv;
-
- uint32_t u, users = 1000000;
- uint32_t m, machines = 200000;
- uint32_t machines2 = machines * 2;
-
- char **users_guids = malloc(users * sizeof(char *));
- char **machines_guids = malloc(machines2 * sizeof(char *));
- char **machines_urls = malloc(machines2 * sizeof(char *));
- unsigned long long start;
-
- registry_init();
-
- fprintf(stderr, "Generating %u machine guids\n", machines2);
- for(m = 0; m < machines2 ;m++) {
- uuid_t uuid;
- machines_guids[m] = malloc(36+1);
- uuid_generate(uuid);
- uuid_unparse(uuid, machines_guids[m]);
-
- char buf[FILENAME_MAX + 1];
- snprintfz(buf, FILENAME_MAX, "http://%u.netdata.rocks/", m+1);
- machines_urls[m] = strdup(buf);
-
- // fprintf(stderr, "\tmachine %u: '%s', url: '%s'\n", m + 1, machines_guids[m], machines_urls[m]);
- }
-
- start = timems();
- fprintf(stderr, "\nGenerating %u users accessing %u machines\n", users, machines);
- m = 0;
- time_t now = time(NULL);
- for(u = 0; u < users ; u++) {
- if(++m == machines) m = 0;
-
- PERSON *p = registry_request_access(NULL, machines_guids[m], machines_urls[m], "test", now);
- users_guids[u] = p->guid;
- }
- print_stats(u, start, timems());
-
- start = timems();
- fprintf(stderr, "\nAll %u users accessing again the same %u servers\n", users, machines);
- m = 0;
- now = time(NULL);
- for(u = 0; u < users ; u++) {
- if(++m == machines) m = 0;
-
- PERSON *p = registry_request_access(users_guids[u], machines_guids[m], machines_urls[m], "test", now);
-
- if(p->guid != users_guids[u])
- fprintf(stderr, "ERROR: expected to get user guid '%s' but git '%s'", users_guids[u], p->guid);
- }
- print_stats(u, start, timems());
-
- start = timems();
- fprintf(stderr, "\nAll %u users accessing a new server, out of the %u servers\n", users, machines);
- m = 1;
- now = time(NULL);
- for(u = 0; u < users ; u++) {
- if(++m == machines) m = 0;
-
- PERSON *p = registry_request_access(users_guids[u], machines_guids[m], machines_urls[m], "test", now);
-
- if(p->guid != users_guids[u])
- fprintf(stderr, "ERROR: expected to get user guid '%s' but git '%s'", users_guids[u], p->guid);
- }
- print_stats(u, start, timems());
-
- start = timems();
- fprintf(stderr, "\n%u random users accessing a random server, out of the %u servers\n", users, machines);
- now = time(NULL);
- for(u = 0; u < users ; u++) {
- uint32_t tu = random() * users / RAND_MAX;
- uint32_t tm = random() * machines / RAND_MAX;
-
- PERSON *p = registry_request_access(users_guids[tu], machines_guids[tm], machines_urls[tm], "test", now);
-
- if(p->guid != users_guids[tu])
- fprintf(stderr, "ERROR: expected to get user guid '%s' but git '%s'", users_guids[tu], p->guid);
- }
- print_stats(u, start, timems());
-
- start = timems();
- fprintf(stderr, "\n%u random users accessing a random server, out of %u servers\n", users, machines2);
- now = time(NULL);
- for(u = 0; u < users ; u++) {
- uint32_t tu = random() * users / RAND_MAX;
- uint32_t tm = random() * machines2 / RAND_MAX;
-
- PERSON *p = registry_request_access(users_guids[tu], machines_guids[tm], machines_urls[tm], "test", now);
-
- if(p->guid != users_guids[tu])
- fprintf(stderr, "ERROR: expected to get user guid '%s' but git '%s'", users_guids[tu], p->guid);
- }
- print_stats(u, start, timems());
-
- for(m = 0; m < 10; m++) {
- start = timems();
- fprintf(stderr,
- "\n%u random user accesses to a random server, out of %u servers,\n > using 1/10000 with a random url, 1/1000 with a mismatched url\n",
- users * 2, machines2);
- now = time(NULL);
- for (u = 0; u < users * 2; u++) {
- uint32_t tu = random() * users / RAND_MAX;
- uint32_t tm = random() * machines2 / RAND_MAX;
-
- char *url = machines_urls[tm];
- char buf[FILENAME_MAX + 1];
- if (random() % 10000 == 1234) {
- snprintfz(buf, FILENAME_MAX, "http://random.%ld.netdata.rocks/", random());
- url = buf;
- }
- else if (random() % 1000 == 123)
- url = machines_urls[random() * machines2 / RAND_MAX];
-
- PERSON *p = registry_request_access(users_guids[tu], machines_guids[tm], url, "test", now);
-
- if (p->guid != users_guids[tu])
- fprintf(stderr, "ERROR: expected to get user guid '%s' but git '%s'", users_guids[tu], p->guid);
- }
- print_stats(u, start, timems());
- }
-
- fprintf(stderr, "\n\nSAVE\n");
- start = timems();
- registry_save();
- print_stats(registry.persons_count, start, timems());
-
- fprintf(stderr, "\n\nCLEANUP\n");
- start = timems();
- registry_free();
- print_stats(registry.persons_count, start, timems());
- return 0;
-}
-
-// ----------------------------------------------------------------------------
-// TESTING
-
-int main(int argc, char **argv) {
- config_set_boolean("registry", "enabled", 1);
-
- //debug_flags = 0xFFFFFFFF;
- test1(argc, argv);
- exit(0);
-
- (void)argc;
- (void)argv;
-
-
- PERSON *p1, *p2;
-
- fprintf(stderr, "\n\nINITIALIZATION\n");
-
- registry_init();
-
- int i = 2;
-
- fprintf(stderr, "\n\nADDING ENTRY\n");
- p1 = registry_request_access("2c95abd0-1542-11e6-8c66-00508db7e9c9", "7c173980-145c-11e6-b86f-00508db7e9c1", "http://localhost:19999/", "test", time(NULL));
-
- if(0)
- while(i--) {
-#ifdef REGISTRY_STDOUT_DUMP
- fprintf(stderr, "\n\nADDING ENTRY\n");
-#endif /* REGISTRY_STDOUT_DUMP */
- p1 = registry_request_access(NULL, "7c173980-145c-11e6-b86f-00508db7e9c1", "http://localhost:19999/", "test", time(NULL));
-
-#ifdef REGISTRY_STDOUT_DUMP
- fprintf(stderr, "\n\nADDING ANOTHER URL\n");
-#endif /* REGISTRY_STDOUT_DUMP */
- p1 = registry_request_access(p1->guid, "7c173980-145c-11e6-b86f-00508db7e9c1", "http://127.0.0.1:19999/", "test", time(NULL));
-
-#ifdef REGISTRY_STDOUT_DUMP
- fprintf(stderr, "\n\nADDING ANOTHER URL\n");
-#endif /* REGISTRY_STDOUT_DUMP */
- p1 = registry_request_access(p1->guid, "7c173980-145c-11e6-b86f-00508db7e9c1", "http://my.server:19999/", "test", time(NULL));
-
-#ifdef REGISTRY_STDOUT_DUMP
- fprintf(stderr, "\n\nADDING ANOTHER MACHINE\n");
-#endif /* REGISTRY_STDOUT_DUMP */
- p1 = registry_request_access(p1->guid, "7c173980-145c-11e6-b86f-00508db7e9c1", "http://my.server:19999/", "test", time(NULL));
-
-#ifdef REGISTRY_STDOUT_DUMP
- fprintf(stderr, "\n\nADDING ANOTHER PERSON\n");
-#endif /* REGISTRY_STDOUT_DUMP */
- p2 = registry_request_access(NULL, "7c173980-145c-11e6-b86f-00508db7e9c3", "http://localhost:19999/", "test", time(NULL));
-
-#ifdef REGISTRY_STDOUT_DUMP
- fprintf(stderr, "\n\nADDING ANOTHER MACHINE\n");
-#endif /* REGISTRY_STDOUT_DUMP */
- p2 = registry_request_access(p2->guid, "7c173980-145c-11e6-b86f-00508db7e9c3", "http://localhost:19999/", "test", time(NULL));
- }
-
- fprintf(stderr, "\n\nSAVE\n");
- registry_save();
-
- fprintf(stderr, "\n\nCLEANUP\n");
- registry_free();
- return 0;
-}
diff --git a/tests/profile/benchmark-value-pairs.c b/tests/profile/benchmark-value-pairs.c
deleted file mode 100644
index ae4f53c3..00000000
--- a/tests/profile/benchmark-value-pairs.c
+++ /dev/null
@@ -1,623 +0,0 @@
-/* SPDX-License-Identifier: GPL-3.0-or-later */
-
-#include "config.h"
-#include "libnetdata/libnetdata.h"
-
-#ifdef simple_hash
-#undef simple_hash
-#endif
-
-void netdata_cleanup_and_exit(int ret) {
- exit(ret);
-}
-
-#define simple_hash(name) ({ \
- register unsigned char *__hash_source = (unsigned char *)(name); \
- register uint32_t __hash_value = 0x811c9dc5; \
- while (*__hash_source) { \
- __hash_value *= 16777619; \
- __hash_value ^= (uint32_t) *__hash_source++; \
- } \
- __hash_value; \
-})
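-
-// The constants above are the 32-bit FNV parameters: 0x811c9dc5 is the
-// offset basis and 16777619 the FNV prime. Both this macro and
-// simple_hash2() below multiply before xoring, i.e. classic FNV-1 rather
-// than FNV-1a; either variant works as a fast string hash for this test.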
-
-static inline uint32_t simple_hash2(const char *name) {
- register unsigned char *s = (unsigned char *)name;
- register uint32_t hval = 0x811c9dc5;
- while (*s) {
- hval *= 16777619;
- hval ^= (uint32_t) *s++;
- }
- return hval;
-}
-
-static inline unsigned long long fast_strtoull(const char *s) {
- register unsigned long long n = 0;
- register char c;
- for(c = *s; c >= '0' && c <= '9' ; c = *(++s)) {
- n *= 10;
- n += c - '0';
- // n = (n << 1) + (n << 3) + (c - '0');
- }
- return n;
-}
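-
-// fast_strtoull() is a bare base-10 parser: no sign handling, no overflow or
-// errno reporting, no locale lookups. That is what makes it cheaper than the
-// system strtoull() in the timings below, at the cost of accepting only
-// plain non-negative decimal strings.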
-
-static uint32_t cache_hash = 0;
-static uint32_t rss_hash = 0;
-static uint32_t rss_huge_hash = 0;
-static uint32_t mapped_file_hash = 0;
-static uint32_t writeback_hash = 0;
-static uint32_t dirty_hash = 0;
-static uint32_t swap_hash = 0;
-static uint32_t pgpgin_hash = 0;
-static uint32_t pgpgout_hash = 0;
-static uint32_t pgfault_hash = 0;
-static uint32_t pgmajfault_hash = 0;
-static uint32_t inactive_anon_hash = 0;
-static uint32_t active_anon_hash = 0;
-static uint32_t inactive_file_hash = 0;
-static uint32_t active_file_hash = 0;
-static uint32_t unevictable_hash = 0;
-static uint32_t hierarchical_memory_limit_hash = 0;
-static uint32_t total_cache_hash = 0;
-static uint32_t total_rss_hash = 0;
-static uint32_t total_rss_huge_hash = 0;
-static uint32_t total_mapped_file_hash = 0;
-static uint32_t total_writeback_hash = 0;
-static uint32_t total_dirty_hash = 0;
-static uint32_t total_swap_hash = 0;
-static uint32_t total_pgpgin_hash = 0;
-static uint32_t total_pgpgout_hash = 0;
-static uint32_t total_pgfault_hash = 0;
-static uint32_t total_pgmajfault_hash = 0;
-static uint32_t total_inactive_anon_hash = 0;
-static uint32_t total_active_anon_hash = 0;
-static uint32_t total_inactive_file_hash = 0;
-static uint32_t total_active_file_hash = 0;
-static uint32_t total_unevictable_hash = 0;
-
-unsigned long long values1[50] = { 0 };
-unsigned long long values2[50] = { 0 };
-unsigned long long values3[50] = { 0 };
-unsigned long long values4[50] = { 0 };
-unsigned long long values5[50] = { 0 };
-unsigned long long values6[50] = { 0 };
-unsigned long long values7[50] = { 0 };
-unsigned long long values8[50] = { 0 };
-unsigned long long values9[50] = { 0 };
-
-struct pair {
- const char *name;
- const char *value;
- uint32_t hash;
- unsigned long long *collected8;
- unsigned long long *collected9;
-} pairs[] = {
- { "cache", "12345678901234", 0, &values8[0] ,&values9[0] },
- { "rss", "23456789012345", 0, &values8[1] ,&values9[1] },
- { "rss_huge", "34567890123456", 0, &values8[2] ,&values9[2] },
- { "mapped_file", "45678901234567", 0, &values8[3] ,&values9[3] },
- { "writeback", "56789012345678", 0, &values8[4] ,&values9[4] },
- { "dirty", "67890123456789", 0, &values8[5] ,&values9[5] },
- { "swap", "78901234567890", 0, &values8[6] ,&values9[6] },
- { "pgpgin", "89012345678901", 0, &values8[7] ,&values9[7] },
- { "pgpgout", "90123456789012", 0, &values8[8] ,&values9[8] },
- { "pgfault", "10345678901234", 0, &values8[9] ,&values9[9] },
- { "pgmajfault", "11456789012345", 0, &values8[10] ,&values9[10] },
- { "inactive_anon", "12000000000000", 0, &values8[11] ,&values9[11] },
- { "active_anon", "13345678901234", 0, &values8[12] ,&values9[12] },
- { "inactive_file", "14345678901234", 0, &values8[13] ,&values9[13] },
- { "active_file", "15345678901234", 0, &values8[14] ,&values9[14] },
- { "unevictable", "16345678901234", 0, &values8[15] ,&values9[15] },
- { "hierarchical_memory_limit", "17345678901234", 0, &values8[16] ,&values9[16] },
- { "total_cache", "18345678901234", 0, &values8[17] ,&values9[17] },
- { "total_rss", "19345678901234", 0, &values8[18] ,&values9[18] },
- { "total_rss_huge", "20345678901234", 0, &values8[19] ,&values9[19] },
- { "total_mapped_file", "21345678901234", 0, &values8[20] ,&values9[20] },
- { "total_writeback", "22345678901234", 0, &values8[21] ,&values9[21] },
- { "total_dirty", "23000000000000", 0, &values8[22] ,&values9[22] },
- { "total_swap", "24345678901234", 0, &values8[23] ,&values9[23] },
- { "total_pgpgin", "25345678901234", 0, &values8[24] ,&values9[24] },
- { "total_pgpgout", "26345678901234", 0, &values8[25] ,&values9[25] },
- { "total_pgfault", "27345678901234", 0, &values8[26] ,&values9[26] },
- { "total_pgmajfault", "28345678901234", 0, &values8[27] ,&values9[27] },
- { "total_inactive_anon", "29345678901234", 0, &values8[28] ,&values9[28] },
- { "total_active_anon", "30345678901234", 0, &values8[29] ,&values9[29] },
- { "total_inactive_file", "31345678901234", 0, &values8[30] ,&values9[30] },
- { "total_active_file", "32345678901234", 0, &values8[31] ,&values9[31] },
- { "total_unevictable", "33345678901234", 0, &values8[32] ,&values9[32] },
- { NULL, NULL , 0, NULL ,NULL }
-};
-
-// simple system strcmp()
-void test1() {
- int i;
- for(i = 0; pairs[i].name ; i++) {
- const char *s = pairs[i].name;
- const char *v = pairs[i].value;
-
- if(unlikely(!strcmp(s, "cache")))
- values1[i] = strtoull(v, NULL, 10);
-
- else if(unlikely(!strcmp(s, "rss")))
- values1[i] = strtoull(v, NULL, 10);
-
- else if(unlikely(!strcmp(s, "rss_huge")))
- values1[i] = strtoull(v, NULL, 10);
-
- else if(unlikely(!strcmp(s, "mapped_file")))
- values1[i] = strtoull(v, NULL, 10);
-
- else if(unlikely(!strcmp(s, "writeback")))
- values1[i] = strtoull(v, NULL, 10);
-
- else if(unlikely(!strcmp(s, "dirty")))
- values1[i] = strtoull(v, NULL, 10);
-
- else if(unlikely(!strcmp(s, "swap")))
- values1[i] = strtoull(v, NULL, 10);
-
- else if(unlikely(!strcmp(s, "pgpgin")))
- values1[i] = strtoull(v, NULL, 10);
-
- else if(unlikely(!strcmp(s, "pgpgout")))
- values1[i] = strtoull(v, NULL, 10);
-
- else if(unlikely(!strcmp(s, "pgfault")))
- values1[i] = strtoull(v, NULL, 10);
-
- else if(unlikely(!strcmp(s, "pgmajfault")))
- values1[i] = strtoull(v, NULL, 10);
- }
-}
-
-// inline simple_hash() with system strtoull()
-void test2() {
- int i;
- for(i = 0; pairs[i].name ; i++) {
- const char *s = pairs[i].name;
- const char *v = pairs[i].value;
-
- uint32_t hash = simple_hash2(s);
-
- if(unlikely(hash == cache_hash && !strcmp(s, "cache")))
- values2[i] = strtoull(v, NULL, 10);
-
- else if(unlikely(hash == rss_hash && !strcmp(s, "rss")))
- values2[i] = strtoull(v, NULL, 10);
-
- else if(unlikely(hash == rss_huge_hash && !strcmp(s, "rss_huge")))
- values2[i] = strtoull(v, NULL, 10);
-
- else if(unlikely(hash == mapped_file_hash && !strcmp(s, "mapped_file")))
- values2[i] = strtoull(v, NULL, 10);
-
- else if(unlikely(hash == writeback_hash && !strcmp(s, "writeback")))
- values2[i] = strtoull(v, NULL, 10);
-
- else if(unlikely(hash == dirty_hash && !strcmp(s, "dirty")))
- values2[i] = strtoull(v, NULL, 10);
-
- else if(unlikely(hash == swap_hash && !strcmp(s, "swap")))
- values2[i] = strtoull(v, NULL, 10);
-
- else if(unlikely(hash == pgpgin_hash && !strcmp(s, "pgpgin")))
- values2[i] = strtoull(v, NULL, 10);
-
- else if(unlikely(hash == pgpgout_hash && !strcmp(s, "pgpgout")))
- values2[i] = strtoull(v, NULL, 10);
-
- else if(unlikely(hash == pgfault_hash && !strcmp(s, "pgfault")))
- values2[i] = strtoull(v, NULL, 10);
-
- else if(unlikely(hash == pgmajfault_hash && !strcmp(s, "pgmajfault")))
- values2[i] = strtoull(v, NULL, 10);
- }
-}
-
-// statement expression simple_hash(), system strtoull()
-void test3() {
- int i;
- for(i = 0; pairs[i].name ; i++) {
- const char *s = pairs[i].name;
- const char *v = pairs[i].value;
-
- uint32_t hash = simple_hash(s);
-
- if(unlikely(hash == cache_hash && !strcmp(s, "cache")))
- values3[i] = strtoull(v, NULL, 10);
-
- else if(unlikely(hash == rss_hash && !strcmp(s, "rss")))
- values3[i] = strtoull(v, NULL, 10);
-
- else if(unlikely(hash == rss_huge_hash && !strcmp(s, "rss_huge")))
- values3[i] = strtoull(v, NULL, 10);
-
- else if(unlikely(hash == mapped_file_hash && !strcmp(s, "mapped_file")))
- values3[i] = strtoull(v, NULL, 10);
-
- else if(unlikely(hash == writeback_hash && !strcmp(s, "writeback")))
- values3[i] = strtoull(v, NULL, 10);
-
- else if(unlikely(hash == dirty_hash && !strcmp(s, "dirty")))
- values3[i] = strtoull(v, NULL, 10);
-
- else if(unlikely(hash == swap_hash && !strcmp(s, "swap")))
- values3[i] = strtoull(v, NULL, 10);
-
- else if(unlikely(hash == pgpgin_hash && !strcmp(s, "pgpgin")))
- values3[i] = strtoull(v, NULL, 10);
-
- else if(unlikely(hash == pgpgout_hash && !strcmp(s, "pgpgout")))
- values3[i] = strtoull(v, NULL, 10);
-
- else if(unlikely(hash == pgfault_hash && !strcmp(s, "pgfault")))
- values3[i] = strtoull(v, NULL, 10);
-
- else if(unlikely(hash == pgmajfault_hash && !strcmp(s, "pgmajfault")))
- values3[i] = strtoull(v, NULL, 10);
- }
-}
-
-
-// inline simple_hash(), if-continue checks
-void test4() {
- int i;
- for(i = 0; pairs[i].name ; i++) {
- const char *s = pairs[i].name;
- const char *v = pairs[i].value;
-
- uint32_t hash = simple_hash2(s);
-
- if(unlikely(hash == cache_hash && !strcmp(s, "cache"))) {
- values4[i] = strtoull(v, NULL, 0);
- continue;
- }
-
- if(unlikely(hash == rss_hash && !strcmp(s, "rss"))) {
- values4[i] = strtoull(v, NULL, 0);
- continue;
- }
-
- if(unlikely(hash == rss_huge_hash && !strcmp(s, "rss_huge"))) {
- values4[i] = strtoull(v, NULL, 0);
- continue;
- }
-
- if(unlikely(hash == mapped_file_hash && !strcmp(s, "mapped_file"))) {
- values4[i] = strtoull(v, NULL, 0);
- continue;
- }
-
- if(unlikely(hash == writeback_hash && !strcmp(s, "writeback"))) {
- values4[i] = strtoull(v, NULL, 0);
- continue;
- }
-
- if(unlikely(hash == dirty_hash && !strcmp(s, "dirty"))) {
- values4[i] = strtoull(v, NULL, 0);
- continue;
- }
-
- if(unlikely(hash == swap_hash && !strcmp(s, "swap"))) {
- values4[i] = strtoull(v, NULL, 0);
- continue;
- }
-
- if(unlikely(hash == pgpgin_hash && !strcmp(s, "pgpgin"))) {
- values4[i] = strtoull(v, NULL, 0);
- continue;
- }
-
- if(unlikely(hash == pgpgout_hash && !strcmp(s, "pgpgout"))) {
- values4[i] = strtoull(v, NULL, 0);
- continue;
- }
-
- if(unlikely(hash == pgfault_hash && !strcmp(s, "pgfault"))) {
- values4[i] = strtoull(v, NULL, 0);
- continue;
- }
-
- if(unlikely(hash == pgmajfault_hash && !strcmp(s, "pgmajfault"))) {
- values4[i] = strtoull(v, NULL, 0);
- continue;
- }
- }
-}
-
-// inline simple_hash(), if-else-if-else-if (netdata default)
-void test5() {
- int i;
- for(i = 0; pairs[i].name ; i++) {
- const char *s = pairs[i].name;
- const char *v = pairs[i].value;
-
- uint32_t hash = simple_hash2(s);
-
- if(unlikely(hash == cache_hash && !strcmp(s, "cache")))
- values5[i] = fast_strtoull(v);
-
- else if(unlikely(hash == rss_hash && !strcmp(s, "rss")))
- values5[i] = fast_strtoull(v);
-
- else if(unlikely(hash == rss_huge_hash && !strcmp(s, "rss_huge")))
- values5[i] = fast_strtoull(v);
-
- else if(unlikely(hash == mapped_file_hash && !strcmp(s, "mapped_file")))
- values5[i] = fast_strtoull(v);
-
- else if(unlikely(hash == writeback_hash && !strcmp(s, "writeback")))
- values5[i] = fast_strtoull(v);
-
- else if(unlikely(hash == dirty_hash && !strcmp(s, "dirty")))
- values5[i] = fast_strtoull(v);
-
- else if(unlikely(hash == swap_hash && !strcmp(s, "swap")))
- values5[i] = fast_strtoull(v);
-
- else if(unlikely(hash == pgpgin_hash && !strcmp(s, "pgpgin")))
- values5[i] = fast_strtoull(v);
-
- else if(unlikely(hash == pgpgout_hash && !strcmp(s, "pgpgout")))
- values5[i] = fast_strtoull(v);
-
- else if(unlikely(hash == pgfault_hash && !strcmp(s, "pgfault")))
- values5[i] = fast_strtoull(v);
-
- else if(unlikely(hash == pgmajfault_hash && !strcmp(s, "pgmajfault")))
- values5[i] = fast_strtoull(v);
- }
-}
-
-// ----------------------------------------------------------------------------
-
-void arl_strtoull(const char *name, uint32_t hash, const char *value, void *dst) {
- (void)name;
- (void)hash;
-
- register unsigned long long *d = dst;
- *d = strtoull(value, NULL, 10);
- // fprintf(stderr, "name '%s' with hash %u and value '%s' is %llu\n", name, hash, value, *d);
-}
-
-void test6() {
- static ARL_BASE *base = NULL;
-
- if(unlikely(!base)) {
- base = arl_create("test6", arl_strtoull, 60);
- arl_expect_custom(base, "cache", NULL, &values6[0]);
- arl_expect_custom(base, "rss", NULL, &values6[1]);
- arl_expect_custom(base, "rss_huge", NULL, &values6[2]);
- arl_expect_custom(base, "mapped_file", NULL, &values6[3]);
- arl_expect_custom(base, "writeback", NULL, &values6[4]);
- arl_expect_custom(base, "dirty", NULL, &values6[5]);
- arl_expect_custom(base, "swap", NULL, &values6[6]);
- arl_expect_custom(base, "pgpgin", NULL, &values6[7]);
- arl_expect_custom(base, "pgpgout", NULL, &values6[8]);
- arl_expect_custom(base, "pgfault", NULL, &values6[9]);
- arl_expect_custom(base, "pgmajfault", NULL, &values6[10]);
- }
-
- arl_begin(base);
-
- int i;
- for(i = 0; pairs[i].name ; i++)
- if(arl_check(base, pairs[i].name, pairs[i].value)) break;
-}
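-
-// The ARL (adaptive re-sortable list) appears to keep the expected keys
-// sorted in the order they are met in the input, so on repeated passes over
-// the same layout each arl_check() tends to match immediately; it returns
-// non-zero once every expected entry has been collected, which is why the
-// loop above can break early.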
-
-void arl_str2ull(const char *name, uint32_t hash, const char *value, void *dst) {
- (void)name;
- (void)hash;
-
- register unsigned long long *d = dst;
- *d = str2ull(value);
- // fprintf(stderr, "name '%s' with hash %u and value '%s' is %llu\n", name, hash, value, *d);
-}
-
-void test7() {
- static ARL_BASE *base = NULL;
-
- if(unlikely(!base)) {
- base = arl_create("test7", arl_str2ull, 60);
- arl_expect_custom(base, "cache", NULL, &values7[0]);
- arl_expect_custom(base, "rss", NULL, &values7[1]);
- arl_expect_custom(base, "rss_huge", NULL, &values7[2]);
- arl_expect_custom(base, "mapped_file", NULL, &values7[3]);
- arl_expect_custom(base, "writeback", NULL, &values7[4]);
- arl_expect_custom(base, "dirty", NULL, &values7[5]);
- arl_expect_custom(base, "swap", NULL, &values7[6]);
- arl_expect_custom(base, "pgpgin", NULL, &values7[7]);
- arl_expect_custom(base, "pgpgout", NULL, &values7[8]);
- arl_expect_custom(base, "pgfault", NULL, &values7[9]);
- arl_expect_custom(base, "pgmajfault", NULL, &values7[10]);
- }
-
- arl_begin(base);
-
- int i;
- for(i = 0; pairs[i].name ; i++)
- if(arl_check(base, pairs[i].name, pairs[i].value)) break;
-}
-
-void test8() {
- int i;
- for(i = 0; pairs[i].name; i++) {
- uint32_t hash = simple_hash(pairs[i].name);
-
- int j;
- for(j = 0; pairs[j].name; j++) {
- if(hash == pairs[j].hash && !strcmp(pairs[i].name, pairs[j].name)) {
- *pairs[j].collected8 = strtoull(pairs[i].value, NULL, 10);
- break;
- }
- }
- }
-}
-
-void test9() {
- int i;
- for(i = 0; pairs[i].name; i++) {
- uint32_t hash = simple_hash(pairs[i].name);
-
- int j;
- for(j = 0; pairs[j].name; j++) {
- if(hash == pairs[j].hash && !strcmp(pairs[i].name, pairs[j].name)) {
- *pairs[j].collected9 = str2ull(pairs[i].value);
- break;
- }
- }
- }
-}
-
-// ----------------------------------------------------------------------------
-
-/*
-// ==============
-// --- Poor man's cycle counting.
-static unsigned long tsc;
-
-static void begin_tsc(void)
-{
- unsigned long a, d;
- asm volatile ("cpuid\nrdtsc" : "=a" (a), "=d" (d) : "0" (0) : "ebx", "ecx");
- tsc = ((unsigned long)d << 32) | (unsigned long)a;
-}
-
-static unsigned long end_tsc(void)
-{
- unsigned long a, d;
- asm volatile ("rdtscp" : "=a" (a), "=d" (d) : : "ecx");
- return (((unsigned long)d << 32) | (unsigned long)a) - tsc;
-}
-// ===============
-*/
-
-static unsigned long long clk;
-
-static void begin_clock() {
- struct timeval tv;
- if(unlikely(gettimeofday(&tv, NULL) == -1))
- return;
- clk = tv.tv_sec * 1000000 + tv.tv_usec;
-}
-
-static unsigned long long end_clock() {
- struct timeval tv;
- if(unlikely(gettimeofday(&tv, NULL) == -1))
- return -1;
- return clk = tv.tv_sec * 1000000 + tv.tv_usec - clk;
-}
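-
-// begin_clock()/end_clock() time wall-clock microseconds via gettimeofday(),
-// unlike the commented-out rdtsc variant above that counted CPU cycles; the
-// RESULTS printed below are in usecs.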
-
-int main(void)
-{
- {
- int i;
- for(i = 0; pairs[i].name; i++)
- pairs[i].hash = simple_hash(pairs[i].name);
- }
-
- cache_hash = simple_hash("cache");
- rss_hash = simple_hash("rss");
- rss_huge_hash = simple_hash("rss_huge");
- mapped_file_hash = simple_hash("mapped_file");
- writeback_hash = simple_hash("writeback");
- dirty_hash = simple_hash("dirty");
- swap_hash = simple_hash("swap");
- pgpgin_hash = simple_hash("pgpgin");
- pgpgout_hash = simple_hash("pgpgout");
- pgfault_hash = simple_hash("pgfault");
- pgmajfault_hash = simple_hash("pgmajfault");
- inactive_anon_hash = simple_hash("inactive_anon");
- active_anon_hash = simple_hash("active_anon");
- inactive_file_hash = simple_hash("inactive_file");
- active_file_hash = simple_hash("active_file");
- unevictable_hash = simple_hash("unevictable");
- hierarchical_memory_limit_hash = simple_hash("hierarchical_memory_limit");
- total_cache_hash = simple_hash("total_cache");
- total_rss_hash = simple_hash("total_rss");
- total_rss_huge_hash = simple_hash("total_rss_huge");
- total_mapped_file_hash = simple_hash("total_mapped_file");
- total_writeback_hash = simple_hash("total_writeback");
- total_dirty_hash = simple_hash("total_dirty");
- total_swap_hash = simple_hash("total_swap");
- total_pgpgin_hash = simple_hash("total_pgpgin");
- total_pgpgout_hash = simple_hash("total_pgpgout");
- total_pgfault_hash = simple_hash("total_pgfault");
- total_pgmajfault_hash = simple_hash("total_pgmajfault");
- total_inactive_anon_hash = simple_hash("total_inactive_anon");
- total_active_anon_hash = simple_hash("total_active_anon");
- total_inactive_file_hash = simple_hash("total_inactive_file");
- total_active_file_hash = simple_hash("total_active_file");
- total_unevictable_hash = simple_hash("total_unevictable");
-
- // cache functions
- (void)simple_hash2("hello world");
- (void)strcmp("1", "2");
- (void)strtoull("123", NULL, 0);
-
- unsigned long i, c1 = 0, c2 = 0, c3 = 0, c4 = 0, c5 = 0, c6 = 0, c7 = 0, c8 = 0, c9 = 0;
- unsigned long max = 1000000;
-
- begin_clock();
- for(i = 0; i <= max ;i++) test1();
- c1 = end_clock();
-
- begin_clock();
- for(i = 0; i <= max ;i++) test2();
- c2 = end_clock();
-
- begin_clock();
- for(i = 0; i <= max ;i++) test3();
- c3 = end_clock();
-
- begin_clock();
- for(i = 0; i <= max ;i++) test4();
- c4 = end_clock();
-
- begin_clock();
- for(i = 0; i <= max ;i++) test5();
- c5 = end_clock();
-
- begin_clock();
- for(i = 0; i <= max ;i++) test6();
- c6 = end_clock();
-
- begin_clock();
- for(i = 0; i <= max ;i++) test7();
- c7 = end_clock();
-
- begin_clock();
- for(i = 0; i <= max ;i++) test8();
- c8 = end_clock();
-
- begin_clock();
- for(i = 0; i <= max ;i++) test9();
- c9 = end_clock();
-
- for(i = 0; i < 11 ; i++)
- printf("value %lu: %llu %llu %llu %llu %llu %llu %llu %llu %llu\n", i, values1[i], values2[i], values3[i], values4[i], values5[i], values6[i], values7[i], values8[i], values9[i]);
-
- printf("\n\nRESULTS\n");
- printf("test1() [1] in %lu usecs: simple system strcmp().\n"
- "test2() [4] in %lu usecs: inline simple_hash() with system strtoull().\n"
- "test3() [5] in %lu usecs: statement expression simple_hash(), system strtoull().\n"
- "test4() [6] in %lu usecs: inline simple_hash(), if-continue checks.\n"
- "test5() [7] in %lu usecs: inline simple_hash(), if-else-if-else-if (netdata default prior to ARL).\n"
- "test6() [8] in %lu usecs: adaptive re-sortable array with strtoull() (wow!)\n"
- "test7() [9] in %lu usecs: adaptive re-sortable array with str2ull() (wow!)\n"
- "test8() [2] in %lu usecs: nested loop with strtoull()\n"
- "test9() [3] in %lu usecs: nested loop with str2ull()\n"
- , c1
- , c2
- , c3
- , c4
- , c5
- , c6
- , c7
- , c8
- , c9
- );
-
- return 0;
-}
diff --git a/tests/profile/statsd-stress.c b/tests/profile/statsd-stress.c
deleted file mode 100644
index 435d58d5..00000000
--- a/tests/profile/statsd-stress.c
+++ /dev/null
@@ -1,151 +0,0 @@
-/* SPDX-License-Identifier: GPL-3.0-or-later */
-#include <stdlib.h>
-#include <arpa/inet.h>
-#include <netinet/in.h>
-#include <stdio.h>
-#include <sys/types.h>
-#include <sys/socket.h>
-#include <unistd.h>
-#include <string.h>
-#include <time.h>
-#include <pthread.h>
-
-void diep(char *s)
-{
- perror(s);
- exit(1);
-}
-
-size_t run_threads = 1;
-size_t metrics = 1024;
-
-#define SERVER_IP "127.0.0.1"
-#define PORT 8125
-
-size_t myrand(size_t max) {
- size_t loops = max / RAND_MAX;
- size_t i;
-
- size_t ret = rand();
- for(i = 0; i < loops ;i++)
- ret += rand();
-
- return ret % max;
-}
-
-struct thread_data {
- size_t id;
- struct sockaddr_in *si_other;
- int slen;
- size_t counter;
-};
-
-static void *report_thread(void *__data) {
- struct thread_data *data = (struct thread_data *)__data;
-
- size_t last = 0;
- for (;;) {
- size_t i;
- size_t total = 0;
- for(i = 0; i < run_threads ;i++)
- total += data[i].counter;
-
- printf("%zu metrics/s\n", total-last);
- last = total;
-
- sleep(1);
- printf("\033[F\033[J");
- }
-
- return NULL;
-}
-
-char *types[] = {"g", "c", "m", "ms", "h", "s", NULL};
-// char *types[] = {"g", "c", "C", "h", "ms", NULL}; // brubeck compatible
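-
-// The statsd metric type suffixes used above are, in netdata's dialect:
-// g = gauge, c = counter, m = meter, ms = timer, h = histogram, s = set.
-// The commented-out alternative limits the list to what brubeck accepts.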
-
-static void *spam_thread(void *__data) {
- struct thread_data *data = (struct thread_data *)__data;
-
- int s;
- char packet[1024];
-
- if ((s = socket(AF_INET, SOCK_DGRAM, 0))==-1)
- diep("socket");
-
- char **packets = malloc(sizeof(char *) * metrics);
- size_t i, *lengths = malloc(sizeof(size_t) * metrics);
- size_t t;
-
- for(i = 0, t = 0; i < metrics ;i++, t++) {
- if(!types[t]) t = 0;
- char *type = types[t];
-
- lengths[i] = sprintf(packet, "stress.%s.t%zu.m%zu:%zu|%s", type, data->id, i, myrand(metrics), type);
- packets[i] = strdup(packet);
- // printf("packet %zu, of length %zu: '%s'\n", i, lengths[i], packets[i]);
- }
- //printf("\n");
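-
- // Each pre-built packet follows the statsd line protocol,
- // "<name>:<value>|<type>", e.g. "stress.g.t0.m5:123|g". Building them
- // once up front means the send loop below measures UDP throughput
- // rather than formatting cost.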
-
- for (;;) {
- for(i = 0; i < metrics ;i++) {
- if (sendto(s, packets[i], lengths[i], 0, (void *)data->si_other, data->slen) < 0) {
- printf("C ==> DROPPED\n");
- return NULL;
- }
- data->counter++;
- }
- }
-
- free(packets);
- free(lengths);
- close(s);
- return NULL;
-}
-
-int main(int argc, char *argv[])
-{
- if (argc != 5) {
- fprintf(stderr, "Usage: '%s THREADS METRICS IP PORT'\n", argv[0]);
- exit(-1);
- }
-
- run_threads = atoi(argv[1]);
- metrics = atoi(argv[2]);
- char *ip = argv[3];
- int port = atoi(argv[4]);
-
- struct thread_data data[run_threads];
- struct sockaddr_in si_other;
- pthread_t threads[run_threads], report;
- size_t i;
-
- srand(time(NULL));
-
- memset(&si_other, 0, sizeof(si_other));
- si_other.sin_family = AF_INET;
- si_other.sin_port = htons(port);
- if (inet_aton(ip, &si_other.sin_addr)==0) {
- fprintf(stderr, "inet_aton() of ip '%s' failed\n", ip);
- exit(1);
- }
-
- for (i = 0; i < run_threads; ++i) {
- data[i].id = i;
- data[i].si_other = &si_other;
- data[i].slen = sizeof(si_other);
- data[i].counter = 0;
- pthread_create(&threads[i], NULL, spam_thread, &data[i]);
- }
-
- printf("\n");
- printf("THREADS : %zu\n", run_threads);
- printf("METRICS : %zu\n", metrics);
- printf("DESTINATION : %s:%d\n", ip, port);
- printf("\n");
- pthread_create(&report, NULL, report_thread, data);
-
- for (i = 0; i < run_threads; ++i)
- pthread_join(threads[i], NULL);
-
- return 0;
-}
diff --git a/tests/profile/test-eval.c b/tests/profile/test-eval.c
deleted file mode 100644
index 144381cf..00000000
--- a/tests/profile/test-eval.c
+++ /dev/null
@@ -1,299 +0,0 @@
-/* SPDX-License-Identifier: GPL-3.0-or-later */
-
-/*
- * 1. build netdata (as usual)
- * 2. cd profile/
- * 3. compile with:
- * gcc -O1 -ggdb -Wall -Wextra -I ../src/ -I ../ -o test-eval test-eval.c ../src/log.o ../src/eval.o ../src/common.o ../src/clocks.o ../src/web_buffer.o ../src/storage_number.o -pthread -lm
- */
-
-#include "config.h"
-#include "libnetdata/libnetdata.h"
-#include "database/rrdcalc.h"
-
-void netdata_cleanup_and_exit(int ret) { exit(ret); }
-
-/*
-void indent(int level, int show) {
- int i = level;
- while(i--) printf(" | ");
- if(show) printf(" \\_ ");
- else printf(" \\_ ");
-}
-
-void print_node(EVAL_NODE *op, int level);
-
-void print_value(EVAL_VALUE *v, int level) {
- indent(level, 0);
-
- switch(v->type) {
- case EVAL_VALUE_INVALID:
- printf("value (NOP)\n");
- break;
-
- case EVAL_VALUE_NUMBER:
- printf("value %Lf (NUMBER)\n", v->number);
- break;
-
- case EVAL_VALUE_EXPRESSION:
- printf("value (SUB-EXPRESSION)\n");
- print_node(v->expression, level+1);
- break;
-
- default:
- printf("value (INVALID type %d)\n", v->type);
- break;
-
- }
-}
-
-void print_node(EVAL_NODE *op, int level) {
-
-// if(op->operator != EVAL_OPERATOR_NOP) {
- indent(level, 1);
- if(op->operator) printf("%c (node %d, precedence: %d)\n", op->operator, op->id, op->precedence);
- else printf("NOP (node %d, precedence: %d)\n", op->id, op->precedence);
-// }
-
- int i = op->count;
- while(i--) print_value(&op->ops[i], level + 1);
-}
-
-calculated_number evaluate(EVAL_NODE *op, int depth);
-
-calculated_number evaluate_value(EVAL_VALUE *v, int depth) {
- switch(v->type) {
- case EVAL_VALUE_NUMBER:
- return v->number;
-
- case EVAL_VALUE_EXPRESSION:
- return evaluate(v->expression, depth);
-
- default:
- fatal("I don't know how to handle EVAL_VALUE type %d", v->type);
- }
-}
-
-void print_depth(int depth) {
- static int count = 0;
-
- printf("%d. ", ++count);
- while(depth--) printf(" ");
-}
-
-calculated_number evaluate(EVAL_NODE *op, int depth) {
- calculated_number n1, n2, r;
-
- switch(op->operator) {
- case EVAL_OPERATOR_SIGN_PLUS:
- r = evaluate_value(&op->ops[0], depth);
- break;
-
- case EVAL_OPERATOR_SIGN_MINUS:
- r = -evaluate_value(&op->ops[0], depth);
- break;
-
- case EVAL_OPERATOR_PLUS:
- if(op->count != 2)
- fatal("Operator '%c' requires 2 values, but we have %d", op->operator, op->count);
- n1 = evaluate_value(&op->ops[0], depth);
- n2 = evaluate_value(&op->ops[1], depth);
- r = n1 + n2;
- print_depth(depth);
- printf("%Lf = %Lf + %Lf\n", r, n1, n2);
- break;
-
- case EVAL_OPERATOR_MINUS:
- if(op->count != 2)
- fatal("Operator '%c' requires 2 values, but we have %d", op->operator, op->count);
- n1 = evaluate_value(&op->ops[0], depth);
- n2 = evaluate_value(&op->ops[1], depth);
- r = n1 - n2;
- print_depth(depth);
- printf("%Lf = %Lf - %Lf\n", r, n1, n2);
- break;
-
- case EVAL_OPERATOR_MULTIPLY:
- if(op->count != 2)
- fatal("Operator '%c' requires 2 values, but we have %d", op->operator, op->count);
- n1 = evaluate_value(&op->ops[0], depth);
- n2 = evaluate_value(&op->ops[1], depth);
- r = n1 * n2;
- print_depth(depth);
- printf("%Lf = %Lf * %Lf\n", r, n1, n2);
- break;
-
- case EVAL_OPERATOR_DIVIDE:
- if(op->count != 2)
- fatal("Operator '%c' requires 2 values, but we have %d", op->operator, op->count);
- n1 = evaluate_value(&op->ops[0], depth);
- n2 = evaluate_value(&op->ops[1], depth);
- r = n1 / n2;
- print_depth(depth);
- printf("%Lf = %Lf / %Lf\n", r, n1, n2);
- break;
-
- case EVAL_OPERATOR_NOT:
- n1 = evaluate_value(&op->ops[0], depth);
- r = !n1;
- print_depth(depth);
- printf("%Lf = NOT %Lf\n", r, n1);
- break;
-
- case EVAL_OPERATOR_AND:
- if(op->count != 2)
- fatal("Operator '%c' requires 2 values, but we have %d", op->operator, op->count);
- n1 = evaluate_value(&op->ops[0], depth);
- n2 = evaluate_value(&op->ops[1], depth);
- r = n1 && n2;
- print_depth(depth);
- printf("%Lf = %Lf AND %Lf\n", r, n1, n2);
- break;
-
- case EVAL_OPERATOR_OR:
- if(op->count != 2)
- fatal("Operator '%c' requires 2 values, but we have %d", op->operator, op->count);
- n1 = evaluate_value(&op->ops[0], depth);
- n2 = evaluate_value(&op->ops[1], depth);
- r = n1 || n2;
- print_depth(depth);
- printf("%Lf = %Lf OR %Lf\n", r, n1, n2);
- break;
-
- case EVAL_OPERATOR_GREATER_THAN_OR_EQUAL:
- if(op->count != 2)
- fatal("Operator '%c' requires 2 values, but we have %d", op->operator, op->count);
- n1 = evaluate_value(&op->ops[0], depth);
- n2 = evaluate_value(&op->ops[1], depth);
- r = n1 >= n2;
- print_depth(depth);
- printf("%Lf = %Lf >= %Lf\n", r, n1, n2);
- break;
-
- case EVAL_OPERATOR_LESS_THAN_OR_EQUAL:
- if(op->count != 2)
- fatal("Operator '%c' requires 2 values, but we have %d", op->operator, op->count);
- n1 = evaluate_value(&op->ops[0], depth);
- n2 = evaluate_value(&op->ops[1], depth);
- r = n1 <= n2;
- print_depth(depth);
- printf("%Lf = %Lf <= %Lf\n", r, n1, n2);
- break;
-
- case EVAL_OPERATOR_GREATER:
- if(op->count != 2)
- fatal("Operator '%c' requires 2 values, but we have %d", op->operator, op->count);
- n1 = evaluate_value(&op->ops[0], depth);
- n2 = evaluate_value(&op->ops[1], depth);
- r = n1 > n2;
- print_depth(depth);
- printf("%Lf = %Lf > %Lf\n", r, n1, n2);
- break;
-
- case EVAL_OPERATOR_LESS:
- if(op->count != 2)
- fatal("Operator '%c' requires 2 values, but we have %d", op->operator, op->count);
- n1 = evaluate_value(&op->ops[0], depth);
- n2 = evaluate_value(&op->ops[1], depth);
- r = n1 < n2;
- print_depth(depth);
- printf("%Lf = %Lf < %Lf\n", r, n1, n2);
- break;
-
- case EVAL_OPERATOR_NOT_EQUAL:
- if(op->count != 2)
- fatal("Operator '%c' requires 2 values, but we have %d", op->operator, op->count);
- n1 = evaluate_value(&op->ops[0], depth);
- n2 = evaluate_value(&op->ops[1], depth);
- r = n1 != n2;
- print_depth(depth);
- printf("%Lf = %Lf <> %Lf\n", r, n1, n2);
- break;
-
- case EVAL_OPERATOR_EQUAL:
- if(op->count != 2)
- fatal("Operator '%c' requires 2 values, but we have %d", op->operator, op->count);
- n1 = evaluate_value(&op->ops[0], depth);
- n2 = evaluate_value(&op->ops[1], depth);
- r = n1 == n2;
- print_depth(depth);
- printf("%Lf = %Lf == %Lf\n", r, n1, n2);
- break;
-
- case EVAL_OPERATOR_EXPRESSION_OPEN:
- printf("BEGIN SUB-EXPRESSION\n");
- r = evaluate_value(&op->ops[0], depth + 1);
- printf("END SUB-EXPRESSION\n");
- break;
-
- case EVAL_OPERATOR_NOP:
- case EVAL_OPERATOR_VALUE:
- r = evaluate_value(&op->ops[0], depth);
- break;
-
- default:
- error("I don't know how to handle operator '%c'", op->operator);
- r = 0;
- break;
- }
-
- return r;
-}
-
-
-void print_expression(EVAL_NODE *op, const char *failed_at, int error) {
- if(op) {
- printf("expression tree:\n");
- print_node(op, 0);
-
- printf("\nevaluation steps:\n");
- evaluate(op, 0);
-
- int error;
- calculated_number ret = expression_evaluate(op, &error);
- printf("\ninternal evaluator:\nSTATUS: %d, RESULT = %Lf\n", error, ret);
-
- expression_free(op);
- }
- else {
- printf("error: %d, failed_at: '%s'\n", error, (failed_at)?failed_at:"<NONE>");
- }
-}
-*/
-
-int health_variable_lookup(const char *variable, uint32_t hash, RRDCALC *rc, calculated_number *result) {
- (void)variable;
- (void)hash;
- (void)rc;
- (void)result;
-
- return 0;
-}
-
-int main(int argc, char **argv) {
- if(argc != 2) {
- fprintf(stderr, "I need an epxression (enclose it in single-quotes (') as a single parameter)\n");
- exit(1);
- }
-
- const char *failed_at = NULL;
- int error;
-
- EVAL_EXPRESSION *exp = expression_parse(argv[1], &failed_at, &error);
- if(!exp)
- printf("\nPARSING FAILED\nExpression: '%s'\nParsing stopped at: '%s'\nParsing error code: %d (%s)\n", argv[1], (failed_at)?((*failed_at)?failed_at:"<END OF EXPRESSION>"):"<NONE>", error, expression_strerror(error));
-
- else {
- printf("\nPARSING OK\nExpression: '%s'\nParsed as : '%s'\nParsing error code: %d (%s)\n", argv[1], exp->parsed_as, error, expression_strerror(error));
-
- if(expression_evaluate(exp)) {
- printf("\nEvaluates to: %Lf\n\n", exp->result);
- }
- else {
- printf("\nEvaluation failed with code %d and message: %s\n\n", exp->error, buffer_tostring(exp->error_msg));
- }
- expression_free(exp);
- }
-
- return 0;
-}
diff --git a/tests/updater_checks.bats b/tests/updater_checks.bats
deleted file mode 100755
index 1a7eeb70..00000000
--- a/tests/updater_checks.bats
+++ /dev/null
@@ -1,70 +0,0 @@
-#!/usr/bin/env bats
-#
-# This script is responsible for validating
-# updater capabilities after a change
-#
-# Copyright: SPDX-License-Identifier: GPL-3.0-or-later
-#
-# Author : Pavlos Emm. Katsoulakis <paul@netdata.cloud>
-#
-
-INSTALLATION="$BATS_TMPDIR/installation"
-ENV="${INSTALLATION}/netdata/etc/netdata/.environment"
-# list of files which need to be checked. Path cannot start from '/'
-FILES="usr/libexec/netdata/plugins.d/go.d.plugin
- usr/libexec/netdata/plugins.d/charts.d.plugin
- usr/libexec/netdata/plugins.d/python.d.plugin
- usr/libexec/netdata/plugins.d/node.d.plugin"
-
-DIRS="usr/sbin/netdata
- etc/netdata
- usr/share/netdata
- usr/libexec/netdata
- var/cache/netdata
- var/lib/netdata
- var/log/netdata"
-
-setup() {
- # Fail unless we are at the top level of the netdata git repository
- TOP_LEVEL=$(basename "$(git rev-parse --show-toplevel)")
- CWD=$(git rev-parse --show-cdup || echo "")
- if [ -n "${CWD}" ] || [ ! "${TOP_LEVEL}" == "netdata" ]; then
- echo "Run as ./tests/$(basename "$0") from top level directory of git repository"
- exit 1
- fi
-}
-
-@test "install stable netdata using kickstart" {
- kickstart_file="/tmp/kickstart.$$"
- curl -Ss -o ${kickstart_file} https://my-netdata.io/kickstart.sh
- chmod +x ${kickstart_file}
- ${kickstart_file} --dont-wait --dont-start-it --auto-update --install ${INSTALLATION}
-
- # Validate particular files
- for file in $FILES; do
- [ ! -f "$BATS_TMPDIR/$file" ]
- done
-
- # Validate particular directories
- for a_dir in $DIRS; do
- [ ! -d "$BATS_TMPDIR/$a_dir" ]
- done
-
- # Cleanup
- rm -rf ${kickstart_file}
-}
-
-@test "update netdata using the new updater" {
- export ENVIRONMENT_FILE="${ENV}"
- # Run the updater, with the override so that it uses the local repo we have at hand
- # Try to run the installed, if any, otherwise just run the one from the repo
- export NETDATA_LOCAL_TARBAL_OVERRIDE="${PWD}"
- /etc/cron.daily/netdata-updater || ./packaging/installer/netdata-updater.sh
- ! grep "new_installation" "${ENV}"
-}
-
-@test "uninstall netdata using latest uninstaller" {
- ./packaging/installer/netdata-uninstaller.sh --yes --force --env "${ENV}"
- [ ! -f "${INSTALLATION}/netdata/usr/sbin/netdata" ]
- [ ! -f "/etc/cron.daily/netdata-updater" ]
-}
diff --git a/tests/updater_checks.sh b/tests/updater_checks.sh
deleted file mode 100755
index 9c8b6fa4..00000000
--- a/tests/updater_checks.sh
+++ /dev/null
@@ -1,78 +0,0 @@
-#!/usr/bin/env sh
-#
-# Wrapper script that installs the required dependencies
-# for the BATS script to run successfully
-#
-# Copyright: SPDX-License-Identifier: GPL-3.0-or-later
-#
-# Author : Pavlos Emm. Katsoulakis <paul@netdata.cloud>
-#
-
-echo "Syncing/updating repository.."
-
-blind_arch_grep_install() {
- # There is a peculiar Docker case with Arch, where grep is not available.
- # This method has to be triggered blindly, to inject grep so that we can
- # detect the distribution; otherwise it becomes a chicken-and-egg problem.
- echo "* * Workaround hack * *"
- echo "Attempting blind install for archlinux case"
-
- if command -v pacman > /dev/null 2>&1; then
- echo "Executing grep installation"
- pacman -Sy
- pacman --noconfirm --needed -S grep
- fi
-}
-blind_arch_grep_install || echo "Workaround failed, proceed as usual"
-
-running_os="$(grep '^ID=' /etc/os-release | cut -d'=' -f2 | sed -e 's/"//g')"
-
-case "${running_os}" in
-"centos"|"fedora")
- echo "Running on CentOS, updating YUM repository.."
- yum clean all
- yum update -y
-
- echo "Installing extra dependencies.."
- yum install -y epel-release
- yum install -y bats curl
- ;;
-"debian"|"ubuntu")
- echo "Running ${running_os}, updating APT repository"
- apt-get update -y
- apt-get install -y bats curl
- ;;
-"opensuse-leap"|"opensuse-tumbleweed")
- zypper update -y
- zypper install -y bats curl
- ;;
-"arch")
- pacman -Sy
- pacman --noconfirm --needed -S bash-bats curl
- ;;
-"alpine")
- apk update
- apk add bash curl bats
- ;;
-*)
- echo "Running on ${running_os}, no repository preparation done"
- ;;
-esac
-
-# Download and run dependency scriptlet, before anything else
-#
-deps_tool="/tmp/deps_tool.$$.sh"
-curl -Ss -o ${deps_tool} https://raw.githubusercontent.com/netdata/netdata-demo-site/master/install-required-packages.sh
-if [ -f "${deps_tool}" ]; then
- echo "Running dependency handling script.."
- chmod +x "${deps_tool}"
- ${deps_tool} --non-interactive netdata
- rm -f "${deps_tool}"
- echo "Done!"
-else
- echo "Failed to fetch dependency script, aborting the test"
- exit 1
-fi
-
-echo "Running BATS file.."
-bats --tap tests/updater_checks.bats
diff --git a/tests/urls/request.sh b/tests/urls/request.sh
new file mode 100644
index 00000000..6cbe7738
--- /dev/null
+++ b/tests/urls/request.sh
@@ -0,0 +1,301 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+################################################################################################
+#### ####
+#### GLOBAL VARIABLES ####
+#### ####
+################################################################################################
+
+# The current time
+CT=$(date +'%s')
+
+# A timestamp 30 seconds in the past
+PT=$((CT - 30))
+
+# The output directories where we will store the results and errors
+OUTDIR="tests"
+OUTEDIR="encoded_tests"
+OUTOPTDIR="options"
+ERRDIR="etests"
+
+################################################################################################
+#### ####
+#### FUNCTIONS ####
+#### ####
+################################################################################################
+
+# Print error message and close script
+netdata_print_error(){
+ echo "Closing due error \"$1\" code \"$2\""
+ exit 1
+}
+
+# Print the header message of the function
+netdata_print_header() {
+ echo "$1"
+}
+
+# Create the main directory where the results will be stored
+netdata_create_directory() {
+ netdata_print_header "Creating directory $1"
+ if [ ! -d "$1" ]; then
+ mkdir "$1"
+ TEST=$?
+ if [ $TEST -ne 0 ]; then
+ netdata_print_error "Cannot create directory $?"
+ fi
+ else
+ echo "Working with directory $OUTDIR"
+ fi
+}
+
+# Check that the download did not have a problem
+netdata_test_download(){
+ grep "HTTP/1.1 200 OK" "$1" 2>/dev/null 1>/dev/null
+ TEST=$?
+ if [ $TEST -ne 0 ]; then
+ netdata_print_error "Cannot do download of the page $2" $?
+ exit 1
+ fi
+}
+
+# Check that the download failed, as expected for an invalid request
+netdata_error_test(){
+ grep "HTTP/1.1 200 OK" "$1" 2>/dev/null 1>/dev/null
+ TEST=$?
+ if [ $TEST -eq 0 ]; then
+ netdata_print_error "The page $2 did not answer with an error" $?
+ exit 1
+ fi
+}
+
+
+# Download information from Netdata
+netdata_download_various() {
+ netdata_print_header "Getting $2"
+ curl -v -k --create-dirs -o "$OUTDIR/$3.out" "$1/$2" 2> "$OUTDIR/$3.err"
+ netdata_test_download "$OUTDIR/$3.err" "$1/$2"
+}
+
+netdata_download_various_with_options() {
+ netdata_print_header "Getting options for $2"
+ curl -X OPTIONS -v -k --create-dirs -o "$OUTOPTDIR/$3.out" "$1/$2" 2> "$OUTOPTDIR/$3.err"
+ netdata_test_download "$OUTOPTDIR/$3.err" "$1/$2"
+}
+
+# Request a malformed URL from Netdata and expect an error response
+netdata_wrong_request_various() {
+ netdata_print_header "Getting $2"
+ curl -v -k --create-dirs -o "$ERRDIR/$3.out" "$1/$2" 2> "$ERRDIR/$3.err"
+ netdata_error_test "$ERRDIR/$3.err" "$1/$2"
+}
+
+# Download charts from Netdata
+netdata_download_charts() {
+ curl -v -k --create-dirs -o "$OUTDIR/charts.out" "$1/$2" 2> "$OUTDIR/charts.err"
+ netdata_test_download "$OUTDIR/charts.err" "$1/$2"
+
+ # TODO: rewrite this extraction (see the note after this function)
+ grep -w "id" "$OUTDIR/charts.out" | cut -d: -f2 | grep "\"," | sed s/,//g | sort
+}
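+
+# Note: netdata_download_charts() scrapes chart ids out of the charts JSON
+# with grep/cut/sed rather than a JSON parser such as jq, so it depends on
+# the current pretty-printed layout of /api/v1/charts.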
+
+# Test options for a specific chart
+netdata_download_chart() {
+ SEPARATOR="&"
+ EQUAL="="
+ OUTD=$OUTDIR
+ ENCODED=" "
+ for I in $(seq 0 1); do
+ if [ "$I" -eq "1" ] ; then
+ SEPARATOR="%26"
+ EQUAL="%3D"
+ OUTD=$OUTEDIR
+ ENCODED="encoded"
+ fi
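+
+ # On the second pass the query separator and equals sign are sent
+ # percent-encoded (%26 = '&', %3D = '='), to check that the web server
+ # decodes query strings the same way as on the plain pass.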
+
+ NAME=${3//\"/}
+ netdata_print_header "Getting data for $NAME using $4 $ENCODED"
+
+ LDIR=$OUTD"/"$4
+
+ LURL="$1/$2$EQUAL$NAME"
+
+ NAME=$NAME"_$4"
+
+ curl -v -k --create-dirs -o "$LDIR/$NAME.out" "$LURL" 2> "$LDIR/$NAME.err"
+ netdata_test_download "$LDIR/$NAME.err" "$LURL"
+
+ UFILES=( "points" "before" "after" )
+ COUNTER=0
+ for OPT in "points=100" "before=$PT" "after=$CT" ;
+ do
+ LURL="$LURL$SEPARATOR$OPT"
+ LFILE=$NAME"_${UFILES[$COUNTER]}";
+
+ curl -v -k --create-dirs -o "$LDIR/$LFILE.out" "$LURL" 2> "$LDIR/$LFILE.err"
+ netdata_test_download "$LDIR/$LFILE.err" "$LURL"
+
+ COUNTER=$((COUNTER + 1))
+ done
+
+ LURL="$LURL&group$EQUAL"
+ for OPT in "min" "max" "sum" "median" "stddev" "cv" "ses" "des" "incremental_sum" "average";
+ do
+ TURL=$LURL$OPT
+ TFILE=$NAME"_$OPT";
+ curl -v -k --create-dirs -o "$LDIR/$TFILE.out" "$TURL" 2> "$LDIR/$TFILE.err"
+ netdata_test_download "$LDIR/$TFILE.err" "$TURL"
+ for MORE in "jsonp" "json" "ssv" "csv" "datatable" "datasource" "tsv" "ssvcomma" "html" "array";
+ do
+ TURL=$TURL"&format="$MORE
+ TFILE=$NAME"_$OPT""_$MORE";
+ curl -v -k --create-dirs -o "$LDIR/$TFILE.out" "$TURL" 2> "$LDIR/$TFILE.err"
+ netdata_test_download "$LDIR/$TFILE.err" "$TURL"
+ done
+ done
+
+ LURL="$LURL$OPT&gtime=60"
+ NFILE=$NAME"_gtime"
+ curl -v -k --create-dirs -o "$LDIR/$NFILE.out" "$TURL" 2> "$LDIR/$NFILE.err"
+ netdata_test_download "$LDIR/$NFILE.err" "$LURL"
+
+ LURL="$LURL$OPT&options=percentage"
+ NFILE=$NAME"_percentage"
+ curl -v -k --create-dirs -o "$LDIR/$NFILE.out" "$TURL" 2> "$LDIR/$NFILE.err"
+ netdata_test_download "$LDIR/$NFILE.err" "$LURL"
+
+ LURL="$LURL$OPT&dimensions=system%7Cnice"
+ NFILE=$NAME"_dimension"
+ curl -v -k --create-dirs -o "$LDIR/$NFILE.out" "$TURL" 2> "$LDIR/$NFILE.err"
+ netdata_test_download "$LDIR/$NFILE.err" "$LURL"
+
+ LURL="$LURL$OPT&label=testing"
+ NFILE=$NAME"_label"
+ curl -v -k --create-dirs -o "$LDIR/$NFILE.out" "$TURL" 2> "$LDIR/$NFILE.err"
+ netdata_test_download "$LDIR/$NFILE.err" "$LURL"
+ done
+}
+
+# Download the allmetrics endpoint in every supported format
+netdata_download_allmetrics() {
+ netdata_print_header "Getting All metrics"
+ LURL="$1/api/v1/allmetrics?format="
+ for FMT in "shell" "prometheus" "prometheus_all_hosts" "json" ;
+ do
+ TURL=$LURL$FMT
+ for OPT in "yes" "no";
+ do
+ if [ "$FMT" == "prometheus" ]; then
+ TURL="$TURL&help=$OPT&types=$OPT&timestamps=$OPT"
+ fi
+ TURL="$TURL&names=$OPT&oldunits=$OPT&hideunits=$OPT&prefix=ND"
+
+ NAME="allmetrics_$FMT"
+ echo "$OUTDIR/$2/$NAME.out"
+ curl -v -k --create-dirs -o "$OUTDIR/$2/$NAME.out" "$TURL" 2> "$OUTDIR/$2/$NAME.err"
+ netdata_test_download "$OUTDIR/$2/$NAME.err" "$TURL"
+ done
+ done
+}
+
+
+################################################################################################
+#### ####
+#### MAIN ROUTINE ####
+#### ####
+################################################################################################
+MURL="http://127.0.0.1:19999"
+
+netdata_create_directory $OUTDIR
+netdata_create_directory $OUTEDIR
+netdata_create_directory $OUTOPTDIR
+netdata_create_directory $ERRDIR
+
+wget --no-check-certificate --execute="robots = off" --mirror --convert-links --no-parent $MURL
+TEST=$?
+if [ $TEST -ne "0" ] ; then
+ echo "Cannot connect to Netdata"
+ exit 1
+fi
+
+netdata_download_various $MURL "netdata.conf" "netdata.conf"
+
+netdata_download_various_with_options $MURL "netdata.conf" "netdata.conf"
+
+netdata_wrong_request_various $MURL "api/v15/info?this%20could%20not%20be%20here" "err_version"
+
+netdata_wrong_request_various $MURL "api/v1/\(*@&$\!$%%5E\)\!$*%&\)\!$*%%5E*\!%5E%\!%5E$%\!%5E%\(\!*%5E*%5E%\(*@&$%5E%\(\!%5E#*&\!^#$*&\!^%\)@\($%^\)\!*&^\(\!*&^#$&#$\)\!$%^\)\!$*%&\)#$\!^#*$^\!\(*#^#\)\!%^\!\)$*%&\!\(*&$\!^#$*&^\!*#^$\!*^\)%\(\!*&$%\)\(\!&#$\!^*#&$^\!*^%\)\!$%\)\!\(&#$\!^#*&^$" "err_version2"
+
+netdata_download_various $MURL "api/v1/info" "info"
+netdata_download_various_with_options $MURL "api/v1/info" "info"
+netdata_download_various $MURL "api/v1/info?this%20could%20not%20be%20here" "err_info"
+
+netdata_print_header "Getting all the netdata charts"
+CHARTS=$( netdata_download_charts "$MURL" "api/v1/charts" )
+WCHARTS=$( netdata_download_charts "$MURL" "api/v1/charts?this%20could%20not%20be%20here" )
+WCHARTS2=$( netdata_download_charts "$MURL" "api/v1/charts%3fthis%20could%20not%20be%20here" )
+
+if [ ${#CHARTS[@]} -ne ${#WCHARTS[@]} ]; then
+ echo "The number of charts does not match with division not encoded.";
+ exit 2;
+elif [ ${#CHARTS[@]} -ne ${#WCHARTS2[@]} ]; then
+ echo "The number of charts does not match when everything is encoded";
+ exit 3;
+fi
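+
+# For manual inspection (sketch; assumes jq is installed), the chart ids
+# parsed above can be listed straight from the charts endpoint:
+#   curl -s "$MURL/api/v1/charts" | jq -r '.charts | keys[]'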
+
+netdata_wrong_request_various $MURL "api/v1/chart" "err_chart_without_chart"
+netdata_wrong_request_various $MURL "api/v1/chart?_=234231424242" "err_chart_arg"
+
+netdata_download_various $MURL "api/v1/chart?chart=cpu.cpu0_interrupts&_=234231424242" "chart_cpu_with_more_args"
+netdata_download_various_with_options $MURL "api/v1/chart?chart=cpu.cpu0_interrupts&_=234231424242" "chart_cpu_with_more_args"
+
+netdata_download_various $MURL "api/v1/chart%3Fchart=cpu.cpu0_interrupts&_=234231424242" "chart_cpu_with_more_args_encoded"
+netdata_download_various_with_options $MURL "api/v1/chart%3Fchart=cpu.cpu0_interrupts&_=234231424242" "chart_cpu_with_more_args_encoded"
+netdata_download_various $MURL "api/v1/chart%3Fchart=cpu.cpu0_interrupts%26_=234231424242" "chart_cpu_with_more_args_encoded2"
+netdata_download_various $MURL "api/v1/chart%3Fchart%3Dcpu.cpu0_interrupts%26_%3D234231424242" "chart_cpu_with_more_args_encoded3"
+
+netdata_create_directory "$OUTDIR/chart"
+for I in $CHARTS ; do
+ NAME=${I//\"/}
+ netdata_download_various $MURL "api/v1/chart?chart=$NAME" "chart/$NAME"
+done
+
+netdata_wrong_request_various $MURL "api/v1/alarm_variables" "err_alarm_variables_without_chart"
+netdata_wrong_request_various $MURL "api/v1/alarm_variables?_=234231424242" "err_alarm_variables_arg"
+netdata_download_various $MURL "api/v1/alarm_variables?chart=cpu.cpu0_interrupts&_=234231424242" "alarm_cpu_with_more_args"
+
+netdata_create_directory "$OUTDIR/alarm_variables"
+for I in $CHARTS ; do
+ NAME=${I//\"/}
+ netdata_download_various $MURL "api/v1/alarm_variables?chart=$NAME" "alarm_variables/$NAME"
+done
+
+netdata_create_directory "$OUTDIR/badge"
+netdata_create_directory "$OUTEDIR/badge"
+for I in $CHARTS ; do
+ netdata_download_chart $MURL "api/v1/badge.svg?chart" "$I" "badge"
+done
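+
+# Badges can also be fetched one at a time (sketch); for example, rendering
+# the system.cpu badge to an SVG file:
+#   curl -s -o system_cpu_badge.svg "$MURL/api/v1/badge.svg?chart=system.cpu"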
+
+netdata_create_directory "$OUTDIR/allmetrics"
+netdata_download_allmetrics $MURL "allmetrics"
+
+netdata_download_various $MURL "api/v1/alarms?all" "alarms_all"
+netdata_download_various $MURL "api/v1/alarms?active" "alarms_active"
+netdata_download_various $MURL "api/v1/alarms" "alarms_nothing"
+
+netdata_download_various $MURL "api/v1/alarm_log?after" "alarm_without"
+netdata_download_various $MURL "api/v1/alarm_log" "alarm_nothing"
+netdata_download_various $MURL "api/v1/alarm_log?after&_=$PT" "alarm_log"
+
+netdata_create_directory "$OUTDIR/data"
+netdata_create_directory "$OUTEDIR/data"
+for I in $CHARTS ; do
+ netdata_download_chart $MURL "api/v1/data?chart" "$I" "data"
+ break;
+done
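+
+# The data endpoint also accepts explicit windows (sketch); for example, the
+# last five points of system.cpu as JSON:
+#   curl -s "$MURL/api/v1/data?chart=system.cpu&points=5&format=json"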
+
+WHITE='\033[0;37m'
+echo -e "${WHITE}ALL the URLS got 200 as answer!"
+
+exit 0
diff --git a/tests/urls/request.sh.in b/tests/urls/request.sh.in
index fac00bc4..6cbe7738 100644
--- a/tests/urls/request.sh.in
+++ b/tests/urls/request.sh.in
@@ -212,7 +212,7 @@ netdata_create_directory $OUTEDIR
netdata_create_directory $OUTOPTDIR
netdata_create_directory $ERRDIR
-wget --execute="robots = off" --mirror --convert-links --no-parent http://127.0.0.1:19999
+wget --no-check-certificate --execute="robots = off" --mirror --convert-links --no-parent $MURL
TEST=$?
if [ $TEST -ne "0" ] ; then
echo "Cannot connect to Netdata"
@@ -232,9 +232,9 @@ netdata_download_various_with_options $MURL "api/v1/info" "info"
netdata_download_various $MURL "api/v1/info?this%20could%20not%20be%20here" "err_info"
netdata_print_header "Getting all the netdata charts"
-CHARTS=$( netdata_download_charts "http://127.0.0.1:19999" "api/v1/charts" )
-WCHARTS=$( netdata_download_charts "http://127.0.0.1:19999" "api/v1/charts?this%20could%20not%20be%20here" )
-WCHARTS2=$( netdata_download_charts "http://127.0.0.1:19999" "api/v1/charts%3fthis%20could%20not%20be%20here" )
+CHARTS=$( netdata_download_charts "$MURL" "api/v1/charts" )
+WCHARTS=$( netdata_download_charts "$MURL" "api/v1/charts?this%20could%20not%20be%20here" )
+WCHARTS2=$( netdata_download_charts "$MURL" "api/v1/charts%3fthis%20could%20not%20be%20here" )
if [ ${#CHARTS[@]} -ne ${#WCHARTS[@]} ]; then
echo "The number of charts does not match with division not encoded.";
@@ -295,8 +295,6 @@ for I in $CHARTS ; do
break;
done
-#http://arch-esxi:19999/api/v1/(*@&$!$%%5E)!$*%&)!$*%%5E*!%5E%!%5E$%!%5E%(!*%5E*%5E%(*@&$%5E%(!%5E#*&!^#$*&!^%)@($%^)!*&^(!*&^#$&#$)!$%^)!$*%&)#$!^#*$^!(*#^#)!%^!)$*%&!(*&$!^#$*&^!*#^$!*^)%(!*&$%)(!&#$!^*#&$^!*^%)!$%)!(&#$!^#*&^$
-
WHITE='\033[0;37m'
echo -e "${WHITE}ALL the URLS got 200 as answer!"
diff --git a/web/Makefile.in b/web/Makefile.in
new file mode 100644
index 00000000..203be6d7
--- /dev/null
+++ b/web/Makefile.in
@@ -0,0 +1,702 @@
+# Makefile.in generated by automake 1.15.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2017 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+VPATH = @srcdir@
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = web
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(dist_noinst_DATA) \
+ $(am__DIST_COMMON)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \
+ ctags-recursive dvi-recursive html-recursive info-recursive \
+ install-data-recursive install-dvi-recursive \
+ install-exec-recursive install-html-recursive \
+ install-info-recursive install-pdf-recursive \
+ install-ps-recursive install-recursive installcheck-recursive \
+ installdirs-recursive pdf-recursive ps-recursive \
+ tags-recursive uninstall-recursive
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_noinst_DATA)
+RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \
+ distclean-recursive maintainer-clean-recursive
+am__recursive_targets = \
+ $(RECURSIVE_TARGETS) \
+ $(RECURSIVE_CLEAN_TARGETS) \
+ $(am__extra_recursive_targets)
+AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \
+ distdir
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+# Read a list of newline-separated strings from the standard input,
+# and print each of them once, without duplicates. Input order is
+# *not* preserved.
+am__uniquify_input = $(AWK) '\
+ BEGIN { nonempty = 0; } \
+ { items[$$0] = 1; nonempty = 1; } \
+ END { if (nonempty) { for (i in items) print i; }; } \
+'
+# Make sure the list of sources is unique. This is necessary because,
+# e.g., the same source file might be shared among _SOURCES variables
+# for different programs/libraries.
+am__define_uniq_tagged_files = \
+ list='$(am__tagged_files)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | $(am__uniquify_input)`
+ETAGS = etags
+CTAGS = ctags
+DIST_SUBDIRS = $(SUBDIRS)
+am__DIST_COMMON = $(srcdir)/Makefile.in
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+am__relativize = \
+ dir0=`pwd`; \
+ sed_first='s,^\([^/]*\)/.*$$,\1,'; \
+ sed_rest='s,^[^/]*/*,,'; \
+ sed_last='s,^.*/\([^/]*\)$$,\1,'; \
+ sed_butlast='s,/*[^/]*$$,,'; \
+ while test -n "$$dir1"; do \
+ first=`echo "$$dir1" | sed -e "$$sed_first"`; \
+ if test "$$first" != "."; then \
+ if test "$$first" = ".."; then \
+ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \
+ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \
+ else \
+ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \
+ if test "$$first2" = "$$first"; then \
+ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \
+ else \
+ dir2="../$$dir2"; \
+ fi; \
+ dir0="$$dir0"/"$$first"; \
+ fi; \
+ fi; \
+ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \
+ done; \
+ reldir="$$dir2"
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CUPSCONFIG = @CUPSCONFIG@
+CXX = @CXX@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CXX_BINARY = @CXX_BINARY@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+JSON_CFLAGS = @JSON_CFLAGS@
+JSON_LIBS = @JSON_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
+LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
+LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
+LIBCURL_LIBS = @LIBCURL_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
+LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
+LIBSSL_LIBS = @LIBSSL_LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
+OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
+OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
+OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
+OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
+OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
+OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
+OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
+OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
+OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
+OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
+OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
+OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
+PROTOBUF_LIBS = @PROTOBUF_LIBS@
+PROTOC = @PROTOC@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
+XENLIGHT_LIBS = @XENLIGHT_LIBS@
+YAJL_CFLAGS = @YAJL_CFLAGS@
+YAJL_LIBS = @YAJL_LIBS@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+SUBDIRS = \
+ api \
+ gui \
+ server \
+ $(NULL)
+
+dist_noinst_DATA = \
+ README.md \
+ gui/confluence/README.md \
+ gui/custom/README.md \
+ $(NULL)
+
+all: all-recursive
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu web/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu web/Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+
+# This directory's subdirectories are mostly independent; you can cd
+# into them and run 'make' without going through this Makefile.
+# To change the values of 'make' variables: instead of editing Makefiles,
+# (1) if the variable is set in 'config.status', edit 'config.status'
+# (which will cause the Makefiles to be regenerated when you run 'make');
+# (2) otherwise, pass the desired values on the 'make' command line.
+$(am__recursive_targets):
+ @fail=; \
+ if $(am__make_keepgoing); then \
+ failcom='fail=yes'; \
+ else \
+ failcom='exit 1'; \
+ fi; \
+ dot_seen=no; \
+ target=`echo $@ | sed s/-recursive//`; \
+ case "$@" in \
+ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \
+ *) list='$(SUBDIRS)' ;; \
+ esac; \
+ for subdir in $$list; do \
+ echo "Making $$target in $$subdir"; \
+ if test "$$subdir" = "."; then \
+ dot_seen=yes; \
+ local_target="$$target-am"; \
+ else \
+ local_target="$$target"; \
+ fi; \
+ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \
+ || eval $$failcom; \
+ done; \
+ if test "$$dot_seen" = "no"; then \
+ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \
+ fi; test -z "$$fail"
+
+ID: $(am__tagged_files)
+ $(am__define_uniq_tagged_files); mkid -fID $$unique
+tags: tags-recursive
+TAGS: tags
+
+tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+ set x; \
+ here=`pwd`; \
+ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \
+ include_option=--etags-include; \
+ empty_fix=.; \
+ else \
+ include_option=--include; \
+ empty_fix=; \
+ fi; \
+ list='$(SUBDIRS)'; for subdir in $$list; do \
+ if test "$$subdir" = .; then :; else \
+ test ! -f $$subdir/TAGS || \
+ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \
+ fi; \
+ done; \
+ $(am__define_uniq_tagged_files); \
+ shift; \
+ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
+ test -n "$$unique" || unique=$$empty_fix; \
+ if test $$# -gt 0; then \
+ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+ "$$@" $$unique; \
+ else \
+ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+ $$unique; \
+ fi; \
+ fi
+ctags: ctags-recursive
+
+CTAGS: ctags
+ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+ $(am__define_uniq_tagged_files); \
+ test -z "$(CTAGS_ARGS)$$unique" \
+ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
+ $$unique
+
+GTAGS:
+ here=`$(am__cd) $(top_builddir) && pwd` \
+ && $(am__cd) $(top_srcdir) \
+ && gtags -i $(GTAGS_ARGS) "$$here"
+cscopelist: cscopelist-recursive
+
+cscopelist-am: $(am__tagged_files)
+ list='$(am__tagged_files)'; \
+ case "$(srcdir)" in \
+ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \
+ *) sdir=$(subdir)/$(srcdir) ;; \
+ esac; \
+ for i in $$list; do \
+ if test -f "$$i"; then \
+ echo "$(subdir)/$$i"; \
+ else \
+ echo "$$sdir/$$i"; \
+ fi; \
+ done >> $(top_builddir)/cscope.files
+
+distclean-tags:
+ -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+ @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \
+ if test "$$subdir" = .; then :; else \
+ $(am__make_dryrun) \
+ || test -d "$(distdir)/$$subdir" \
+ || $(MKDIR_P) "$(distdir)/$$subdir" \
+ || exit 1; \
+ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \
+ $(am__relativize); \
+ new_distdir=$$reldir; \
+ dir1=$$subdir; dir2="$(top_distdir)"; \
+ $(am__relativize); \
+ new_top_distdir=$$reldir; \
+ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \
+ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \
+ ($(am__cd) $$subdir && \
+ $(MAKE) $(AM_MAKEFLAGS) \
+ top_distdir="$$new_top_distdir" \
+ distdir="$$new_distdir" \
+ am__remove_distdir=: \
+ am__skip_length_check=: \
+ am__skip_mode_fix=: \
+ distdir) \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-recursive
+all-am: Makefile $(DATA)
+installdirs: installdirs-recursive
+installdirs-am:
+install: install-recursive
+install-exec: install-exec-recursive
+install-data: install-data-recursive
+uninstall: uninstall-recursive
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-recursive
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-recursive
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-recursive
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic distclean-tags
+
+dvi: dvi-recursive
+
+dvi-am:
+
+html: html-recursive
+
+html-am:
+
+info: info-recursive
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-recursive
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-recursive
+
+install-html-am:
+
+install-info: install-info-recursive
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-recursive
+
+install-pdf-am:
+
+install-ps: install-ps-recursive
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-recursive
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-recursive
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-recursive
+
+pdf-am:
+
+ps: ps-recursive
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: $(am__recursive_targets) install-am install-strip
+
+.PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am check \
+ check-am clean clean-generic cscopelist-am ctags ctags-am \
+ distclean distclean-generic distclean-tags distdir dvi dvi-am \
+ html html-am info info-am install install-am install-data \
+ install-data-am install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs installdirs-am maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags tags-am uninstall uninstall-am
+
+.PRECIOUS: Makefile
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/web/README.md b/web/README.md
index 5c1a06f5..fc230f51 100644
--- a/web/README.md
+++ b/web/README.md
@@ -1,14 +1,14 @@
# Web dashboards overview
-The default port is 19999; for example, to access the dashboard on localhost, use: http://localhost:19999
+The default port is 19999; for example, to access the dashboard on localhost, use: <http://localhost:19999>
To view Netdata collected data you access its **[REST API v1](api/)**.
For our convenience, Netdata provides 2 more layers:
-1. The `dashboard.js` javascript library that allows us to design custom dashboards using plain HTML. For information on creating custom dashboards, see **[Custom Dashboards](gui/custom/)** and **[Atlassian Confluence Dashboards](gui/confluence/)**
+1. The `dashboard.js` JavaScript library that allows us to design custom dashboards using plain HTML. For information on creating custom dashboards, see **[Custom Dashboards](gui/custom/)** and **[Atlassian Confluence Dashboards](gui/confluence/)**
-2. Ready to be used web dashboards that render all the charts a Netdata server maintains.
+2. Ready-to-use web dashboards that render all the charts a Netdata server maintains.
## Customizing the standard dashboards
@@ -18,11 +18,11 @@ If you change that file, your changes will be overwritten when Netdata is update
You have to copy the example file under a new name, so that it will not be overwritten with Netdata updates.
-To configure your info file set in netdata.conf:
+To have Netdata use your custom info file, set it in `netdata.conf`:
```
[web]
custom dashboard_info.js = your_file_name.js
```
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/web/api/Makefile.in b/web/api/Makefile.in
new file mode 100644
index 00000000..98533d8c
--- /dev/null
+++ b/web/api/Makefile.in
@@ -0,0 +1,760 @@
+# Makefile.in generated by automake 1.15.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2017 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+VPATH = @srcdir@
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = web/api
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(dist_noinst_DATA) \
+ $(dist_web_DATA) $(am__DIST_COMMON)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \
+ ctags-recursive dvi-recursive html-recursive info-recursive \
+ install-data-recursive install-dvi-recursive \
+ install-exec-recursive install-html-recursive \
+ install-info-recursive install-pdf-recursive \
+ install-ps-recursive install-recursive installcheck-recursive \
+ installdirs-recursive pdf-recursive ps-recursive \
+ tags-recursive uninstall-recursive
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
+am__vpath_adj = case $$p in \
+ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
+ *) f=$$p;; \
+ esac;
+am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
+am__install_max = 40
+am__nobase_strip_setup = \
+ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
+am__nobase_strip = \
+ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
+am__nobase_list = $(am__nobase_strip_setup); \
+ for p in $$list; do echo "$$p $$p"; done | \
+ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
+ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
+ if (++n[$$2] == $(am__install_max)) \
+ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
+ END { for (dir in files) print dir, files[dir] }'
+am__base_list = \
+ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
+ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
+am__uninstall_files_from_dir = { \
+ test -z "$$files" \
+ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \
+ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \
+ $(am__cd) "$$dir" && rm -f $$files; }; \
+ }
+am__installdirs = "$(DESTDIR)$(webdir)"
+DATA = $(dist_noinst_DATA) $(dist_web_DATA)
+RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \
+ distclean-recursive maintainer-clean-recursive
+am__recursive_targets = \
+ $(RECURSIVE_TARGETS) \
+ $(RECURSIVE_CLEAN_TARGETS) \
+ $(am__extra_recursive_targets)
+AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \
+ distdir
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+# Read a list of newline-separated strings from the standard input,
+# and print each of them once, without duplicates. Input order is
+# *not* preserved.
+am__uniquify_input = $(AWK) '\
+ BEGIN { nonempty = 0; } \
+ { items[$$0] = 1; nonempty = 1; } \
+ END { if (nonempty) { for (i in items) print i; }; } \
+'
+# Make sure the list of sources is unique. This is necessary because,
+# e.g., the same source file might be shared among _SOURCES variables
+# for different programs/libraries.
+am__define_uniq_tagged_files = \
+ list='$(am__tagged_files)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | $(am__uniquify_input)`
+ETAGS = etags
+CTAGS = ctags
+DIST_SUBDIRS = $(SUBDIRS)
+am__DIST_COMMON = $(srcdir)/Makefile.in
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+am__relativize = \
+ dir0=`pwd`; \
+ sed_first='s,^\([^/]*\)/.*$$,\1,'; \
+ sed_rest='s,^[^/]*/*,,'; \
+ sed_last='s,^.*/\([^/]*\)$$,\1,'; \
+ sed_butlast='s,/*[^/]*$$,,'; \
+ while test -n "$$dir1"; do \
+ first=`echo "$$dir1" | sed -e "$$sed_first"`; \
+ if test "$$first" != "."; then \
+ if test "$$first" = ".."; then \
+ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \
+ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \
+ else \
+ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \
+ if test "$$first2" = "$$first"; then \
+ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \
+ else \
+ dir2="../$$dir2"; \
+ fi; \
+ dir0="$$dir0"/"$$first"; \
+ fi; \
+ fi; \
+ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \
+ done; \
+ reldir="$$dir2"
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CUPSCONFIG = @CUPSCONFIG@
+CXX = @CXX@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CXX_BINARY = @CXX_BINARY@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+JSON_CFLAGS = @JSON_CFLAGS@
+JSON_LIBS = @JSON_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
+LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
+LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
+LIBCURL_LIBS = @LIBCURL_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
+LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
+LIBSSL_LIBS = @LIBSSL_LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
+OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
+OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
+OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
+OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
+OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
+OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
+OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
+OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
+OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
+OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
+OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
+OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
+PROTOBUF_LIBS = @PROTOBUF_LIBS@
+PROTOC = @PROTOC@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
+XENLIGHT_LIBS = @XENLIGHT_LIBS@
+YAJL_CFLAGS = @YAJL_CFLAGS@
+YAJL_LIBS = @YAJL_LIBS@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+SUBDIRS = \
+ badges \
+ queries \
+ exporters \
+ formatters \
+ health \
+ $(NULL)
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
+
+dist_web_DATA = \
+ netdata-swagger.yaml \
+ netdata-swagger.json \
+ $(NULL)
+
+all: all-recursive
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu web/api/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu web/api/Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+install-dist_webDATA: $(dist_web_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_web_DATA)'; test -n "$(webdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(webdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(webdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(webdir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(webdir)" || exit $$?; \
+ done
+
+uninstall-dist_webDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_web_DATA)'; test -n "$(webdir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(webdir)'; $(am__uninstall_files_from_dir)
+
+# This directory's subdirectories are mostly independent; you can cd
+# into them and run 'make' without going through this Makefile.
+# To change the values of 'make' variables: instead of editing Makefiles,
+# (1) if the variable is set in 'config.status', edit 'config.status'
+# (which will cause the Makefiles to be regenerated when you run 'make');
+# (2) otherwise, pass the desired values on the 'make' command line.
+$(am__recursive_targets):
+ @fail=; \
+ if $(am__make_keepgoing); then \
+ failcom='fail=yes'; \
+ else \
+ failcom='exit 1'; \
+ fi; \
+ dot_seen=no; \
+ target=`echo $@ | sed s/-recursive//`; \
+ case "$@" in \
+ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \
+ *) list='$(SUBDIRS)' ;; \
+ esac; \
+ for subdir in $$list; do \
+ echo "Making $$target in $$subdir"; \
+ if test "$$subdir" = "."; then \
+ dot_seen=yes; \
+ local_target="$$target-am"; \
+ else \
+ local_target="$$target"; \
+ fi; \
+ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \
+ || eval $$failcom; \
+ done; \
+ if test "$$dot_seen" = "no"; then \
+ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \
+ fi; test -z "$$fail"
+
+ID: $(am__tagged_files)
+ $(am__define_uniq_tagged_files); mkid -fID $$unique
+tags: tags-recursive
+TAGS: tags
+
+tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+ set x; \
+ here=`pwd`; \
+ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \
+ include_option=--etags-include; \
+ empty_fix=.; \
+ else \
+ include_option=--include; \
+ empty_fix=; \
+ fi; \
+ list='$(SUBDIRS)'; for subdir in $$list; do \
+ if test "$$subdir" = .; then :; else \
+ test ! -f $$subdir/TAGS || \
+ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \
+ fi; \
+ done; \
+ $(am__define_uniq_tagged_files); \
+ shift; \
+ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
+ test -n "$$unique" || unique=$$empty_fix; \
+ if test $$# -gt 0; then \
+ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+ "$$@" $$unique; \
+ else \
+ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+ $$unique; \
+ fi; \
+ fi
+ctags: ctags-recursive
+
+CTAGS: ctags
+ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+ $(am__define_uniq_tagged_files); \
+ test -z "$(CTAGS_ARGS)$$unique" \
+ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
+ $$unique
+
+GTAGS:
+ here=`$(am__cd) $(top_builddir) && pwd` \
+ && $(am__cd) $(top_srcdir) \
+ && gtags -i $(GTAGS_ARGS) "$$here"
+cscopelist: cscopelist-recursive
+
+cscopelist-am: $(am__tagged_files)
+ list='$(am__tagged_files)'; \
+ case "$(srcdir)" in \
+ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \
+ *) sdir=$(subdir)/$(srcdir) ;; \
+ esac; \
+ for i in $$list; do \
+ if test -f "$$i"; then \
+ echo "$(subdir)/$$i"; \
+ else \
+ echo "$$sdir/$$i"; \
+ fi; \
+ done >> $(top_builddir)/cscope.files
+
+distclean-tags:
+ -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+ @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \
+ if test "$$subdir" = .; then :; else \
+ $(am__make_dryrun) \
+ || test -d "$(distdir)/$$subdir" \
+ || $(MKDIR_P) "$(distdir)/$$subdir" \
+ || exit 1; \
+ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \
+ $(am__relativize); \
+ new_distdir=$$reldir; \
+ dir1=$$subdir; dir2="$(top_distdir)"; \
+ $(am__relativize); \
+ new_top_distdir=$$reldir; \
+ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \
+ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \
+ ($(am__cd) $$subdir && \
+ $(MAKE) $(AM_MAKEFLAGS) \
+ top_distdir="$$new_top_distdir" \
+ distdir="$$new_distdir" \
+ am__remove_distdir=: \
+ am__skip_length_check=: \
+ am__skip_mode_fix=: \
+ distdir) \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-recursive
+all-am: Makefile $(DATA)
+installdirs: installdirs-recursive
+installdirs-am:
+ for dir in "$(DESTDIR)$(webdir)"; do \
+ test -z "$$dir" || $(MKDIR_P) "$$dir"; \
+ done
+install: install-recursive
+install-exec: install-exec-recursive
+install-data: install-data-recursive
+uninstall: uninstall-recursive
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-recursive
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-recursive
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-recursive
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic distclean-tags
+
+dvi: dvi-recursive
+
+dvi-am:
+
+html: html-recursive
+
+html-am:
+
+info: info-recursive
+
+info-am:
+
+install-data-am: install-dist_webDATA
+
+install-dvi: install-dvi-recursive
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-recursive
+
+install-html-am:
+
+install-info: install-info-recursive
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-recursive
+
+install-pdf-am:
+
+install-ps: install-ps-recursive
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-recursive
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-recursive
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-recursive
+
+pdf-am:
+
+ps: ps-recursive
+
+ps-am:
+
+uninstall-am: uninstall-dist_webDATA
+
+.MAKE: $(am__recursive_targets) install-am install-strip
+
+.PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am check \
+ check-am clean clean-generic cscopelist-am ctags ctags-am \
+ distclean distclean-generic distclean-tags distdir dvi dvi-am \
+ html html-am info info-am install install-am install-data \
+ install-data-am install-dist_webDATA install-dvi \
+ install-dvi-am install-exec install-exec-am install-html \
+ install-html-am install-info install-info-am install-man \
+ install-pdf install-pdf-am install-ps install-ps-am \
+ install-strip installcheck installcheck-am installdirs \
+ installdirs-am maintainer-clean maintainer-clean-generic \
+ mostlyclean mostlyclean-generic pdf pdf-am ps ps-am tags \
+ tags-am uninstall uninstall-am uninstall-dist_webDATA
+
+.PRECIOUS: Makefile
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/web/api/README.md b/web/api/README.md
index 44afbc90..958b1580 100644
--- a/web/api/README.md
+++ b/web/api/README.md
@@ -2,13 +2,13 @@
## Netdata REST API
-The complete documentation of the netdata API is available at the **[Swagger Editor](https://editor.swagger.io/?url=https://raw.githubusercontent.com/netdata/netdata/master/web/api/netdata-swagger.yaml)**.
+The complete documentation of the Netdata API is available at the **[Swagger Editor](https://editor.swagger.io/?url=https://raw.githubusercontent.com/netdata/netdata/master/web/api/netdata-swagger.yaml)**.
If you prefer it over the Swagger Editor, you can also use **[Swagger UI](https://registry.my-netdata.io/swagger/#!/default/get_data)**. This, however, does not provide all the information available.
## Google charts API
-netdata is a [Google Visualization API datatable and datasource provider](https://developers.google.com/chart/interactive/docs/reference), so it can directly be used with [Google Charts](https://developers.google.com/chart/interactive/docs/).
+Netdata is a [Google Visualization API datatable and datasource provider](https://developers.google.com/chart/interactive/docs/reference), so it can directly be used with [Google Charts](https://developers.google.com/chart/interactive/docs/).
Check this [single chart, jsfiddle example](https://jsfiddle.net/ktsaou/ensu4uws/9/):
@@ -18,4 +18,4 @@ and this [multi chart, jsfiddle example](https://jsfiddle.net/ktsaou/L5y2eqp2/):
![image](https://cloud.githubusercontent.com/assets/2662304/23824766/31a4a68c-0685-11e7-8429-8327cab64be2.png)
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fapi%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fapi%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/web/api/badges/Makefile.in b/web/api/badges/Makefile.in
new file mode 100644
index 00000000..ea8c6d60
--- /dev/null
+++ b/web/api/badges/Makefile.in
@@ -0,0 +1,514 @@
+# Makefile.in generated by automake 1.15.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2017 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+VPATH = @srcdir@
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = web/api/badges
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(dist_noinst_DATA) \
+ $(am__DIST_COMMON)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_noinst_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+am__DIST_COMMON = $(srcdir)/Makefile.in
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CUPSCONFIG = @CUPSCONFIG@
+CXX = @CXX@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CXX_BINARY = @CXX_BINARY@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+JSON_CFLAGS = @JSON_CFLAGS@
+JSON_LIBS = @JSON_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
+LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
+LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
+LIBCURL_LIBS = @LIBCURL_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
+LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
+LIBSSL_LIBS = @LIBSSL_LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
+OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
+OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
+OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
+OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
+OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
+OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
+OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
+OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
+OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
+OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
+OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
+OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
+PROTOBUF_LIBS = @PROTOBUF_LIBS@
+PROTOC = @PROTOC@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
+XENLIGHT_LIBS = @XENLIGHT_LIBS@
+YAJL_CFLAGS = @YAJL_CFLAGS@
+YAJL_LIBS = @YAJL_LIBS@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
+
+all: all-am
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu web/api/badges/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu web/api/badges/Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(DATA)
+installdirs:
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags-am uninstall uninstall-am
+
+.PRECIOUS: Makefile
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/web/api/badges/README.md b/web/api/badges/README.md
index 6884cc11..75c30abf 100644
--- a/web/api/badges/README.md
+++ b/web/api/badges/README.md
@@ -6,11 +6,11 @@ Netdata can generate badges for any chart and any dimension at any time-frame. B
**Netdata badges are powerful**!
-Given that netdata collects from **1.000** to **5.000** metrics per server (depending on the number of network interfaces, disks, cpu cores, applications running, users logged in, containers running, etc) and that netdata already has data reduction/aggregation functions embedded, the badges can be quite powerful.
+Given that Netdata collects from **1.000** to **5.000** metrics per server (depending on the number of network interfaces, disks, CPU cores, applications running, users logged in, containers running, etc.) and that Netdata already has data reduction/aggregation functions embedded, the badges can be quite powerful.
For each metric/dimension and for arbitrary time-frames badges can show **min**, **max** or **average** value, but also **sum** or **incremental-sum** to have their **volume**.
-For example, there is [a chart in netdata that shows the current requests/s of nginx](http://london.my-netdata.io/#nginx_local_nginx). Using this chart alone we can show the following badges (we could add more time-frames, like **today**, **yesterday**, etc):
+For example, there is [a chart in Netdata that shows the current requests/s of nginx](http://london.my-netdata.io/#nginx_local_nginx). Using this chart alone, we can show the following badges (we could add more time-frames, like **today**, **yesterday**, etc.):
<a href="https://registry.my-netdata.io/#nginx_local_nginx"><img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=nginx_local.connections&dimensions=active&value_color=grey:null%7Cblue&label=nginx%20active%20connections%20now&units=null&precision=0"/></a> <a href="https://registry.my-netdata.io/#nginx_local_nginx"><img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=nginx_local.connections&dimensions=active&after=-3600&value_color=orange&label=last%20hour%20average&units=null&options=unaligned&precision=0"/></a> <a href="https://registry.my-netdata.io/#nginx_local_nginx"><img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=nginx_local.connections&dimensions=active&group=max&after=-3600&value_color=red&label=last%20hour%20max&units=null&options=unaligned&precision=0"/></a>
@@ -18,25 +18,25 @@ Similarly, there is [a chart that shows outbound bandwidth per class](http://lon
<a href="https://registry.my-netdata.io/#tc_eth0"><img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=tc.world_out&dimensions=web_server&value_color=green&label=web%20server%20sends%20now&units=kbps"/></a> <a href="https://registry.my-netdata.io/#tc_eth0"><img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=tc.world_out&dimensions=web_server&after=-86400&options=unaligned&group=sum&divide=8388608&value_color=blue&label=web%20server%20sent%20today&units=GB"/></a>
-The right one is a **volume** calculation. Netdata calculated the total of the last 86.400 seconds (a day) which gives `kilobits`, then divided it by 8 to make it KB, then by 1024 to make it MB and then by 1024 to make it GB. Calculations like this are quite accurate, since for every value collected, every second, netdata interpolates it to second boundary using microsecond calculations.
+The right one is a **volume** calculation. Netdata calculated the total of the last 86.400 seconds (a day), which gives `kilobits`, then divided it by 8 to make it KB, then by 1024 to make it MB and then by 1024 to make it GB. Calculations like this are quite accurate, since for every value collected, every second, Netdata interpolates it to the second boundary using microsecond calculations.
-Let's see a few more badge examples (they come from the [netdata registry](../../../registry/)):
+Let's see a few more badge examples (they come from the [Netdata registry](../../../registry/)):
-- **cpu usage of user `root`** (you can pick any user; 100% = 1 core). This will be `green <10%`, `yellow <20%`, `orange <50%`, `blue <100%` (1 core), `red` otherwise (you define thresholds and colors on the URL).
+- **cpu usage of user `root`** (you can pick any user; 100% = 1 core). This will be `green <10%`, `yellow <20%`, `orange <50%`, `blue <100%` (1 core), `red` otherwise (you define thresholds and colors on the URL).
- <a href="https://registry.my-netdata.io/#apps_cpu"><img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=users.cpu&dimensions=root&value_color=grey:null%7Cgreen%3C10%7Cyellow%3C20%7Corange%3C50%7Cblue%3C100%7Cred&label=root%20user%20cpu%20now&units=%25"></img></a> <a href="https://registry.my-netdata.io/#apps_cpu"><img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=users.cpu&dimensions=root&after=-3600&value_color=grey:null%7Cgreen%3C10%7Cyellow%3C20%7Corange%3C50%7Cblue%3C100%7Cred&label=root%20user%20average%20cpu%20last%20hour&units=%25"></img></a>
+ <a href="https://registry.my-netdata.io/#apps_cpu"><img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=users.cpu&dimensions=root&value_color=grey:null%7Cgreen%3C10%7Cyellow%3C20%7Corange%3C50%7Cblue%3C100%7Cred&label=root%20user%20cpu%20now&units=%25"></img></a> <a href="https://registry.my-netdata.io/#apps_cpu"><img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=users.cpu&dimensions=root&after=-3600&value_color=grey:null%7Cgreen%3C10%7Cyellow%3C20%7Corange%3C50%7Cblue%3C100%7Cred&label=root%20user%20average%20cpu%20last%20hour&units=%25"></img></a>
-- **mysql queries per second**
+- **mysql queries per second**
- <a href="https://registry.my-netdata.io/#mysql_local"><img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=mysql_local.queries&dimensions=questions&label=mysql%20queries%20now&value_color=red&units=%5Cs"></img></a> <a href="https://registry.my-netdata.io/#mysql_local"><img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=mysql_local.queries&dimensions=questions&after=-3600&options=unaligned&group=sum&label=mysql%20queries%20this%20hour&value_color=green&units=null"></img></a> <a href="https://registry.my-netdata.io/#mysql_local"><img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=mysql_local.queries&dimensions=questions&after=-86400&options=unaligned&group=sum&label=mysql%20queries%20today&value_color=blue&units=null"></img></a>
+ <a href="https://registry.my-netdata.io/#mysql_local"><img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=mysql_local.queries&dimensions=questions&label=mysql%20queries%20now&value_color=red&units=%5Cs"></img></a> <a href="https://registry.my-netdata.io/#mysql_local"><img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=mysql_local.queries&dimensions=questions&after=-3600&options=unaligned&group=sum&label=mysql%20queries%20this%20hour&value_color=green&units=null"></img></a> <a href="https://registry.my-netdata.io/#mysql_local"><img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=mysql_local.queries&dimensions=questions&after=-86400&options=unaligned&group=sum&label=mysql%20queries%20today&value_color=blue&units=null"></img></a>
- niche ones: **mysql SELECT statements with JOIN, which did full table scans**:
+ niche ones: **mysql SELECT statements with JOIN, which did full table scans**:
- <a href="https://registry.my-netdata.io/#mysql_local_issues"><img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=mysql_local.join_issues&dimensions=scan&after=-3600&label=full%20table%20scans%20the%20last%20hour&value_color=orange&group=sum&units=null"></img></a>
+ <a href="https://registry.my-netdata.io/#mysql_local_issues"><img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=mysql_local.join_issues&dimensions=scan&after=-3600&label=full%20table%20scans%20the%20last%20hour&value_color=orange&group=sum&units=null"></img></a>
---
-> So, every single line on the charts of a [netdata dashboard](http://london.my-netdata.io/), can become a badge and this badge can calculate **average**, **min**, **max**, or **volume** for any time-frame! And you can also vary the badge color using conditions on the calculated value.
+> So, every single line on the charts of a [Netdata dashboard](http://london.my-netdata.io/), can become a badge and this badge can calculate **average**, **min**, **max**, or **volume** for any time-frame! And you can also vary the badge color using conditions on the calculated value.
---
@@ -44,15 +44,15 @@ Let's see a few more badge examples (they come from the [netdata registry](../..
The basic URL is `http://your.netdata:19999/api/v1/badge.svg?option1&option2&option3&...`.
-Here is what you can put for `options` (these are standard netdata API options):
+Here is what you can put for `options` (these are standard Netdata API options):
-- `chart=CHART.NAME`
+- `chart=CHART.NAME`
- The chart to get the values from.
+ The chart to get the values from.
- **This is the only parameter required** and with just this parameter, netdata will return the sum of the latest values of all chart dimensions.
+  **This is the only parameter required**, and with just this parameter, Netdata will return the sum of the latest values of all chart dimensions.
- Example:
+ Example:
```html
<a href="#">
@@ -66,21 +66,21 @@ Here is what you can put for `options` (these are standard netdata API options):
<img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=system.cpu"></img>
</a>
-- `alarm=NAME`
+- `alarm=NAME`
- Render the current value and status of an alarm linked to the chart. This option can be ignored if the badge to be generated is not related to an alarm.
+ Render the current value and status of an alarm linked to the chart. This option can be ignored if the badge to be generated is not related to an alarm.
- The current value of the alarm will be rendered. The color of the badge will indicate the status of the alarm.
+ The current value of the alarm will be rendered. The color of the badge will indicate the status of the alarm.
- For alarm badges, **both `chart` and `alarm` parameters are required**.
+ For alarm badges, **both `chart` and `alarm` parameters are required**.
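  For example, a sketch of an alarm badge (the alarm name `10min_cpu_usage` is illustrative; substitute any alarm attached to the chart):

```html
<a href="#">
    <img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=system.cpu&alarm=10min_cpu_usage"></img>
</a>
```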
-- `dimensions=DIMENSION1|DIMENSION2|...`
+- `dimensions=DIMENSION1|DIMENSION2|...`
- The dimensions of the chart to use. If you don't set any dimension, all will be used. When multiple dimensions are used, netdata will sum their values. You can append `options=absolute` if you want this sum to convert all values to positive before adding them.
+  The dimensions of the chart to use. If you don't set any dimension, all will be used. When multiple dimensions are used, Netdata will sum their values. You can append `options=absolute` if you want all values to be converted to positive before they are summed.
- Pipes in HTML have to escaped with `%7C`.
+  Pipes in HTML have to be escaped with `%7C`.
- Example:
+ Example:
```html
<a href="#">
@@ -94,15 +94,15 @@ Here is what you can put for `options` (these are standard netdata API options):
<img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=system.cpu&dimensions=system%7Cnice"></img>
</a>
-- `before=SECONDS` and `after=SECONDS`
+- `before=SECONDS` and `after=SECONDS`
- The timeframe. These can be absolute unix timestamps, or relative to now, number of seconds. By default `before=0` and `after=-1` (1 second in the past).
+  The timeframe. These can be absolute unix timestamps, or numbers of seconds relative to now. By default `before=0` and `after=-1` (1 second in the past).
- To get the last minute set `after=-60`. This will give the average of the last complete minute (XX:XX:00 - XX:XX:59).
+ To get the last minute set `after=-60`. This will give the average of the last complete minute (XX:XX:00 - XX:XX:59).
- To get the max of the last hour set `after=-3600&group=max`. This will give the maximum value of the last complete hour (XX:00:00 - XX:59:59)
+ To get the max of the last hour set `after=-3600&group=max`. This will give the maximum value of the last complete hour (XX:00:00 - XX:59:59)
- Example:
+ Example:
```html
<a href="#">
@@ -125,109 +125,108 @@ Here is what you can put for `options` (these are standard netdata API options):
```
It produces this:
-
+
<a href="#">
<img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=system.cpu&before=-60&after=-60"></img>
</a>
-- `group=min` or `group=max` or `group=average` (the default) or `group=sum` or `group=incremental-sum`
+- `group=min` or `group=max` or `group=average` (the default) or `group=sum` or `group=incremental-sum`
- If netdata will have to reduce (aggregate) the data to calculate the value, which aggregation method to use.
+  If Netdata has to reduce (aggregate) the data to calculate the value, this selects the aggregation method to use.
- - `max` will find the max value for the timeframe. This works on both positive and negative dimensions. It will find the most extreme value.
+ - `max` will find the max value for the timeframe. This works on both positive and negative dimensions. It will find the most extreme value.
- - `min` will find the min value for the timeframe. This works on both positive and negative dimensions. It will find the number closest to zero.
+ - `min` will find the min value for the timeframe. This works on both positive and negative dimensions. It will find the number closest to zero.
- - `average` will calculate the average value for the timeframe.
+ - `average` will calculate the average value for the timeframe.
- - `sum` will sum all the values for the timeframe. This is nice for finding the volume of dimensions for a timeframe. So if you have a dimension that reports `X per second`, you can find the volume of the dimension in a timeframe, by adding its values in that timeframe.
+ - `sum` will sum all the values for the timeframe. This is nice for finding the volume of dimensions for a timeframe. So if you have a dimension that reports `X per second`, you can find the volume of the dimension in a timeframe, by adding its values in that timeframe.
- - `incremental-sum` will sum the difference of each value to its next. Let's assume you have a dimension that does not measure the rate of something, but the absolute value of it. So it has values like this "1, 5, 3, 7, 4". `incremental-sum` will calculate the difference of adjacent values. In this example, they will be `(5 - 1) + (3 - 5) + (7 - 3) + (4 - 7) = 3` (which is equal to the last value minus the first = 4 - 1).
+ - `incremental-sum` will sum the difference of each value to its next. Let's assume you have a dimension that does not measure the rate of something, but the absolute value of it. So it has values like this "1, 5, 3, 7, 4". `incremental-sum` will calculate the difference of adjacent values. In this example, they will be `(5 - 1) + (3 - 5) + (7 - 3) + (4 - 7) = 3` (which is equal to the last value minus the first = 4 - 1).
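  For example, here is the same chart and time-frame with two different aggregation methods (a sketch following the examples above):

```html
<a href="#">
    <img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=system.cpu&after=-3600&group=average&label=cpu%20average%20last%20hour&units=%25"></img>
</a>
<a href="#">
    <img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=system.cpu&after=-3600&group=max&label=cpu%20max%20last%20hour&units=%25"></img>
</a>
```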
-- `options=opt1|opt2|opt3|...`
+- `options=opt1|opt2|opt3|...`
- These fine tune various options of the API. Here is what you can use for badges (the API has more option, but only these are useful for badges):
+  These fine-tune various options of the API. Here is what you can use for badges (the API has more options, but only these are useful for badges):
- - `percentage`, instead of returning the value, calculate the percentage of the sum of the selected dimensions, versus the sum of all the dimensions of the chart. This also sets the units to `%`.
+ - `percentage`, instead of returning the value, calculate the percentage of the sum of the selected dimensions, versus the sum of all the dimensions of the chart. This also sets the units to `%`.
- - `absolute` or `abs`, turn all values positive and then sum them.
+ - `absolute` or `abs`, turn all values positive and then sum them.
- - `display_absolute` or `display-absolute`, to use the signed value during color calculation, but display the absolute value on the badge.
+ - `display_absolute` or `display-absolute`, to use the signed value during color calculation, but display the absolute value on the badge.
- - `min2max`, when multiple dimensions are given, do not sum them, but take their `max - min`.
+ - `min2max`, when multiple dimensions are given, do not sum them, but take their `max - min`.
- - `unaligned`, when data are reduced / aggregated (e.g. the request is about the average of the last minute, or hour), netdata by default aligns them so that the charts will have a constant shape (so average per minute returns always XX:XX:00 - XX:XX:59). Setting the `unaligned` option, netdata will aggregate data without any alignment, so if the request is for 60 seconds, it will aggregate the latest 60 seconds of collected data.
+  - `unaligned`, when data are reduced / aggregated (e.g. the request is about the average of the last minute, or hour), Netdata by default aligns them so that the charts will have a constant shape (so the average per minute always returns XX:XX:00 - XX:XX:59). With the `unaligned` option set, Netdata will aggregate data without any alignment, so if the request is for 60 seconds, it will aggregate the latest 60 seconds of collected data.
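  For example, a sketch showing the `user` dimension as a percentage of total CPU, aggregated without alignment (the pipe between options is escaped as `%7C`):

```html
<a href="#">
    <img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=system.cpu&dimensions=user&after=-60&options=percentage%7Cunaligned&label=user%20cpu%20last%20minute"></img>
</a>
```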
These are options dedicated to badges:
-- `label=TEXT`
-
- The label of the badge.
+- `label=TEXT`
-- `units=TEXT`
+ The label of the badge.
- The units of the badge. If you want to put a `/`, please put a `\`. This is because netdata allows badges parameters to be given as path in URL, instead of query string. You can also use `null` or `empty` to show it without any units.
+- `units=TEXT`
- The units `seconds`, `minutes` and `hours` trigger special formatting. The value has to be in this unit, and netdata will automatically change it to show a more pretty duration.
+  The units of the badge. If you want to put a `/`, please put a `\`. This is because Netdata allows badge parameters to be given as a path in the URL, instead of a query string. You can also use `null` or `empty` to show it without any units.
-- `multiply=NUMBER`
+  The units `seconds`, `minutes` and `hours` trigger special formatting. The value has to be in this unit, and Netdata will automatically change it to show a prettier duration.
- Multiply the value with this number. The default is `1`.
+- `multiply=NUMBER`
-- `divide=NUMBER`
+ Multiply the value with this number. The default is `1`.
- Divide the value with this number. The default is `1`.
+- `divide=NUMBER`
-- `label_color=COLOR`
+ Divide the value with this number. The default is `1`.
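  For example, dividing by `8388608` (8 × 1024 × 1024) converts a `kilobits` volume to `GB`, as in the bandwidth badge shown earlier (a sketch):

```html
<a href="#">
    <img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=tc.world_out&dimensions=web_server&after=-86400&options=unaligned&group=sum&divide=8388608&label=web%20server%20sent%20today&units=GB"></img>
</a>
```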
- The color of the label (the left part). You can use any HTML color, include `#NNN` and `#NNNNNN`. The following colors are defined in netdata (and you can use them by name): `green`, `brightgreen`, `yellow`, `yellowgreen`, `orange`, `red`, `blue`, `grey`, `gray`, `lightgrey`, `lightgray`. These are taken from https://github.com/badges/shields so they are compatible with standard badges.
+- `label_color=COLOR`
-- `value_color=COLOR:null|COLOR<VALUE|COLOR>VALUE|COLOR>=VALUE|COLOR<=VALUE|...`
+  The color of the label (the left part). You can use any HTML color, including `#NNN` and `#NNNNNN`. The following colors are defined in Netdata (and you can use them by name): `green`, `brightgreen`, `yellow`, `yellowgreen`, `orange`, `red`, `blue`, `grey`, `gray`, `lightgrey`, `lightgray`. These are taken from <https://github.com/badges/shields> so they are compatible with standard badges.
- You can add a pipe delimited list of conditions to pick the color. The first matching (left to right) will be used.
+- `value_color=COLOR:null|COLOR<VALUE|COLOR>VALUE|COLOR>=VALUE|COLOR<=VALUE|...`
- Example: `value_color=grey:null|green<10|yellow<100|orange<1000|blue<10000|red`
+ You can add a pipe delimited list of conditions to pick the color. The first matching (left to right) will be used.
- The above will set `grey` if no value exists (not collected within the `gap when lost iterations above` in netdata.conf for the chart), `green` if the value is less than 10, `yellow` if the value is less than 100, etc up to `red` which will be used if no other conditions match.
+ Example: `value_color=grey:null|green<10|yellow<100|orange<1000|blue<10000|red`
- The supported operators are `<`, `>`, `<=`, `>=`, `=` (or `:`) and `!=` (or `<>`).
+  The above will set `grey` if no value exists (not collected within the `gap when lost iterations above` in `netdata.conf` for the chart), `green` if the value is less than 10, `yellow` if the value is less than 100, and so on, up to `red`, which will be used if no other conditions match.
-- `precision=NUMBER`
+ The supported operators are `<`, `>`, `<=`, `>=`, `=` (or `:`) and `!=` (or `<>`).
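  Put together, with the pipes and operators URL-escaped (`%7C`, `%3C`), the example above becomes (a sketch):

```html
<a href="#">
    <img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=system.cpu&value_color=grey:null%7Cgreen%3C10%7Cyellow%3C100%7Corange%3C1000%7Cblue%3C10000%7Cred"></img>
</a>
```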
- The number of decimal digits of the value. By default netdata will add:
+- `precision=NUMBER`
- - no decimal digits for values > 1000
- - 1 decimal digit for values > 100
- - 2 decimal digits for values > 1
- - 3 decimal digits for values > 0.1
- - 4 decimal digits for values <= 0.1
+ The number of decimal digits of the value. By default Netdata will add:
- Using the `precision=NUMBER` you can set your preference per badge.
+ - no decimal digits for values > 1000
+ - 1 decimal digit for values > 100
+ - 2 decimal digits for values > 1
+ - 3 decimal digits for values > 0.1
+ - 4 decimal digits for values \<= 0.1
-- `scale=XXX`
+ Using the `precision=NUMBER` you can set your preference per badge.
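  For example, forcing two decimal digits regardless of the value (a sketch):

```html
<a href="#">
    <img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=system.cpu&after=-60&precision=2"></img>
</a>
```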
- This option scales the svg image. It accepts values above or equal to 100 (100% is the default scale). For example, lets get a few different sizes:
+- `scale=XXX`
- <img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=system.cpu&after=-60&scale=100"></img> original<br/>
- <img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=system.cpu&after=-60&scale=125"></img> `scale=125`<br/>
- <img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=system.cpu&after=-60&scale=150"></img> `scale=150`<br/>
- <img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=system.cpu&after=-60&scale=175"></img> `scale=175`<br/>
- <img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=system.cpu&after=-60&scale=200"></img> `scale=200`
+  This option scales the SVG image. It accepts values greater than or equal to 100 (100% is the default scale). For example, let's get a few different sizes:
+ <img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=system.cpu&after=-60&scale=100"></img> original<br/>
+ <img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=system.cpu&after=-60&scale=125"></img> `scale=125`<br/>
+ <img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=system.cpu&after=-60&scale=150"></img> `scale=150`<br/>
+ <img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=system.cpu&after=-60&scale=175"></img> `scale=175`<br/>
+ <img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=system.cpu&after=-60&scale=200"></img> `scale=200`
-- `refresh=auto` or `refresh=SECONDS`
+- `refresh=auto` or `refresh=SECONDS`
- This option enables auto-refreshing of images. netdata will send the HTTP header `Refresh: SECONDS` to the web browser, thus requesting automatic refresh of the images at regular intervals.
+ This option enables auto-refreshing of images. Netdata will send the HTTP header `Refresh: SECONDS` to the web browser, thus requesting automatic refresh of the images at regular intervals.
- `auto` will calculate the proper `SECONDS` to avoid unnecessary refreshes. If `SECONDS` is zero, this feature is disabled (it is also disabled by default).
+ `auto` will calculate the proper `SECONDS` to avoid unnecessary refreshes. If `SECONDS` is zero, this feature is disabled (it is also disabled by default).
- Auto-refreshing like this, works only if you access the badge directly. So, you may have to put it an `embed` or `iframe` for it to be auto-refreshed. Use something like this:
+  Auto-refreshing like this works only if you access the badge directly. So, you may have to put it in an `embed` or `iframe` for it to be auto-refreshed. Use something like this:
```html
<embed src="BADGE_URL" type="image/svg+xml" height="20" />
```
- Another way is to use javascript to auto-refresh them. You can auto-refresh all the netdata badges on a page using javascript. You have to add a class to all the netdata badges, like this `<img class="netdata-badge" src="..."/>`. Then add this javascript code to your page (it requires jquery):
+  Another way is to use JavaScript. You can auto-refresh all the Netdata badges on a page by adding a class to each of them, like this `<img class="netdata-badge" src="..."/>`, and then adding this JavaScript code to your page (it requires jQuery):
```html
<script>
@@ -251,23 +250,23 @@ A more advanced badges refresh method is to include `http://your.netdata.ip:1999
Keep in mind that if you add badge URLs to your HTML pages you have to escape the special characters:
-character|name|escape sequence
-:-------:|:--:|:-------------:
-` `|space (in labels and units)|`%20`
-` # `|hash (for colors)|`%23`
-` % `|percent (in units)|`%25`
-` < `|less than|`%3C`
-` > `|greater than|`%3E`
-` \ `|backslash (when you need a `/`)|`%5C`
-` \| `|pipe (delimiting parameters)|`%7C`
+|character|name|escape sequence|
+|:-------:|:--:|:-------------:|
+|` `|space (in labels and units)|`%20`|
+|`#`|hash (for colors)|`%23`|
+|`%`|percent (in units)|`%25`|
+|`<`|less than|`%3C`|
+|`>`|greater than|`%3E`|
+|`\`|backslash (when you need a `/`)|`%5C`|
+|`\|`|pipe (delimiting parameters)|`%7C`|
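For example, a label with spaces and a `%` unit would be escaped like this (a sketch):

```html
<a href="#">
    <img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=system.cpu&label=cpu%20used%20now&units=%25"></img>
</a>
```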
## FAQ
#### Is it fast?
-On modern hardware, netdata can generate about **2.000 badges per second per core**, before noticing any delays. It generates a badge in about half a millisecond!
-Of course these timing are for badges that use recent data. If you need badges that do calculations over long durations (a day, or more), timing will differ. netdata logs its timings at its `access.log`, so take a look there before adding a heavy badge on a busy web site. Of course, you can cache such badges or have a cron job get them from netdata and save them at your web server at regular intervals.
+On modern hardware, Netdata can generate about **2.000 badges per second per core**, before noticing any delays. It generates a badge in about half a millisecond!
+Of course, these timings are for badges that use recent data. If you need badges that do calculations over long durations (a day, or more), timing will differ. Netdata logs its timings in its `access.log`, so take a look there before adding a heavy badge on a busy web site. Alternatively, you can cache such badges or have a cron job get them from Netdata and save them at your web server at regular intervals.
#### Embedding badges in github
@@ -275,13 +274,13 @@ You have 2 options a) SVG images with markdown and b) SVG images with HTML (dire
For example, this is the cpu badge shown above:
-- Markdown example:
+- Markdown example:
```md
[![A nice name](https://registry.my-netdata.io/api/v1/badge.svg?chart=users.cpu&dimensions=root&value_color=grey:null%7Cgreen%3C10%7Cyellow%3C20%7Corange%3C50%7Cblue%3C100%7Cred&label=root%20user%20cpu%20now&units=%25)](https://registry.my-netdata.io/#apps_cpu)
```
-- HTML example:
+- HTML example:
```html
<a href="https://registry.my-netdata.io/#apps_cpu">
@@ -305,4 +304,4 @@ You can refresh them from your browser console though. Press F12 to open the web
var len = document.images.length; while(len--) { document.images[len].src = document.images[len].src.replace(/\?cacheBuster=\d*/, "") + "?cacheBuster=" + new Date().getTime().toString(); };
```
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fapi%2Fbadges%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fapi%2Fbadges%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/web/api/exporters/Makefile.in b/web/api/exporters/Makefile.in
new file mode 100644
index 00000000..e9769a31
--- /dev/null
+++ b/web/api/exporters/Makefile.in
@@ -0,0 +1,699 @@
+# Makefile.in generated by automake 1.15.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2017 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+VPATH = @srcdir@
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = web/api/exporters
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(dist_noinst_DATA) \
+ $(am__DIST_COMMON)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \
+ ctags-recursive dvi-recursive html-recursive info-recursive \
+ install-data-recursive install-dvi-recursive \
+ install-exec-recursive install-html-recursive \
+ install-info-recursive install-pdf-recursive \
+ install-ps-recursive install-recursive installcheck-recursive \
+ installdirs-recursive pdf-recursive ps-recursive \
+ tags-recursive uninstall-recursive
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_noinst_DATA)
+RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \
+ distclean-recursive maintainer-clean-recursive
+am__recursive_targets = \
+ $(RECURSIVE_TARGETS) \
+ $(RECURSIVE_CLEAN_TARGETS) \
+ $(am__extra_recursive_targets)
+AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \
+ distdir
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+# Read a list of newline-separated strings from the standard input,
+# and print each of them once, without duplicates. Input order is
+# *not* preserved.
+am__uniquify_input = $(AWK) '\
+ BEGIN { nonempty = 0; } \
+ { items[$$0] = 1; nonempty = 1; } \
+ END { if (nonempty) { for (i in items) print i; }; } \
+'
+# Make sure the list of sources is unique. This is necessary because,
+# e.g., the same source file might be shared among _SOURCES variables
+# for different programs/libraries.
+am__define_uniq_tagged_files = \
+ list='$(am__tagged_files)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | $(am__uniquify_input)`
+ETAGS = etags
+CTAGS = ctags
+DIST_SUBDIRS = $(SUBDIRS)
+am__DIST_COMMON = $(srcdir)/Makefile.in
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+am__relativize = \
+ dir0=`pwd`; \
+ sed_first='s,^\([^/]*\)/.*$$,\1,'; \
+ sed_rest='s,^[^/]*/*,,'; \
+ sed_last='s,^.*/\([^/]*\)$$,\1,'; \
+ sed_butlast='s,/*[^/]*$$,,'; \
+ while test -n "$$dir1"; do \
+ first=`echo "$$dir1" | sed -e "$$sed_first"`; \
+ if test "$$first" != "."; then \
+ if test "$$first" = ".."; then \
+ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \
+ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \
+ else \
+ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \
+ if test "$$first2" = "$$first"; then \
+ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \
+ else \
+ dir2="../$$dir2"; \
+ fi; \
+ dir0="$$dir0"/"$$first"; \
+ fi; \
+ fi; \
+ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \
+ done; \
+ reldir="$$dir2"
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CUPSCONFIG = @CUPSCONFIG@
+CXX = @CXX@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CXX_BINARY = @CXX_BINARY@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+JSON_CFLAGS = @JSON_CFLAGS@
+JSON_LIBS = @JSON_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
+LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
+LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
+LIBCURL_LIBS = @LIBCURL_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
+LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
+LIBSSL_LIBS = @LIBSSL_LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
+OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
+OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
+OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
+OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
+OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
+OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
+OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
+OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
+OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
+OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
+OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
+OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
+PROTOBUF_LIBS = @PROTOBUF_LIBS@
+PROTOC = @PROTOC@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
+XENLIGHT_LIBS = @XENLIGHT_LIBS@
+YAJL_CFLAGS = @YAJL_CFLAGS@
+YAJL_LIBS = @YAJL_LIBS@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+SUBDIRS = \
+ shell \
+ prometheus \
+ $(NULL)
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
+
+all: all-recursive
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu web/api/exporters/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu web/api/exporters/Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+
+# This directory's subdirectories are mostly independent; you can cd
+# into them and run 'make' without going through this Makefile.
+# To change the values of 'make' variables: instead of editing Makefiles,
+# (1) if the variable is set in 'config.status', edit 'config.status'
+# (which will cause the Makefiles to be regenerated when you run 'make');
+# (2) otherwise, pass the desired values on the 'make' command line.
+$(am__recursive_targets):
+ @fail=; \
+ if $(am__make_keepgoing); then \
+ failcom='fail=yes'; \
+ else \
+ failcom='exit 1'; \
+ fi; \
+ dot_seen=no; \
+ target=`echo $@ | sed s/-recursive//`; \
+ case "$@" in \
+ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \
+ *) list='$(SUBDIRS)' ;; \
+ esac; \
+ for subdir in $$list; do \
+ echo "Making $$target in $$subdir"; \
+ if test "$$subdir" = "."; then \
+ dot_seen=yes; \
+ local_target="$$target-am"; \
+ else \
+ local_target="$$target"; \
+ fi; \
+ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \
+ || eval $$failcom; \
+ done; \
+ if test "$$dot_seen" = "no"; then \
+ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \
+ fi; test -z "$$fail"
+
+ID: $(am__tagged_files)
+ $(am__define_uniq_tagged_files); mkid -fID $$unique
+tags: tags-recursive
+TAGS: tags
+
+tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+ set x; \
+ here=`pwd`; \
+ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \
+ include_option=--etags-include; \
+ empty_fix=.; \
+ else \
+ include_option=--include; \
+ empty_fix=; \
+ fi; \
+ list='$(SUBDIRS)'; for subdir in $$list; do \
+ if test "$$subdir" = .; then :; else \
+ test ! -f $$subdir/TAGS || \
+ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \
+ fi; \
+ done; \
+ $(am__define_uniq_tagged_files); \
+ shift; \
+ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
+ test -n "$$unique" || unique=$$empty_fix; \
+ if test $$# -gt 0; then \
+ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+ "$$@" $$unique; \
+ else \
+ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+ $$unique; \
+ fi; \
+ fi
+ctags: ctags-recursive
+
+CTAGS: ctags
+ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+ $(am__define_uniq_tagged_files); \
+ test -z "$(CTAGS_ARGS)$$unique" \
+ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
+ $$unique
+
+GTAGS:
+ here=`$(am__cd) $(top_builddir) && pwd` \
+ && $(am__cd) $(top_srcdir) \
+ && gtags -i $(GTAGS_ARGS) "$$here"
+cscopelist: cscopelist-recursive
+
+cscopelist-am: $(am__tagged_files)
+ list='$(am__tagged_files)'; \
+ case "$(srcdir)" in \
+ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \
+ *) sdir=$(subdir)/$(srcdir) ;; \
+ esac; \
+ for i in $$list; do \
+ if test -f "$$i"; then \
+ echo "$(subdir)/$$i"; \
+ else \
+ echo "$$sdir/$$i"; \
+ fi; \
+ done >> $(top_builddir)/cscope.files
+
+distclean-tags:
+ -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+ @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \
+ if test "$$subdir" = .; then :; else \
+ $(am__make_dryrun) \
+ || test -d "$(distdir)/$$subdir" \
+ || $(MKDIR_P) "$(distdir)/$$subdir" \
+ || exit 1; \
+ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \
+ $(am__relativize); \
+ new_distdir=$$reldir; \
+ dir1=$$subdir; dir2="$(top_distdir)"; \
+ $(am__relativize); \
+ new_top_distdir=$$reldir; \
+ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \
+ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \
+ ($(am__cd) $$subdir && \
+ $(MAKE) $(AM_MAKEFLAGS) \
+ top_distdir="$$new_top_distdir" \
+ distdir="$$new_distdir" \
+ am__remove_distdir=: \
+ am__skip_length_check=: \
+ am__skip_mode_fix=: \
+ distdir) \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-recursive
+all-am: Makefile $(DATA)
+installdirs: installdirs-recursive
+installdirs-am:
+install: install-recursive
+install-exec: install-exec-recursive
+install-data: install-data-recursive
+uninstall: uninstall-recursive
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-recursive
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-recursive
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-recursive
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic distclean-tags
+
+dvi: dvi-recursive
+
+dvi-am:
+
+html: html-recursive
+
+html-am:
+
+info: info-recursive
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-recursive
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-recursive
+
+install-html-am:
+
+install-info: install-info-recursive
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-recursive
+
+install-pdf-am:
+
+install-ps: install-ps-recursive
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-recursive
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-recursive
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-recursive
+
+pdf-am:
+
+ps: ps-recursive
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: $(am__recursive_targets) install-am install-strip
+
+.PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am check \
+ check-am clean clean-generic cscopelist-am ctags ctags-am \
+ distclean distclean-generic distclean-tags distdir dvi dvi-am \
+ html html-am info info-am install install-am install-data \
+ install-data-am install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs installdirs-am maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags tags-am uninstall uninstall-am
+
+.PRECIOUS: Makefile
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
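The `Makefile.in` files added by this patch are machine-generated: as each header notes, automake 1.15.1 produced them from the sibling `Makefile.am`. They are not meant to be edited by hand; after changing a `Makefile.am`, the whole set is regenerated from the top of the source tree. A minimal sketch of the standard autotools invocation, assuming autoconf and automake are installed (the exact flags a given build script uses may differ):

```sh
# Rebuild configure, aclocal.m4, and every Makefile.in from
# configure.ac and the Makefile.am files. Run at the source tree root.
autoreconf -ivf
```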
diff --git a/web/api/exporters/README.md b/web/api/exporters/README.md
index ff711d7e..2cbb037c 100644
--- a/web/api/exporters/README.md
+++ b/web/api/exporters/README.md
@@ -2,4 +2,4 @@
TBD
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fapi%2Fexporters%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fapi%2Fexporters%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/web/api/exporters/prometheus/Makefile.in b/web/api/exporters/prometheus/Makefile.in
new file mode 100644
index 00000000..fb02fb30
--- /dev/null
+++ b/web/api/exporters/prometheus/Makefile.in
@@ -0,0 +1,514 @@
+# Makefile.in generated by automake 1.15.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2017 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+VPATH = @srcdir@
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = web/api/exporters/prometheus
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(dist_noinst_DATA) \
+ $(am__DIST_COMMON)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_noinst_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+am__DIST_COMMON = $(srcdir)/Makefile.in
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CUPSCONFIG = @CUPSCONFIG@
+CXX = @CXX@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CXX_BINARY = @CXX_BINARY@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+JSON_CFLAGS = @JSON_CFLAGS@
+JSON_LIBS = @JSON_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
+LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
+LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
+LIBCURL_LIBS = @LIBCURL_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
+LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
+LIBSSL_LIBS = @LIBSSL_LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
+OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
+OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
+OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
+OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
+OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
+OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
+OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
+OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
+OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
+OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
+OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
+OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
+PROTOBUF_LIBS = @PROTOBUF_LIBS@
+PROTOC = @PROTOC@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
+XENLIGHT_LIBS = @XENLIGHT_LIBS@
+YAJL_CFLAGS = @YAJL_CFLAGS@
+YAJL_LIBS = @YAJL_LIBS@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
+
+all: all-am
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu web/api/exporters/prometheus/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu web/api/exporters/prometheus/Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(DATA)
+installdirs:
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags-am uninstall uninstall-am
+
+.PRECIOUS: Makefile
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/web/api/exporters/prometheus/README.md b/web/api/exporters/prometheus/README.md
index 88e79ecd..9b2367cc 100644
--- a/web/api/exporters/prometheus/README.md
+++ b/web/api/exporters/prometheus/README.md
@@ -1,5 +1,5 @@
# prometheus exporter
-The prometheus exporter for netdata is located at the [backends section for prometheus](../../../../backends/prometheus).
+The prometheus exporter for Netdata is located at the [backends section for prometheus](../../../../backends/prometheus).
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fapi%2Fexporters%2Fprometheus%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fapi%2Fexporters%2Fprometheus%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
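For a quick end-to-end check of that exporter, the `allmetrics` API can emit Prometheus exposition format directly. A minimal sketch, assuming an agent listening on the default port 19999 and the `format=prometheus` query parameter described in the backends section:

```sh
# Fetch all metrics in Prometheus text format and preview the first lines.
curl -s 'http://localhost:19999/api/v1/allmetrics?format=prometheus' | head -n 20
```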
diff --git a/web/api/exporters/shell/Makefile.in b/web/api/exporters/shell/Makefile.in
new file mode 100644
index 00000000..94a0e9a9
--- /dev/null
+++ b/web/api/exporters/shell/Makefile.in
@@ -0,0 +1,514 @@
+# Makefile.in generated by automake 1.15.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2017 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+VPATH = @srcdir@
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = web/api/exporters/shell
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(dist_noinst_DATA) \
+ $(am__DIST_COMMON)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_noinst_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+am__DIST_COMMON = $(srcdir)/Makefile.in
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CUPSCONFIG = @CUPSCONFIG@
+CXX = @CXX@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CXX_BINARY = @CXX_BINARY@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+JSON_CFLAGS = @JSON_CFLAGS@
+JSON_LIBS = @JSON_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
+LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
+LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
+LIBCURL_LIBS = @LIBCURL_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
+LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
+LIBSSL_LIBS = @LIBSSL_LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
+OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
+OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
+OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
+OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
+OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
+OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
+OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
+OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
+OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
+OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
+OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
+OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
+PROTOBUF_LIBS = @PROTOBUF_LIBS@
+PROTOC = @PROTOC@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
+XENLIGHT_LIBS = @XENLIGHT_LIBS@
+YAJL_CFLAGS = @YAJL_CFLAGS@
+YAJL_LIBS = @YAJL_LIBS@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
+
+all: all-am
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu web/api/exporters/shell/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu web/api/exporters/shell/Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(DATA)
+installdirs:
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags-am uninstall uninstall-am
+
+.PRECIOUS: Makefile
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/web/api/exporters/shell/README.md b/web/api/exporters/shell/README.md
index ab412eba..fef253c8 100644
--- a/web/api/exporters/shell/README.md
+++ b/web/api/exporters/shell/README.md
@@ -1,18 +1,18 @@
# shell exporter
-Shell scripts can now query netdata:
+Shell scripts can now query Netdata:
```sh
eval "$(curl -s 'http://localhost:19999/api/v1/allmetrics')"
```
-after this command, all the netdata metrics are exposed to shell. Check:
+after this command, all the Netdata metrics are exposed to shell. Check:
```sh
# source the metrics
eval "$(curl -s 'http://localhost:19999/api/v1/allmetrics')"
-# let's see if there are variables exposed by netdata for system.cpu
+# let's see if there are variables exposed by Netdata for system.cpu
set | grep "^NETDATA_SYSTEM_CPU"
NETDATA_SYSTEM_CPU_GUEST=0
@@ -50,7 +50,7 @@ user 0m0,000s
sys 0m0,007s
# it is...
-# 0.07 seconds for curl to be loaded, connect to netdata and fetch the response back...
+# 0.07 seconds for curl to be loaded, connect to Netdata and fetch the response back...
```
The `_VISIBLETOTAL` variable sums up all the dimensions of each chart.
@@ -63,4 +63,4 @@ NETDATA_${chart_id^^}_${dimension_id^^}="${value}"
The value is rounded to the nearest integer, since shell scripts cannot process decimal numbers.
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fapi%2Fexporters%2Fshell%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fapi%2Fexporters%2Fshell%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
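Building on the variables above, a shell script can poll the agent and react to a metric. A minimal sketch, assuming the agent runs on `localhost:19999` and that `system.cpu` exposes a `user` dimension, so the hypothetical variable `NETDATA_SYSTEM_CPU_USER` follows the `NETDATA_${chart_id^^}_${dimension_id^^}` naming shown above; adjust the name to the charts present on your node:

```sh
# Warn whenever user CPU exceeds a threshold; values are integers,
# so plain shell arithmetic is sufficient.
THRESHOLD=80
while true; do
    eval "$(curl -s 'http://localhost:19999/api/v1/allmetrics')"
    if [ "${NETDATA_SYSTEM_CPU_USER:-0}" -gt "$THRESHOLD" ]; then
        echo "user CPU at ${NETDATA_SYSTEM_CPU_USER} (threshold ${THRESHOLD})"
    fi
    sleep 5
done
```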
diff --git a/web/api/formatters/Makefile.in b/web/api/formatters/Makefile.in
new file mode 100644
index 00000000..52be38b4
--- /dev/null
+++ b/web/api/formatters/Makefile.in
@@ -0,0 +1,701 @@
+# Makefile.in generated by automake 1.15.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2017 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+VPATH = @srcdir@
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = web/api/formatters
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(dist_noinst_DATA) \
+ $(am__DIST_COMMON)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \
+ ctags-recursive dvi-recursive html-recursive info-recursive \
+ install-data-recursive install-dvi-recursive \
+ install-exec-recursive install-html-recursive \
+ install-info-recursive install-pdf-recursive \
+ install-ps-recursive install-recursive installcheck-recursive \
+ installdirs-recursive pdf-recursive ps-recursive \
+ tags-recursive uninstall-recursive
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_noinst_DATA)
+RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \
+ distclean-recursive maintainer-clean-recursive
+am__recursive_targets = \
+ $(RECURSIVE_TARGETS) \
+ $(RECURSIVE_CLEAN_TARGETS) \
+ $(am__extra_recursive_targets)
+AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \
+ distdir
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+# Read a list of newline-separated strings from the standard input,
+# and print each of them once, without duplicates. Input order is
+# *not* preserved.
+am__uniquify_input = $(AWK) '\
+ BEGIN { nonempty = 0; } \
+ { items[$$0] = 1; nonempty = 1; } \
+ END { if (nonempty) { for (i in items) print i; }; } \
+'
+# Make sure the list of sources is unique. This is necessary because,
+# e.g., the same source file might be shared among _SOURCES variables
+# for different programs/libraries.
+am__define_uniq_tagged_files = \
+ list='$(am__tagged_files)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | $(am__uniquify_input)`
+ETAGS = etags
+CTAGS = ctags
+DIST_SUBDIRS = $(SUBDIRS)
+am__DIST_COMMON = $(srcdir)/Makefile.in
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+am__relativize = \
+ dir0=`pwd`; \
+ sed_first='s,^\([^/]*\)/.*$$,\1,'; \
+ sed_rest='s,^[^/]*/*,,'; \
+ sed_last='s,^.*/\([^/]*\)$$,\1,'; \
+ sed_butlast='s,/*[^/]*$$,,'; \
+ while test -n "$$dir1"; do \
+ first=`echo "$$dir1" | sed -e "$$sed_first"`; \
+ if test "$$first" != "."; then \
+ if test "$$first" = ".."; then \
+ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \
+ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \
+ else \
+ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \
+ if test "$$first2" = "$$first"; then \
+ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \
+ else \
+ dir2="../$$dir2"; \
+ fi; \
+ dir0="$$dir0"/"$$first"; \
+ fi; \
+ fi; \
+ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \
+ done; \
+ reldir="$$dir2"
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CUPSCONFIG = @CUPSCONFIG@
+CXX = @CXX@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CXX_BINARY = @CXX_BINARY@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+JSON_CFLAGS = @JSON_CFLAGS@
+JSON_LIBS = @JSON_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
+LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
+LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
+LIBCURL_LIBS = @LIBCURL_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
+LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
+LIBSSL_LIBS = @LIBSSL_LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
+OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
+OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
+OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
+OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
+OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
+OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
+OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
+OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
+OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
+OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
+OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
+OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
+PROTOBUF_LIBS = @PROTOBUF_LIBS@
+PROTOC = @PROTOC@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
+XENLIGHT_LIBS = @XENLIGHT_LIBS@
+YAJL_CFLAGS = @YAJL_CFLAGS@
+YAJL_LIBS = @YAJL_LIBS@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+SUBDIRS = \
+ csv \
+ json \
+ ssv \
+ value \
+ $(NULL)
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
+
+all: all-recursive
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu web/api/formatters/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu web/api/formatters/Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+
+# This directory's subdirectories are mostly independent; you can cd
+# into them and run 'make' without going through this Makefile.
+# To change the values of 'make' variables: instead of editing Makefiles,
+# (1) if the variable is set in 'config.status', edit 'config.status'
+# (which will cause the Makefiles to be regenerated when you run 'make');
+# (2) otherwise, pass the desired values on the 'make' command line.
+$(am__recursive_targets):
+ @fail=; \
+ if $(am__make_keepgoing); then \
+ failcom='fail=yes'; \
+ else \
+ failcom='exit 1'; \
+ fi; \
+ dot_seen=no; \
+ target=`echo $@ | sed s/-recursive//`; \
+ case "$@" in \
+ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \
+ *) list='$(SUBDIRS)' ;; \
+ esac; \
+ for subdir in $$list; do \
+ echo "Making $$target in $$subdir"; \
+ if test "$$subdir" = "."; then \
+ dot_seen=yes; \
+ local_target="$$target-am"; \
+ else \
+ local_target="$$target"; \
+ fi; \
+ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \
+ || eval $$failcom; \
+ done; \
+ if test "$$dot_seen" = "no"; then \
+ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \
+ fi; test -z "$$fail"
+
+ID: $(am__tagged_files)
+ $(am__define_uniq_tagged_files); mkid -fID $$unique
+tags: tags-recursive
+TAGS: tags
+
+tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+ set x; \
+ here=`pwd`; \
+ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \
+ include_option=--etags-include; \
+ empty_fix=.; \
+ else \
+ include_option=--include; \
+ empty_fix=; \
+ fi; \
+ list='$(SUBDIRS)'; for subdir in $$list; do \
+ if test "$$subdir" = .; then :; else \
+ test ! -f $$subdir/TAGS || \
+ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \
+ fi; \
+ done; \
+ $(am__define_uniq_tagged_files); \
+ shift; \
+ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
+ test -n "$$unique" || unique=$$empty_fix; \
+ if test $$# -gt 0; then \
+ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+ "$$@" $$unique; \
+ else \
+ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+ $$unique; \
+ fi; \
+ fi
+ctags: ctags-recursive
+
+CTAGS: ctags
+ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+ $(am__define_uniq_tagged_files); \
+ test -z "$(CTAGS_ARGS)$$unique" \
+ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
+ $$unique
+
+GTAGS:
+ here=`$(am__cd) $(top_builddir) && pwd` \
+ && $(am__cd) $(top_srcdir) \
+ && gtags -i $(GTAGS_ARGS) "$$here"
+cscopelist: cscopelist-recursive
+
+cscopelist-am: $(am__tagged_files)
+ list='$(am__tagged_files)'; \
+ case "$(srcdir)" in \
+ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \
+ *) sdir=$(subdir)/$(srcdir) ;; \
+ esac; \
+ for i in $$list; do \
+ if test -f "$$i"; then \
+ echo "$(subdir)/$$i"; \
+ else \
+ echo "$$sdir/$$i"; \
+ fi; \
+ done >> $(top_builddir)/cscope.files
+
+distclean-tags:
+ -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+ @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \
+ if test "$$subdir" = .; then :; else \
+ $(am__make_dryrun) \
+ || test -d "$(distdir)/$$subdir" \
+ || $(MKDIR_P) "$(distdir)/$$subdir" \
+ || exit 1; \
+ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \
+ $(am__relativize); \
+ new_distdir=$$reldir; \
+ dir1=$$subdir; dir2="$(top_distdir)"; \
+ $(am__relativize); \
+ new_top_distdir=$$reldir; \
+ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \
+ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \
+ ($(am__cd) $$subdir && \
+ $(MAKE) $(AM_MAKEFLAGS) \
+ top_distdir="$$new_top_distdir" \
+ distdir="$$new_distdir" \
+ am__remove_distdir=: \
+ am__skip_length_check=: \
+ am__skip_mode_fix=: \
+ distdir) \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-recursive
+all-am: Makefile $(DATA)
+installdirs: installdirs-recursive
+installdirs-am:
+install: install-recursive
+install-exec: install-exec-recursive
+install-data: install-data-recursive
+uninstall: uninstall-recursive
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-recursive
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-recursive
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-recursive
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic distclean-tags
+
+dvi: dvi-recursive
+
+dvi-am:
+
+html: html-recursive
+
+html-am:
+
+info: info-recursive
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-recursive
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-recursive
+
+install-html-am:
+
+install-info: install-info-recursive
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-recursive
+
+install-pdf-am:
+
+install-ps: install-ps-recursive
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-recursive
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-recursive
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-recursive
+
+pdf-am:
+
+ps: ps-recursive
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: $(am__recursive_targets) install-am install-strip
+
+.PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am check \
+ check-am clean clean-generic cscopelist-am ctags ctags-am \
+ distclean distclean-generic distclean-tags distdir dvi dvi-am \
+ html html-am info info-am install install-am install-data \
+ install-data-am install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs installdirs-am maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags tags-am uninstall uninstall-am
+
+.PRECIOUS: Makefile
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/web/api/formatters/README.md b/web/api/formatters/README.md
index b4ce1e30..037fb534 100644
--- a/web/api/formatters/README.md
+++ b/web/api/formatters/README.md
@@ -5,20 +5,20 @@ Using API parameters, the caller may define the format they wish to get back
The following formats are supported:
-format|module|content type|description
-:---:|:---:|:---:|:-----
-`array`|[ssv](ssv)|application/json|a JSON array
-`csv`|[csv](csv)|text/plain|a text table, comma separated, with a header line (dimension names) and `\r\n` at the end of the lines
-`csvjsonarray`|[csv](csv)|application/json|a JSON array, with each row as another array (the first row has the dimension names)
-`datasource`|[json](json)|application/json|a Google Visualization Provider `datasource` javascript callback
-`datatable`|[json](json)|application/json|a Google `datatable`
-`html`|[csv](csv)|text/html|an html table
-`json`|[json](json)|application/json|a JSON object
-`jsonp`|[json](json)|application/json|a JSONP javascript callback
-`markdown`|[csv](csv)|text/plain|a markdown table
-`ssv`|[ssv](ssv)|text/plain|a space separated list of values
-`ssvcomma`|[ssv](ssv)|text/plain|a comma separated list of values
-`tsv`|[csv](csv)|text/plain|a TAB delimited `csv` (MS Excel flavor)
+| format|module|content type|description|
+|:----:|:----:|:----------:|:----------|
+| `array`|[ssv](ssv)|application/json|a JSON array|
+| `csv`|[csv](csv)|text/plain|a text table, comma separated, with a header line (dimension names) and `\r\n` at the end of the lines|
+| `csvjsonarray`|[csv](csv)|application/json|a JSON array, with each row as another array (the first row has the dimension names)|
+| `datasource`|[json](json)|application/json|a Google Visualization Provider `datasource` javascript callback|
+| `datatable`|[json](json)|application/json|a Google `datatable`|
+| `html`|[csv](csv)|text/html|an html table|
+| `json`|[json](json)|application/json|a JSON object|
+| `jsonp`|[json](json)|application/json|a JSONP javascript callback|
+| `markdown`|[csv](csv)|text/plain|a markdown table|
+| `ssv`|[ssv](ssv)|text/plain|a space separated list of values|
+| `ssvcomma`|[ssv](ssv)|text/plain|a comma separated list of values|
+| `tsv`|[csv](csv)|text/plain|a TAB delimited `csv` (MS Excel flavor)|
For examples of each format, check the relative module documentation.
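As a quick illustration, a sketch reusing the same query endpoint and parameters as the examples below; only `&format=` changes between calls:

```bash
# the same data query rendered by three different formatters
curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=system.cpu&after=-60&points=6&format=json'
curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=system.cpu&after=-60&points=6&format=csv'
curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=system.cpu&after=-60&points=6&format=markdown'
```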
@@ -59,16 +59,15 @@ This is such an object:
## Downloading data query result files
Following the [Google Visualization Provider guidelines](https://developers.google.com/chart/interactive/docs/dev/implementing_data_source),
-netdata supports parsing `tqx` options.
+Netdata supports parsing `tqx` options.
-Using these options, any netdata data query can instruct the web browser to download
+Using these options, any Netdata data query can instruct the web browser to download
the result and save it under a given filename.
For example, to download a CSV file with CPU utilization of the last hour,
[click here](https://registry.my-netdata.io/api/v1/data?chart=system.cpu&after=-3600&format=csv&options=nonzero&tqx=outFileName:system+cpu+utilization+of+the+last_hour.csv).
-
This is done by appending `&tqx=outFileName:FILENAME` to any data query.
The output will be in the format given with `&format=`.
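For example, a minimal sketch (the `outFileName` value is arbitrary; when the same URL is opened in a browser, the response is offered as a download under that name):

```bash
# last hour of CPU utilization as CSV, with a suggested download filename
curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=system.cpu&after=-3600&format=csv&tqx=outFileName:cpu.csv'
```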
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fapi%2Fformatters%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fapi%2Fformatters%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/web/api/formatters/csv/Makefile.in b/web/api/formatters/csv/Makefile.in
new file mode 100644
index 00000000..ebeffefa
--- /dev/null
+++ b/web/api/formatters/csv/Makefile.in
@@ -0,0 +1,514 @@
+# Makefile.in generated by automake 1.15.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2017 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+VPATH = @srcdir@
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = web/api/formatters/csv
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(dist_noinst_DATA) \
+ $(am__DIST_COMMON)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_noinst_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+am__DIST_COMMON = $(srcdir)/Makefile.in
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CUPSCONFIG = @CUPSCONFIG@
+CXX = @CXX@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CXX_BINARY = @CXX_BINARY@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+JSON_CFLAGS = @JSON_CFLAGS@
+JSON_LIBS = @JSON_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
+LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
+LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
+LIBCURL_LIBS = @LIBCURL_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
+LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
+LIBSSL_LIBS = @LIBSSL_LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
+OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
+OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
+OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
+OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
+OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
+OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
+OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
+OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
+OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
+OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
+OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
+OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
+PROTOBUF_LIBS = @PROTOBUF_LIBS@
+PROTOC = @PROTOC@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
+XENLIGHT_LIBS = @XENLIGHT_LIBS@
+YAJL_CFLAGS = @YAJL_CFLAGS@
+YAJL_LIBS = @YAJL_LIBS@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
+
+all: all-am
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu web/api/formatters/csv/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu web/api/formatters/csv/Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(DATA)
+installdirs:
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags-am uninstall uninstall-am
+
+.PRECIOUS: Makefile
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/web/api/formatters/csv/README.md b/web/api/formatters/csv/README.md
index 995e740b..0d16fc4e 100644
--- a/web/api/formatters/csv/README.md
+++ b/web/api/formatters/csv/README.md
@@ -2,28 +2,27 @@
The CSV formatter presents [results of database queries](../../queries) in the following formats:
-format|content type|description
-:---:|:---:|:-----
-`csv`|text/plain|a text table, comma separated, with a header line (dimension names) and `\r\n` at the end of the lines
-`csvjsonarray`|application/json|a JSON array, with each row as another array (the first row has the dimension names)
-`tsv`|text/plain|like `csv` but TAB is used instead of comma to separate values (MS Excel flavor)
-`html`|text/html|an html table
-`markdown`|text/plain|markdown table
+| format|content type|description|
+|:----:|:----------:|:----------|
+| `csv`|text/plain|a text table, comma separated, with a header line (dimension names) and `\r\n` at the end of the lines|
+| `csvjsonarray`|application/json|a JSON array, with each row as another array (the first row has the dimension names)|
+| `tsv`|text/plain|like `csv` but TAB is used instead of comma to separate values (MS Excel flavor)|
+| `html`|text/html|an html table|
+| `markdown`|text/plain|a markdown table|
In all formats the date and time is the first column.
The CSV formatter respects the following API `&options=`:
-option|supported|description
-:---:|:---:|:---
-`nonzero`|yes|to return only the dimensions that have at least a non-zero value
-`flip`|yes|to return the rows older to newer (the default is newer to older)
-`seconds`|yes|to return the date and time in unix timestamp
-`ms`|yes|to return the date and time in unit timestamp as milliseconds
-`percent`|yes|to replace all values with their percentage over the row total
-`abs`|yes|to turn all values positive
-`null2zero`|yes|to replace gaps with zeros (the default prints the string `null`
-
+| option|supported|description|
+|:----:|:-------:|:----------|
+| `nonzero`|yes|to return only the dimensions that have at least a non-zero value|
+| `flip`|yes|to return the rows older to newer (the default is newer to older)|
+| `seconds`|yes|to return the date and time as a unix timestamp|
+| `ms`|yes|to return the date and time as a unix timestamp in milliseconds|
+| `percent`|yes|to replace all values with their percentage over the row total|
+| `abs`|yes|to turn all values positive|
+| `null2zero`|yes|to replace gaps with zeros (the default prints the string `null`)|
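
Ahead of the full examples below, a quick sketch combining several of these options (assuming comma-separated `&options=` values; chart and time range are illustrative):

```bash
# oldest-first rows, unix timestamps, non-zero dimensions only
curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=system.cpu&after=-600&points=10&format=csv&options=flip,seconds,nonzero'
```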
## Examples
@@ -82,7 +81,6 @@ This is how it looks when rendered by a web browser:
![image](https://user-images.githubusercontent.com/2662304/47597887-bafbf480-d99c-11e8-864a-d880bb8d2e5b.png)
-
---
Get a JSON array with the average bandwidth rate of the mysql server, over the last hour, in 6 values
@@ -101,7 +99,7 @@ Netdata always returns bandwidth rates in `kilobits/s`.
[1540597200000,0.7499968,120.2807337],
[1540596600000,0.7499988,120.2810527]
]
-```
+```
---
@@ -109,33 +107,33 @@ Get the number of processes started per minute, for the last 10 minutes, in `markdown` format
```bash
# curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=system.forks&format=markdown&after=-600&points=10&group=sum'
-time|started
-:---:|:---:
-2018-10-27 03:52:00|245.1706149
-2018-10-27 03:51:00|152.6654636
-2018-10-27 03:50:00|163.1755789
-2018-10-27 03:49:00|176.1574766
-2018-10-27 03:48:00|178.0137076
-2018-10-27 03:47:00|183.8306543
-2018-10-27 03:46:00|264.1635621
-2018-10-27 03:45:00|205.001551
-2018-10-27 03:44:00|7026.9852167
-2018-10-27 03:43:00|205.9904794
+time | started
+:---: |:---:
+2018-10-27 03:52:00| 245.1706149
+2018-10-27 03:51:00| 152.6654636
+2018-10-27 03:50:00| 163.1755789
+2018-10-27 03:49:00| 176.1574766
+2018-10-27 03:48:00| 178.0137076
+2018-10-27 03:47:00| 183.8306543
+2018-10-27 03:46:00| 264.1635621
+2018-10-27 03:45:00| 205.001551
+2018-10-27 03:44:00| 7026.9852167
+2018-10-27 03:43:00| 205.9904794
```
And this is how it looks when formatted:
-time|started
-:---:|:---:
-2018-10-27 03:52:00|245.1706149
-2018-10-27 03:51:00|152.6654636
-2018-10-27 03:50:00|163.1755789
-2018-10-27 03:49:00|176.1574766
-2018-10-27 03:48:00|178.0137076
-2018-10-27 03:47:00|183.8306543
-2018-10-27 03:46:00|264.1635621
-2018-10-27 03:45:00|205.001551
-2018-10-27 03:44:00|7026.9852167
-2018-10-27 03:43:00|205.9904794
-
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fapi%2Fformatters%2Fcsv%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+| time | started |
+|:--:|:-----:|
+| 2018-10-27 03:52:00 | 245.1706149 |
+| 2018-10-27 03:51:00 | 152.6654636 |
+| 2018-10-27 03:50:00 | 163.1755789 |
+| 2018-10-27 03:49:00 | 176.1574766 |
+| 2018-10-27 03:48:00 | 178.0137076 |
+| 2018-10-27 03:47:00 | 183.8306543 |
+| 2018-10-27 03:46:00 | 264.1635621 |
+| 2018-10-27 03:45:00 | 205.001551 |
+| 2018-10-27 03:44:00 | 7026.9852167 |
+| 2018-10-27 03:43:00 | 205.9904794 |
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fapi%2Fformatters%2Fcsv%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/web/api/formatters/json/Makefile.in b/web/api/formatters/json/Makefile.in
new file mode 100644
index 00000000..105c6677
--- /dev/null
+++ b/web/api/formatters/json/Makefile.in
@@ -0,0 +1,514 @@
+# Makefile.in generated by automake 1.15.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2017 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+VPATH = @srcdir@
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = web/api/formatters/json
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(dist_noinst_DATA) \
+ $(am__DIST_COMMON)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_noinst_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+am__DIST_COMMON = $(srcdir)/Makefile.in
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CUPSCONFIG = @CUPSCONFIG@
+CXX = @CXX@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CXX_BINARY = @CXX_BINARY@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+JSON_CFLAGS = @JSON_CFLAGS@
+JSON_LIBS = @JSON_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
+LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
+LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
+LIBCURL_LIBS = @LIBCURL_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
+LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
+LIBSSL_LIBS = @LIBSSL_LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
+OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
+OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
+OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
+OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
+OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
+OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
+OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
+OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
+OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
+OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
+OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
+OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
+PROTOBUF_LIBS = @PROTOBUF_LIBS@
+PROTOC = @PROTOC@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
+XENLIGHT_LIBS = @XENLIGHT_LIBS@
+YAJL_CFLAGS = @YAJL_CFLAGS@
+YAJL_LIBS = @YAJL_LIBS@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
+
+all: all-am
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu web/api/formatters/json/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu web/api/formatters/json/Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(DATA)
+installdirs:
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags-am uninstall uninstall-am
+
+.PRECIOUS: Makefile
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/web/api/formatters/json/README.md b/web/api/formatters/json/README.md
index 033bf8eb..8a0e37bf 100644
--- a/web/api/formatters/json/README.md
+++ b/web/api/formatters/json/README.md
@@ -2,26 +2,26 @@
The JSON formatter presents [results of database queries](../../queries) in the following formats:
-format|content type|description
-:---:|:---:|:-----
-`json`|application/json|return the query result as a json object
-`jsonp`|application/json|return the query result as a JSONP javascript callback
-`datatable`|application/json|return the query result as a Google `datatable`
-`datasource`|application/json|return the query result as a Google Visualization Provider `datasource` javascript callback
+| format | content type | description|
+|:----:|:----------:|:----------|
+| `json` | application/json | return the query result as a JSON object|
+| `jsonp` | application/json | return the query result as a JSONP javascript callback|
+| `datatable` | application/json | return the query result as a Google `datatable`|
+| `datasource` | application/json | return the query result as a Google Visualization Provider `datasource` javascript callback|
The JSON formatter respects the following API `&options=`:
-option|supported|description
-:---:|:---:|:---
-`google_json`|yes|enable the Google flavor of JSON (using double quotes for strings and `Date()` function for dates
-`objectrows`|yes|return each row as an object, instead of an array
-`nonzero`|yes|to return only the dimensions that have at least a non-zero value
-`flip`|yes|to return the rows older to newer (the default is newer to older)
-`seconds`|yes|to return the date and time in unix timestamp
-`ms`|yes|to return the date and time in unit timestamp as milliseconds
-`percent`|yes|to replace all values with their percentage over the row total
-`abs`|yes|to turn all values positive
-`null2zero`|yes|to replace gaps with zeros (the default prints the string `null`
+| option | supported | description|
+|:----:|:-------:|:----------|
+| `google_json` | yes | enable the Google flavor of JSON (using double quotes for strings and `Date()` function for dates)|
+| `objectrows` | yes | return each row as an object, instead of an array|
+| `nonzero` | yes | to return only the dimensions that have at least a non-zero value|
+| `flip` | yes | to return the rows older to newer (the default is newer to older)|
+| `seconds` | yes | to return the date and time as a unix timestamp|
+| `ms` | yes | to return the date and time as a unix timestamp in milliseconds|
+| `percent` | yes | to replace all values with their percentage over the row total|
+| `abs` | yes | to turn all values positive|
+| `null2zero` | yes | to replace gaps with zeros (the default prints the string `null`)|
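
Before the full examples below, a one-line sketch combining the json-specific options (chart and time range are illustrative):

```bash
# one row per object, Google-flavored JSON
curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=system.cpu&after=-600&points=10&format=json&options=objectrows,google_json'
```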
## Examples
@@ -148,5 +148,4 @@ google.visualization.Query.setResponse({version:'0.6',reqId:'0',status:'ok',sig:
}});
```
-
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fapi%2Fformatters%2Fjson%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fapi%2Fformatters%2Fjson%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/web/api/formatters/json_wrapper.c b/web/api/formatters/json_wrapper.c
index 4911d2f0..5ef2caf1 100644
--- a/web/api/formatters/json_wrapper.c
+++ b/web/api/formatters/json_wrapper.c
@@ -182,11 +182,14 @@ void rrdr_json_wrapper_begin(RRDR *r, BUFFER *wb, uint32_t format, RRDR_OPTIONS
rrdr_buffer_print_format(wb, format);
- buffer_sprintf(wb, "%s,\n"
- " %sresult%s: "
- , sq
- , kq, kq
- );
+ if((options & RRDR_OPTION_CUSTOM_VARS) && (options & RRDR_OPTION_JSON_WRAP)) {
+ buffer_sprintf(wb, "%s,\n %schart_variables%s: ", sq, kq, kq);
+ health_api_v1_chart_custom_variables2json(r->st, wb);
+ }
+ else
+ buffer_sprintf(wb, "%s", sq);
+
+ buffer_sprintf(wb, ",\n %sresult%s: ", kq, kq);
if(string_value) buffer_strcat(wb, sq);
//info("JSONWRAPPER(): %s: END", r->st->id);
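With this change, the wrapper emits the chart's custom variables just before the result payload whenever both `RRDR_OPTION_CUSTOM_VARS` and `RRDR_OPTION_JSON_WRAP` are set. An abbreviated sketch of the resulting shape (the variable name and value are hypothetical, and the wrapper's other keys are elided):

```
{
  ...,
  "chart_variables": { "user_defined_variable": 1234 },
  "result": ...
}
```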
diff --git a/web/api/formatters/rrdset2json.c b/web/api/formatters/rrdset2json.c
index 2bbd2a70..4d913992 100644
--- a/web/api/formatters/rrdset2json.c
+++ b/web/api/formatters/rrdset2json.c
@@ -73,7 +73,10 @@ void rrdset2json(RRDSET *st, BUFFER *wb, size_t *dimensions_count, size_t *memor
if(dimensions_count) *dimensions_count += dimensions;
if(memory_used) *memory_used += memory;
- buffer_strcat(wb, "\n\t\t\t},\n\t\t\t\"green\": ");
+ buffer_sprintf(wb, "\n\t\t\t},\n\t\t\t\"chart_variables\": ");
+ health_api_v1_chart_custom_variables2json(st, wb);
+
+ buffer_strcat(wb, ",\n\t\t\t\"green\": ");
buffer_rrd_value(wb, st->green);
buffer_strcat(wb, ",\n\t\t\t\"red\": ");
buffer_rrd_value(wb, st->red);
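Correspondingly, each chart object produced by `rrdset2json()` now carries a `chart_variables` member next to the `green` and `red` alarm thresholds. An abbreviated sketch (values hypothetical, surrounding keys elided):

```
"chart_variables": { "user_defined_variable": 1234 },
"green": 10,
"red": 20
```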
diff --git a/web/api/formatters/ssv/Makefile.in b/web/api/formatters/ssv/Makefile.in
new file mode 100644
index 00000000..bbda59cf
--- /dev/null
+++ b/web/api/formatters/ssv/Makefile.in
@@ -0,0 +1,514 @@
+# Makefile.in generated by automake 1.15.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2017 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+VPATH = @srcdir@
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = web/api/formatters/ssv
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(dist_noinst_DATA) \
+ $(am__DIST_COMMON)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_noinst_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+am__DIST_COMMON = $(srcdir)/Makefile.in
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CUPSCONFIG = @CUPSCONFIG@
+CXX = @CXX@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CXX_BINARY = @CXX_BINARY@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+JSON_CFLAGS = @JSON_CFLAGS@
+JSON_LIBS = @JSON_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
+LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
+LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
+LIBCURL_LIBS = @LIBCURL_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
+LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
+LIBSSL_LIBS = @LIBSSL_LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
+OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
+OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
+OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
+OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
+OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
+OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
+OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
+OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
+OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
+OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
+OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
+OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
+PROTOBUF_LIBS = @PROTOBUF_LIBS@
+PROTOC = @PROTOC@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
+XENLIGHT_LIBS = @XENLIGHT_LIBS@
+YAJL_CFLAGS = @YAJL_CFLAGS@
+YAJL_LIBS = @YAJL_LIBS@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
+
+all: all-am
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu web/api/formatters/ssv/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu web/api/formatters/ssv/Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(DATA)
+installdirs:
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags-am uninstall uninstall-am
+
+.PRECIOUS: Makefile
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/web/api/formatters/ssv/README.md b/web/api/formatters/ssv/README.md
index a2892999..ec85fe46 100644
--- a/web/api/formatters/ssv/README.md
+++ b/web/api/formatters/ssv/README.md
@@ -5,21 +5,21 @@ to a single value and returns a list of such values showing how it changes throu
It supports the following formats:
-format|content type|description
-:---:|:---:|:-----
-`ssv`|text/plain|a space separated list of values
-`ssvcomma`|text/plain|a comma separated list of values
-`array`|application/json|a JSON array
+| format | content type | description |
+|:----:|:----------:|:----------|
+| `ssv` | text/plain | a space-separated list of values |
+| `ssvcomma` | text/plain | a comma-separated list of values |
+| `array` | application/json | a JSON array |
The `ssv` formatter respects the following API `&options=`:
-option|supported|description
-:---:|:---:|:---
-`nonzero`|yes|to return only the dimensions that have at least a non-zero value
-`flip`|yes|to return the numbers older to newer (the default is newer to older)
-`percent`|yes|to replace all values with their percentage over the row total
-`abs`|yes|to turn all values positive, before using them
-`min2max`|yes|to return the delta from the minimum value to the maximum value (across dimensions)
+| option | supported | description |
+| :----:|:-------:|:----------|
+| `nonzero` | yes | to return only the dimensions that have at least a non-zero value |
+| `flip` | yes | to return the numbers older to newer (the default is newer to older) |
+| `percent` | yes | to replace all values with their percentage over the row total |
+| `abs` | yes | to turn all values positive, before using them |
+| `min2max` | yes | to return the delta from the minimum value to the maximum value (across dimensions) |
## Examples
@@ -51,4 +51,4 @@ in a JSON array:
[278,258,268,239,259,260,243,266,278,318,264,258]
```
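
A hedged usage sketch (host, chart, and time window below are placeholders, not taken from this document): the same data can be fetched space-separated or comma-separated by switching the `format` parameter:

```sh
# one value per time slot, space separated
curl "http://localhost:19999/api/v1/data?chart=system.cpu&format=ssv&after=-60"

# the same values, comma separated
curl "http://localhost:19999/api/v1/data?chart=system.cpu&format=ssvcomma&after=-60"
```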
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fapi%2Fformatters%2Fssv%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fapi%2Fformatters%2Fssv%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/web/api/formatters/value/Makefile.in b/web/api/formatters/value/Makefile.in
new file mode 100644
index 00000000..531c3e33
--- /dev/null
+++ b/web/api/formatters/value/Makefile.in
@@ -0,0 +1,514 @@
+# Makefile.in generated by automake 1.15.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2017 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+VPATH = @srcdir@
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = web/api/formatters/value
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(dist_noinst_DATA) \
+ $(am__DIST_COMMON)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_noinst_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+am__DIST_COMMON = $(srcdir)/Makefile.in
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CUPSCONFIG = @CUPSCONFIG@
+CXX = @CXX@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CXX_BINARY = @CXX_BINARY@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+JSON_CFLAGS = @JSON_CFLAGS@
+JSON_LIBS = @JSON_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
+LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
+LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
+LIBCURL_LIBS = @LIBCURL_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
+LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
+LIBSSL_LIBS = @LIBSSL_LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
+OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
+OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
+OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
+OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
+OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
+OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
+OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
+OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
+OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
+OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
+OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
+OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
+PROTOBUF_LIBS = @PROTOBUF_LIBS@
+PROTOC = @PROTOC@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
+XENLIGHT_LIBS = @XENLIGHT_LIBS@
+YAJL_CFLAGS = @YAJL_CFLAGS@
+YAJL_LIBS = @YAJL_LIBS@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
+
+all: all-am
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu web/api/formatters/value/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu web/api/formatters/value/Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(DATA)
+installdirs:
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags-am uninstall uninstall-am
+
+.PRECIOUS: Makefile
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/web/api/formatters/value/README.md b/web/api/formatters/value/README.md
index 50974de6..04a0f886 100644
--- a/web/api/formatters/value/README.md
+++ b/web/api/formatters/value/README.md
@@ -6,14 +6,14 @@ To calculate the single value to be returned, it sums the values of all dimensio
The Value formatter respects the following API `&options=`:
-option|supported|description
-:---:|:---:|:---
-`percent`|yes|to replace all values with their percentage over the row total
-`abs`|yes|to turn all values positive, before using them
-`min2max`|yes|to return the delta from the minimum value to the maximum value (across dimensions)
+| option | supported | description |
+|:----:|:-------:|:----------|
+| `percent` | yes | to replace all values with their percentage over the row total |
+| `abs` | yes | to turn all values positive, before using them |
+| `min2max` | yes | to return the delta from the minimum value to the maximum value (across dimensions) |
The Value formatter is not exposed by the API by itself.
Instead it is used by the [`ssv`](../ssv) formatter
and [health monitoring queries](../../../../health).
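
As a minimal sketch (host, chart, and window are assumptions for illustration), the Value formatter can be observed indirectly through the `ssv` formatter, which emits one such single value per time slot:

```sh
# each number in the reply is the sum of all dimensions for one time slot
curl "http://localhost:19999/api/v1/data?chart=system.cpu&format=ssv&after=-10"
```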
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fapi%2Fformatters%2Fvalue%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fapi%2Fformatters%2Fvalue%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/web/api/health/Makefile.in b/web/api/health/Makefile.in
new file mode 100644
index 00000000..bf38c93e
--- /dev/null
+++ b/web/api/health/Makefile.in
@@ -0,0 +1,514 @@
+# Makefile.in generated by automake 1.15.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2017 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+VPATH = @srcdir@
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = web/api/health
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(dist_noinst_DATA) \
+ $(am__DIST_COMMON)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_noinst_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+am__DIST_COMMON = $(srcdir)/Makefile.in
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CUPSCONFIG = @CUPSCONFIG@
+CXX = @CXX@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CXX_BINARY = @CXX_BINARY@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+JSON_CFLAGS = @JSON_CFLAGS@
+JSON_LIBS = @JSON_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
+LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
+LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
+LIBCURL_LIBS = @LIBCURL_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
+LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
+LIBSSL_LIBS = @LIBSSL_LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
+OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
+OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
+OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
+OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
+OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
+OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
+OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
+OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
+OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
+OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
+OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
+OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
+PROTOBUF_LIBS = @PROTOBUF_LIBS@
+PROTOC = @PROTOC@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
+XENLIGHT_LIBS = @XENLIGHT_LIBS@
+YAJL_CFLAGS = @YAJL_CFLAGS@
+YAJL_LIBS = @YAJL_LIBS@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
+
+all: all-am
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu web/api/health/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu web/api/health/Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(DATA)
+installdirs:
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags-am uninstall uninstall-am
+
+.PRECIOUS: Makefile
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/web/api/health/README.md b/web/api/health/README.md
index 0b4f79f3..a86dbfdd 100644
--- a/web/api/health/README.md
+++ b/web/api/health/README.md
@@ -45,12 +45,13 @@ The following will return an SVG badge of the alarm named `NAME`, attached to th
## Health Management API
Netdata v1.12 and beyond provides a command API to control health checks and notifications at runtime. The feature is especially useful for maintenance periods, during which you would otherwise receive meaningless alarms.
-From Netdata v1.16.0 and beyond, the configuration controlled via the API commands is [persisted across netdata restarts](#persistence).
+From Netdata v1.16.0 and beyond, the configuration controlled via the API commands is [persisted across Netdata restarts](#persistence).
Specifically, the API allows you to:
- - Disable health checks completely. Alarm conditions will not be evaluated at all and no entries will be added to the alarm log.
- - Silence alarm notifications. Alarm conditions will be evaluated, the alarms will appear in the log and the netdata UI will show the alarms as active, but no notifications will be sent.
- - Disable or Silence specific alarms that match selectors on alarm/template name, chart, context, host and family.
+
+- Disable health checks completely. Alarm conditions will not be evaluated at all and no entries will be added to the alarm log.
+- Silence alarm notifications. Alarm conditions will be evaluated, the alarms will appear in the log and the Netdata UI will show the alarms as active, but no notifications will be sent.
+- Disable or Silence specific alarms that match selectors on alarm/template name, chart, context, host and family.
The API is available by default, but it is protected by an `api authorization token` that is stored in the file you will see in the following entry of `http://localhost:19999/netdata.conf`:
@@ -65,31 +66,31 @@ You can access the API via GET requests, by adding the bearer token to an `Autho
curl "http://myserver/api/v1/manage/health?cmd=RESET" -H "X-Auth-Token: Mytoken"
```
-By default access to the health management API is only allowed from `localhost`. Accessing the API from anything else will return a 403 error with the message `You are not allowed to access this resource.`. You can change permissions by editing the `allow management from` variable in netdata.conf within the [web] section. See [web server access lists](../../server/#access-lists) for more information.
+By default access to the health management API is only allowed from `localhost`. Accessing the API from anything else will return a 403 error with the message `You are not allowed to access this resource.`. You can change permissions by editing the `allow management from` variable in `netdata.conf` within the [web] section. See [web server access lists](../../server/#access-lists) for more information.
-The command `RESET` just returns netdata to the default operation, with all health checks and notifications enabled.
+The command `RESET` just returns Netdata to the default operation, with all health checks and notifications enabled.
If you've configured and entered your token correctly, you should see the plain text response `All health checks and notifications are enabled`.
### Disable or silence all alarms
If all you need is to temporarily disable all health checks, issue the following before your maintenance period starts:
-```
+```sh
curl "http://myserver/api/v1/manage/health?cmd=DISABLE ALL" -H "X-Auth-Token: Mytoken"
```
The effect of disabling health checks is that the alarm criteria are not evaluated at all and nothing is written in the alarm log.
If you want the health checks to keep running but not to receive any notifications during your maintenance period, you can instead use this:
-```
+```sh
curl "http://myserver/api/v1/manage/health?cmd=SILENCE ALL" -H "X-Auth-Token: Mytoken"
```
-Alarms may then still be raised and logged in netdata, so you'll be able to see them via the UI.
+Alarms may then still be raised and logged in Netdata, so you'll be able to see them via the UI.
Regardless of the option you choose, at the end of your maintenance period you revert to the normal state via the `RESET` command.
-```
+```sh
curl "http://myserver/api/v1/manage/health?cmd=RESET" -H "X-Auth-Token: Mytoken"
```
@@ -97,8 +98,9 @@ Regardless of the option you choose, at the end of your maintenance period you r
If you do not wish to disable/silence all alarms, then the `DISABLE ALL` and `SILENCE ALL` commands can't be used.
Instead, the following commands expect that one or more alarm selectors will be added, so that only alarms that match the selectors are disabled or silenced.
-- `DISABLE` : Set the mode to disable health checks.
-- `SILENCE` : Set the mode to silence notifications.
+
+- `DISABLE` : Set the mode to disable health checks.
+- `SILENCE` : Set the mode to silence notifications.
You will normally put one of these commands in the same request as your first alarm selector, but it's possible to issue them separately as well.
You will get a warning in the response if a selector is added without a SILENCE/DISABLE command, or vice versa.
@@ -118,14 +120,15 @@ curl "http://myserver/api/v1/manage/health?cmd=SILENCE&context=load" -H "X-Auth-
#### Selection criteria
-The `selection criteria` are key/value pairs, in the format `key : value`, where value is a netdata [simple pattern](../../../libnetdata/simple_pattern/). This means that you can create very powerful selectors (you will rarely need more than one or two).
+The `selection criteria` are key/value pairs, in the format `key : value`, where value is a Netdata [simple pattern](../../../libnetdata/simple_pattern/). This means that you can create very powerful selectors (you will rarely need more than one or two).
The accepted keys for the `selection criteria` are the following:
-- `alarm` : The expression provided will match both `alarm` and `template` names.
-- `chart` : Chart ids/names, as shown on the dashboard. These will match the `on` entry of a configured `alarm`.
-- `context` : Chart context, as shown on the dashboard. These will match the `on` entry of a configured `template`.
-- `hosts` : The hostnames that will need to match.
-- `families` : The alarm families.
+
+- `alarm` : The expression provided will match both `alarm` and `template` names.
+- `chart` : Chart ids/names, as shown on the dashboard. These will match the `on` entry of a configured `alarm`.
+- `context` : Chart context, as shown on the dashboard. These will match the `on` entry of a configured `template`.
+- `hosts` : The hostnames that will need to match.
+- `families` : The alarm families.
You can add any of the selection criteria you need to the request, to ensure that only the alarms you are interested in are matched and disabled/silenced. For example, there is no reason to add `hosts: *` if you want the criteria to be applied to alarms for all hosts.
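
For instance, a sketch that reuses the placeholder host and token from the examples above, silencing only load-related alarms of a single chart (the selector values are illustrative):

```sh
curl "http://myserver/api/v1/manage/health?cmd=SILENCE&alarm=*load*&chart=system.load" -H "X-Auth-Token: Mytoken"
```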
@@ -149,7 +152,7 @@ http://localhost/api/v1/manage/health?families=cpu1 cpu2
### List silencers
-The command `LIST` was added in netdata v1.16.0 and returns a JSON with the current status of the silencers.
+The command `LIST` was added in Netdata v1.16.0 and returns a JSON with the current status of the silencers.
```sh
curl "http://myserver/api/v1/manage/health?cmd=LIST" -H "X-Auth-Token: Mytoken"
@@ -187,20 +190,20 @@ json
### Responses
-- "Auth Error" : Token authentication failed
-- "All alarm notifications are silenced" : Successful response to cmd=SILENCE ALL
-- "All health checks are disabled" : Successful response to cmd=DISABLE ALL
-- "All health checks and notifications are enabled" : Successful response to cmd=RESET
-- "Health checks disabled for alarms matching the selectors" : Added to the response for a cmd=DISABLE
-- "Alarm notifications silenced for alarms matching the selectors" : Added to the response for a cmd=SILENCE
-- "Alarm selector added" : Added to the response when a new selector is added
-- "Invalid key. Ignoring it." : Wrong name of a parameter. Added to the response and ignored.
-- "WARNING: Added alarm selector to silence/disable alarms without a SILENCE or DISABLE command." : Added to the response if a selector is added without a selector-specific command.
-- "WARNING: SILENCE or DISABLE command is ineffective without defining any alarm selectors." : Added to the response if a selector-specific command is issued without a selector.
+- "Auth Error" : Token authentication failed
+- "All alarm notifications are silenced" : Successful response to cmd=SILENCE ALL
+- "All health checks are disabled" : Successful response to cmd=DISABLE ALL
+- "All health checks and notifications are enabled" : Successful response to cmd=RESET
+- "Health checks disabled for alarms matching the selectors" : Added to the response for a cmd=DISABLE
+- "Alarm notifications silenced for alarms matching the selectors" : Added to the response for a cmd=SILENCE
+- "Alarm selector added" : Added to the response when a new selector is added
+- "Invalid key. Ignoring it." : Wrong name of a parameter. Added to the response and ignored.
+- "WARNING: Added alarm selector to silence/disable alarms without a SILENCE or DISABLE command." : Added to the response if a selector is added without a selector-specific command.
+- "WARNING: SILENCE or DISABLE command is ineffective without defining any alarm selectors." : Added to the response if a selector-specific command is issued without a selector.
### Persistence
-From netdata v1.16.0 and beyond, the silencers configuration is persisted to disk and loaded when netdata starts.
+From Netdata v1.16.0 and beyond, the silencers configuration is persisted to disk and loaded when Netdata starts.
The JSON string returned by the [LIST command](#list-silencers) is automatically saved to the `silencers file`, every time a command alters the silencers configuration.
The file's location is configurable in `netdata.conf`. The default is shown below:
@@ -213,5 +216,4 @@ The file's location is configurable in `netdata.conf`. The default is shown belo
The test script under [tests/health_mgmtapi](../../../tests/health_mgmtapi) contains a series of tests that you can either run or read through to understand the various calls and responses better.
-
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fapi%2Fhealth%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fapi%2Fhealth%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/web/api/netdata-swagger.json b/web/api/netdata-swagger.json
index 63bc5638..1a0ec863 100644
--- a/web/api/netdata-swagger.json
+++ b/web/api/netdata-swagger.json
@@ -243,7 +243,8 @@
"percentage",
"unaligned",
"match-ids",
- "match-names"
+ "match-names",
+ "showcustomvars"
],
"collectionFormat": "pipes"
},
@@ -656,6 +657,61 @@
}
}
},
+ "/alarm_count": {
+ "get": {
+ "summary": "Get an overall status of the chart",
+      "description": "Checks multiple charts with the same context and counts the number of alarms with the given status.",
+ "parameters": [
+ {
+ "in": "query",
+ "name": "context",
+ "description": "Specify context which should be checked",
+ "required": false,
+ "allowEmptyValue": true,
+ "type": "array",
+ "items": {
+ "type": "string",
+ "collectionFormat": "pipes"
+ },
+ "default": [
+ "system.cpu"
+ ]
+ },
+ {
+ "in": "query",
+ "name": "status",
+ "description": "Specify alarm status to count",
+ "required": false,
+ "allowEmptyValue": true,
+ "type": "string",
+ "enum": [
+ "REMOVED",
+ "UNDEFINED",
+ "UNINITIALIZED",
+ "CLEAR",
+ "RAISED",
+ "WARNING",
+ "CRITICAL"
+ ],
+ "default": "RAISED"
+ }
+ ],
+ "responses": {
+ "200": {
+            "description": "An array containing the count of alarms with the given status for each of the given contexts",
+ "schema": {
+ "type": "array",
+ "items": {
+ "type": "number"
+ }
+ }
+ },
+ "500": {
+ "description": "Internal server error. This usually means the server is out of memory."
+ }
+ }
+ }
+ },
"/manage/health": {
"get": {
"summary": "Accesses the health management API to control health checks and notifications at runtime.",
@@ -975,6 +1031,14 @@
}
}
},
+ "chart_variables": {
+ "type": "object",
+ "properties": {
+ "key": {
+ "$ref": "#/definitions/chart_variables"
+ }
+ }
+ },
"green": {
"type": "number",
"description": "Chart health green threshold"
@@ -1011,13 +1075,8 @@
"chart_variables": {
"type": "object",
"properties": {
- "varname1": {
- "type": "number",
- "format": "float"
- },
- "varname2": {
- "type": "number",
- "format": "float"
+ "key": {
+ "$ref": "#/definitions/chart_variables"
}
}
},
@@ -1049,6 +1108,19 @@
}
}
},
+ "chart_variables": {
+ "type": "object",
+ "properties": {
+ "varname1": {
+ "type": "number",
+ "format": "float"
+ },
+ "varname2": {
+ "type": "number",
+ "format": "float"
+ }
+ }
+ },
"dimension": {
"type": "object",
"properties": {
@@ -1145,6 +1217,14 @@
"type": "string",
"description": "The format of the result returned."
},
+ "chart_variables": {
+ "type": "object",
+ "properties": {
+ "key": {
+ "$ref": "#/definitions/chart_variables"
+ }
+ }
+ },
"result": {
"description": "The result requested, in the format requested."
}
diff --git a/web/api/netdata-swagger.yaml b/web/api/netdata-swagger.yaml
index 3386e01a..50e66140 100644
--- a/web/api/netdata-swagger.yaml
+++ b/web/api/netdata-swagger.yaml
@@ -163,7 +163,7 @@ paths:
type: array
items:
type: string
- enum: [ 'nonzero', 'flip', 'jsonwrap', 'min2max', 'seconds', 'milliseconds', 'abs', 'absolute', 'absolute-sum', 'null2zero', 'objectrows', 'google_json', 'percentage', 'unaligned', 'match-ids', 'match-names' ]
+ enum: [ 'nonzero', 'flip', 'jsonwrap', 'min2max', 'seconds', 'milliseconds', 'abs', 'absolute', 'absolute-sum', 'null2zero', 'objectrows', 'google_json', 'percentage', 'unaligned', 'match-ids', 'match-names', 'showcustomvars' ]
collectionFormat: pipes
default: [seconds, jsonwrap]
allowEmptyValue: false
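
With the new `showcustomvars` option added to the enum above, a data query can ask for the chart's custom variables to be included in the `jsonwrap` envelope. A minimal sketch, assuming a local agent and the `system.cpu` chart:

```sh
# Options are pipe-separated (collectionFormat: pipes); %7C encodes '|'.
curl "http://localhost:19999/api/v1/data?chart=system.cpu&options=jsonwrap%7Cshowcustomvars"
```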
@@ -430,6 +430,38 @@ paths:
type: array
items:
$ref: '#/definitions/alarm_log_entry'
+ /alarm_count:
+ get:
+ summary: 'Get an overall status of the chart'
+      description: "Checks multiple charts with the same context and counts the number of alarms with the given status."
+ parameters:
+ - in: query
+ name: context
+ description: "Specify context which should be checked"
+ required: false
+ allowEmptyValue: true
+ type: array
+ items:
+ type: string
+ collectionFormat: pipes
+ default: ['system.cpu']
+ - in: query
+ name: status
+ description: "Specify alarm status to count"
+ required: false
+ allowEmptyValue: true
+ type: string
+ enum: ['REMOVED', 'UNDEFINED', 'UNINITIALIZED', 'CLEAR', 'RAISED', 'WARNING', 'CRITICAL']
+ default: 'RAISED'
+ responses:
+ '200':
+          description: 'An array containing the count of alarms with the given status for each of the given contexts'
+ schema:
+ type: array
+ items:
+ type: number
+ '500':
+ description: 'Internal server error. This usually means the server is out of memory.'
/manage/health:
get:
summary: 'Accesses the health management API to control health checks and notifications at runtime.'
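
The new `/alarm_count` endpoint specified above can be exercised directly; a minimal sketch, assuming a local agent (the context and status values are the defaults from the spec):

```sh
# Count RAISED alarms for the system.cpu context.
curl "http://localhost:19999/api/v1/alarm_count?context=system.cpu&status=RAISED"

# Multiple contexts are pipe-separated; %7C encodes '|'.
curl "http://localhost:19999/api/v1/alarm_count?context=system.cpu%7Csystem.load&status=CRITICAL"
```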
@@ -654,6 +686,11 @@ definitions:
properties:
key:
$ref: '#/definitions/dimension'
+ chart_variables:
+ type: object
+ properties:
+ key:
+ $ref: '#/definitions/chart_variables'
green:
type: number
description: 'Chart health green threshold'
@@ -681,12 +718,8 @@ definitions:
chart_variables:
type: object
properties:
- varname1:
- type: number
- format: float
- varname2:
- type: number
- format: float
+ key:
+ $ref: '#/definitions/chart_variables'
family_variables:
type: object
properties:
@@ -705,6 +738,15 @@ definitions:
varname2:
type: number
format: float
+ chart_variables:
+ type: object
+ properties:
+ varname1:
+ type: number
+ format: float
+ varname2:
+ type: number
+ format: float
dimension:
type: object
properties:
@@ -776,6 +818,11 @@ definitions:
format:
type: string
description: 'The format of the result returned.'
+ chart_variables:
+ type: object
+ properties:
+ key:
+ $ref: '#/definitions/chart_variables'
result:
description: 'The result requested, in the format requested.'
alarms:
diff --git a/web/api/queries/Makefile.in b/web/api/queries/Makefile.in
new file mode 100644
index 00000000..dca8341d
--- /dev/null
+++ b/web/api/queries/Makefile.in
@@ -0,0 +1,706 @@
+# Makefile.in generated by automake 1.15.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2017 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+VPATH = @srcdir@
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = web/api/queries
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(dist_noinst_DATA) \
+ $(am__DIST_COMMON)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \
+ ctags-recursive dvi-recursive html-recursive info-recursive \
+ install-data-recursive install-dvi-recursive \
+ install-exec-recursive install-html-recursive \
+ install-info-recursive install-pdf-recursive \
+ install-ps-recursive install-recursive installcheck-recursive \
+ installdirs-recursive pdf-recursive ps-recursive \
+ tags-recursive uninstall-recursive
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_noinst_DATA)
+RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \
+ distclean-recursive maintainer-clean-recursive
+am__recursive_targets = \
+ $(RECURSIVE_TARGETS) \
+ $(RECURSIVE_CLEAN_TARGETS) \
+ $(am__extra_recursive_targets)
+AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \
+ distdir
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+# Read a list of newline-separated strings from the standard input,
+# and print each of them once, without duplicates. Input order is
+# *not* preserved.
+am__uniquify_input = $(AWK) '\
+ BEGIN { nonempty = 0; } \
+ { items[$$0] = 1; nonempty = 1; } \
+ END { if (nonempty) { for (i in items) print i; }; } \
+'
+# Make sure the list of sources is unique. This is necessary because,
+# e.g., the same source file might be shared among _SOURCES variables
+# for different programs/libraries.
+am__define_uniq_tagged_files = \
+ list='$(am__tagged_files)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | $(am__uniquify_input)`
+ETAGS = etags
+CTAGS = ctags
+DIST_SUBDIRS = $(SUBDIRS)
+am__DIST_COMMON = $(srcdir)/Makefile.in
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+am__relativize = \
+ dir0=`pwd`; \
+ sed_first='s,^\([^/]*\)/.*$$,\1,'; \
+ sed_rest='s,^[^/]*/*,,'; \
+ sed_last='s,^.*/\([^/]*\)$$,\1,'; \
+ sed_butlast='s,/*[^/]*$$,,'; \
+ while test -n "$$dir1"; do \
+ first=`echo "$$dir1" | sed -e "$$sed_first"`; \
+ if test "$$first" != "."; then \
+ if test "$$first" = ".."; then \
+ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \
+ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \
+ else \
+ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \
+ if test "$$first2" = "$$first"; then \
+ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \
+ else \
+ dir2="../$$dir2"; \
+ fi; \
+ dir0="$$dir0"/"$$first"; \
+ fi; \
+ fi; \
+ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \
+ done; \
+ reldir="$$dir2"
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CUPSCONFIG = @CUPSCONFIG@
+CXX = @CXX@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CXX_BINARY = @CXX_BINARY@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+JSON_CFLAGS = @JSON_CFLAGS@
+JSON_LIBS = @JSON_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
+LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
+LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
+LIBCURL_LIBS = @LIBCURL_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
+LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
+LIBSSL_LIBS = @LIBSSL_LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
+OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
+OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
+OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
+OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
+OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
+OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
+OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
+OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
+OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
+OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
+OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
+OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
+PROTOBUF_LIBS = @PROTOBUF_LIBS@
+PROTOC = @PROTOC@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
+XENLIGHT_LIBS = @XENLIGHT_LIBS@
+YAJL_CFLAGS = @YAJL_CFLAGS@
+YAJL_LIBS = @YAJL_LIBS@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+SUBDIRS = \
+ average \
+ des \
+ incremental_sum \
+ max \
+ min \
+ sum \
+ median \
+ ses \
+ stddev \
+ $(NULL)
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
+
+all: all-recursive
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu web/api/queries/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu web/api/queries/Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+
+# This directory's subdirectories are mostly independent; you can cd
+# into them and run 'make' without going through this Makefile.
+# To change the values of 'make' variables: instead of editing Makefiles,
+# (1) if the variable is set in 'config.status', edit 'config.status'
+# (which will cause the Makefiles to be regenerated when you run 'make');
+# (2) otherwise, pass the desired values on the 'make' command line.
+$(am__recursive_targets):
+ @fail=; \
+ if $(am__make_keepgoing); then \
+ failcom='fail=yes'; \
+ else \
+ failcom='exit 1'; \
+ fi; \
+ dot_seen=no; \
+ target=`echo $@ | sed s/-recursive//`; \
+ case "$@" in \
+ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \
+ *) list='$(SUBDIRS)' ;; \
+ esac; \
+ for subdir in $$list; do \
+ echo "Making $$target in $$subdir"; \
+ if test "$$subdir" = "."; then \
+ dot_seen=yes; \
+ local_target="$$target-am"; \
+ else \
+ local_target="$$target"; \
+ fi; \
+ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \
+ || eval $$failcom; \
+ done; \
+ if test "$$dot_seen" = "no"; then \
+ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \
+ fi; test -z "$$fail"
+
+ID: $(am__tagged_files)
+ $(am__define_uniq_tagged_files); mkid -fID $$unique
+tags: tags-recursive
+TAGS: tags
+
+tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+ set x; \
+ here=`pwd`; \
+ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \
+ include_option=--etags-include; \
+ empty_fix=.; \
+ else \
+ include_option=--include; \
+ empty_fix=; \
+ fi; \
+ list='$(SUBDIRS)'; for subdir in $$list; do \
+ if test "$$subdir" = .; then :; else \
+ test ! -f $$subdir/TAGS || \
+ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \
+ fi; \
+ done; \
+ $(am__define_uniq_tagged_files); \
+ shift; \
+ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
+ test -n "$$unique" || unique=$$empty_fix; \
+ if test $$# -gt 0; then \
+ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+ "$$@" $$unique; \
+ else \
+ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+ $$unique; \
+ fi; \
+ fi
+ctags: ctags-recursive
+
+CTAGS: ctags
+ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+ $(am__define_uniq_tagged_files); \
+ test -z "$(CTAGS_ARGS)$$unique" \
+ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
+ $$unique
+
+GTAGS:
+ here=`$(am__cd) $(top_builddir) && pwd` \
+ && $(am__cd) $(top_srcdir) \
+ && gtags -i $(GTAGS_ARGS) "$$here"
+cscopelist: cscopelist-recursive
+
+cscopelist-am: $(am__tagged_files)
+ list='$(am__tagged_files)'; \
+ case "$(srcdir)" in \
+ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \
+ *) sdir=$(subdir)/$(srcdir) ;; \
+ esac; \
+ for i in $$list; do \
+ if test -f "$$i"; then \
+ echo "$(subdir)/$$i"; \
+ else \
+ echo "$$sdir/$$i"; \
+ fi; \
+ done >> $(top_builddir)/cscope.files
+
+distclean-tags:
+ -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+ @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \
+ if test "$$subdir" = .; then :; else \
+ $(am__make_dryrun) \
+ || test -d "$(distdir)/$$subdir" \
+ || $(MKDIR_P) "$(distdir)/$$subdir" \
+ || exit 1; \
+ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \
+ $(am__relativize); \
+ new_distdir=$$reldir; \
+ dir1=$$subdir; dir2="$(top_distdir)"; \
+ $(am__relativize); \
+ new_top_distdir=$$reldir; \
+ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \
+ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \
+ ($(am__cd) $$subdir && \
+ $(MAKE) $(AM_MAKEFLAGS) \
+ top_distdir="$$new_top_distdir" \
+ distdir="$$new_distdir" \
+ am__remove_distdir=: \
+ am__skip_length_check=: \
+ am__skip_mode_fix=: \
+ distdir) \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-recursive
+all-am: Makefile $(DATA)
+installdirs: installdirs-recursive
+installdirs-am:
+install: install-recursive
+install-exec: install-exec-recursive
+install-data: install-data-recursive
+uninstall: uninstall-recursive
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-recursive
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-recursive
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-recursive
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic distclean-tags
+
+dvi: dvi-recursive
+
+dvi-am:
+
+html: html-recursive
+
+html-am:
+
+info: info-recursive
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-recursive
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-recursive
+
+install-html-am:
+
+install-info: install-info-recursive
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-recursive
+
+install-pdf-am:
+
+install-ps: install-ps-recursive
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-recursive
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-recursive
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-recursive
+
+pdf-am:
+
+ps: ps-recursive
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: $(am__recursive_targets) install-am install-strip
+
+.PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am check \
+ check-am clean clean-generic cscopelist-am ctags ctags-am \
+ distclean distclean-generic distclean-tags distdir dvi dvi-am \
+ html html-am info info-am install install-am install-data \
+ install-data-am install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs installdirs-am maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags tags-am uninstall uninstall-am
+
+.PRECIOUS: Makefile
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/web/api/queries/README.md b/web/api/queries/README.md
index 6a55398a..458179c7 100644
--- a/web/api/queries/README.md
+++ b/web/api/queries/README.md
@@ -4,16 +4,16 @@ Netdata database can be queried with `/api/v1/data` and `/api/v1/badge.svg` REST
Every data query accepts the following parameters:
-name|required|description
-:----:|:----:|:---
-`chart`|yes|The chart to be queried.
-`points`|no|The number of points to be returned. Netdata can reduce number of points by applying query grouping methods. If not given, the result will have the same granularity as the database (although this relates to `gtime`).
-`before`|no|The absolute timestamp or the relative (to now) time the query should finish evaluating data. If not given, it defaults to the timestamp of the latest point in the database.
-`after`|no|The absolute timestamp or the relative (to `before`) time the query should start evaluating data. if not given, it defaults to the timestamp of the oldest point in the database.
-`group`|no|The grouping method to use when reducing the points the database has. If not given, it defaults to `average`.
-`gtime`|no|A resampling period to change the units of the metrics (i.e. setting this to `60` will convert `per second` metrics to `per minute`. If not given it defaults to granularity of the database.
-`options`|no|A bitmap of options that can affect the operation of the query. Only 2 options are used by the query engine: `unaligned` and `percentage`. All the other options are used by the output formatters. The default is to return aligned data.
-`dimensions`|no|A simple pattern to filter the dimensions to be queried. The default is to return all the dimensions of the chart.
+|name|required|description|
+|:--:|:------:|:----------|
+|`chart`|yes|The chart to be queried.|
+|`points`|no|The number of points to be returned. Netdata can reduce the number of points by applying query grouping methods. If not given, the result will have the same granularity as the database (although this relates to `gtime`).|
+|`before`|no|The absolute timestamp or the relative (to now) time the query should finish evaluating data. If not given, it defaults to the timestamp of the latest point in the database.|
+|`after`|no|The absolute timestamp or the relative (to `before`) time the query should start evaluating data. If not given, it defaults to the timestamp of the oldest point in the database.|
+|`group`|no|The grouping method to use when reducing the points the database has. If not given, it defaults to `average`.|
+|`gtime`|no|A resampling period to change the units of the metrics (i.e. setting this to `60` will convert `per second` metrics to `per minute`). If not given, it defaults to the granularity of the database.|
+|`options`|no|A bitmap of options that can affect the operation of the query. Only 2 options are used by the query engine: `unaligned` and `percentage`. All the other options are used by the output formatters. The default is to return aligned data.|
+|`dimensions`|no|A simple pattern to filter the dimensions to be queried. The default is to return all the dimensions of the chart.|
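
Putting these parameters together, a typical query might look like the sketch below; the host is an assumption, and `system.cpu` is just the default chart used elsewhere in these docs:

```sh
# 60 averaged points covering the last 5 minutes, without alignment.
curl "http://localhost:19999/api/v1/data?chart=system.cpu&points=60&after=-300&group=average&options=unaligned"
```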
## Operation
@@ -23,22 +23,22 @@ The query engine works as follows (in this order):
`after` and `before` define a time-frame, accepting:
-- **absolute timestamps** (unix timestamps, i.e. seconds since epoch).
+- **absolute timestamps** (unix timestamps, i.e. seconds since epoch).
-- **relative timestamps**:
+- **relative timestamps**:
- `before` is relative to now and `after` is relative to `before`.
-
- Example: `before=-60&after=-60` evaluates to the time-frame from -120 up to -60 seconds in
- the past, relative to the latest entry of the database of the chart.
+ `before` is relative to now and `after` is relative to `before`.
+
+ Example: `before=-60&after=-60` evaluates to the time-frame from -120 up to -60 seconds in
+ the past, relative to the latest entry of the database of the chart.
The engine verifies that the time-frame requested is available at the database:
-- If the requested time-frame overlaps with the database, the excess requested
- will be truncated.
-
-- If the requested time-frame does not overlap with the database, the engine will
- return an empty data set.
+- If the requested time-frame overlaps with the database, the excess requested
+ will be truncated.
+
+- If the requested time-frame does not overlap with the database, the engine will
+ return an empty data set.
At the end of this operation, `after` and `before` are absolute timestamps.
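
As a sketch of the relative form described above (host and chart assumed):

```sh
# The time-frame from -120s up to -60s, relative to the latest entry:
# before is relative to now, after is relative to before.
curl "http://localhost:19999/api/v1/data?chart=system.cpu&before=-60&after=-60"
```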
@@ -49,27 +49,26 @@ expressed with fewer points, compared to what is available at the database.
There are 2 uses that enable this feature:
-- The caller requests a specific number of `points` to be returned.
-
- For example, for a time-frame of 10 minutes, the database has 600 points (1/sec),
- while the caller requested these 10 minutes to be expressed in 200 points.
-
- This feature is used by netdata dashboards when you zoom-out the charts.
- The dashboard is requesting the number of points the user's screen has.
- This saves bandwidth and speeds up the browser (fewer points to evaluate for drawing the charts).
-
-- The caller requests a **re-sampling** of the database, by setting `gtime` to any value
- above the granularity of the chart.
-
- For example, the chart's units is `requests/sec` and caller wants `requests/min`.
-
+- The caller requests a specific number of `points` to be returned.
+
+ For example, for a time-frame of 10 minutes, the database has 600 points (1/sec),
+ while the caller requested these 10 minutes to be expressed in 200 points.
+
+  This feature is used by Netdata dashboards when you zoom out the charts.
+ The dashboard is requesting the number of points the user's screen has.
+ This saves bandwidth and speeds up the browser (fewer points to evaluate for drawing the charts).
+- The caller requests a **re-sampling** of the database, by setting `gtime` to any value
+ above the granularity of the chart.
+
+  For example, the chart's unit is `requests/sec` and the caller wants `requests/min`.
+
Using `points` and `gtime` the query engine tries to find a best fit for **database-points**
vs **result-points** (we call this ratio `group points`). It always tries to keep `group points`
an integer. Keep in mind the query engine may shift `after` if required. See also the [example](#example).
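
As a back-of-the-envelope illustration of that ratio (not Netdata code, just the arithmetic from the example above):

```sh
# A 10-minute time-frame at 1/sec granularity holds 600 database points.
# Requesting points=200 gives group points = 600 / 200 = 3,
# i.e. every result point summarizes 3 database points.
database_points=600
requested_points=200
echo $(( database_points / requested_points ))   # prints 3
```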
#### Time-frame Alignment
-Alignment is a very important aspect of netdata queries. Without it, the animated
+Alignment is a very important aspect of Netdata queries. Without it, the animated
charts on the dashboards would constantly [change shape](#example) during incremental updates.
To provide consistent grouping through time, the query engine (by default) aligns
@@ -79,7 +78,7 @@ For example, if `group points` is 60 and alignment is enabled, the engine will r
each point with durations XX:XX:00 - XX:XX:59, matching whole minutes.
To disable alignment, pass `&options=unaligned` to the query.
-
+
#### Query Execution
To execute the query, the engine evaluates all dimensions of the chart, one after another.
@@ -100,18 +99,18 @@ For each value it calls the **grouping method** given with the `&group=` query p
The following grouping methods are supported. These are given all the values in the time-frame
and they group the values every `group points`.
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=min&after=-60&label=min&value_color=blue) finds the minimum value
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=max&after=-60&label=max&value_color=lightblue) finds the maximum value
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=average&after=-60&label=average&value_color=yellow) finds the average value
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=sum&after=-60&label=sum&units=requests&value_color=orange) adds all the values and returns the sum
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=median&after=-60&label=median&value_color=red) sorts the values and returns the value in the middle of the list
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=stddev&after=-60&label=stddev&value_color=green) finds the standard deviation of the values
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=cv&after=-60&label=cv&units=pcent&value_color=yellow) finds the relative standard deviation (coefficient of variation) of the values
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=ses&after=-60&label=ses&value_color=brown) finds the exponential weighted moving average of the values
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=des&after=-60&label=des&value_color=blue) applies Holt-Winters double exponential smoothing
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=incremental_sum&after=-60&label=incremental_sum&value_color=red) finds the difference of the last vs the first value
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=min&after=-60&label=min&value_color=blue) finds the minimum value
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=max&after=-60&label=max&value_color=lightblue) finds the maximum value
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=average&after=-60&label=average&value_color=yellow) finds the average value
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=sum&after=-60&label=sum&units=requests&value_color=orange) adds all the values and returns the sum
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=median&after=-60&label=median&value_color=red) sorts the values and returns the value in the middle of the list
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=stddev&after=-60&label=stddev&value_color=green) finds the standard deviation of the values
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=cv&after=-60&label=cv&units=pcent&value_color=yellow) finds the relative standard deviation (coefficient of variation) of the values
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=ses&after=-60&label=ses&value_color=brown) finds the exponential weighted moving average of the values
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=des&after=-60&label=des&value_color=blue) applies Holt-Winters double exponential smoothing
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=incremental_sum&after=-60&label=incremental_sum&value_color=red) finds the difference of the last vs the first value
-The examples shown above, are live information from the `successful` web requests of the global netdata registry.
+The examples shown above are live information from the `successful` web requests of the global Netdata registry.
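
The same values behind these badges can be fetched as data queries; a hedged example, assuming the demo registry serves `/api/v1/data` as well:

```sh
# One median-grouped point over the last minute of successful responses.
curl "https://registry.my-netdata.io/api/v1/data?chart=web_log_nginx.response_statuses&dimensions=success&group=median&after=-60&points=1"
```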
## Further processing
@@ -129,44 +128,44 @@ the result.
## Example
-When netdata is reducing metrics, it tries to return always the same boundaries. So, if we want 10s averages, it will always return points starting at a `unix timestamp % 10 = 0`.
+When Netdata is reducing metrics, it always tries to return the same boundaries. So, if we want 10s averages, it will always return points starting where `unix timestamp % 10 = 0`.
Let's see why this is needed, by looking at the error case.
Assume we have 5 points:
-| time | value |
-| :-: | :-: |
-| 00:01 | 1 |
-| 00:02 | 2 |
-| 00:03 | 3 |
-| 00:04 | 4 |
-| 00:05 | 5 |
+|time|value|
+|:--:|:---:|
+|00:01|1|
+|00:02|2|
+|00:03|3|
+|00:04|4|
+|00:05|5|
-At 00:04 you ask for 2 points for 4 seconds in the past. So `group = 2`. netdata would return:
+At 00:04 you ask for 2 points for 4 seconds in the past. So `group = 2`. Netdata would return:
-| point | time | value |
-| :-: | :-: | :-: |
-| 1 | 00:01 - 00:02 | 1.5 |
-| 2 | 00:03 - 00:04 | 3.5 |
+|point|time|value|
+|:---:|:--:|:---:|
+|1|00:01 - 00:02|1.5|
+|2|00:03 - 00:04|3.5|
A second later, the chart is refreshed and makes the same request again at 00:05. These are the points that would have been returned:
-| point | time | value |
-| :-: | :-: | :-: |
-| 1 | 00:02 - 00:03 | 2.5 |
-| 2 | 00:04 - 00:05 | 4.5 |
+|point|time|value|
+|:---:|:--:|:---:|
+|1|00:02 - 00:03|2.5|
+|2|00:04 - 00:05|4.5|
**Wait a moment!** The chart was shifted by just one point, yet it changed values! Point 2 was 3.5 and, when shifted to point 1, it became 2.5! If you see this in a chart, it's a mess. The charts change shape constantly.
-For this reason, netdata always aligns the data it returns to the `group`.
+For this reason, Netdata always aligns the data it returns to the `group`.
-When you request `points=1`, netdata understands that you need 1 point for the whole database, so `group = 3600`. Then it tries to find the starting point which would be `timestamp % 3600 = 0` Within a database of 3600 seconds, there is one such point for sure. Then it tries to find the average of 3600 points. But, most probably it will not find 3600 of them (for just 1 out of 3600 seconds this query will return something).
+When you request `points=1`, Netdata understands that you need 1 point for the whole database, so `group = 3600`. Then it tries to find the starting point, which would be `timestamp % 3600 = 0`. Within a database of 3600 seconds, there is one such point for sure. Then it tries to find the average of 3600 points. But most probably it will not find all 3600 of them (for just 1 out of 3600 seconds this query will return something).
So, the proper way to query the database is to also set at least `after`. The following call will return 1 point for the last complete 10-second duration (it starts at `timestamp % 10 = 0`):
-http://netdata.firehol.org/api/v1/data?chart=system.cpu&points=1&after=-10&options=seconds
+<http://netdata.firehol.org/api/v1/data?chart=system.cpu&points=1&after=-10&options=seconds>
When you keep calling this URL, you will see that it returns one new value every 10 seconds, and the timestamp always ends with zero. Similarly, if you say `points=1&after=-5` it will always return timestamps ending with 0 or 5.
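
To observe this behaviour, one can poll the query in a loop; a sketch, using the demo host from the URL above:

```sh
# The returned timestamp advances in 10-second steps, always ending in 0.
while true; do
    curl -s "http://netdata.firehol.org/api/v1/data?chart=system.cpu&points=1&after=-10&options=seconds"
    sleep 1
done
```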
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fapi%2Fqueries%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fapi%2Fqueries%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/web/api/queries/average/Makefile.in b/web/api/queries/average/Makefile.in
new file mode 100644
index 00000000..ac0d61ec
--- /dev/null
+++ b/web/api/queries/average/Makefile.in
@@ -0,0 +1,514 @@
+# Makefile.in generated by automake 1.15.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2017 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+VPATH = @srcdir@
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = web/api/queries/average
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(dist_noinst_DATA) \
+ $(am__DIST_COMMON)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_noinst_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+am__DIST_COMMON = $(srcdir)/Makefile.in
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CUPSCONFIG = @CUPSCONFIG@
+CXX = @CXX@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CXX_BINARY = @CXX_BINARY@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+JSON_CFLAGS = @JSON_CFLAGS@
+JSON_LIBS = @JSON_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
+LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
+LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
+LIBCURL_LIBS = @LIBCURL_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
+LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
+LIBSSL_LIBS = @LIBSSL_LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
+OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
+OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
+OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
+OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
+OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
+OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
+OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
+OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
+OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
+OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
+OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
+OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
+PROTOBUF_LIBS = @PROTOBUF_LIBS@
+PROTOC = @PROTOC@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
+XENLIGHT_LIBS = @XENLIGHT_LIBS@
+YAJL_CFLAGS = @YAJL_CFLAGS@
+YAJL_LIBS = @YAJL_LIBS@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
+
+all: all-am
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu web/api/queries/average/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu web/api/queries/average/Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(DATA)
+installdirs:
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags-am uninstall uninstall-am
+
+.PRECIOUS: Makefile
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/web/api/queries/average/README.md b/web/api/queries/average/README.md
index 873b60a5..6b2c0dfa 100644
--- a/web/api/queries/average/README.md
+++ b/web/api/queries/average/README.md
@@ -30,12 +30,12 @@ It can also be used in APIs and badges as `&group=average` in the URL.
Examining last 1 minute `successful` web server responses:
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=min&after=-60&label=min)
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=average&after=-60&label=average&value_color=orange)
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=max&after=-60&label=max)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=min&after=-60&label=min)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=average&after=-60&label=average&value_color=orange)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=max&after=-60&label=max)
## References
-- [https://en.wikipedia.org/wiki/Average](https://en.wikipedia.org/wiki/Average).
+- <https://en.wikipedia.org/wiki/Average>.
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fapi%2Fqueries%2Faverage%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fapi%2Fqueries%2Faverage%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/web/api/queries/average/average.c b/web/api/queries/average/average.c
index c871b877..2c64358e 100644
--- a/web/api/queries/average/average.c
+++ b/web/api/queries/average/average.c
@@ -46,9 +46,12 @@ calculated_number grouping_flush_average(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_
*rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
}
else {
- if(unlikely(r->internal.resampling_group != 1))
- value = g->sum / r->internal.resampling_divisor;
- else
+ if(unlikely(r->internal.resampling_group != 1)) {
+ if (unlikely(r->result_options & RRDR_RESULT_OPTION_VARIABLE_STEP))
+ value = g->sum / g->count / r->internal.resampling_divisor;
+ else
+ value = g->sum / r->internal.resampling_divisor;
+ } else
value = g->sum / g->count;
}
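
The average.c hunk above extends the flush step of the `average` grouping so that variable-step results (`RRDR_RESULT_OPTION_VARIABLE_STEP`) divide by the number of points actually collected before applying the resampling divisor. A minimal self-contained sketch of the same flush logic, with the netdata-specific types reduced to plain C (the struct and parameter names here are simplified stand-ins, not the real netdata definitions):

```c
#include <stdio.h>

/* Simplified stand-in for netdata's average grouping state -- illustrative only. */
struct avg_group {
    double sum;   /* sum of the values collected into this group */
    long   count; /* how many values were collected (non-zero when flushed) */
};

/* Mirrors the decision tree added in the hunk above:
 *  - no resampling       -> plain mean: sum / count
 *  - fixed-step resample -> sum / resampling_divisor
 *  - variable-step       -> mean first, then the divisor, so groups with
 *                           different point counts stay comparable */
static double flush_average(struct avg_group *g,
                            long resampling_group,
                            double resampling_divisor,
                            int variable_step) {
    double value;
    if (resampling_group != 1) {
        if (variable_step)
            value = g->sum / g->count / resampling_divisor;
        else
            value = g->sum / resampling_divisor;
    }
    else
        value = g->sum / g->count;
    g->sum = 0.0;
    g->count = 0;
    return value;
}

int main(void) {
    struct avg_group g = { .sum = 30.0, .count = 3 };
    /* variable step: 30 / 3 / 2.0 = 5.0 */
    printf("flushed value = %f\n", flush_average(&g, 2, 2.0, 1));
    return 0;
}
```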
diff --git a/web/api/queries/des/Makefile.in b/web/api/queries/des/Makefile.in
new file mode 100644
index 00000000..1d4e47f7
--- /dev/null
+++ b/web/api/queries/des/Makefile.in
@@ -0,0 +1,514 @@
+# Makefile.in generated by automake 1.15.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2017 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+VPATH = @srcdir@
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = web/api/queries/des
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(dist_noinst_DATA) \
+ $(am__DIST_COMMON)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_noinst_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+am__DIST_COMMON = $(srcdir)/Makefile.in
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CUPSCONFIG = @CUPSCONFIG@
+CXX = @CXX@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CXX_BINARY = @CXX_BINARY@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+JSON_CFLAGS = @JSON_CFLAGS@
+JSON_LIBS = @JSON_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
+LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
+LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
+LIBCURL_LIBS = @LIBCURL_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
+LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
+LIBSSL_LIBS = @LIBSSL_LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
+OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
+OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
+OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
+OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
+OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
+OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
+OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
+OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
+OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
+OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
+OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
+OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
+PROTOBUF_LIBS = @PROTOBUF_LIBS@
+PROTOC = @PROTOC@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
+XENLIGHT_LIBS = @XENLIGHT_LIBS@
+YAJL_CFLAGS = @YAJL_CFLAGS@
+YAJL_LIBS = @YAJL_LIBS@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
+
+all: all-am
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu web/api/queries/des/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu web/api/queries/des/Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(DATA)
+installdirs:
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags-am uninstall uninstall-am
+
+.PRECIOUS: Makefile
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/web/api/queries/des/README.md b/web/api/queries/des/README.md
index 486221cb..57ac805d 100644
--- a/web/api/queries/des/README.md
+++ b/web/api/queries/des/README.md
@@ -55,14 +55,14 @@ It can also be used in APIs and badges as `&group=des` in the URL.
Examining last 1 minute `successful` web server responses:
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=min&after=-60&label=min)
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=average&after=-60&label=average&value_color=yellow)
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=ses&after=-60&label=single+exponential+smoothing&value_color=yellow)
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=des&after=-60&label=double+exponential+smoothing&value_color=orange)
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=max&after=-60&label=max)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=min&after=-60&label=min)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=average&after=-60&label=average&value_color=yellow)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=ses&after=-60&label=single+exponential+smoothing&value_color=yellow)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=des&after=-60&label=double+exponential+smoothing&value_color=orange)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=max&after=-60&label=max)
## References
-- [https://en.wikipedia.org/wiki/Exponential_smoothing](https://en.wikipedia.org/wiki/Exponential_smoothing).
+- <https://en.wikipedia.org/wiki/Exponential_smoothing>.
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fapi%2Fqueries%2Fdes%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fapi%2Fqueries%2Fdes%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
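
For context on the grouping this README documents: `des` is double exponential smoothing, which tracks a level and a trend term per window. A minimal sketch following the cited Wikipedia article -- illustrative only, not netdata's actual `des` implementation (which derives its smoothing constants from the chart's update frequency):

```c
#include <stdio.h>

/* Double (Holt) exponential smoothing over a window of samples.
 * alpha smooths the level, beta smooths the trend. */
static double des(const double *x, int n, double alpha, double beta) {
    if (n < 2)
        return n > 0 ? x[0] : 0.0;
    double level = x[0];
    double trend = x[1] - x[0];          /* simple initial trend estimate */
    for (int i = 1; i < n; i++) {
        double prev_level = level;
        level = alpha * x[i] + (1.0 - alpha) * (level + trend);
        trend = beta * (level - prev_level) + (1.0 - beta) * trend;
    }
    return level;                        /* smoothed value for the window */
}

int main(void) {
    double samples[] = { 10, 12, 11, 15, 14, 18 };
    printf("des = %f\n", des(samples, 6, 0.3, 0.1));
    return 0;
}
```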
diff --git a/web/api/queries/incremental_sum/Makefile.in b/web/api/queries/incremental_sum/Makefile.in
new file mode 100644
index 00000000..e76a9386
--- /dev/null
+++ b/web/api/queries/incremental_sum/Makefile.in
@@ -0,0 +1,514 @@
+# Makefile.in generated by automake 1.15.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2017 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+VPATH = @srcdir@
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = web/api/queries/incremental_sum
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(dist_noinst_DATA) \
+ $(am__DIST_COMMON)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_noinst_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+am__DIST_COMMON = $(srcdir)/Makefile.in
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CUPSCONFIG = @CUPSCONFIG@
+CXX = @CXX@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CXX_BINARY = @CXX_BINARY@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+JSON_CFLAGS = @JSON_CFLAGS@
+JSON_LIBS = @JSON_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
+LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
+LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
+LIBCURL_LIBS = @LIBCURL_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
+LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
+LIBSSL_LIBS = @LIBSSL_LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
+OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
+OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
+OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
+OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
+OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
+OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
+OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
+OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
+OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
+OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
+OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
+OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
+PROTOBUF_LIBS = @PROTOBUF_LIBS@
+PROTOC = @PROTOC@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
+XENLIGHT_LIBS = @XENLIGHT_LIBS@
+YAJL_CFLAGS = @YAJL_CFLAGS@
+YAJL_LIBS = @YAJL_LIBS@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
+
+all: all-am
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu web/api/queries/incremental_sum/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu web/api/queries/incremental_sum/Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(DATA)
+installdirs:
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags-am uninstall uninstall-am
+
+.PRECIOUS: Makefile
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/web/api/queries/incremental_sum/README.md b/web/api/queries/incremental_sum/README.md
index 47f833f3..10878890 100644
--- a/web/api/queries/incremental_sum/README.md
+++ b/web/api/queries/incremental_sum/README.md
@@ -24,13 +24,13 @@ It can also be used in APIs and badges as `&group=incremental_sum` in the URL.
Examining last 1 minute `successful` web server responses:
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=min&after=-60&label=min)
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=average&after=-60&label=average)
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=max&after=-60&label=max)
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=incremental_sum&after=-60&label=incremental+sum&value_color=orange)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=min&after=-60&label=min)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=average&after=-60&label=average)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=max&after=-60&label=max)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=incremental_sum&after=-60&label=incremental+sum&value_color=orange)
## References
-- none
+- none
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fapi%2Fqueries%2Fincremental_sum%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fapi%2Fqueries%2Fincremental_sum%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
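
For context on the grouping this README documents: `incremental_sum` accumulates the difference between consecutive points in each group, which telescopes to last value minus first value. A minimal sketch under that reading (simplified stand-in code, not netdata's rrdr implementation):

```c
#include <stdio.h>

/* Sum of successive differences over a group of points; the terms
 * telescope, so this equals x[n-1] - x[0] for n > 0. */
static double incremental_sum(const double *x, int n) {
    double sum = 0.0;
    for (int i = 1; i < n; i++)
        sum += x[i] - x[i - 1];
    return sum;
}

int main(void) {
    double group[] = { 100, 103, 101, 110 };
    printf("incremental sum = %f\n", incremental_sum(group, 4)); /* 10 */
    return 0;
}
```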
diff --git a/web/api/queries/max/Makefile.in b/web/api/queries/max/Makefile.in
new file mode 100644
index 00000000..0b7affbb
--- /dev/null
+++ b/web/api/queries/max/Makefile.in
@@ -0,0 +1,514 @@
+# Makefile.in generated by automake 1.15.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2017 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+VPATH = @srcdir@
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = web/api/queries/max
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(dist_noinst_DATA) \
+ $(am__DIST_COMMON)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_noinst_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+am__DIST_COMMON = $(srcdir)/Makefile.in
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CUPSCONFIG = @CUPSCONFIG@
+CXX = @CXX@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CXX_BINARY = @CXX_BINARY@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+JSON_CFLAGS = @JSON_CFLAGS@
+JSON_LIBS = @JSON_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
+LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
+LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
+LIBCURL_LIBS = @LIBCURL_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
+LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
+LIBSSL_LIBS = @LIBSSL_LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
+OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
+OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
+OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
+OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
+OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
+OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
+OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
+OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
+OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
+OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
+OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
+OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
+PROTOBUF_LIBS = @PROTOBUF_LIBS@
+PROTOC = @PROTOC@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
+XENLIGHT_LIBS = @XENLIGHT_LIBS@
+YAJL_CFLAGS = @YAJL_CFLAGS@
+YAJL_LIBS = @YAJL_LIBS@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
+
+all: all-am
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu web/api/queries/max/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu web/api/queries/max/Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(DATA)
+installdirs:
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags-am uninstall uninstall-am
+
+.PRECIOUS: Makefile
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/web/api/queries/max/README.md b/web/api/queries/max/README.md
index 34dff021..b0f6f648 100644
--- a/web/api/queries/max/README.md
+++ b/web/api/queries/max/README.md
@@ -22,12 +22,12 @@ It can also be used in APIs and badges as `&group=max` in the URL.
Examining last 1 minute `successful` web server responses:
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=min&after=-60&label=min)
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=average&after=-60&label=average)
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=max&after=-60&label=max&value_color=orange)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=min&after=-60&label=min)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=average&after=-60&label=average)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=max&after=-60&label=max&value_color=orange)
## References
-- [https://en.wikipedia.org/wiki/Sample_maximum_and_minimum](https://en.wikipedia.org/wiki/Sample_maximum_and_minimum).
+- <https://en.wikipedia.org/wiki/Sample_maximum_and_minimum>.
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fapi%2Fqueries%2Fmax%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fapi%2Fqueries%2Fmax%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/web/api/queries/median/Makefile.in b/web/api/queries/median/Makefile.in
new file mode 100644
index 00000000..952ffe4c
--- /dev/null
+++ b/web/api/queries/median/Makefile.in
@@ -0,0 +1,514 @@
+# Makefile.in generated by automake 1.15.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2017 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+VPATH = @srcdir@
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = web/api/queries/median
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(dist_noinst_DATA) \
+ $(am__DIST_COMMON)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_noinst_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+am__DIST_COMMON = $(srcdir)/Makefile.in
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CUPSCONFIG = @CUPSCONFIG@
+CXX = @CXX@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CXX_BINARY = @CXX_BINARY@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+JSON_CFLAGS = @JSON_CFLAGS@
+JSON_LIBS = @JSON_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
+LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
+LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
+LIBCURL_LIBS = @LIBCURL_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
+LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
+LIBSSL_LIBS = @LIBSSL_LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
+OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
+OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
+OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
+OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
+OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
+OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
+OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
+OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
+OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
+OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
+OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
+OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
+PROTOBUF_LIBS = @PROTOBUF_LIBS@
+PROTOC = @PROTOC@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
+XENLIGHT_LIBS = @XENLIGHT_LIBS@
+YAJL_CFLAGS = @YAJL_CFLAGS@
+YAJL_LIBS = @YAJL_LIBS@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
+
+all: all-am
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu web/api/queries/median/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu web/api/queries/median/Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(DATA)
+installdirs:
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags-am uninstall uninstall-am
+
+.PRECIOUS: Makefile
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/web/api/queries/median/README.md b/web/api/queries/median/README.md
index 72f51a24..a6980124 100644
--- a/web/api/queries/median/README.md
+++ b/web/api/queries/median/README.md
@@ -27,13 +27,13 @@ It can also be used in APIs and badges as `&group=median` in the URL.
Examining last 1 minute `successful` web server responses:
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=min&after=-60&label=min)
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=average&after=-60&label=average)
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=median&after=-60&label=median&value_color=orange)
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=max&after=-60&label=max)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=min&after=-60&label=min)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=average&after=-60&label=average)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=median&after=-60&label=median&value_color=orange)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=max&after=-60&label=max)
## References
-- [https://en.wikipedia.org/wiki/Median](https://en.wikipedia.org/wiki/Median).
+- <https://en.wikipedia.org/wiki/Median>.
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fapi%2Fqueries%2Fmedian%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fapi%2Fqueries%2Fmedian%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/web/api/queries/min/Makefile.in b/web/api/queries/min/Makefile.in
new file mode 100644
index 00000000..1517de19
--- /dev/null
+++ b/web/api/queries/min/Makefile.in
@@ -0,0 +1,514 @@
+# Makefile.in generated by automake 1.15.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2017 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+VPATH = @srcdir@
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = web/api/queries/min
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(dist_noinst_DATA) \
+ $(am__DIST_COMMON)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_noinst_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+am__DIST_COMMON = $(srcdir)/Makefile.in
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CUPSCONFIG = @CUPSCONFIG@
+CXX = @CXX@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CXX_BINARY = @CXX_BINARY@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+JSON_CFLAGS = @JSON_CFLAGS@
+JSON_LIBS = @JSON_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
+LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
+LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
+LIBCURL_LIBS = @LIBCURL_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
+LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
+LIBSSL_LIBS = @LIBSSL_LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
+OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
+OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
+OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
+OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
+OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
+OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
+OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
+OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
+OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
+OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
+OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
+OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
+PROTOBUF_LIBS = @PROTOBUF_LIBS@
+PROTOC = @PROTOC@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
+XENLIGHT_LIBS = @XENLIGHT_LIBS@
+YAJL_CFLAGS = @YAJL_CFLAGS@
+YAJL_LIBS = @YAJL_LIBS@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
+
+all: all-am
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu web/api/queries/min/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu web/api/queries/min/Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(DATA)
+installdirs:
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags-am uninstall uninstall-am
+
+.PRECIOUS: Makefile
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/web/api/queries/min/README.md b/web/api/queries/min/README.md
index 28f6dbf5..6276faec 100644
--- a/web/api/queries/min/README.md
+++ b/web/api/queries/min/README.md
@@ -22,12 +22,12 @@ It can also be used in APIs and badges as `&group=min` in the URL.
Examining last 1 minute `successful` web server responses:
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=min&after=-60&label=min&value_color=orange)
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=average&after=-60&label=average)
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=max&after=-60&label=max)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=min&after=-60&label=min&value_color=orange)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=average&after=-60&label=average)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=max&after=-60&label=max)
## References
-- [https://en.wikipedia.org/wiki/Sample_maximum_and_minimum](https://en.wikipedia.org/wiki/Sample_maximum_and_minimum).
+- <https://en.wikipedia.org/wiki/Sample_maximum_and_minimum>.
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fapi%2Fqueries%2Fmin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fapi%2Fqueries%2Fmin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/web/api/queries/query.c b/web/api/queries/query.c
index e90dc8af..af3bcfe3 100644
--- a/web/api/queries/query.c
+++ b/web/api/queries/query.c
@@ -376,7 +376,7 @@ static inline void rrdr_done(RRDR *r, long rrdr_line) {
// ----------------------------------------------------------------------------
// fill RRDR for a single dimension
-static inline void do_dimension(
+static inline void do_dimension_variablestep(
RRDR *r
, long points_wanted
, RRDDIM *rd
@@ -384,16 +384,16 @@ static inline void do_dimension(
, time_t after_wanted
, time_t before_wanted
){
- RRDSET *st = r->st;
+// RRDSET *st = r->st;
time_t
now = after_wanted,
- dt = st->update_every,
+ dt = r->update_every,
max_date = 0,
min_date = 0;
long
- group_size = r->group,
+// group_size = r->group,
points_added = 0,
values_in_group = 0,
values_in_group_non_zero = 0,
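
Worth noting in the hunk above: the time step dt now comes from the result descriptor (r->update_every) rather than the chart (st->update_every), since with the dbengine points are no longer guaranteed to arrive at the chart's native interval. A minimal sketch, with hypothetical simplified types, of how the two walkers derive their step from the same field:

    /* Sketch (hypothetical, simplified types): where each walker gets its step.
     * The RRDR carries the output interval; the chart's native interval is the
     * output interval divided by the grouping factor. */
    typedef struct { int update_every; long group; } rrdr_sketch;

    static int output_step(const rrdr_sketch *r) {
        return r->update_every;                    /* dt in do_dimension_variablestep */
    }
    static int native_step(const rrdr_sketch *r) {
        return r->update_every / (int)r->group;    /* dt in do_dimension_fixedstep */
    }
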
@@ -403,102 +403,240 @@ static inline void do_dimension(
group_value_flags = RRDR_VALUE_NOTHING;
struct rrddim_query_handle handle;
- uint8_t initialized_query;
calculated_number min = r->min, max = r->max;
size_t db_points_read = 0;
+ time_t db_now = now;
+ storage_number n_curr, n_prev = SN_EMPTY_SLOT;
+ calculated_number value;
+
+ for(rd->state->query_ops.init(rd, &handle, now, before_wanted) ; points_added < points_wanted ; now += dt) {
+ // make sure we return data in the proper time range
+ if (unlikely(now > before_wanted)) {
+#ifdef NETDATA_INTERNAL_CHECKS
+ r->internal.log = "stopped, because attempted to access the db after 'wanted before'";
+#endif
+ break;
+ }
+ if (unlikely(now < after_wanted)) {
+#ifdef NETDATA_INTERNAL_CHECKS
+ r->internal.log = "skipped, because attempted to access the db before 'wanted after'";
+#endif
+ continue;
+ }
+
+ while (now >= db_now && (!rd->state->query_ops.is_finished(&handle) ||
+ does_storage_number_exist(n_prev))) {
+ value = NAN;
+ if (does_storage_number_exist(n_prev)) {
+ // use the previously read database value
+ n_curr = n_prev;
+ } else {
+ // read the value from the database
+ n_curr = rd->state->query_ops.next_metric(&handle, &db_now);
+ }
+ n_prev = SN_EMPTY_SLOT;
+ // db_now has a different value than above
+ if (likely(now >= db_now)) {
+ if (likely(does_storage_number_exist(n_curr))) {
+ value = unpack_storage_number(n_curr);
+ if (likely(value != 0.0))
+ values_in_group_non_zero++;
+
+ if (unlikely(did_storage_number_reset(n_curr)))
+ group_value_flags |= RRDR_VALUE_RESET;
+ }
+ } else {
+ // We must postpone processing the value and fill the result with gaps instead
+ if (likely(does_storage_number_exist(n_curr))) {
+ n_prev = n_curr;
+ }
+ }
+ // add this value to grouping
+ r->internal.grouping_add(r, value);
+ values_in_group++;
+ db_points_read++;
+ }
+
+ if (0 == values_in_group) {
+ // add NAN to grouping
+ r->internal.grouping_add(r, NAN);
+ }
+
+ rrdr_line = rrdr_line_init(r, now, rrdr_line);
+
+ if(unlikely(!min_date)) min_date = now;
+ max_date = now;
+
+ // find the place to store our values
+ RRDR_VALUE_FLAGS *rrdr_value_options_ptr = &r->o[rrdr_line * r->d + dim_id_in_rrdr];
+
+ // update the dimension options
+ if(likely(values_in_group_non_zero))
+ r->od[dim_id_in_rrdr] |= RRDR_DIMENSION_NONZERO;
+
+ // store the specific point options
+ *rrdr_value_options_ptr = group_value_flags;
+
+ // store the value
+ value = r->internal.grouping_flush(r, rrdr_value_options_ptr);
+ r->v[rrdr_line * r->d + dim_id_in_rrdr] = value;
+
+ if(likely(points_added || dim_id_in_rrdr)) {
+ // find the min/max across all dimensions
+
+ if(unlikely(value < min)) min = value;
+ if(unlikely(value > max)) max = value;
+
+ }
+ else {
+ // runs only when dim_id_in_rrdr == 0 && points_added == 0
+ // so, on the first point added for the query.
+ min = max = value;
+ }
+
+ points_added++;
+ values_in_group = 0;
+ group_value_flags = RRDR_VALUE_NOTHING;
+ values_in_group_non_zero = 0;
+ }
+ rd->state->query_ops.finalize(&handle);
+
+ r->internal.db_points_read += db_points_read;
+ r->internal.result_points_generated += points_added;
+
+ r->min = min;
+ r->max = max;
+ r->before = max_date;
+ r->after = min_date - (r->group - 1) * dt;
+ rrdr_done(r, rrdr_line);
+
+ #ifdef NETDATA_INTERNAL_CHECKS
+ if(unlikely(r->rows != points_added))
+ error("INTERNAL ERROR: %s.%s added %zu rows, but RRDR says I added %zu.", r->st->name, rd->name, (size_t)points_added, (size_t)r->rows);
+ #endif
+}
- for(initialized_query = 0 ; points_added < points_wanted ; now += dt) {
+static inline void do_dimension_fixedstep(
+ RRDR *r
+ , long points_wanted
+ , RRDDIM *rd
+ , long dim_id_in_rrdr
+ , time_t after_wanted
+ , time_t before_wanted
+){
+ RRDSET *st = r->st;
+
+ time_t
+ now = after_wanted,
+ dt = r->update_every / r->group, /* usually is st->update_every */
+ max_date = 0,
+ min_date = 0;
+ long
+ group_size = r->group,
+ points_added = 0,
+ values_in_group = 0,
+ values_in_group_non_zero = 0,
+ rrdr_line = -1;
+
+ RRDR_VALUE_FLAGS
+ group_value_flags = RRDR_VALUE_NOTHING;
+
+ struct rrddim_query_handle handle;
+
+ calculated_number min = r->min, max = r->max;
+ size_t db_points_read = 0;
+ time_t db_now = now;
+
+ for(rd->state->query_ops.init(rd, &handle, now, before_wanted) ; points_added < points_wanted ; now += dt) {
// make sure we return data in the proper time range
if(unlikely(now > before_wanted)) {
- #ifdef NETDATA_INTERNAL_CHECKS
+#ifdef NETDATA_INTERNAL_CHECKS
r->internal.log = "stopped, because attempted to access the db after 'wanted before'";
- #endif
+#endif
break;
}
if(unlikely(now < after_wanted)) {
- #ifdef NETDATA_INTERNAL_CHECKS
+#ifdef NETDATA_INTERNAL_CHECKS
r->internal.log = "skipped, because attempted to access the db before 'wanted after'";
- #endif
+#endif
continue;
}
-
- if (unlikely(!initialized_query)) {
- rd->state->query_ops.init(rd, &handle, now, before_wanted);
- initialized_query = 1;
- }
// read the value from the database
//storage_number n = rd->values[slot];
#ifdef NETDATA_INTERNAL_CHECKS
- if (rd->rrd_memory_mode == RRD_MEMORY_MODE_DBENGINE) {
-#ifdef ENABLE_DBENGINE
- if (now != handle.rrdeng.now)
- error("INTERNAL CHECK: Unaligned query for %s, database time: %ld, expected time: %ld", rd->id, (long)handle.rrdeng.now, (long)now);
-#endif
- } else if (rrdset_time2slot(st, now) != (long unsigned)handle.slotted.slot) {
+ if ((rd->rrd_memory_mode != RRD_MEMORY_MODE_DBENGINE) &&
+ (rrdset_time2slot(st, now) != (long unsigned)handle.slotted.slot)) {
error("INTERNAL CHECK: Unaligned query for %s, database slot: %lu, expected slot: %lu", rd->id, (long unsigned)handle.slotted.slot, rrdset_time2slot(st, now));
}
#endif
- storage_number n = rd->state->query_ops.next_metric(&handle);
- calculated_number value = NAN;
- if(likely(does_storage_number_exist(n))) {
+ db_now = now; // this is needed to set db_now in case the next_metric implementation does not set it
+ storage_number n = rd->state->query_ops.next_metric(&handle, &db_now);
+ for ( ; now <= db_now ; now += dt) {
+ calculated_number value = NAN;
+ if(likely(now >= db_now && does_storage_number_exist(n))) {
+#if defined(NETDATA_INTERNAL_CHECKS) && defined(ENABLE_DBENGINE)
+ if ((rd->rrd_memory_mode == RRD_MEMORY_MODE_DBENGINE) && (now != handle.rrdeng.now)) {
+ error("INTERNAL CHECK: Unaligned query for %s, database time: %ld, expected time: %ld", rd->id, (long)handle.rrdeng.now, (long)now);
+ }
+#endif
+ value = unpack_storage_number(n);
+ if(likely(value != 0.0))
+ values_in_group_non_zero++;
- value = unpack_storage_number(n);
- if(likely(value != 0.0))
- values_in_group_non_zero++;
+ if(unlikely(did_storage_number_reset(n)))
+ group_value_flags |= RRDR_VALUE_RESET;
- if(unlikely(did_storage_number_reset(n)))
- group_value_flags |= RRDR_VALUE_RESET;
+ }
- }
+ // add this value for grouping
+ r->internal.grouping_add(r, value);
+ values_in_group++;
+ db_points_read++;
- // add this value for grouping
- r->internal.grouping_add(r, value);
- values_in_group++;
- db_points_read++;
+ if(unlikely(values_in_group == group_size)) {
+ rrdr_line = rrdr_line_init(r, now, rrdr_line);
- if(unlikely(values_in_group == group_size)) {
- rrdr_line = rrdr_line_init(r, now, rrdr_line);
+ if(unlikely(!min_date)) min_date = now;
+ max_date = now;
- if(unlikely(!min_date)) min_date = now;
- max_date = now;
+ // find the place to store our values
+ RRDR_VALUE_FLAGS *rrdr_value_options_ptr = &r->o[rrdr_line * r->d + dim_id_in_rrdr];
- // find the place to store our values
- RRDR_VALUE_FLAGS *rrdr_value_options_ptr = &r->o[rrdr_line * r->d + dim_id_in_rrdr];
+ // update the dimension options
+ if(likely(values_in_group_non_zero))
+ r->od[dim_id_in_rrdr] |= RRDR_DIMENSION_NONZERO;
- // update the dimension options
- if(likely(values_in_group_non_zero))
- r->od[dim_id_in_rrdr] |= RRDR_DIMENSION_NONZERO;
+ // store the specific point options
+ *rrdr_value_options_ptr = group_value_flags;
- // store the specific point options
- *rrdr_value_options_ptr = group_value_flags;
+ // store the value
+ calculated_number value = r->internal.grouping_flush(r, rrdr_value_options_ptr);
+ r->v[rrdr_line * r->d + dim_id_in_rrdr] = value;
- // store the value
- calculated_number value = r->internal.grouping_flush(r, rrdr_value_options_ptr);
- r->v[rrdr_line * r->d + dim_id_in_rrdr] = value;
+ if(likely(points_added || dim_id_in_rrdr)) {
+ // find the min/max across all dimensions
- if(likely(points_added || dim_id_in_rrdr)) {
- // find the min/max across all dimensions
+ if(unlikely(value < min)) min = value;
+ if(unlikely(value > max)) max = value;
- if(unlikely(value < min)) min = value;
- if(unlikely(value > max)) max = value;
+ }
+ else {
+ // runs only when dim_id_in_rrdr == 0 && points_added == 0
+ // so, on the first point added for the query.
+ min = max = value;
+ }
+ points_added++;
+ values_in_group = 0;
+ group_value_flags = RRDR_VALUE_NOTHING;
+ values_in_group_non_zero = 0;
}
- else {
- // runs only when dim_id_in_rrdr == 0 && points_added == 0
- // so, on the first point added for the query.
- min = max = value;
- }
-
- points_added++;
- values_in_group = 0;
- group_value_flags = RRDR_VALUE_NOTHING;
- values_in_group_non_zero = 0;
}
+ now = db_now;
}
- if (likely(initialized_query))
- rd->state->query_ops.finalize(&handle);
+ rd->state->query_ops.finalize(&handle);
r->internal.db_points_read += db_points_read;
r->internal.result_points_generated += points_added;
@@ -506,13 +644,13 @@ static inline void do_dimension(
r->min = min;
r->max = max;
r->before = max_date;
- r->after = min_date - (r->group - 1) * r->st->update_every;
+ r->after = min_date - (r->group - 1) * dt;
rrdr_done(r, rrdr_line);
- #ifdef NETDATA_INTERNAL_CHECKS
+#ifdef NETDATA_INTERNAL_CHECKS
if(unlikely(r->rows != points_added))
error("INTERNAL ERROR: %s.%s added %zu rows, but RRDR says I added %zu.", r->st->name, rd->name, (size_t)points_added, (size_t)r->rows);
- #endif
+#endif
}
// ----------------------------------------------------------------------------
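
The two functions above replace the single do_dimension(): do_dimension_variablestep() drains points from the store at whatever timestamps next_metric() reports, filling grid slots and emitting NAN across gaps, while do_dimension_fixedstep() keeps the old assumption of one sample per native interval and flushes one output value per group_size samples. A self-contained sketch of the two fill strategies over a plain in-memory store (the names here are hypothetical, not the netdata query_ops API):

    /* Sketch: fixed- vs variable-step fill over an in-memory store.
     * Gaps become NAN, matching what the patch does with SN_EMPTY_SLOT
     * and grouping_add(r, NAN). */
    #include <math.h>

    typedef struct { long t; double v; } sample;

    static void fill_variablestep(const sample *db, int n,
                                  long after, long dt, double *out, int points) {
        int i = 0;
        for (int p = 0; p < points; p++) {
            long now = after + (long)p * dt;
            double acc = 0.0; int got = 0;
            while (i < n && db[i].t <= now) {   /* drain samples up to this slot */
                acc += db[i].v; got++; i++;
            }
            out[p] = got ? acc / got : NAN;     /* NAN marks a gap in the store */
        }
    }

    static void fill_fixedstep(const double *db, int n, long group,
                               double *out, int points) {
        for (int p = 0; p < points; p++) {
            double acc = 0.0; int got = 0;
            for (long g = 0; g < group; g++) {  /* exactly `group` samples per point */
                long idx = (long)p * group + g;
                if (idx < n) { acc += db[idx]; got++; }
            }
            out[p] = got ? acc / got : NAN;
        }
    }
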
@@ -589,22 +727,18 @@ static void rrd2rrdr_log_request_response_metdata(RRDR *r
}
#endif // NETDATA_INTERNAL_CHECKS
-RRDR *rrd2rrdr(
- RRDSET *st
- , long points_requested
- , long long after_requested
- , long long before_requested
- , RRDR_GROUPING group_method
- , long resampling_time_requested
- , RRDR_OPTIONS options
- , const char *dimensions
+// Returns 1 if an absolute period was requested or 0 if it was a relative period
+static int rrdr_convert_before_after_to_absolute(
+ long long *after_requestedp
+ , long long *before_requestedp
+ , time_t first_entry_t
+ , time_t last_entry_t
) {
- int aligned = !(options & RRDR_OPTION_NOT_ALIGNED);
-
int absolute_period_requested = -1;
+ long long after_requested, before_requested;
- time_t first_entry_t = rrdset_first_entry_t(st);
- time_t last_entry_t = rrdset_last_entry_t(st);
+ before_requested = *before_requestedp;
+ after_requested = *after_requestedp;
if(before_requested == 0 && after_requested == 0) {
    // dump all the data
@@ -614,25 +748,15 @@ RRDR *rrd2rrdr(
}
// allow relative for before (smaller than API_RELATIVE_TIME_MAX)
- if(((before_requested < 0)?-before_requested:before_requested) <= API_RELATIVE_TIME_MAX) {
- if(abs(before_requested) % st->update_every) {
- // make sure it is multiple of st->update_every
- if(before_requested < 0) before_requested = before_requested - st->update_every - before_requested % st->update_every;
- else before_requested = before_requested + st->update_every - before_requested % st->update_every;
- }
+ if(abs(before_requested) <= API_RELATIVE_TIME_MAX) {
if(before_requested > 0) before_requested = first_entry_t + before_requested;
- else before_requested = last_entry_t + before_requested;
+ else before_requested = last_entry_t + before_requested; //last_entry_t is not really now_t
+ //TODO: fix before_requested to be relative to now_t
absolute_period_requested = 0;
}
// allow relative for after (smaller than API_RELATIVE_TIME_MAX)
- if(((after_requested < 0)?-after_requested:after_requested) <= API_RELATIVE_TIME_MAX) {
- if(after_requested == 0) after_requested = -st->update_every;
- if(abs(after_requested) % st->update_every) {
- // make sure it is multiple of st->update_every
- if(after_requested < 0) after_requested = after_requested - st->update_every - after_requested % st->update_every;
- else after_requested = after_requested + st->update_every - after_requested % st->update_every;
- }
+ if(abs(after_requested) <= API_RELATIVE_TIME_MAX) {
after_requested = before_requested + after_requested;
absolute_period_requested = 0;
}
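
The simplification above defers the update_every rounding to the fixed-step path; here, any magnitude at or below API_RELATIVE_TIME_MAX is treated as an offset (positive from first_entry_t, negative from last_entry_t, which per the TODO is not yet true wall-clock now). A worked sketch with made-up timestamps (RELATIVE_MAX stands in for API_RELATIVE_TIME_MAX):

    /* Sketch: resolving a relative window. With first_entry_t=1000,
     * last_entry_t=2000 and a request of before=0, after=-60, the
     * window resolves to [1940, 2000]. */
    #include <stdlib.h>
    #define RELATIVE_MAX 10000

    static void to_absolute(long long *after, long long *before,
                            long long first_entry_t, long long last_entry_t) {
        if (llabs(*before) <= RELATIVE_MAX)
            *before = (*before > 0) ? first_entry_t + *before  /* offset from db start */
                                    : last_entry_t + *before;  /* 2000 + 0 = 2000 */
        if (llabs(*after) <= RELATIVE_MAX)
            *after = *before + *after;                         /* 2000 - 60 = 1940 */
    }
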
@@ -654,9 +778,53 @@ RRDR *rrd2rrdr(
after_requested = tmp;
}
+ *before_requestedp = before_requested;
+ *after_requestedp = after_requested;
+
+ return absolute_period_requested;
+}
+
+static RRDR *rrd2rrdr_fixedstep(
+ RRDSET *st
+ , long points_requested
+ , long long after_requested
+ , long long before_requested
+ , RRDR_GROUPING group_method
+ , long resampling_time_requested
+ , RRDR_OPTIONS options
+ , const char *dimensions
+ , int update_every
+ , time_t first_entry_t
+ , time_t last_entry_t
+ , int absolute_period_requested
+) {
+ int aligned = !(options & RRDR_OPTION_NOT_ALIGNED);
+
+ if(!absolute_period_requested) {
+ if(before_requested % update_every) {
+ // make sure it is multiple of update_every
+ if(before_requested > 0)
+ before_requested = before_requested - update_every + before_requested % update_every;
+ #ifdef NETDATA_INTERNAL_CHECKS
+ else
+ error("INTERNAL ERROR: rrd2rrdr() on %s, negative or zero before_requested", st->name);
+ #endif
+ }
+ if(after_requested % update_every) {
+ // make sure it is multiple of update_every
+ if(after_requested < 0)
+ after_requested = after_requested - update_every + after_requested % update_every;
+ #ifdef NETDATA_INTERNAL_CHECKS
+ else
+ error("INTERNAL ERROR: rrd2rrdr() on %s, negative or zero after_requested", st->name);
+ #endif
+ }
+ if(after_requested == before_requested) after_requested -= update_every;
+ }
+
// the duration of the chart
time_t duration = before_requested - after_requested;
- long available_points = duration / st->update_every;
+ long available_points = duration / update_every;
if(duration <= 0 || available_points <= 0)
return rrdr_create(st, 1);
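
The rounding moved here from the conversion helper: for relative requests, both window ends are floored to a multiple of update_every, and the point budget follows directly as duration / update_every (a 60-second window at 2-second resolution offers 30 points). A small sketch of the flooring used above:

    /* Sketch: flooring a positive timestamp to a multiple of the collection
     * interval, as the fixed-step entry point does for relative requests.
     * snap_down(61, 2) == 60; already-aligned values pass through unchanged. */
    static long long snap_down(long long t, int update_every) {
        if (t % update_every)
            t = t - update_every + t % update_every;  /* assumes t > 0 */
        return t;
    }
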
@@ -674,7 +842,7 @@ RRDR *rrd2rrdr(
// resampling_time_requested enforces a certain grouping multiple
calculated_number resampling_divisor = 1.0;
long resampling_group = 1;
- if(unlikely(resampling_time_requested > st->update_every)) {
+ if(unlikely(resampling_time_requested > update_every)) {
if (unlikely(resampling_time_requested > duration)) {
// group_time is above the available duration
@@ -684,7 +852,7 @@ RRDR *rrd2rrdr(
after_requested = before_requested - resampling_time_requested;
duration = before_requested - after_requested;
- available_points = duration / st->update_every;
+ available_points = duration / update_every;
group = available_points / points_requested;
}
@@ -696,16 +864,16 @@ RRDR *rrd2rrdr(
if(delta > resampling_time_requested / 10) {
after_requested -= resampling_time_requested - delta;
duration = before_requested - after_requested;
- available_points = duration / st->update_every;
+ available_points = duration / update_every;
group = available_points / points_requested;
}
}
// the points we should group to satisfy gtime
- resampling_group = resampling_time_requested / st->update_every;
- if(unlikely(resampling_time_requested % st->update_every)) {
+ resampling_group = resampling_time_requested / update_every;
+ if(unlikely(resampling_time_requested % update_every)) {
#ifdef NETDATA_INTERNAL_CHECKS
- info("INTERNAL CHECK: %s: requested gtime %ld secs, is not a multiple of the chart's data collection frequency %d secs", st->id, resampling_time_requested, st->update_every);
+ info("INTERNAL CHECK: %s: requested gtime %ld secs, is not a multiple of the chart's data collection frequency %d secs", st->id, resampling_time_requested, update_every);
#endif
resampling_group++;
@@ -716,7 +884,7 @@ RRDR *rrd2rrdr(
if(unlikely(group % resampling_group)) group += resampling_group - (group % resampling_group); // make sure group is multiple of resampling_group
//resampling_divisor = group / resampling_group;
- resampling_divisor = (calculated_number)(group * st->update_every) / (calculated_number)resampling_time_requested;
+ resampling_divisor = (calculated_number)(group * update_every) / (calculated_number)resampling_time_requested;
}
// now that we have group,
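
Resampling ("gtime") makes each output point cover resampling_time_requested seconds: resampling_group is that time divided by update_every (rounded up when it does not divide evenly), group is padded to a whole multiple of it, and resampling_divisor rescales the grouped value back to the requested time unit. A sketch of the factors as computed above:

    /* Sketch: the resampling factors. With update_every=1, resampling_time=60
     * and group padded to 120, the divisor comes out 2.0, so a value grouped
     * over 120 seconds is scaled back to per-60-seconds. */
    typedef struct { long resampling_group; double resampling_divisor; } resample_sketch;

    static resample_sketch resample_factors(long group, int update_every,
                                            long resampling_time) {
        resample_sketch s;
        s.resampling_group = resampling_time / update_every;
        if (resampling_time % update_every)
            s.resampling_group++;                        /* round up on remainder */
        if (group % s.resampling_group)                  /* pad to a whole multiple */
            group += s.resampling_group - (group % s.resampling_group);
        s.resampling_divisor = (double)(group * update_every)
                               / (double)resampling_time;
        return s;
    }
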
@@ -724,8 +892,8 @@ RRDR *rrd2rrdr(
if(aligned) {
    // alignment has been requested, so align the values
- before_requested -= (before_requested % group);
- after_requested -= (after_requested % group);
+ before_requested -= before_requested % (group * update_every);
+ after_requested -= after_requested % (group * update_every);
}
// we align the request on requested_before
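
This one-line change is the substance of the alignment fix: the window was previously reduced modulo group, a point count, and is now reduced modulo group * update_every, a number of seconds, so aligned queries land on group boundaries in time even when update_every is not 1. A sketch of the difference:

    /* Sketch: why the modulus changed from points to seconds. With group=5
     * and update_every=2, before=1007 used to snap to 1005 (not on a
     * 10-second group boundary); it now snaps to 1000. */
    static long long align_old(long long t, long group) {
        return t - t % group;                     /* pre-patch: modulo a point count */
    }
    static long long align_new(long long t, long group, int update_every) {
        return t - t % (group * update_every);    /* post-patch: modulo seconds */
    }
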
@@ -735,28 +903,28 @@ RRDR *rrd2rrdr(
error("INTERNAL ERROR: rrd2rrdr() on %s, before_wanted is after db max", st->name);
#endif
- before_wanted = last_entry_t - (last_entry_t % ( ((aligned)?group:1) * st->update_every ));
+ before_wanted = last_entry_t - (last_entry_t % ( ((aligned)?group:1) * update_every ));
}
//size_t before_slot = rrdset_time2slot(st, before_wanted);
// we need to estimate the number of points, for having
// an integer number of values per point
- long points_wanted = (before_wanted - after_requested) / (st->update_every * group);
+ long points_wanted = (before_wanted - after_requested) / (update_every * group);
- time_t after_wanted = before_wanted - (points_wanted * group * st->update_every) + st->update_every;
+ time_t after_wanted = before_wanted - (points_wanted * group * update_every) + update_every;
if(unlikely(after_wanted < first_entry_t)) {
// hm... we go to the past, calculate again points_wanted using all the db from before_wanted to the beginning
points_wanted = (before_wanted - first_entry_t) / group;
// recalculate after wanted with the new number of points
- after_wanted = before_wanted - (points_wanted * group * st->update_every) + st->update_every;
+ after_wanted = before_wanted - (points_wanted * group * update_every) + update_every;
if(unlikely(after_wanted < first_entry_t)) {
#ifdef NETDATA_INTERNAL_CHECKS
error("INTERNAL ERROR: rrd2rrdr() on %s, after_wanted is before db min", st->name);
#endif
- after_wanted = first_entry_t - (first_entry_t % ( ((aligned)?group:1) * st->update_every )) + ( ((aligned)?group:1) * st->update_every );
+ after_wanted = first_entry_t - (first_entry_t % ( ((aligned)?group:1) * update_every )) + ( ((aligned)?group:1) * update_every );
}
}
//size_t after_slot = rrdset_time2slot(st, after_wanted);
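
points_wanted and after_wanted are chosen so the window holds an integer number of groups, now expressed in the passed-in update_every: points_wanted = (before_wanted - after_requested) / (update_every * group), then after_wanted walks back by that many whole groups plus one step. A sketch with made-up numbers:

    /* Sketch: sizing the window to an integer number of groups. With
     * before_wanted=1000, after_requested=940, update_every=2, group=5:
     * points_wanted = 60/10 = 6 and after_wanted = 1000 - 60 + 2 = 942. */
    static void size_window(long long before_wanted, long long after_requested,
                            int update_every, long group,
                            long *points_wanted, long long *after_wanted) {
        *points_wanted = (long)((before_wanted - after_requested)
                                / (update_every * group));
        *after_wanted  = before_wanted
                         - (*points_wanted * group * update_every)
                         + update_every;
    }
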
@@ -772,7 +940,7 @@ RRDR *rrd2rrdr(
}
// recalculate points_wanted using the final time-frame
- points_wanted = (before_wanted - after_wanted) / st->update_every / group + 1;
+ points_wanted = (before_wanted - after_wanted) / update_every / group + 1;
if(unlikely(points_wanted < 0)) {
#ifdef NETDATA_INTERNAL_CHECKS
error("INTERNAL ERROR: rrd2rrdr() on %s, points_wanted is %ld", st->name, points_wanted);
@@ -803,8 +971,8 @@ RRDR *rrd2rrdr(
error("INTERNAL CHECK: after_slot is invalid %zu, expected 0 to %ld", after_slot, st->entries - 1);
*/
- if(points_wanted > (before_wanted - after_wanted) / group / st->update_every + 1)
- error("INTERNAL CHECK: points_wanted %ld is more than points %ld", points_wanted, (before_wanted - after_wanted) / group / st->update_every + 1);
+ if(points_wanted > (before_wanted - after_wanted) / group / update_every + 1)
+ error("INTERNAL CHECK: points_wanted %ld is more than points %ld", points_wanted, (before_wanted - after_wanted) / group / update_every + 1);
if(group < resampling_group)
error("INTERNAL CHECK: group %ld is less than the desired group points %ld", group, resampling_group);
@@ -844,7 +1012,7 @@ RRDR *rrd2rrdr(
// initialize RRDR
r->group = group;
- r->update_every = (int)group * st->update_every;
+ r->update_every = (int)group * update_every;
r->before = before_wanted;
r->after = after_wanted;
r->internal.points_wanted = points_wanted;
@@ -913,7 +1081,7 @@ RRDR *rrd2rrdr(
// reset the grouping for the new dimension
r->internal.grouping_reset(r);
- do_dimension(
+ do_dimension_fixedstep(
r
, points_wanted
, rd
@@ -961,30 +1129,426 @@ RRDR *rrd2rrdr(
}
#ifdef NETDATA_INTERNAL_CHECKS
+ if (dimensions_used) {
+ if(r->internal.log)
+ rrd2rrdr_log_request_response_metdata(r, group_method, aligned, group, resampling_time_requested, resampling_group, after_wanted, after_requested, before_wanted, before_requested, points_requested, points_wanted, /*after_slot, before_slot,*/ r->internal.log);
+
+ if(r->rows != points_wanted)
+ rrd2rrdr_log_request_response_metdata(r, group_method, aligned, group, resampling_time_requested, resampling_group, after_wanted, after_requested, before_wanted, before_requested, points_requested, points_wanted, /*after_slot, before_slot,*/ "got 'points' is not wanted 'points'");
+
+ if(aligned && (r->before % group) != 0)
+ rrd2rrdr_log_request_response_metdata(r, group_method, aligned, group, resampling_time_requested, resampling_group, after_wanted, after_requested, before_wanted, before_requested, points_requested, points_wanted, /*after_slot, before_slot,*/ "'before' is not aligned but alignment is required");
+
+ // 'after' should not be aligned, since we start inside the first group
+ //if(aligned && (r->after % group) != 0)
+ // rrd2rrdr_log_request_response_metdata(r, group_method, aligned, group, resampling_time_requested, resampling_group, after_wanted, after_requested, before_wanted, before_requested, points_requested, points_wanted, after_slot, before_slot, "'after' is not aligned but alignment is required");
+
+ if(r->before != before_requested)
+ rrd2rrdr_log_request_response_metdata(r, group_method, aligned, group, resampling_time_requested, resampling_group, after_wanted, after_requested, before_wanted, before_requested, points_requested, points_wanted, /*after_slot, before_slot,*/ "chart is not aligned to requested 'before'");
+
+ if(r->before != before_wanted)
+ rrd2rrdr_log_request_response_metdata(r, group_method, aligned, group, resampling_time_requested, resampling_group, after_wanted, after_requested, before_wanted, before_requested, points_requested, points_wanted, /*after_slot, before_slot,*/ "got 'before' is not wanted 'before'");
+
+ // reported 'after' varies, depending on group
+ if(r->after != after_wanted)
+ rrd2rrdr_log_request_response_metdata(r, group_method, aligned, group, resampling_time_requested, resampling_group, after_wanted, after_requested, before_wanted, before_requested, points_requested, points_wanted, /*after_slot, before_slot,*/ "got 'after' is not wanted 'after'");
+ }
+ #endif
+
+ // free all resources used by the grouping method
+ r->internal.grouping_free(r);
+
+ // when all the dimensions are zero, we should return all of them
+ if(unlikely(options & RRDR_OPTION_NONZERO && !dimensions_nonzero)) {
+ // all the dimensions are zero
+ // mark them as NONZERO to send them all
+ for(rd = st->dimensions, c = 0 ; rd && c < dimensions_count ; rd = rd->next, c++) {
+ if(unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) continue;
+ r->od[c] |= RRDR_DIMENSION_NONZERO;
+ }
+ }
+
+ rrdr_query_completed(r->internal.db_points_read, r->internal.result_points_generated);
+ return r;
+}
+
+#ifdef ENABLE_DBENGINE
+static RRDR *rrd2rrdr_variablestep(
+ RRDSET *st
+ , long points_requested
+ , long long after_requested
+ , long long before_requested
+ , RRDR_GROUPING group_method
+ , long resampling_time_requested
+ , RRDR_OPTIONS options
+ , const char *dimensions
+ , int update_every
+ , time_t first_entry_t
+ , time_t last_entry_t
+ , int absolute_period_requested
+ , struct rrdeng_region_info *region_info_array
+) {
+ int aligned = !(options & RRDR_OPTION_NOT_ALIGNED);
+
+ if(!absolute_period_requested) {
+ if(before_requested % update_every) {
+ // make sure it is multiple of update_every
+ if(before_requested > 0)
+ before_requested = before_requested - before_requested % update_every;
+ #ifdef NETDATA_INTERNAL_CHECKS
+ else
+ error("INTERNAL ERROR: rrd2rrdr() on %s, negative or zero before_requested", st->name);
+ #endif
+ }
+ if(after_requested % update_every) {
+ // make sure it is multiple of update_every
+ if(after_requested < 0)
+ after_requested = after_requested - after_requested % update_every;
+ #ifdef NETDATA_INTERNAL_CHECKS
+ else
+ error("INTERNAL ERROR: rrd2rrdr() on %s, negative or zero after_requested", st->name);
+ #endif
+ }
+ if(after_requested == before_requested) after_requested -= update_every;
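+ // note: with equal edges the window would be empty, so widen it by one
+ // update_every to keep at least one collected point in the range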
+ }
+
+ // the duration of the chart
+ time_t duration = before_requested - after_requested;
+ long available_points = duration / update_every;
+
+ if(duration <= 0 || available_points <= 0) {
+ freez(region_info_array);
+ return rrdr_create(st, 1);
+ }
+
+ // check the number of wanted points in the result
+ if(unlikely(points_requested < 0)) points_requested = -points_requested;
+ if(unlikely(points_requested > available_points)) points_requested = available_points;
+ if(unlikely(points_requested == 0)) points_requested = available_points;
+
+ // calculate the desired grouping of source data points
+ long group = available_points / points_requested;
+ if(unlikely(group <= 0)) group = 1;
+ if(unlikely(available_points % points_requested > points_requested / 2)) group++; // rounding to the closest integer
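+ // worked example (illustrative numbers): duration=300s and update_every=1s
+ // give available_points=300; points_requested=80 yields group=3 with a
+ // remainder of 60 > 40, so group is rounded up to 4 collected values per
+ // returned point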
+
+ // resampling_time_requested enforces a certain grouping multiple
+ calculated_number resampling_divisor = 1.0;
+ long resampling_group = 1;
+ if(unlikely(resampling_time_requested > update_every)) {
+ if (unlikely(resampling_time_requested > duration)) {
+ // group_time is above the available duration
+
+ #ifdef NETDATA_INTERNAL_CHECKS
+ info("INTERNAL CHECK: %s: requested gtime %ld secs is greater than the desired duration %ld secs", st->id, resampling_time_requested, duration);
+ #endif
+
+ after_requested = before_requested - resampling_time_requested;
+ duration = before_requested - after_requested;
+ available_points = duration / update_every;
+ group = available_points / points_requested;
+ }
+
+ // if the duration is not aligned to resampling time
+ // extend the duration to the past, to avoid a gap at the chart
+ // only when the missing duration is above 1/10th of a point
+ if(duration % resampling_time_requested) {
+ time_t delta = duration % resampling_time_requested;
+ if(delta > resampling_time_requested / 10) {
+ after_requested -= resampling_time_requested - delta;
+ duration = before_requested - after_requested;
+ available_points = duration / update_every;
+ group = available_points / points_requested;
+ }
+ }
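+ // e.g. (illustrative numbers): gtime=10s over a 33s window leaves
+ // delta=3s, which is above a tenth of gtime, so the window is extended
+ // 7s into the past to an even 40s and the first point is not partial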
+
+ // the points we should group to satisfy gtime
+ resampling_group = resampling_time_requested / update_every;
+ if(unlikely(resampling_time_requested % update_every)) {
+ #ifdef NETDATA_INTERNAL_CHECKS
+ info("INTERNAL CHECK: %s: requested gtime %ld secs is not a multiple of the chart's data collection frequency %d secs", st->id, resampling_time_requested, update_every);
+ #endif
+
+ resampling_group++;
+ }
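+ // e.g. (illustrative numbers): gtime=10s with update_every=3s gives
+ // resampling_group=3 plus a remainder, so it is bumped to 4 points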
+
+ // adapt group according to resampling_group
+ if(unlikely(group < resampling_group)) group = resampling_group; // do not allow grouping below the desired one
+ if(unlikely(group % resampling_group)) group += resampling_group - (group % resampling_group); // make sure group is multiple of resampling_group
+
+ //resampling_divisor = group / resampling_group;
+ resampling_divisor = (calculated_number)(group * update_every) / (calculated_number)resampling_time_requested;
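+ // continuing the illustration: group=4 and update_every=3s give a
+ // divisor of (4*3)/10 = 1.2, the factor sum-based grouping methods
+ // divide by to normalize their result to the requested gtime window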
+ }
+
+ // now that we have group,
+ // align the requested timeframe to fit it.
+
+ if(aligned) {
+ // alignment has been requested, so align the values
+ before_requested -= before_requested % (group * update_every);
+ after_requested -= after_requested % (group * update_every);
+ }
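+ // e.g. (illustrative numbers): group=4 and update_every=3s make each
+ // returned point span 12s, so before_requested=1000 snaps down to 996,
+ // the nearest multiple of 12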
+
+ // we align the request on requested_before
+ time_t before_wanted = before_requested;
+ if(likely(before_wanted > last_entry_t)) {
+ #ifdef NETDATA_INTERNAL_CHECKS
+ error("INTERNAL ERROR: rrd2rrdr() on %s, before_wanted is after db max", st->name);
+ #endif
+
+ before_wanted = last_entry_t - (last_entry_t % ( ((aligned)?group:1) * update_every ));
+ }
+ //size_t before_slot = rrdset_time2slot(st, before_wanted);
+
+ // we need to estimate the number of points, for having
+ // an integer number of values per point
+ long points_wanted = (before_wanted - after_requested) / (update_every * group);
+
+ time_t after_wanted = before_wanted - (points_wanted * group * update_every) + update_every;
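+ // the "+ update_every" keeps the edges inclusive: the window
+ // [after_wanted, before_wanted] then holds exactly
+ // points_wanted * group collected values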
+ if(unlikely(after_wanted < first_entry_t)) {
+ // we went past the start of the db; recalculate points_wanted using the whole db from before_wanted back to the beginning
+ points_wanted = (before_wanted - first_entry_t) / group;
+
+ // recalculate after wanted with the new number of points
+ after_wanted = before_wanted - (points_wanted * group * update_every) + update_every;
+
+ if(unlikely(after_wanted < first_entry_t)) {
+ #ifdef NETDATA_INTERNAL_CHECKS
+ error("INTERNAL ERROR: rrd2rrdr() on %s, after_wanted is before db min", st->name);
+ #endif
+
+ after_wanted = first_entry_t - (first_entry_t % ( ((aligned)?group:1) * update_every )) + ( ((aligned)?group:1) * update_every );
+ }
+ }
+ //size_t after_slot = rrdset_time2slot(st, after_wanted);
+
+ // check if they are reversed
+ if(unlikely(after_wanted > before_wanted)) {
+ #ifdef NETDATA_INTERNAL_CHECKS
+ error("INTERNAL ERROR: rrd2rrdr() on %s, reversed wanted after/before", st->name);
+ #endif
+ time_t tmp = before_wanted;
+ before_wanted = after_wanted;
+ after_wanted = tmp;
+ }
- if(r->internal.log)
- rrd2rrdr_log_request_response_metdata(r, group_method, aligned, group, resampling_time_requested, resampling_group, after_wanted, after_requested, before_wanted, before_requested, points_requested, points_wanted, /*after_slot, before_slot,*/ r->internal.log);
+ // recalculate points_wanted using the final time-frame
+ points_wanted = (before_wanted - after_wanted) / update_every / group + 1;
+ if(unlikely(points_wanted < 0)) {
+ #ifdef NETDATA_INTERNAL_CHECKS
+ error("INTERNAL ERROR: rrd2rrdr() on %s, points_wanted is %ld", st->name, points_wanted);
+ #endif
+ points_wanted = 0;
+ }
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ duration = before_wanted - after_wanted;
- if(r->rows != points_wanted)
- rrd2rrdr_log_request_response_metdata(r, group_method, aligned, group, resampling_time_requested, resampling_group, after_wanted, after_requested, before_wanted, before_requested, points_requested, points_wanted, /*after_slot, before_slot,*/ "got 'points' is not wanted 'points'");
+ if(after_wanted < first_entry_t)
+ error("INTERNAL CHECK: after_wanted %u is too small, minimum %u", (uint32_t)after_wanted, (uint32_t)first_entry_t);
- if(aligned && (r->before % group) != 0)
- rrd2rrdr_log_request_response_metdata(r, group_method, aligned, group, resampling_time_requested, resampling_group, after_wanted, after_requested, before_wanted, before_requested, points_requested, points_wanted, /*after_slot, before_slot,*/ "'before' is not aligned but alignment is required");
+ if(after_wanted > last_entry_t)
+ error("INTERNAL CHECK: after_wanted %u is too big, maximum %u", (uint32_t)after_wanted, (uint32_t)last_entry_t);
- // 'after' should not be aligned, since we start inside the first group
- //if(aligned && (r->after % group) != 0)
- // rrd2rrdr_log_request_response_metdata(r, group_method, aligned, group, resampling_time_requested, resampling_group, after_wanted, after_requested, before_wanted, before_requested, points_requested, points_wanted, after_slot, before_slot, "'after' is not aligned but alignment is required");
+ if(before_wanted < first_entry_t)
+ error("INTERNAL CHECK: before_wanted %u is too small, minimum %u", (uint32_t)before_wanted, (uint32_t)first_entry_t);
- if(r->before != before_requested)
- rrd2rrdr_log_request_response_metdata(r, group_method, aligned, group, resampling_time_requested, resampling_group, after_wanted, after_requested, before_wanted, before_requested, points_requested, points_wanted, /*after_slot, before_slot,*/ "chart is not aligned to requested 'before'");
+ if(before_wanted > last_entry_t)
+ error("INTERNAL CHECK: before_wanted %u is too big, maximum %u", (uint32_t)before_wanted, (uint32_t)last_entry_t);
- if(r->before != before_wanted)
- rrd2rrdr_log_request_response_metdata(r, group_method, aligned, group, resampling_time_requested, resampling_group, after_wanted, after_requested, before_wanted, before_requested, points_requested, points_wanted, /*after_slot, before_slot,*/ "got 'before' is not wanted 'before'");
+/*
+ if(before_slot >= (size_t)st->entries)
+ error("INTERNAL CHECK: before_slot is invalid %zu, expected 0 to %ld", before_slot, st->entries - 1);
- // reported 'after' varies, depending on group
- if(r->after != after_wanted)
- rrd2rrdr_log_request_response_metdata(r, group_method, aligned, group, resampling_time_requested, resampling_group, after_wanted, after_requested, before_wanted, before_requested, points_requested, points_wanted, /*after_slot, before_slot,*/ "got 'after' is not wanted 'after'");
+ if(after_slot >= (size_t)st->entries)
+ error("INTERNAL CHECK: after_slot is invalid %zu, expected 0 to %ld", after_slot, st->entries - 1);
+*/
+ if(points_wanted > (before_wanted - after_wanted) / group / update_every + 1)
+ error("INTERNAL CHECK: points_wanted %ld is more than points %ld", points_wanted, (before_wanted - after_wanted) / group / update_every + 1);
+
+ if(group < resampling_group)
+ error("INTERNAL CHECK: group %ld is less than the desired group points %ld", group, resampling_group);
+
+ if(group > resampling_group && group % resampling_group)
+ error("INTERNAL CHECK: group %ld is not a multiple of the desired group points %ld", group, resampling_group);
+#endif
+
+ // -------------------------------------------------------------------------
+ // initialize our result set
+ // this also locks the chart for us
+
+ RRDR *r = rrdr_create(st, points_wanted);
+ if(unlikely(!r)) {
+ #ifdef NETDATA_INTERNAL_CHECKS
+ error("INTERNAL CHECK: Cannot create RRDR for %s, after=%u, before=%u, duration=%u, points=%ld", st->id, (uint32_t)after_wanted, (uint32_t)before_wanted, (uint32_t)duration, points_wanted);
+ #endif
+ freez(region_info_array);
+ return NULL;
+ }
+
+ if(unlikely(!r->d || !points_wanted)) {
+ #ifdef NETDATA_INTERNAL_CHECKS
+ error("INTERNAL CHECK: Returning empty RRDR (no dimensions in RRDSET) for %s, after=%u, before=%u, duration=%zu, points=%ld", st->id, (uint32_t)after_wanted, (uint32_t)before_wanted, (size_t)duration, points_wanted);
+ #endif
+ freez(region_info_array);
+ return r;
+ }
+
+ r->result_options |= RRDR_RESULT_OPTION_VARIABLE_STEP;
+ if(unlikely(absolute_period_requested == 1))
+ r->result_options |= RRDR_RESULT_OPTION_ABSOLUTE;
+ else
+ r->result_options |= RRDR_RESULT_OPTION_RELATIVE;
+
+ // find how many dimensions we have
+ long dimensions_count = r->d;
+
+ // -------------------------------------------------------------------------
+ // initialize RRDR
+
+ r->group = group;
+ r->update_every = (int)group * update_every;
+ r->before = before_wanted;
+ r->after = after_wanted;
+ r->internal.points_wanted = points_wanted;
+ r->internal.resampling_group = resampling_group;
+ r->internal.resampling_divisor = resampling_divisor;
+
+
+ // -------------------------------------------------------------------------
+ // assign the processor functions
+
+ {
+ int i, found = 0;
+ for(i = 0; !found && api_v1_data_groups[i].name ;i++) {
+ if(api_v1_data_groups[i].value == group_method) {
+ r->internal.grouping_create= api_v1_data_groups[i].create;
+ r->internal.grouping_reset = api_v1_data_groups[i].reset;
+ r->internal.grouping_free = api_v1_data_groups[i].free;
+ r->internal.grouping_add = api_v1_data_groups[i].add;
+ r->internal.grouping_flush = api_v1_data_groups[i].flush;
+ found = 1;
+ }
+ }
+ if(!found) {
+ errno = 0;
+ #ifdef NETDATA_INTERNAL_CHECKS
+ error("INTERNAL ERROR: grouping method %u not found for chart '%s'. Using 'average'", (unsigned int)group_method, r->st->name);
+ #endif
+ r->internal.grouping_create= grouping_create_average;
+ r->internal.grouping_reset = grouping_reset_average;
+ r->internal.grouping_free = grouping_free_average;
+ r->internal.grouping_add = grouping_add_average;
+ r->internal.grouping_flush = grouping_flush_average;
+ }
+ }
+
+ // allocate any memory required by the grouping method
+ r->internal.grouping_data = r->internal.grouping_create(r);
+
+
+ // -------------------------------------------------------------------------
+ // disable the not-wanted dimensions
+
+ rrdset_check_rdlock(st);
+
+ if(dimensions)
+ rrdr_disable_not_selected_dimensions(r, options, dimensions);
+
+
+ // -------------------------------------------------------------------------
+ // do the work for each dimension
+
+ time_t max_after = 0, min_before = 0;
+ long max_rows = 0;
+
+ RRDDIM *rd;
+ long c, dimensions_used = 0, dimensions_nonzero = 0;
+ for(rd = st->dimensions, c = 0 ; rd && c < dimensions_count ; rd = rd->next, c++) {
+
+ // if we need a percentage, we need to calculate all dimensions
+ if(unlikely(!(options & RRDR_OPTION_PERCENTAGE) && (r->od[c] & RRDR_DIMENSION_HIDDEN))) {
+ if(unlikely(r->od[c] & RRDR_DIMENSION_SELECTED)) r->od[c] &= ~RRDR_DIMENSION_SELECTED;
+ continue;
+ }
+ r->od[c] |= RRDR_DIMENSION_SELECTED;
+
+ // reset the grouping for the new dimension
+ r->internal.grouping_reset(r);
+
+ do_dimension_variablestep(
+ r
+ , points_wanted
+ , rd
+ , c
+ , after_wanted
+ , before_wanted
+ );
+
+ if(r->od[c] & RRDR_DIMENSION_NONZERO)
+ dimensions_nonzero++;
+
+ // verify all dimensions are aligned
+ if(unlikely(!dimensions_used)) {
+ min_before = r->before;
+ max_after = r->after;
+ max_rows = r->rows;
+ }
+ else {
+ if(r->after != max_after) {
+ #ifdef NETDATA_INTERNAL_CHECKS
+ error("INTERNAL ERROR: 'after' mismatch between dimensions for chart '%s': max is %zu, dimension '%s' has %zu",
+ st->name, (size_t)max_after, rd->name, (size_t)r->after);
+ #endif
+ r->after = (r->after > max_after) ? r->after : max_after;
+ }
+
+ if(r->before != min_before) {
+ #ifdef NETDATA_INTERNAL_CHECKS
+ error("INTERNAL ERROR: 'before' mismatch between dimensions for chart '%s': min is %zu, dimension '%s' has %zu",
+ st->name, (size_t)min_before, rd->name, (size_t)r->before);
+ #endif
+ r->before = (r->before < min_before) ? r->before : min_before;
+ }
+
+ if(r->rows != max_rows) {
+ #ifdef NETDATA_INTERNAL_CHECKS
+ error("INTERNAL ERROR: 'rows' mismatch between dimensions for chart '%s': max is %zu, dimension '%s' has %zu",
+ st->name, (size_t)max_rows, rd->name, (size_t)r->rows);
+ #endif
+ r->rows = (r->rows > max_rows) ? r->rows : max_rows;
+ }
+ }
+
+ dimensions_used++;
+ }
+
+ #ifdef NETDATA_INTERNAL_CHECKS
+
+ if (dimensions_used) {
+ if(r->internal.log)
+ rrd2rrdr_log_request_response_metdata(r, group_method, aligned, group, resampling_time_requested, resampling_group, after_wanted, after_requested, before_wanted, before_requested, points_requested, points_wanted, /*after_slot, before_slot,*/ r->internal.log);
+
+ if(r->rows != points_wanted)
+ rrd2rrdr_log_request_response_metdata(r, group_method, aligned, group, resampling_time_requested, resampling_group, after_wanted, after_requested, before_wanted, before_requested, points_requested, points_wanted, /*after_slot, before_slot,*/ "got 'points' is not wanted 'points'");
+
+ if(aligned && (r->before % group) != 0)
+ rrd2rrdr_log_request_response_metdata(r, group_method, aligned, group, resampling_time_requested, resampling_group, after_wanted, after_requested, before_wanted, before_requested, points_requested, points_wanted, /*after_slot, before_slot,*/ "'before' is not aligned but alignment is required");
+
+ // 'after' should not be aligned, since we start inside the first group
+ //if(aligned && (r->after % group) != 0)
+ // rrd2rrdr_log_request_response_metdata(r, group_method, aligned, group, resampling_time_requested, resampling_group, after_wanted, after_requested, before_wanted, before_requested, points_requested, points_wanted, after_slot, before_slot, "'after' is not aligned but alignment is required");
+
+ if(r->before != before_requested)
+ rrd2rrdr_log_request_response_metdata(r, group_method, aligned, group, resampling_time_requested, resampling_group, after_wanted, after_requested, before_wanted, before_requested, points_requested, points_wanted, /*after_slot, before_slot,*/ "chart is not aligned to requested 'before'");
+
+ if(r->before != before_wanted)
+ rrd2rrdr_log_request_response_metdata(r, group_method, aligned, group, resampling_time_requested, resampling_group, after_wanted, after_requested, before_wanted, before_requested, points_requested, points_wanted, /*after_slot, before_slot,*/ "got 'before' is not wanted 'before'");
+
+ // reported 'after' varies, depending on group
+ if(r->after != after_wanted)
+ rrd2rrdr_log_request_response_metdata(r, group_method, aligned, group, resampling_time_requested, resampling_group, after_wanted, after_requested, before_wanted, before_requested, points_requested, points_wanted, /*after_slot, before_slot,*/ "got 'after' is not wanted 'after'");
+ }
#endif
// free all resources used by the grouping method
@@ -1001,5 +1565,57 @@ RRDR *rrd2rrdr(
}
rrdr_query_completed(r->internal.db_points_read, r->internal.result_points_generated);
+ freez(region_info_array);
return r;
}
+#endif //#ifdef ENABLE_DBENGINE
+
+RRDR *rrd2rrdr(
+ RRDSET *st
+ , long points_requested
+ , long long after_requested
+ , long long before_requested
+ , RRDR_GROUPING group_method
+ , long resampling_time_requested
+ , RRDR_OPTIONS options
+ , const char *dimensions
+) {
+ int rrd_update_every;
+ int absolute_period_requested;
+ time_t first_entry_t = rrdset_first_entry_t(st);
+ time_t last_entry_t = rrdset_last_entry_t(st);
+
+ absolute_period_requested = rrdr_convert_before_after_to_absolute(&after_requested, &before_requested,
+ first_entry_t, last_entry_t);
+
+#ifdef ENABLE_DBENGINE
+ if ((st->rrd_memory_mode == RRD_MEMORY_MODE_DBENGINE)) {
+ struct rrdeng_region_info *region_info_array;
+ unsigned regions, max_interval;
+
+ /* This call takes the chart read-lock */
+ regions = rrdeng_variable_step_boundaries(st, after_requested, before_requested,
+ &region_info_array, &max_interval);
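+ // a single region means the collection interval was constant across the
+ // whole window, so the cheaper fixed-step query can serve it; with more
+ // regions we fall back to the variable-step query, using the coarsest
+ // (max) interval observed as the effective update_every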
+ if (1 == regions) {
+ if (region_info_array)
+ rrd_update_every = region_info_array[0].update_every;
+ else
+ rrd_update_every = st->update_every;
+ if (region_info_array)
+ freez(region_info_array);
+ return rrd2rrdr_fixedstep(st, points_requested, after_requested, before_requested, group_method,
+ resampling_time_requested, options, dimensions, rrd_update_every,
+ first_entry_t, last_entry_t, absolute_period_requested);
+ } else {
+ rrd_update_every = (uint16_t)max_interval;
+ return rrd2rrdr_variablestep(st, points_requested, after_requested, before_requested, group_method,
+ resampling_time_requested, options, dimensions, rrd_update_every,
+ first_entry_t, last_entry_t, absolute_period_requested, region_info_array);
+ }
+ }
+#endif
+ rrd_update_every = st->update_every;
+ return rrd2rrdr_fixedstep(st, points_requested, after_requested, before_requested, group_method,
+ resampling_time_requested, options, dimensions,
+ rrd_update_every, first_entry_t, last_entry_t, absolute_period_requested);
+}
\ No newline at end of file
diff --git a/web/api/queries/rrdr.h b/web/api/queries/rrdr.h
index 4f635038..6a031adf 100644
--- a/web/api/queries/rrdr.h
+++ b/web/api/queries/rrdr.h
@@ -6,7 +6,7 @@
#include "libnetdata/libnetdata.h"
typedef enum rrdr_options {
- RRDR_OPTION_NONZERO = 0x00000001, // don't output dimensions will just zero values
+ RRDR_OPTION_NONZERO = 0x00000001, // don't output dimensions with just zero values
RRDR_OPTION_REVERSED = 0x00000002, // output the rows in reverse order (oldest to newest)
RRDR_OPTION_ABSOLUTE = 0x00000004, // values positive, for DATASOURCE_SSV before summing
RRDR_OPTION_MIN2MAX = 0x00000008, // when adding dimensions, use max - min, instead of sum
@@ -18,10 +18,11 @@ typedef enum rrdr_options {
RRDR_OPTION_JSON_WRAP = 0x00000200, // wrap the response in a JSON header with info about the result
RRDR_OPTION_LABEL_QUOTES = 0x00000400, // in CSV output, wrap header labels in double quotes
RRDR_OPTION_PERCENTAGE = 0x00000800, // give values as percentage of total
- RRDR_OPTION_NOT_ALIGNED = 0x00001000, // do not align charts for persistant timeframes
+ RRDR_OPTION_NOT_ALIGNED = 0x00001000, // do not align charts for persistent timeframes
RRDR_OPTION_DISPLAY_ABS = 0x00002000, // for badges, display the absolute value, but calculate colors with sign
RRDR_OPTION_MATCH_IDS = 0x00004000, // when filtering dimensions, match only IDs
RRDR_OPTION_MATCH_NAMES = 0x00008000, // when filtering dimensions, match only names
+ RRDR_OPTION_CUSTOM_VARS = 0x00010000, // when wrapping the response in JSON, return custom variables in the response
} RRDR_OPTIONS;
typedef enum rrdr_value_flag {
@@ -39,8 +40,11 @@ typedef enum rrdr_dimension_flag {
// RRDR result options
typedef enum rrdr_result_flags {
- RRDR_RESULT_OPTION_ABSOLUTE = 0x00000001, // the query uses absolute time-frames (can be cached by browsers and proxies)
- RRDR_RESULT_OPTION_RELATIVE = 0x00000002, // the query uses relative time-frames (should not to be cached by browsers and proxies)
+ RRDR_RESULT_OPTION_ABSOLUTE = 0x00000001, // the query uses absolute time-frames
+ // (can be cached by browsers and proxies)
+ RRDR_RESULT_OPTION_RELATIVE = 0x00000002, // the query uses relative time-frames
+ // (should not be cached by browsers and proxies)
+ RRDR_RESULT_OPTION_VARIABLE_STEP = 0x00000004, // the query uses variable-step time-frames
} RRDR_RESULT_FLAGS;
typedef struct rrdresult {
diff --git a/web/api/queries/ses/Makefile.in b/web/api/queries/ses/Makefile.in
new file mode 100644
index 00000000..7d425cdc
--- /dev/null
+++ b/web/api/queries/ses/Makefile.in
@@ -0,0 +1,514 @@
+# Makefile.in generated by automake 1.15.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2017 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+VPATH = @srcdir@
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = web/api/queries/ses
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(dist_noinst_DATA) \
+ $(am__DIST_COMMON)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_noinst_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+am__DIST_COMMON = $(srcdir)/Makefile.in
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CUPSCONFIG = @CUPSCONFIG@
+CXX = @CXX@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CXX_BINARY = @CXX_BINARY@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+JSON_CFLAGS = @JSON_CFLAGS@
+JSON_LIBS = @JSON_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
+LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
+LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
+LIBCURL_LIBS = @LIBCURL_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
+LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
+LIBSSL_LIBS = @LIBSSL_LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
+OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
+OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
+OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
+OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
+OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
+OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
+OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
+OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
+OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
+OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
+OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
+OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
+PROTOBUF_LIBS = @PROTOBUF_LIBS@
+PROTOC = @PROTOC@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
+XENLIGHT_LIBS = @XENLIGHT_LIBS@
+YAJL_CFLAGS = @YAJL_CFLAGS@
+YAJL_LIBS = @YAJL_LIBS@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
+
+all: all-am
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu web/api/queries/ses/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu web/api/queries/ses/Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(DATA)
+installdirs:
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags-am uninstall uninstall-am
+
+.PRECIOUS: Makefile
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/web/api/queries/ses/README.md b/web/api/queries/ses/README.md
index 16b153a4..5f21c849 100644
--- a/web/api/queries/ses/README.md
+++ b/web/api/queries/ses/README.md
@@ -43,14 +43,14 @@ It can also be used in APIs and badges as `&group=ses` in the URL.
Examining last 1 minute `successful` web server responses:
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=min&after=-60&label=min)
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=average&after=-60&label=average&value_color=yellow)
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=ses&after=-60&label=single+exponential+smoothing&value_color=orange)
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=max&after=-60&label=max)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=min&after=-60&label=min)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=average&after=-60&label=average&value_color=yellow)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=ses&after=-60&label=single+exponential+smoothing&value_color=orange)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=max&after=-60&label=max)
## References
-- [https://en.wikipedia.org/wiki/Moving_average#exponential-moving-average](https://en.wikipedia.org/wiki/Moving_average#exponential-moving-average)
-- [https://en.wikipedia.org/wiki/Exponential_smoothing](https://en.wikipedia.org/wiki/Exponential_smoothing).
+- <https://en.wikipedia.org/wiki/Moving_average#exponential-moving-average>
+- <https://en.wikipedia.org/wiki/Exponential_smoothing>.
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fapi%2Fqueries%2Fses%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fapi%2Fqueries%2Fses%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/web/api/queries/stddev/Makefile.in b/web/api/queries/stddev/Makefile.in
new file mode 100644
index 00000000..d62b90fd
--- /dev/null
+++ b/web/api/queries/stddev/Makefile.in
@@ -0,0 +1,514 @@
+# Makefile.in generated by automake 1.15.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2017 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+VPATH = @srcdir@
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = web/api/queries/stddev
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(dist_noinst_DATA) \
+ $(am__DIST_COMMON)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_noinst_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+am__DIST_COMMON = $(srcdir)/Makefile.in
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CUPSCONFIG = @CUPSCONFIG@
+CXX = @CXX@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CXX_BINARY = @CXX_BINARY@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+JSON_CFLAGS = @JSON_CFLAGS@
+JSON_LIBS = @JSON_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
+LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
+LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
+LIBCURL_LIBS = @LIBCURL_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
+LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
+LIBSSL_LIBS = @LIBSSL_LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
+OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
+OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
+OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
+OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
+OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
+OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
+OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
+OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
+OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
+OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
+OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
+OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
+PROTOBUF_LIBS = @PROTOBUF_LIBS@
+PROTOC = @PROTOC@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
+XENLIGHT_LIBS = @XENLIGHT_LIBS@
+YAJL_CFLAGS = @YAJL_CFLAGS@
+YAJL_LIBS = @YAJL_LIBS@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
+
+all: all-am
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu web/api/queries/stddev/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu web/api/queries/stddev/Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(DATA)
+installdirs:
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+	@echo "This command is intended for maintainers to use;"
+	@echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags-am uninstall uninstall-am
+
+.PRECIOUS: Makefile
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/web/api/queries/stddev/README.md b/web/api/queries/stddev/README.md
index 2ef5a2e3..00604154 100644
--- a/web/api/queries/stddev/README.md
+++ b/web/api/queries/stddev/README.md
@@ -1,4 +1,3 @@
-
# standard deviation (`stddev`)
The standard deviation is a measure that is used to quantify the amount of variation or dispersion
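
To ground the definition above, here is a minimal two-pass C sketch of the population standard deviation that the `stddev` grouping reports per output point. It is illustrative only; the query engine's actual implementation is not part of this diff.

```c
/* Illustrative two-pass population standard deviation over one group of
 * points, the statistic the stddev grouping method reports per output
 * point. This sketch only grounds the definition; it is not netdata's
 * implementation. Link with -lm. */
#include <math.h>
#include <stdio.h>

static double stddev_pop(const double *v, int n) {
    double mean = 0.0, var = 0.0;
    for (int i = 0; i < n; i++) mean += v[i];
    mean /= n;
    for (int i = 0; i < n; i++) var += (v[i] - mean) * (v[i] - mean);
    return sqrt(var / n);
}

int main(void) {
    double samples[] = { 2, 4, 4, 4, 5, 5, 7, 9 };  /* mean 5, stddev 2 */
    printf("stddev = %.1f\n", stddev_pop(samples, 8));
    return 0;
}
```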
@@ -28,14 +27,14 @@ It can also be used in APIs and badges as `&group=stddev` in the URL.
Examining the last minute of `successful` web server responses:
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&dimensions=success&group=min&after=-60&label=min)
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&dimensions=success&group=average&after=-60&label=average&value_color=yellow)
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&dimensions=success&group=stddev&after=-60&label=standard+deviation&value_color=orange)
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&dimensions=success&group=max&after=-60&label=max)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&dimensions=success&group=min&after=-60&label=min)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&dimensions=success&group=average&after=-60&label=average&value_color=yellow)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&dimensions=success&group=stddev&after=-60&label=standard+deviation&value_color=orange)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&dimensions=success&group=max&after=-60&label=max)
## References
-Check [https://en.wikipedia.org/wiki/Standard_deviation](https://en.wikipedia.org/wiki/Standard_deviation).
+Check <https://en.wikipedia.org/wiki/Standard_deviation>.
---
@@ -77,13 +76,13 @@ It can also be used in APIs and badges as `&group=cv` in the URL.
Examining the last minute of `successful` web server responses:
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&dimensions=success&group=min&after=-60&label=min)
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&dimensions=success&group=average&after=-60&label=average&value_color=yellow)
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&dimensions=success&group=cv&after=-60&label=coefficient+of+variation&value_color=orange&units=pcent)
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&dimensions=success&group=max&after=-60&label=max)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&dimensions=success&group=min&after=-60&label=min)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&dimensions=success&group=average&after=-60&label=average&value_color=yellow)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&dimensions=success&group=cv&after=-60&label=coefficient+of+variation&value_color=orange&units=pcent)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&dimensions=success&group=max&after=-60&label=max)
## References
-Check [https://en.wikipedia.org/wiki/Coefficient_of_variation](https://en.wikipedia.org/wiki/Coefficient_of_variation).
+Check <https://en.wikipedia.org/wiki/Coefficient_of_variation>.
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fapi%2Fqueries%2Fstddev%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fapi%2Fqueries%2Fstddev%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/web/api/queries/sum/Makefile.in b/web/api/queries/sum/Makefile.in
new file mode 100644
index 00000000..afe260a5
--- /dev/null
+++ b/web/api/queries/sum/Makefile.in
@@ -0,0 +1,514 @@
+# Makefile.in generated by automake 1.15.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2017 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+VPATH = @srcdir@
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = web/api/queries/sum
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(dist_noinst_DATA) \
+ $(am__DIST_COMMON)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_noinst_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+am__DIST_COMMON = $(srcdir)/Makefile.in
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CUPSCONFIG = @CUPSCONFIG@
+CXX = @CXX@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CXX_BINARY = @CXX_BINARY@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+JSON_CFLAGS = @JSON_CFLAGS@
+JSON_LIBS = @JSON_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
+LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
+LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
+LIBCURL_LIBS = @LIBCURL_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
+LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
+LIBSSL_LIBS = @LIBSSL_LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
+OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
+OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
+OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
+OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
+OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
+OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
+OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
+OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
+OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
+OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
+OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
+OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
+PROTOBUF_LIBS = @PROTOBUF_LIBS@
+PROTOC = @PROTOC@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
+XENLIGHT_LIBS = @XENLIGHT_LIBS@
+YAJL_CFLAGS = @YAJL_CFLAGS@
+YAJL_LIBS = @YAJL_LIBS@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
+
+all: all-am
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu web/api/queries/sum/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu web/api/queries/sum/Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(DATA)
+installdirs:
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+	@echo "This command is intended for maintainers to use;"
+	@echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags-am uninstall uninstall-am
+
+.PRECIOUS: Makefile
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/web/api/queries/sum/README.md b/web/api/queries/sum/README.md
index a74ffff6..a7028ca6 100644
--- a/web/api/queries/sum/README.md
+++ b/web/api/queries/sum/README.md
@@ -24,13 +24,13 @@ It can also be used in APIs and badges as `&group=sum` in the URL.
Examining the last minute of `successful` web server responses:
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=min&after=-60&label=min)
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=average&after=-60&label=average)
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=max&after=-60&label=max)
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=sum&after=-60&label=1m+sum&value_color=orange&units=requests)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=min&after=-60&label=min)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=average&after=-60&label=average)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=max&after=-60&label=max)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=sum&after=-60&label=1m+sum&value_color=orange&units=requests)
## References
-- [https://en.wikipedia.org/wiki/Summation](https://en.wikipedia.org/wiki/Summation).
+- <https://en.wikipedia.org/wiki/Summation>.
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fapi%2Fqueries%2Fsum%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fapi%2Fqueries%2Fsum%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/web/api/web_api_v1.c b/web/api/web_api_v1.c
index 2273224b..f34f0200 100644
--- a/web/api/web_api_v1.c
+++ b/web/api/web_api_v1.c
@@ -32,6 +32,7 @@ static struct {
, {"match-ids" , 0 , RRDR_OPTION_MATCH_IDS}
, {"match_names" , 0 , RRDR_OPTION_MATCH_NAMES}
, {"match-names" , 0 , RRDR_OPTION_MATCH_NAMES}
+ , {"showcustomvars" , 0 , RRDR_OPTION_CUSTOM_VARS}
, { NULL, 0, 0}
};
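
The `showcustomvars` line added above extends the token-to-flag table that maps `&options=` values in data query URLs onto `RRDR_OPTION_*` bits. The following self-contained sketch shows the lookup pattern such a table implies; the flag names and bit values here are illustrative, not netdata's actual definitions.

```c
/* Sketch of how a token->flag table like the one above is consumed:
 * each recognised token found in &options=... contributes its bit to
 * the query's option mask. Flag names and values are illustrative only. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define OPT_MATCH_IDS   (1u << 0)
#define OPT_MATCH_NAMES (1u << 1)
#define OPT_CUSTOM_VARS (1u << 2)

static const struct { const char *name; uint32_t flag; } option_map[] = {
    { "match-ids",      OPT_MATCH_IDS   },
    { "match-names",    OPT_MATCH_NAMES },
    { "showcustomvars", OPT_CUSTOM_VARS },
    { NULL, 0 }  /* terminator, as in the table above */
};

static uint32_t option_flag(const char *token) {
    for (int i = 0; option_map[i].name; i++)
        if (!strcmp(token, option_map[i].name))
            return option_map[i].flag;
    return 0;
}

int main(void) {
    printf("showcustomvars -> 0x%x\n", option_flag("showcustomvars"));  /* 0x4 */
    return 0;
}
```

With the new entry in place, a data query carrying `&options=showcustomvars` sets `RRDR_OPTION_CUSTOM_VARS` for that request.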
@@ -212,6 +213,50 @@ inline int web_client_api_request_v1_alarms(RRDHOST *host, struct web_client *w,
return 200;
}
+inline int web_client_api_request_v1_alarm_count(RRDHOST *host, struct web_client *w, char *url) {
+ RRDCALC_STATUS status = RRDCALC_STATUS_RAISED;
+ BUFFER *contexts = NULL;
+
+ buffer_flush(w->response.data);
+ buffer_sprintf(w->response.data, "[");
+
+ while(url) {
+ char *value = mystrsep(&url, "&");
+ if(!value || !*value) continue;
+
+ char *name = mystrsep(&value, "=");
+ if(!name || !*name) continue;
+ if(!value || !*value) continue;
+
+ debug(D_WEB_CLIENT, "%llu: API v1 alarm_count query param '%s' with value '%s'", w->id, name, value);
+
+ char* p = value;
+ if(!strcmp(name, "status")) {
+ while ((*p = toupper(*p))) p++;
+ if (!strcmp("CRITICAL", value)) status = RRDCALC_STATUS_CRITICAL;
+ else if (!strcmp("WARNING", value)) status = RRDCALC_STATUS_WARNING;
+ else if (!strcmp("UNINITIALIZED", value)) status = RRDCALC_STATUS_UNINITIALIZED;
+ else if (!strcmp("UNDEFINED", value)) status = RRDCALC_STATUS_UNDEFINED;
+ else if (!strcmp("REMOVED", value)) status = RRDCALC_STATUS_REMOVED;
+ else if (!strcmp("CLEAR", value)) status = RRDCALC_STATUS_CLEAR;
+ }
+ else if(!strcmp(name, "context") || !strcmp(name, "ctx")) {
+ if(!contexts) contexts = buffer_create(255);
+ buffer_strcat(contexts, "|");
+ buffer_strcat(contexts, value);
+ }
+ }
+
+ health_aggregate_alarms(host, w->response.data, contexts, status);
+
+ buffer_sprintf(w->response.data, "]\n");
+ w->response.data->contenttype = CT_APPLICATION_JSON;
+ buffer_no_cacheable(w->response.data);
+
+ buffer_free(contexts);
+ return 200;
+}
+
inline int web_client_api_request_v1_alarm_log(RRDHOST *host, struct web_client *w, char *url) {
uint32_t after = 0;
@@ -779,6 +824,7 @@ static struct api_command {
{ "alarms", 0, WEB_CLIENT_ACL_DASHBOARD, web_client_api_request_v1_alarms },
{ "alarm_log", 0, WEB_CLIENT_ACL_DASHBOARD, web_client_api_request_v1_alarm_log },
{ "alarm_variables", 0, WEB_CLIENT_ACL_DASHBOARD, web_client_api_request_v1_alarm_variables },
+ { "alarm_count", 0, WEB_CLIENT_ACL_DASHBOARD, web_client_api_request_v1_alarm_count },
{ "allmetrics", 0, WEB_CLIENT_ACL_DASHBOARD, web_client_api_request_v1_allmetrics },
{ "manage/health", 0, WEB_CLIENT_ACL_MGMT, web_client_api_request_v1_mgmt_health },
// terminator
diff --git a/web/api/web_api_v1.h b/web/api/web_api_v1.h
index 70b78178..3edb47e3 100644
--- a/web/api/web_api_v1.h
+++ b/web/api/web_api_v1.h
@@ -16,6 +16,7 @@ extern int web_client_api_request_v1_alarms(RRDHOST *host, struct web_client *w,
extern int web_client_api_request_v1_alarm_log(RRDHOST *host, struct web_client *w, char *url);
extern int web_client_api_request_single_chart(RRDHOST *host, struct web_client *w, char *url, void callback(RRDSET *st, BUFFER *buf));
extern int web_client_api_request_v1_alarm_variables(RRDHOST *host, struct web_client *w, char *url);
+extern int web_client_api_request_v1_alarm_count(RRDHOST *host, struct web_client *w, char *url);
extern int web_client_api_request_v1_charts(RRDHOST *host, struct web_client *w, char *url);
extern int web_client_api_request_v1_chart(RRDHOST *host, struct web_client *w, char *url);
extern int web_client_api_request_v1_data(RRDHOST *host, struct web_client *w, char *url);
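
Taken together, the two hunks above register a new `/api/v1/alarm_count` endpoint: the handler splits the query string into `name=value` pairs with `mystrsep()`, uppercases a `status=` value in place, collects `context=`/`ctx=` filters into a pipe-separated buffer, and hands everything to `health_aggregate_alarms()`, which writes the result between the `[` and `]` of the JSON response. A minimal standalone sketch of that parsing pattern, using POSIX `strsep()` in place of netdata's `mystrsep()` and a made-up query string:

```c
/* Standalone sketch of the query-string parsing used by alarm_count.
 * POSIX strsep() stands in for netdata's mystrsep(); the query string
 * below is a made-up example. */
#define _DEFAULT_SOURCE
#include <ctype.h>
#include <stdio.h>
#include <string.h>

int main(void) {
    char query[] = "status=warning&context=system.cpu&ctx=disk.io";
    char *url = query;

    while (url) {
        char *value = strsep(&url, "&");   /* next name=value pair */
        if (!value || !*value) continue;

        char *name = strsep(&value, "=");  /* split into name / value */
        if (!name || !*name || !value || !*value) continue;

        if (!strcmp(name, "status")) {
            /* uppercase in place, as the handler does before comparing */
            for (char *p = value; (*p = (char)toupper((unsigned char)*p)); p++);
            printf("status filter: %s\n", value);
        }
        else if (!strcmp(name, "context") || !strcmp(name, "ctx")) {
            /* the handler prepends '|' and appends to a BUFFER instead */
            printf("context filter: |%s\n", value);
        }
    }
    return 0;
}
```

Against a running agent, the endpoint would be exercised with a request such as `/api/v1/alarm_count?status=WARNING&context=system.cpu`; the exact JSON written between the brackets comes from `health_aggregate_alarms()`, which is outside this diff.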
diff --git a/web/gui/Makefile.in b/web/gui/Makefile.in
new file mode 100644
index 00000000..265440bd
--- /dev/null
+++ b/web/gui/Makefile.in
@@ -0,0 +1,914 @@
+# Makefile.in generated by automake 1.15.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2017 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+VPATH = @srcdir@
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = web/gui
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(dist_noinst_DATA) \
+ $(dist_web_DATA) $(dist_webcss_DATA) $(dist_webdnt_DATA) \
+ $(dist_webfonts_DATA) $(dist_webimages_DATA) \
+ $(dist_weblib_DATA) $(dist_webstatic_DATA) \
+ $(dist_webwellknown_DATA) $(am__DIST_COMMON)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
+am__vpath_adj = case $$p in \
+ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
+ *) f=$$p;; \
+ esac;
+am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
+am__install_max = 40
+am__nobase_strip_setup = \
+ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
+am__nobase_strip = \
+ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
+am__nobase_list = $(am__nobase_strip_setup); \
+ for p in $$list; do echo "$$p $$p"; done | \
+ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
+ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
+ if (++n[$$2] == $(am__install_max)) \
+ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
+ END { for (dir in files) print dir, files[dir] }'
+am__base_list = \
+ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
+ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
+am__uninstall_files_from_dir = { \
+ test -z "$$files" \
+ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \
+ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \
+ $(am__cd) "$$dir" && rm -f $$files; }; \
+ }
+am__installdirs = "$(DESTDIR)$(webdir)" "$(DESTDIR)$(webcssdir)" \
+ "$(DESTDIR)$(webdntdir)" "$(DESTDIR)$(webfontsdir)" \
+ "$(DESTDIR)$(webimagesdir)" "$(DESTDIR)$(weblibdir)" \
+ "$(DESTDIR)$(webstaticdir)" "$(DESTDIR)$(webwellknowndir)"
+DATA = $(dist_noinst_DATA) $(dist_web_DATA) $(dist_webcss_DATA) \
+ $(dist_webdnt_DATA) $(dist_webfonts_DATA) \
+ $(dist_webimages_DATA) $(dist_weblib_DATA) \
+ $(dist_webstatic_DATA) $(dist_webwellknown_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+am__DIST_COMMON = $(srcdir)/Makefile.in
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CUPSCONFIG = @CUPSCONFIG@
+CXX = @CXX@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CXX_BINARY = @CXX_BINARY@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+JSON_CFLAGS = @JSON_CFLAGS@
+JSON_LIBS = @JSON_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
+LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
+LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
+LIBCURL_LIBS = @LIBCURL_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
+LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
+LIBSSL_LIBS = @LIBSSL_LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
+OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
+OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
+OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
+OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
+OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
+OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
+OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
+OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
+OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
+OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
+OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
+OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
+PROTOBUF_LIBS = @PROTOBUF_LIBS@
+PROTOC = @PROTOC@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
+XENLIGHT_LIBS = @XENLIGHT_LIBS@
+YAJL_CFLAGS = @YAJL_CFLAGS@
+YAJL_LIBS = @YAJL_LIBS@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+
+#
+# Copyright (C) 2015 Alon Bar-Lev <alon.barlev@gmail.com>
+# SPDX-License-Identifier: GPL-3.0-or-later
+#
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+CLEANFILES = \
+ version.txt \
+ $(NULL)
+
+DASHBOARD_JS_FILES = \
+ src/dashboard.js/prologue.js.inc \
+ src/dashboard.js/utils.js \
+ src/dashboard.js/server-detection.js \
+ src/dashboard.js/dependencies.js \
+ src/dashboard.js/error-handling.js \
+ src/dashboard.js/compatibility.js \
+ src/dashboard.js/xss.js \
+ src/dashboard.js/colors.js \
+ src/dashboard.js/units-conversion.js \
+ src/dashboard.js/options.js \
+ src/dashboard.js/localstorage.js \
+ src/dashboard.js/timeout.js \
+ src/dashboard.js/themes.js \
+ src/dashboard.js/charting/dygraph.js \
+ src/dashboard.js/charting/sparkline.js \
+ src/dashboard.js/charting/google-charts.js \
+ src/dashboard.js/charting/gauge.js \
+ src/dashboard.js/charting/easy-pie-chart.js \
+ src/dashboard.js/charting/d3pie.js \
+ src/dashboard.js/charting/d3.js \
+ src/dashboard.js/charting/peity.js \
+ src/dashboard.js/charting/textonly.js \
+ src/dashboard.js/charting.js \
+ src/dashboard.js/chart-registry.js \
+ src/dashboard.js/common.js \
+ src/dashboard.js/main.js \
+ src/dashboard.js/alarms.js \
+ src/dashboard.js/registry.js \
+ src/dashboard.js/boot.js \
+ src/dashboard.js/epilogue.js.inc \
+ $(NULL)
+
+dist_noinst_DATA = \
+ README.md \
+ $(DASHBOARD_JS_FILES) \
+ $(NULL)
+
+dist_web_DATA = \
+ demo.html \
+ demo2.html \
+ demosites.html \
+ demosites2.html \
+ dashboard.html \
+ dashboard.js \
+ dashboard_info.js \
+ dashboard_info_custom_example.js \
+ dashboard.css \
+ dashboard.slate.css \
+ favicon.ico \
+ goto-host-from-alarm.html \
+ index.html \
+ main.css \
+ main.js \
+ console.html \
+ infographic.html \
+ robots.txt \
+ refresh-badges.js \
+ sitemap.xml \
+ tv.html \
+ version.txt \
+ $(NULL)
+
+webstaticdir = $(webdir)/static/img
+dist_webstatic_DATA = \
+ static/img/netdata-logomark.svg \
+ $(NULL)
+
+weblibdir = $(webdir)/lib
+dist_weblib_DATA = \
+ lib/bootstrap-3.3.7.min.js \
+ lib/bootstrap-slider-10.0.0.min.js \
+ lib/bootstrap-table-1.11.0.min.js \
+ lib/bootstrap-table-export-1.11.0.min.js \
+ lib/bootstrap-toggle-2.2.2.min.js \
+ lib/clipboard-polyfill-be05dad.js \
+ lib/d3-4.12.2.min.js \
+ lib/d3pie-0.2.1-netdata-3.js \
+ lib/dygraph-c91c859.min.js \
+ lib/dygraph-smooth-plotter-c91c859.js \
+ lib/fontawesome-all-5.0.1.min.js \
+ lib/gauge-1.3.2.min.js \
+ lib/jquery-2.2.4.min.js \
+ lib/jquery.easypiechart-97b5824.min.js \
+ lib/jquery.peity-3.2.0.min.js \
+ lib/jquery.sparkline-2.1.2.min.js \
+ lib/lz-string-1.4.4.min.js \
+ lib/pako-1.0.6.min.js \
+ lib/perfect-scrollbar-0.6.15.min.js \
+ lib/tableExport-1.6.0.min.js \
+ $(NULL)
+
+webcssdir = $(webdir)/css
+dist_webcss_DATA = \
+ css/morris-0.5.1.css \
+ css/bootstrap-3.3.7.css \
+ css/bootstrap-theme-3.3.7.min.css \
+ css/bootstrap-slate-flat-3.3.7.css \
+ css/bootstrap-slider-10.0.0.min.css \
+ css/bootstrap-toggle-2.2.2.min.css \
+ css/c3-0.4.18.min.css \
+ $(NULL)
+
+webfontsdir = $(webdir)/fonts
+dist_webfonts_DATA = \
+ fonts/glyphicons-halflings-regular.eot \
+ fonts/glyphicons-halflings-regular.svg \
+ fonts/glyphicons-halflings-regular.ttf \
+ fonts/glyphicons-halflings-regular.woff \
+ fonts/glyphicons-halflings-regular.woff2 \
+ $(NULL)
+
+webimagesdir = $(webdir)/images
+dist_webimages_DATA = \
+ images/netdata-logomark.svg \
+ images/alert-128-orange.png \
+ images/alert-128-red.png \
+ images/alert-multi-size-orange.ico \
+ images/alert-multi-size-red.ico \
+ images/animated.gif \
+ images/check-mark-2-128-green.png \
+ images/check-mark-2-multi-size-green.ico \
+ images/netdata.svg \
+ images/post.png \
+ images/android-icon-36x36.png \
+ images/android-icon-48x48.png \
+ images/android-icon-72x72.png \
+ images/android-icon-96x96.png \
+ images/android-icon-144x144.png \
+ images/android-icon-192x192.png \
+ images/apple-icon-57x57.png \
+ images/apple-icon-60x60.png \
+ images/apple-icon-72x72.png \
+ images/apple-icon-76x76.png \
+ images/apple-icon-114x114.png \
+ images/apple-icon-120x120.png \
+ images/apple-icon-144x144.png \
+ images/apple-icon-152x152.png \
+ images/apple-icon-180x180.png \
+ images/apple-icon-precomposed.png \
+ images/apple-icon.png \
+ images/favicon-16x16.png \
+ images/favicon-32x32.png \
+ images/favicon-96x96.png \
+ images/favicon.ico \
+ images/ms-icon-70x70.png \
+ images/ms-icon-144x144.png \
+ images/ms-icon-150x150.png \
+ images/ms-icon-310x310.png \
+ images/banner-icon-144x144.png \
+ $(NULL)
+
+webwellknowndir = $(webdir)/.well-known
+dist_webwellknown_DATA = \
+ $(NULL)
+
+webdntdir = $(webdir)/.well-known/dnt
+dist_webdnt_DATA = \
+ .well-known/dnt/cookies \
+ $(NULL)
+
+all: all-am
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu web/gui/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu web/gui/Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+install-dist_webDATA: $(dist_web_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_web_DATA)'; test -n "$(webdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(webdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(webdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(webdir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(webdir)" || exit $$?; \
+ done
+
+uninstall-dist_webDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_web_DATA)'; test -n "$(webdir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(webdir)'; $(am__uninstall_files_from_dir)
+install-dist_webcssDATA: $(dist_webcss_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_webcss_DATA)'; test -n "$(webcssdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(webcssdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(webcssdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(webcssdir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(webcssdir)" || exit $$?; \
+ done
+
+uninstall-dist_webcssDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_webcss_DATA)'; test -n "$(webcssdir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(webcssdir)'; $(am__uninstall_files_from_dir)
+install-dist_webdntDATA: $(dist_webdnt_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_webdnt_DATA)'; test -n "$(webdntdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(webdntdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(webdntdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(webdntdir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(webdntdir)" || exit $$?; \
+ done
+
+uninstall-dist_webdntDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_webdnt_DATA)'; test -n "$(webdntdir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(webdntdir)'; $(am__uninstall_files_from_dir)
+install-dist_webfontsDATA: $(dist_webfonts_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_webfonts_DATA)'; test -n "$(webfontsdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(webfontsdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(webfontsdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(webfontsdir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(webfontsdir)" || exit $$?; \
+ done
+
+uninstall-dist_webfontsDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_webfonts_DATA)'; test -n "$(webfontsdir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(webfontsdir)'; $(am__uninstall_files_from_dir)
+install-dist_webimagesDATA: $(dist_webimages_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_webimages_DATA)'; test -n "$(webimagesdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(webimagesdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(webimagesdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(webimagesdir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(webimagesdir)" || exit $$?; \
+ done
+
+uninstall-dist_webimagesDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_webimages_DATA)'; test -n "$(webimagesdir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(webimagesdir)'; $(am__uninstall_files_from_dir)
+install-dist_weblibDATA: $(dist_weblib_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_weblib_DATA)'; test -n "$(weblibdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(weblibdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(weblibdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(weblibdir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(weblibdir)" || exit $$?; \
+ done
+
+uninstall-dist_weblibDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_weblib_DATA)'; test -n "$(weblibdir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(weblibdir)'; $(am__uninstall_files_from_dir)
+install-dist_webstaticDATA: $(dist_webstatic_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_webstatic_DATA)'; test -n "$(webstaticdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(webstaticdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(webstaticdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(webstaticdir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(webstaticdir)" || exit $$?; \
+ done
+
+uninstall-dist_webstaticDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_webstatic_DATA)'; test -n "$(webstaticdir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(webstaticdir)'; $(am__uninstall_files_from_dir)
+install-dist_webwellknownDATA: $(dist_webwellknown_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_webwellknown_DATA)'; test -n "$(webwellknowndir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(webwellknowndir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(webwellknowndir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(webwellknowndir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(webwellknowndir)" || exit $$?; \
+ done
+
+uninstall-dist_webwellknownDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_webwellknown_DATA)'; test -n "$(webwellknowndir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(webwellknowndir)'; $(am__uninstall_files_from_dir)
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(DATA)
+installdirs:
+ for dir in "$(DESTDIR)$(webdir)" "$(DESTDIR)$(webcssdir)" "$(DESTDIR)$(webdntdir)" "$(DESTDIR)$(webfontsdir)" "$(DESTDIR)$(webimagesdir)" "$(DESTDIR)$(weblibdir)" "$(DESTDIR)$(webstaticdir)" "$(DESTDIR)$(webwellknowndir)"; do \
+ test -z "$$dir" || $(MKDIR_P) "$$dir"; \
+ done
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+ -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES)
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+	@echo "This command is intended for maintainers to use;"
+	@echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am: install-dist_webDATA install-dist_webcssDATA \
+ install-dist_webdntDATA install-dist_webfontsDATA \
+ install-dist_webimagesDATA install-dist_weblibDATA \
+ install-dist_webstaticDATA install-dist_webwellknownDATA
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am: uninstall-dist_webDATA uninstall-dist_webcssDATA \
+ uninstall-dist_webdntDATA uninstall-dist_webfontsDATA \
+ uninstall-dist_webimagesDATA uninstall-dist_weblibDATA \
+ uninstall-dist_webstaticDATA uninstall-dist_webwellknownDATA
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dist_webDATA install-dist_webcssDATA \
+ install-dist_webdntDATA install-dist_webfontsDATA \
+ install-dist_webimagesDATA install-dist_weblibDATA \
+ install-dist_webstaticDATA install-dist_webwellknownDATA \
+ install-dvi install-dvi-am install-exec install-exec-am \
+ install-html install-html-am install-info install-info-am \
+ install-man install-pdf install-pdf-am install-ps \
+ install-ps-am install-strip installcheck installcheck-am \
+ installdirs maintainer-clean maintainer-clean-generic \
+ mostlyclean mostlyclean-generic pdf pdf-am ps ps-am tags-am \
+ uninstall uninstall-am uninstall-dist_webDATA \
+ uninstall-dist_webcssDATA uninstall-dist_webdntDATA \
+ uninstall-dist_webfontsDATA uninstall-dist_webimagesDATA \
+ uninstall-dist_weblibDATA uninstall-dist_webstaticDATA \
+ uninstall-dist_webwellknownDATA
+
+.PRECIOUS: Makefile
+
+
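+# dashboard.js is assembled by concatenating the sources listed in
+# DASHBOARD_JS_FILES; writing to a temporary file first ensures a failed
+# cat never leaves a truncated dashboard.js behind.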
+dashboard.js: $(DASHBOARD_JS_FILES)
+ if test -f $@; then rm -f $@; fi
+ cat $(DASHBOARD_JS_FILES) > $@.tmp && mv $@.tmp $@
+
+version.txt:
+ if test -d "$(top_srcdir)/.git"; then \
+ git --git-dir="$(top_srcdir)/.git" log -n 1 --format=%H; \
+ fi > $@.tmp
+ test -s $@.tmp || echo 0 > $@.tmp
+ mv $@.tmp $@
+
+# regenerate these files, even if they are up to date
+.PHONY: version.txt dashboard.js
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/web/gui/README.md b/web/gui/README.md
index 9be9ffc9..26ef59bc 100644
--- a/web/gui/README.md
+++ b/web/gui/README.md
@@ -17,15 +17,15 @@ For information on creating custom dashboards, see **[Custom Dashboards](custom/
## Supported chart libraries
-- Dygraph
-- jQuery Sparkline
-- Peity
-- Google Charts
-- Morris
-- EasyPieChart
-- Gauge.js
-- D3
-- C3
+- Dygraph
+- jQuery Sparkline
+- Peity
+- Google Charts
+- Morris
+- EasyPieChart
+- Gauge.js
+- D3
+- C3
### Dygraph
@@ -46,20 +46,25 @@ TBD
#### Value Range
You can set the max value of the chart using the following snippet:
+
```html
<div data-netdata="unique.id"
data-chart-library="easypiechart"
data-easypiechart-max-value="40"
></div>
```
+
+Be aware that values that exceed the max value will be clamped (e.g. "41" still renders as 100%). The same applies to the minimum:
+
```html
<div data-netdata="unique.id"
data-chart-library="easypiechart"
data-easypiechart-min-value="20"
></div>
```
+
+If you specify both minimum and maximum, the rendering behavior changes. Instead of rendering the `value` relative to zero, it is now rendered relative to the range provided by the snippet:
+
```html
<div data-netdata="unique.id"
data-chart-library="easypiechart"
@@ -67,24 +72,28 @@ If you specify both minimum and maximum, the rendering behavior changes. Instead
data-easypiechart-max-value="40"
></div>
```
-In the first example, a value of `30`, without specifying the minimum, fills the chart bar to `75%` (100% / 40 * 30). However, in this example the range is now `20` (40 - 20 = 20). The value `30` will fill the chart to **`50%`**, since it's in the middle between 20 and 40.
+
+In the first example, a value of `30`, without specifying the minimum, fills the chart bar to `75%` (100% / 40 \* 30). However, in this example the range is now `20` (40 - 20 = 20). The value `30` will fill the chart to **`50%`**, since it's in the middle between 20 and 40.
This scenario is useful if you have metrics that change only within a specific range, e.g. temperatures that are very unlikely to fall out of range. In these cases it is more useful to have the chart render the values between the given min and max, to better highlight the changes within them.
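+
+To make this arithmetic explicit, here is a minimal sketch of the fill calculation (illustrative only; this is not EasyPieChart's actual code):
+
+```html
+<script>
+// fill percentage for a value rendered within [min, max]
+function fillPercent(value, min, max) {
+    return 100 * (value - min) / (max - min);
+}
+// fillPercent(30, 0, 40)  === 75  -- no minimum given, range is 0..40
+// fillPercent(30, 20, 40) === 50  -- range 20..40, 30 sits in the middle
+</script>
+```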
#### Negative Values
EasyPieCharts can render negative values with the following flag:
+
```html
<div data-netdata="unique.id"
data-chart-library="easypiechart"
data-override-options="signed"
></div>
```
+
Negative values are rendered counter-clockwise.
#### Full example
This is a chart that displays the hot water temperature in the given range of 40 to 50.
+
```html
<div data-netdata="stiebeleltron_system.hotwater.hotwatertemp"
data-title="Hot Water Temperature"
@@ -103,6 +112,7 @@ This is a chart that displays the hotwater temperature in the given range of 40
data-common-min="netdata-hotwater-min"
></div>
```
+
![hot water chart](https://user-images.githubusercontent.com/12159026/28666665-a7d68ad2-72c8-11e7-9a96-f6bf9691b471.png)
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fgui%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fgui%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/web/gui/browserconfig.xml b/web/gui/browserconfig.xml
deleted file mode 100644
index 32f47595..00000000
--- a/web/gui/browserconfig.xml
+++ /dev/null
@@ -1,2 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<browserconfig><msapplication><tile><square70x70logo src="images/ms-icon-70x70.png"/><square150x150logo src="images/ms-icon-150x150.png"/><square310x310logo src="images/ms-icon-310x310.png"/><TileColor>#ffffff</TileColor></tile></msapplication></browserconfig>
diff --git a/web/gui/confluence/README.md b/web/gui/confluence/README.md
index 3d7eda6a..22194835 100644
--- a/web/gui/confluence/README.md
+++ b/web/gui/confluence/README.md
@@ -1,20 +1,20 @@
# Atlassian Confluence dashboards
-With netdata you can build **live, interactive, monitoring dashboards** directly on Atlassian's **Confluence** pages.
+With Netdata you can build **live, interactive, monitoring dashboards** directly on Atlassian's **Confluence** pages.
I see you already asking "why should I do this?"
Well... think about it for a bit... Confluence is the perfect place for something like that:
-1. All the employees of your company already have access to it.
+1. All the employees of your company already have access to it.
-2. Most probably you have already several spaces on confluence, one for each project or service. Adding live monitoring information there is ideal: everything in one place. Your users will just click on the page and instantly the monitoring page they need will appear with only the information they need to know.
+2. Most probably you already have several spaces on Confluence, one for each project or service. Adding live monitoring information there is ideal: everything in one place. Your users will just click on the page and instantly the monitoring page they need will appear with only the information they need to know.
-3. You can create monitoring pages for very specific purposes, hiding all the information that is too detailed for most users, or explaining in detail things that are difficult for them to understand.
+3. You can create monitoring pages for very specific purposes, hiding all the information that is too detailed for most users, or explaining in detail things that are difficult for them to understand.
-So, what can we expect? What can netdata do on confluence?
+So, what can we expect? What can Netdata do on Confluence?
-You will be surprised! **Everything a netdata dashboard does!**. Example:
+You will be surprised! **Everything a Netdata dashboard does!** For example:
![final-confluence4](https://user-images.githubusercontent.com/2662304/34366214-767fa4b8-eaa1-11e7-83af-0b9b9b72aa73.gif)
@@ -24,16 +24,16 @@ Let me show you how.
### Before you begin
-Most likely your confluence is accessible via HTTPS. So, you need to proxy your netdata servers via an apache or nginx to make them HTTPS too. If your Confluence is HTTPS but your netdata are not, you will not be able to fetch the netdata content from the confluence page. The netdata wiki has many examples for proxying netdata through another web server.
+Most likely your Confluence is accessible via HTTPS. So, you need to proxy your Netdata servers through Apache or NGINX to make them HTTPS too. If your Confluence is HTTPS but your Netdata servers are not, you will not be able to fetch the Netdata content from the Confluence page. The Netdata wiki has many examples for proxying Netdata through another web server.
-> So, make sure netdata and confluence can be accessed with the same protocol (**http**, or **https**).
+> So, make sure Netdata and Confluence can be accessed with the same protocol (**http**, or **https**).
For our example, I will use these 2 servers:
-server|url
-----|----
-Server 1 | https://london.my-netdata.io
-Server 2 | https://frankfurt.my-netdata.io
+| server | url |
+|------|---|
+| Server 1 | <https://london.my-netdata.io> |
+| Server 2 | <https://frankfurt.my-netdata.io> |
I will use the first server for the static dashboard javascript files.
@@ -41,7 +41,7 @@ I will use the first server for the static dashboard javascript files.
Then, you need to enable the `html` plugin of confluence. We will add some plain html content on that page, and this plugin is required.
-### Create a new page
+### Create a new page
Create a new confluence page and paste this into an `html` box:
@@ -64,7 +64,7 @@ like this (type `{html` for the html box to appear - you need the confluence htm
### Add a few badges
-Then, go to your netdata and copy an alarm badge (the `<embed>` version of it):
+Then, go to your Netdata and copy an alarm badge (the `<embed>` version of it):
![copy-embed-badge](https://user-images.githubusercontent.com/2662304/34329562-dddea37e-e90d-11e7-9830-041a9f6a5984.gif)
@@ -78,7 +78,7 @@ Hit **update** and you will get this:
This badge is now auto-refreshing. It will update itself based on the update frequency of the alarm.
-> Keep in mind you can add badges with custom netdata queries too. netdata automatically creates badges for all the alarms, but every chart, every dimension on every chart, can be used for a badge. And netdata badges are quite powerful! Check [Creating Badges](../../api/badges/) for more information on badges.
+> Keep in mind you can add badges with custom Netdata queries too. Netdata automatically creates badges for all the alarms, but every chart, every dimension on every chart, can be used for a badge. And Netdata badges are quite powerful! Check [Creating Badges](../../api/badges/) for more information on badges.
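+
+A copied badge is just an `<embed>` element pointing at Netdata's badge API. A minimal sketch, with an illustrative chart name:
+
+```html
+<embed src="https://london.my-netdata.io/api/v1/badge.svg?chart=system.cpu"
+       type="image/svg+xml" height="20"/>
+```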
So, let's create a table and add this badge for both our web servers:
@@ -88,7 +88,7 @@ Now we get this:
![screenshot from 2017-12-25 01-07-10](https://user-images.githubusercontent.com/2662304/34329615-f7dea286-e90f-11e7-9b6f-600215494f96.png)
-### Add a netdata chart
+### Add a Netdata chart
The simplest form of a chart is this (it adds the chart `web_log_nginx_netdata.response_statuses`, using 100% of the width, 150px height, and the last 10 minutes of data):
@@ -110,7 +110,7 @@ And you will get this:
![screenshot from 2017-12-25 01-14-09](https://user-images.githubusercontent.com/2662304/34329640-efd15574-e910-11e7-9004-94487dcde154.png)
-> This chart is **alive**, fully interactive. You can drag it, pan it, zoom it, etc like you do on netdata dashboards!
+> This chart is **alive**, fully interactive. You can drag it, pan it, zoom it, etc like you do on Netdata dashboards!
Of course this is too big. We need something smaller to add inside the table. Let's try this:
@@ -128,7 +128,7 @@ Of course this too big. We need something smaller to add inside the table. Let's
></div>
```
-The chart name is shown on all netdata charts, so just copy it from a netdata dashboard.
+The chart name is shown on all Netdata charts, so just copy it from a Netdata dashboard.
We will fetch the same chart from both servers. To define the server we also added `data-host=` with the URL of each server, like this (we also added `<br/>` for a newline between the badge and the chart):
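+
+A minimal sketch of one of these fragments (the sizes and time-frame here are illustrative):
+
+```html
+<div data-netdata="web_log_nginx_netdata.response_statuses"
+     data-host="https://frankfurt.my-netdata.io"
+     data-width="100%"
+     data-height="150px"
+     data-after="-600"
+     ></div>
+```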
@@ -138,8 +138,7 @@ Which gives us this:
![screenshot from 2017-12-25 01-26-04](https://user-images.githubusercontent.com/2662304/34329700-989f0f2e-e912-11e7-8ac9-c78f82cfbdb0.png)
-Note the color difference. This is because netdata automatically hides dimensions that are just zero (the frankfurt server has only successful requests). To instruct netdata to disable this feature, we need to add another html fragment at the bottom of the page (make sure this is added after loading `dashboard.js`). So we edit the first block we added, and append a new `<script>` section to it:
-
+Note the color difference. This is because Netdata automatically hides dimensions that are just zero (the frankfurt server has only successful requests). To instruct Netdata to disable this feature, we need to add another html fragment at the bottom of the page (make sure this is added after loading `dashboard.js`). So we edit the first block we added, and append a new `<script>` section to it:
```html
<script>
@@ -165,7 +164,7 @@ Now they match:
#### more options
-If you want to change the colors append `data-colors="#001122 #334455 #667788"`. The colors will be used for the dimensions top to bottom, as shown on a netdata dashboard. Keep in mind the default netdata dashboards hide by default all dimensions that are just zero, so enable them at the dashboard settings to see them all.
+If you want to change the colors, append `data-colors="#001122 #334455 #667788"`. The colors will be used for the dimensions top to bottom, as shown on a Netdata dashboard. Keep in mind Netdata dashboards hide by default all dimensions that are just zero, so enable them in the dashboard settings to see them all.
You can get a percentage chart by adding these attributes to the charts:
@@ -177,7 +176,7 @@ You can get a percentage chart, by adding these on these charts:
data-units="%"
```
-The first line instructs netdata to calculate the percentage of each dimension, the second strips any fractional digits, the third instructs the charting library to size the chart from 0 to 100, the next one instructs it to include 0 in the chart and the last changes the units of the chart to `%`. This is how it will look:
+The first line instructs Netdata to calculate the percentage of each dimension, the second strips any fractional digits, the third instructs the charting library to size the chart from 0 to 100, the next one instructs it to include 0 in the chart and the last changes the units of the chart to `%`. This is how it will look:
![screenshot from 2017-12-25 01-45-39](https://user-images.githubusercontent.com/2662304/34329774-570ef990-e915-11e7-899f-eee939564aaf.png)
@@ -189,8 +188,8 @@ Let's now add a few gauges. The chart we added has several dimensions: `success`
Let's say we want to add 2 gauges:
-1. `success` and `redirect` together, in blue
-2. `error`, `bad` and `other` together, in orange
+1. `success` and `redirect` together, in blue
+2. `error`, `bad` and `other` together, in orange
We will add the following for each server. We have enclosed them in another `<div>` because Confluence will wrap them if the page width is not enough to fit them. With that additional `<div>` they will always be next to each other.
@@ -234,7 +233,6 @@ Adding the above will give you this:
![final-confluence](https://user-images.githubusercontent.com/2662304/34329813-636bb8de-e917-11e7-8cc7-19e197859008.gif)
-
### Final source - for the confluence source editor
If you enable the source editor of Confluence, you can paste the whole example (implementing the first image on this post and demonstrating everything discussed on this page):
@@ -1011,4 +1009,4 @@ NETDATA.options.current.eliminate_zero_dimensions = false;
</div>
```
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fgui%2Fconfluence%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fgui%2Fconfluence%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/web/gui/custom/README.md b/web/gui/custom/README.md
index f9ffd255..68d8bbb5 100644
--- a/web/gui/custom/README.md
+++ b/web/gui/custom/README.md
@@ -2,12 +2,12 @@
You can:
-- create your own dashboards using simple HTML (no javascript is required for basic dashboards)
-- utilizing any or all of the available chart libraries, on the same dashboard
-- using data from one or more netdata servers, on the same dashboard
-- host your dashboard HTML page on any web server, anywhere
+- create your own dashboards using simple HTML (no javascript is required for basic dashboards)
+- utilize any or all of the available chart libraries, on the same dashboard
+- use data from one or more Netdata servers, on the same dashboard
+- host your dashboard HTML page on any web server, anywhere
-netdata charts can also be added to existing web pages.
+Netdata charts can also be added to existing web pages.
Check this **[very simple working example of a custom dashboard](http://netdata.firehol.org/demo.html)**, and its **[html source](../demo.html)**.
@@ -17,11 +17,10 @@ If you plan to put the dashboard on TV, check **[tv.html](../tv.html)**. This is
![image](https://cloud.githubusercontent.com/assets/2662304/14252187/d8d5f78e-fa8e-11e5-990d-99821d38c874.png)
--
-
## Web directory
-All of the mentioned examples are available on your local netdata installation (e.g. `http://myhost:19999/dashboard.html`). The default web root directory with the HTML and JS code is `/usr/share/netdata/web`. The main dashboard is also in that directory and called `index.html`.
+All of the mentioned examples are available on your local Netdata installation (e.g. `http://myhost:19999/dashboard.html`). The default web root directory with the HTML and JS code is `/usr/share/netdata/web`. The main dashboard is also in that directory and called `index.html`.\
Note: index.html has a different syntax. Don't use it as a template for simple custom dashboards.
## Example empty dashboard
@@ -52,12 +51,11 @@ If you need to create a new dashboard on an empty page, we suggest the following
</html>
```
-
## dashboard.js
-To add netdata charts to any web page (dedicated to netdata or not), you need to include the `/dashboard.js` file of a netdata server.
+To add Netdata charts to any web page (dedicated to Netdata or not), you need to include the `/dashboard.js` file of a Netdata server.
-For example, if your netdata server listens at `http://box:19999/`, you will need to add the following to the `head` section of your web page:
+For example, if your Netdata server listens at `http://box:19999/`, you will need to add the following to the `head` section of your web page:
```html
<script type="text/javascript" src="http://box:19999/dashboard.js"></script>
@@ -67,23 +65,23 @@ For example, if your netdata server listens at `http://box:19999/`, you will nee
`dashboard.js` will automatically load the following:
-1. `dashboard.css`, required for the netdata charts
+1. `dashboard.css`, required for the Netdata charts
-2. `jquery.min.js`, (only if jquery is not already loaded for this web page)
+2. `jquery.min.js` (only if jquery is not already loaded for this web page)
-3. `bootstrap.min.js` (only if bootstrap is not already loaded) and `bootstrap.min.css`.
+3. `bootstrap.min.js` (only if bootstrap is not already loaded) and `bootstrap.min.css`.
- You can disable this by adding the following before loading `dashboard.js`:
+ You can disable this by adding the following before loading `dashboard.js`:
```html
<script>var netdataNoBootstrap = true;</script>
```
-4. `jquery.nanoscroller.min.js`, required for the scrollbar of the chart legends.
+4. `jquery.nanoscroller.min.js`, required for the scrollbar of the chart legends.
-5. `bootstrap-toggle.min.js` and `bootstrap-toggle.min.css`, required for the settings toggle buttons.
+5. `bootstrap-toggle.min.js` and `bootstrap-toggle.min.css`, required for the settings toggle buttons.
-6. `font-awesome.min.css`, for icons.
+6. `font-awesome.min.css`, for icons.
When `dashboard.js` loads, it will scan the page for elements that define charts (see below) and immediately start refreshing them. Keep in mind that more javascript modules may be loaded (every chart library is a different javascript file that is loaded on first use).
@@ -117,11 +115,11 @@ NETDATA.pause(function() {
});
```
-### The default netdata server
+### The default Netdata server
-`dashboard.js` will attempt to auto-detect the URL of the netdata server it is loaded from, and set this server as the default netdata server for all charts.
+`dashboard.js` will attempt to auto-detect the URL of the Netdata server it is loaded from, and set this server as the default Netdata server for all charts.
-If you need to set any other URL as the default netdata server for all charts that do not specify a netdata server, add this before loading `dashboard.js`:
+If you need to set any other URL as the default Netdata server for all charts that do not specify a Netdata server, add this before loading `dashboard.js`:
```html
<script type="text/javascript">var netdataServer = "http://your.netdata.server:19999";</script>
@@ -129,13 +127,13 @@ If you need to set any other URL as the default netdata server for all charts th
---
-# Adding charts
+## Adding charts
To add charts, you need to add a `div` for each of them. Each of these `div` elements accepts a few `data-` attributes:
### The chart unique ID
-The unique ID of a chart is shown at the title of the chart of the default netdata dashboard. You can also find all the charts available at your netdata server with this URL: `http://your.netdata.server:19999/api/v1/charts` ([example](http://netdata.firehol.org/api/v1/charts)).
+The unique ID of a chart is shown in the title of the chart on the default Netdata dashboard. You can also find all the charts available at your Netdata server with this URL: `http://your.netdata.server:19999/api/v1/charts` ([example](http://netdata.firehol.org/api/v1/charts)).
To specify the unique id, use this:
@@ -160,9 +158,9 @@ You can specify the duration of the chart (how much time of data it will show) u
These can be either:
-- **absolute** unix timestamps (in javascript terms, they are `new Date().getTime() / 1000`. Using absolute timestamps you can have a chart showing always the same time-frame.
+- **absolute** unix timestamps (in javascript terms, they are `new Date().getTime() / 1000`). Using absolute timestamps you can have a chart that always shows the same time-frame.
-- **relative** number of seconds to now. To show the last 10 minutes of data, `AFTER_SECONDS` must be `-600` (relative to now) and `BEFORE_SECONDS` must be `0` (meaning: now). If you want the chart to auto-refresh the current values, you need to specify **relative** values.
+- **relative** number of seconds to now. To show the last 10 minutes of data, `AFTER_SECONDS` must be `-600` (relative to now) and `BEFORE_SECONDS` must be `0` (meaning: now). If you want the chart to auto-refresh the current values, you need to specify **relative** values.
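+
+For example, a chart that always shows the last 10 minutes and keeps refreshing itself (a sketch, assuming the `data-after`/`data-before` attributes):
+
+```html
+<div data-netdata="unique.id"
+     data-after="-600"
+     data-before="0"
+     ></div>
+```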
### Chart sizes
@@ -182,7 +180,7 @@ If you want `dashboard.js` to remember permanently (browser local storage) the d
### Netdata server
-Each chart can get data from a different netdata server. You can give per chart the netdata server using:
+Each chart can get data from a different Netdata server. You can specify the Netdata server to use for each chart with:
```html
<div data-netdata="unique.id"
@@ -197,6 +195,7 @@ If you have ephemeral monitoring setup ([More info here](../../../streaming/#mon
data-host="http://yournetdata.server:19999/host/reported-hostname"
></div>
```
+
### Chart library
The default chart library is `dygraph`. You set a different chart library per chart using this:
@@ -209,21 +208,20 @@ The default chart library is `dygraph`. You set a different chart library per ch
Each chart library may support more chart-library specific settings. Please refer to the documentation of the chart library you are interested in, in this wiki or the source code:
-- options `data-dygraph-XXX` [here](https://github.com/netdata/netdata/blob/643cfe20a8d8beba0ed31ec6afaade80853fd310/web/dashboard.js#L6251-L6361)
-- options `data-easypiechart-XXX` [here](https://github.com/netdata/netdata/blob/643cfe20a8d8beba0ed31ec6afaade80853fd310/web/dashboard.js#L7954-L7966)
-- options `data-gauge-XXX` [here](https://github.com/netdata/netdata/blob/643cfe20a8d8beba0ed31ec6afaade80853fd310/web/dashboard.js#L8182-L8189)
-- options `data-d3pie-XXX` [here](https://github.com/netdata/netdata/blob/643cfe20a8d8beba0ed31ec6afaade80853fd310/web/dashboard.js#L7394-L7561)
-- options `data-sparkline-XXX` [here](https://github.com/netdata/netdata/blob/643cfe20a8d8beba0ed31ec6afaade80853fd310/web/dashboard.js#L5940-L5985)
-- options `data-peity-XXX` [here](https://github.com/netdata/netdata/blob/643cfe20a8d8beba0ed31ec6afaade80853fd310/web/dashboard.js#L5892)
-
+- options `data-dygraph-XXX` [here](https://github.com/netdata/netdata/blob/643cfe20a8d8beba0ed31ec6afaade80853fd310/web/dashboard.js#L6251-L6361)
+- options `data-easypiechart-XXX` [here](https://github.com/netdata/netdata/blob/643cfe20a8d8beba0ed31ec6afaade80853fd310/web/dashboard.js#L7954-L7966)
+- options `data-gauge-XXX` [here](https://github.com/netdata/netdata/blob/643cfe20a8d8beba0ed31ec6afaade80853fd310/web/dashboard.js#L8182-L8189)
+- options `data-d3pie-XXX` [here](https://github.com/netdata/netdata/blob/643cfe20a8d8beba0ed31ec6afaade80853fd310/web/dashboard.js#L7394-L7561)
+- options `data-sparkline-XXX` [here](https://github.com/netdata/netdata/blob/643cfe20a8d8beba0ed31ec6afaade80853fd310/web/dashboard.js#L5940-L5985)
+- options `data-peity-XXX` [here](https://github.com/netdata/netdata/blob/643cfe20a8d8beba0ed31ec6afaade80853fd310/web/dashboard.js#L5892)
### Data points
For the time-frame requested, `dashboard.js` will use the chart dimensions and the settings of the chart library to find out how many data points it can show.
-For example, most line chart libraries are using 3 pixels per data point. If the chart shows 10 minutes of data (600 seconds), its update frequency is 1 second, and the chart width is 1800 pixels, then `dashboard.js` will request from the netdata server: 10 minutes of data, represented in 600 points, and the chart will be refreshed per second. If the user resizes the window so that the chart becomes 600 pixels wide, then `dashboard.js` will request the same 10 minutes of data, represented in 200 points and the chart will be refreshed once every 3 seconds.
+For example, most line chart libraries use 3 pixels per data point. If the chart shows 10 minutes of data (600 seconds), its update frequency is 1 second, and the chart width is 1800 pixels, then `dashboard.js` will request from the Netdata server: 10 minutes of data, represented in 600 points, and the chart will be refreshed once per second. If the user resizes the window so that the chart becomes 600 pixels wide, then `dashboard.js` will request the same 10 minutes of data, represented in 200 points, and the chart will be refreshed once every 3 seconds.
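+
+The arithmetic is straightforward; a rough sketch of it (illustrative only, not the actual `dashboard.js` internals):
+
+```html
+<script>
+var pixelsPerPoint = 3;                      // typical for line charts
+var durationSec = 600, chartWidthPx = 1800;  // 10 minutes of data, 1800px wide
+var points = chartWidthPx / pixelsPerPoint;  // 600 points requested
+var refreshEverySec = durationSec / points;  // chart refreshed every 1 second
+// at 600px wide: 200 points, refreshed once every 3 seconds
+</script>
+```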
-If you need to have a fixed number of points in the data source retrieved from the netdata server, you can set:
+If you need to have a fixed number of points in the data source retrieved from the Netdata server, you can set:
```html
<div data-netdata="unique.id"
@@ -245,7 +243,7 @@ Where `PIXELS_PER_POINT` is the number of pixels each data point should occupy.
### Data grouping method
-Netdata supports **average** (the default), **sum** and **max** grouping methods. The grouping method is used when the netdata server is requested to return fewer points for a time-frame, compared to the number of points available.
+Netdata supports **average** (the default), **sum** and **max** grouping methods. The grouping method is used when the Netdata server is requested to return fewer points for a time-frame, compared to the number of points available.
You can give it per chart, using:
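+
+A minimal sketch, assuming the `data-method` attribute selects the grouping method:
+
+```html
+<div data-netdata="unique.id"
+     data-method="max"
+     ></div>
+```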
@@ -270,9 +268,9 @@ Netdata can change the rate of charts on the fly. So a charts that shows values
The above will provide the average rate per minute (60 seconds).
Use 60 for `/minute`, 3600 for `/hour`, 86400 for `/day` (provided you have that much data).
-- The `data-gtime` setting does not change the units of the chart. You have to change them yourself with `data-units`.
-- This works only for `data-method="average"`.
-- netdata may aggregate multiple points to satisfy the `data-points` setting. For example, you request `per minute` but the requested number of points to be returned are not enough to report every single minute. In this case netdata will sum the `per second` raw data of the database to find the `per minute` for every single minute and then **average** them to find the **average per minute rate of every X minutes**. So, it works as if the data collection frequency was per minute.
+- The `data-gtime` setting does not change the units of the chart. You have to change them yourself with `data-units`.
+- This works only for `data-method="average"`.
+- Netdata may aggregate multiple points to satisfy the `data-points` setting. For example, you request `per minute` but the requested number of points to be returned is not enough to report every single minute. In this case Netdata will sum the `per second` raw data of the database to find the `per minute` for every single minute and then **average** them to find the **average per minute rate of every X minutes**. So, it works as if the data collection frequency was per minute.
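+
+Putting this together, a sketch of a per-minute rate chart (the units label is arbitrary):
+
+```html
+<div data-netdata="unique.id"
+     data-method="average"
+     data-gtime="60"
+     data-units="requests/min"
+     ></div>
+```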
### Selecting dimensions
@@ -285,7 +283,7 @@ You can select specific dimensions using this:
></div>
```
-netdata supports coma (` , `) or pipe (` | `) separated [simple patterns](../../../libnetdata/simple_pattern/) for dimensions. By default it searches for both dimension IDs and dimension NAMEs. You can control the target of the match with: `data-append-options="match-ids"` or `data-append-options="match-names"`. Spaces in `data-dimensions=""` are matched in the dimension names and IDs.
+Netdata supports comma (`,`) or pipe (`|`) separated [simple patterns](../../../libnetdata/simple_pattern/) for dimensions. By default it searches for both dimension IDs and dimension NAMEs. You can control the target of the match with: `data-append-options="match-ids"` or `data-append-options="match-names"`. Spaces in `data-dimensions=""` are matched in the dimension names and IDs.
### Chart title
@@ -344,7 +342,7 @@ On charts that by default have a legend managed by `dashboard.js` you can remove
### API options
-You can append netdata **[REST API v1](../../api)** data options, using this:
+You can append Netdata **[REST API v1](../../api)** data options, using this:
```html
<div data-netdata="unique.id"
@@ -354,10 +352,10 @@ You can append netdata **[REST API v1](../../api)** data options, using this:
A few useful options are:
-- `absolute` to show all values are absolute (i.e. turn negative dimensions to positive)
-- `percentage` to express the values as a percentage of the chart total (so, the values of the dimensions are added, and the sum of them if expressed as a percentage of the sum of all dimensions)
-- `unaligned` to prevent netdata from aligning the charts (e.g. when requesting 60 seconds aggregation per point, netdata returns chart data aligned to XX:XX:00 to XX:XX:59 - similarly for hours, days, etc - the `unaligned` option disables this feature)
-- `match-ids` or `match-names` is used to control what `data-dimensions=` will match.
+- `absolute` to show all values as absolute (i.e. turn negative dimensions to positive)
+- `percentage` to express the values as a percentage of the chart total (so, the values of the dimensions are added, and their sum is expressed as a percentage of the sum of all dimensions)
+- `unaligned` to prevent Netdata from aligning the charts (e.g. when requesting 60 seconds aggregation per point, Netdata returns chart data aligned to XX:XX:00 to XX:XX:59 - similarly for hours, days, etc - the `unaligned` option disables this feature)
+- `match-ids` or `match-names` is used to control what `data-dimensions=` will match.
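+
+For example, to request unaligned data for a chart (a sketch; pick options from the list above):
+
+```html
+<div data-netdata="unique.id"
+     data-append-options="unaligned"
+     ></div>
+```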
### Chart library performance
@@ -373,7 +371,7 @@ refreshed in <span id="measurement1"></span> milliseconds!
### Syncing charts y-range
-If you give the same `data-common-max="NAME"` to 2+ charts, then all of them will share the same max value of their y-range. If one spikes, all of them will be aligned to have the same scale. This is done for the cpu interrupts and and cpu softnet charts at the dashboard and also for the `gauge` and `easypiecharts` of the netdata home page.
+If you give the same `data-common-max="NAME"` to 2+ charts, then all of them will share the same max value of their y-range. If one spikes, all of them will be aligned to have the same scale. This is done for the cpu interrupts and cpu softnet charts at the dashboard and also for the `gauge` and `easypiecharts` of the Netdata home page.
```html
<div data-netdata="chart1"
@@ -389,7 +387,7 @@ The same functionality exists for `data-common-min`.
### Syncing chart units
-netdata dashboards support auto-scaling of units. So, `MB` can become `KB`, `GB`, etc dynamically, based on the value to be shown.
+Netdata dashboards support auto-scaling of units. So, `MB` can become `KB`, `GB`, etc dynamically, based on the value to be shown.
Giving the same `NAME` with `data-common-units="NAME"`, 2+ charts can be forced to always have the same units.
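+
+For example, two charts forced to share units (a sketch mirroring the `data-common-max` example above; the group name is arbitrary):
+
+```html
+<div data-netdata="chart1"
+     data-common-units="my-group"
+     ></div>
+<div data-netdata="chart2"
+     data-common-units="my-group"
+     ></div>
+```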
@@ -413,5 +411,4 @@ Charts can be scaled to specific units with `data-desired-units="UNITS"`. If the
></div>
```
-
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fgui%2Fcustom%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fgui%2Fcustom%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/web/gui/dashboard.css b/web/gui/dashboard.css
index 8062497d..674131a1 100644
--- a/web/gui/dashboard.css
+++ b/web/gui/dashboard.css
@@ -737,3 +737,21 @@ body {
.ps-container:hover > .ps-scrollbar-y-rail:hover > .ps-scrollbar-y {
background-color: #999; /* scrollbar color on hover */
}
+
+.dygraph__history-tip {
+ position: absolute;
+ top: 50%;
+ transform: translateY(-50%);
+    display: none; /* overridden in js */
+ margin-right: 25px;
+ direction: rtl;
+ overflow: hidden;
+ pointer-events: none;
+}
+
+.dygraph__history-tip-content {
+ display: inline-block;
+ white-space: nowrap;
+ direction: ltr;
+ pointer-events: auto;
+}
diff --git a/web/gui/dashboard.js b/web/gui/dashboard.js
index a32a29be..0c379dc0 100644
--- a/web/gui/dashboard.js
+++ b/web/gui/dashboard.js
@@ -2281,8 +2281,28 @@ NETDATA.dygraphChartCreate = function (state, data) {
NETDATA.globalSelectionSync.stop();
},
underlayCallback: function (canvas, area, g) {
-
// the chart is about to be drawn
+
+ // update history_tip_element
+ if (state.tmp.dygraph_history_tip_element) {
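+                    // DOM x-coordinate of the oldest point in the database; the tip
+                    // fills the otherwise empty area to the left of this point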
+ const xHookRightSide = g.toDomXCoord(state.netdata_first);
+ if (xHookRightSide > area.x) {
+ state.tmp.dygraph_history_tip_element_displayed = true;
+ // group the styles for possible better performance
+ state.tmp.dygraph_history_tip_element.setAttribute(
+ 'style',
+ `display: block; left: ${area.x}px; right: calc(100% - ${xHookRightSide}px);`
+ )
+ } else {
+ if (state.tmp.dygraph_history_tip_element_displayed) {
+ // additional check just for performance
+ // don't update the DOM when it's not needed
+ state.tmp.dygraph_history_tip_element.style.display = 'none';
+ state.tmp.dygraph_history_tip_element_displayed = false;
+ }
+ }
+ }
+
// this function renders global highlighted time-frame
if (NETDATA.globalChartUnderlay.isActive()) {
@@ -2751,6 +2771,21 @@ NETDATA.dygraphChartCreate = function (state, data) {
state.tmp.dygraph_instance = new Dygraph(state.element_chart,
data.result.data, state.tmp.dygraph_options);
+
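+    // build the "extend your history" tip once per chart; the underlayCallback
+    // above positions it and toggles its visibility as the chart pans and zooms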
+ state.tmp.dygraph_history_tip_element = document.createElement('div');
+ state.tmp.dygraph_history_tip_element.innerHTML = `
+ <span class="dygraph__history-tip-content">
+ Want to extend your history of real-time metrics?
+ <br />
+ <a href="https://docs.netdata.cloud/docs/configuration-guide/#increase-the-metrics-retention-period" target=_blank>
+ Configure Netdata's <b>history</b></a>
+ or use the <a href="https://docs.netdata.cloud/database/engine/" target=_blank>DB engine</a>.
+ </span>
+ `;
+ state.tmp.dygraph_history_tip_element.className = 'dygraph__history-tip';
+ state.element_chart.appendChild(state.tmp.dygraph_history_tip_element);
+
state.tmp.dygraph_force_zoom = false;
state.tmp.dygraph_user_action = false;
state.tmp.dygraph_last_rendered = Date.now();
diff --git a/web/gui/dashboard.slate.css b/web/gui/dashboard.slate.css
index f1c9c410..af338252 100644
--- a/web/gui/dashboard.slate.css
+++ b/web/gui/dashboard.slate.css
@@ -755,3 +755,21 @@ code {
.ps-container:hover > .ps-scrollbar-y-rail:hover > .ps-scrollbar-y {
background-color: #999; /* scrollbar color on hover */
}
+
+.dygraph__history-tip {
+ position: absolute;
+ top: 50%;
+ transform: translateY(-50%);
+    display: none; /* overridden in js */
+ margin-right: 25px;
+ direction: rtl;
+ overflow: hidden;
+ pointer-events: none;
+}
+
+.dygraph__history-tip-content {
+ display: inline-block;
+ white-space: nowrap;
+ direction: ltr;
+ pointer-events: auto;
+}
diff --git a/web/gui/dashboard_info.js b/web/gui/dashboard_info.js
index df924264..130162be 100644
--- a/web/gui/dashboard_info.js
+++ b/web/gui/dashboard_info.js
@@ -486,6 +486,12 @@ netdataDashboard.menu = {
title: 'Perf Counters',
icon: '<i class="fas fa-tachometer-alt"></i>',
info: 'Performance Monitoring Counters (PMC). Data collected using <b>perf_event_open()</b> system call which utilises Hardware Performance Monitoring Units (PMU).'
+ },
+
+ 'vsphere': {
+ title: 'vSphere',
+ icon: '<i class="fas fa-server"></i>',
+        info: 'Performance statistics for ESXi hosts and virtual machines. Data is collected from <a href="https://www.vmware.com/products/vcenter-server.html">VMware vCenter Server</a> using the <code><a href="https://github.com/vmware/govmomi">govmomi</a></code> library.'
}
};
@@ -826,6 +832,32 @@ netdataDashboard.context = {
]
},
+ 'mem.zram_usage': {
+ info: 'ZRAM total RAM usage metrics. ZRAM uses some memory to store metadata about stored memory pages, thus introducing an overhead which is proportional to disk size. It excludes same-element-filled-pages since no memory is allocated for them.'
+ },
+
+ 'mem.zram_savings': {
+ info: 'Displays original and compressed memory data sizes.'
+ },
+
+ 'mem.zram_ratio': {
+ heads: [
+ netdataDashboard.gaugeChart('Compression Ratio', '12%', 'ratio', '#0099CC')
+ ],
+        info: 'Compression ratio, calculated as <code>100 * original_size / compressed_size</code>. A higher ratio means better compression and more RAM savings.'
+ },
+
+ 'mem.zram_efficiency': {
+ heads: [
+ netdataDashboard.gaugeChart('Efficiency', '12%', 'percent', NETDATA.colors[0])
+ ],
+ commonMin: true,
+ commonMax: true,
+ valueRange: "[0, 100]",
+ info: 'Memory usage efficiency, calculated as <code>100 * compressed_size / total_mem_used</code>.'
+ },
+
'mem.pgfaults': {
info: 'A <a href="https://en.wikipedia.org/wiki/Page_fault" target="_blank">page fault</a> is a type of interrupt, called trap, raised by computer hardware when a running program accesses a memory page that is mapped into the virtual address space, but not actually loaded into main memory. If the page is loaded in memory at the time the fault is generated, but is not marked in the memory management unit as being loaded in memory, then it is called a <b>minor</b> or soft page fault. A <b>major</b> page fault is generated when the system needs to load the memory page from disk or swap memory.'
},
@@ -953,6 +985,10 @@ netdataDashboard.context = {
height: 2.0
},
+ 'apps.uptime': {
+        info: 'Carried-over process group uptime since the Netdata restart: the period of time within which at least one process in the group was running.'
+ },
+
// ------------------------------------------------------------------------
// USERS
@@ -976,6 +1012,10 @@ netdataDashboard.context = {
height: 2.0
},
+ 'users.uptime': {
+        info: 'Carried-over process group uptime since the Netdata restart: the period of time within which at least one process in the group was running.'
+ },
+
// ------------------------------------------------------------------------
// GROUPS
@@ -988,7 +1028,7 @@ netdataDashboard.context = {
},
'groups.vmem': {
- info: 'Virtual memory allocated per user group. Please check <a href="https://github.com/netdata/netdata/tree/master/daemon#virtual-memory" target="_blank">this article</a> for more information.'
+ info: 'Virtual memory allocated per user group since the Netdata restart. Please check <a href="https://github.com/netdata/netdata/tree/master/daemon#virtual-memory" target="_blank">this article</a> for more information.'
},
'groups.preads': {
@@ -999,6 +1039,10 @@ netdataDashboard.context = {
height: 2.0
},
+ 'groups.uptime': {
+        info: 'Carried-over process group uptime: the period of time within which at least one process in the group was running.'
+ },
+
// ------------------------------------------------------------------------
// NETWORK QoS
@@ -2296,7 +2340,7 @@ netdataDashboard.context = {
},
'spigotmc.users': {
- info: 'THe number of currently connect users on the monitored Spigot server.'
+        info: 'The number of currently connected users on the monitored Spigot server.'
},
'unbound.queries': {
@@ -2442,7 +2486,100 @@ netdataDashboard.context = {
'powersupply.voltage': {
info: undefined
- }
+ },
// ------------------------------------------------------------------------
+ // VMware vSphere
+
+ // Host specific
+ 'vsphere.host_mem_usage_percentage': {
+ info: 'Percentage of used machine memory: <code>consumed</code> / <code>machine-memory-size</code>.'
+ },
+
+ 'vsphere.host_mem_usage': {
+ info:
+            '<code>granted</code> is the amount of machine memory that is mapped for a host; ' +
+            'it equals the sum of all granted metrics for all powered-on virtual machines, plus machine memory for vSphere services on the host. ' +
+            '<code>consumed</code> is the amount of machine memory used on the host; it includes memory used by the Service Console, the VMkernel, vSphere services, plus the total consumed metrics for all running virtual machines. ' +
+            '<code>consumed</code> = <code>total host memory</code> - <code>free host memory</code>. ' +
+            '<code>active</code> is the sum of all active metrics for all powered-on virtual machines plus vSphere services (such as COS, vpxa) on the host. ' +
+            '<code>shared</code> is the sum of all shared metrics for all powered-on virtual machines, plus the amount for vSphere services on the host. ' +
+            '<code>sharedcommon</code> is the amount of machine memory that is shared by all powered-on virtual machines and vSphere services on the host. ' +
+            '<code>shared</code> - <code>sharedcommon</code> = machine memory (host memory) savings (KB). ' +
+            'For details see the <a href="https://docs.vmware.com/en/VMware-vSphere/6.5/com.vmware.vsphere.resmgmt.doc/GUID-BFDC988B-F53D-4E97-9793-A002445AFAE1.html">Measuring and Differentiating Types of Memory Usage</a> and ' +
+            '<a href="https://www.vmware.com/support/developer/converter-sdk/conv51_apireference/memory_counters.html">Memory Counters</a> articles.'
+ },
+
+ 'vsphere.host_mem_swap_rate': {
+ info:
+ 'This statistic refers to VMkernel swapping and not to guest OS swapping. ' +
+            '<code>in</code> is the sum of the <code>swapinRate</code> values for all powered-on virtual machines on the host. ' +
+            '<code>swapinRate</code> is the rate at which VMKernel reads data into machine memory from the swap file. ' +
+            '<code>out</code> is the sum of the <code>swapoutRate</code> values for all powered-on virtual machines on the host. ' +
+            '<code>swapoutRate</code> is the rate at which VMkernel writes to the virtual machine’s swap file from machine memory.'
+ },
+
+ // VM specific
+ 'vsphere.vm_mem_usage_percentage': {
+        info: 'Percentage of used virtual machine “physical” memory: <code>active</code> / <code>virtual machine configured size</code>.'
+ },
+
+ 'vsphere.vm_mem_usage': {
+ info:
+            '<code>granted</code> is the amount of guest “physical” memory that is mapped to machine memory; it includes the <code>shared</code> memory amount. ' +
+            '<code>consumed</code> is the amount of guest “physical” memory consumed by the virtual machine for guest memory; ' +
+            '<code>consumed</code> = <code>granted</code> - <code>memory saved due to memory sharing</code>. ' +
+            '<code>active</code> is the amount of memory that is actively used, as estimated by VMkernel based on recently touched memory pages. ' +
+            '<code>shared</code> is the amount of guest “physical” memory shared with other virtual machines (through the VMkernel’s transparent page-sharing mechanism, a RAM de-duplication technique). ' +
+            'For details see the <a href="https://docs.vmware.com/en/VMware-vSphere/6.5/com.vmware.vsphere.resmgmt.doc/GUID-BFDC988B-F53D-4E97-9793-A002445AFAE1.html">Measuring and Differentiating Types of Memory Usage</a> and ' +
+            '<a href="https://www.vmware.com/support/developer/converter-sdk/conv51_apireference/memory_counters.html">Memory Counters</a> articles.'
+ },
+
+ 'vsphere.vm_mem_swap_rate': {
+ info:
+ 'This statistic refers to VMkernel swapping and not to guest OS swapping. ' +
+            '<code>in</code> is the rate at which VMKernel reads data into machine memory from the swap file. ' +
+            '<code>out</code> is the rate at which VMkernel writes to the virtual machine’s swap file from machine memory.'
+ },
+
+ 'vsphere.vm_mem_swap': {
+ info:
+ 'This statistic refers to VMkernel swapping and not to guest OS swapping. ' +
+            '<code>swapped</code> is the amount of guest physical memory swapped out to the virtual machine\'s swap file by the VMkernel. ' +
+ 'Swapped memory stays on disk until the virtual machine needs it.'
+ },
+
+ // Common
+ 'vsphere.cpu_usage_total': {
+ info: 'Summary CPU usage statistics across all CPUs/cores.'
+ },
+
+ 'vsphere.net_bandwidth_total': {
+ info: 'Summary receive/transmit statistics across all network interfaces.'
+ },
+
+ 'vsphere.net_packets_total': {
+ info: 'Summary receive/transmit statistics across all network interfaces.'
+ },
+
+ 'vsphere.net_errors_total': {
+ info: 'Summary receive/transmit statistics across all network interfaces.'
+ },
+
+ 'vsphere.net_drops_total': {
+ info: 'Summary receive/transmit statistics across all network interfaces.'
+ },
+
+ 'vsphere.disk_usage_total': {
+ info: 'Summary read/write statistics across all disks.'
+ },
+
+ 'vsphere.disk_max_latency': {
+        info: '<code>latency</code> is the highest latency value across all disks.'
+ },
+
+ 'vsphere.overall_status': {
+        info: '<code>0</code> is unknown, <code>1</code> is OK, <code>2</code> means the entity might have a problem, <code>3</code> means it definitely has a problem.'
+ }
};
diff --git a/web/gui/images/favicon-128.png b/web/gui/images/favicon-128.png
deleted file mode 100644
index 5371f920..00000000
--- a/web/gui/images/favicon-128.png
+++ /dev/null
Binary files differ
diff --git a/web/gui/images/favicon-196x196.png b/web/gui/images/favicon-196x196.png
deleted file mode 100644
index a208c27f..00000000
--- a/web/gui/images/favicon-196x196.png
+++ /dev/null
Binary files differ
diff --git a/web/gui/images/ms-icon-310x150.png b/web/gui/images/ms-icon-310x150.png
deleted file mode 100644
index 5d4ac57b..00000000
--- a/web/gui/images/ms-icon-310x150.png
+++ /dev/null
Binary files differ
diff --git a/web/gui/images/ms-icon-36x36.png b/web/gui/images/ms-icon-36x36.png
deleted file mode 100644
index e251302e..00000000
--- a/web/gui/images/ms-icon-36x36.png
+++ /dev/null
Binary files differ
diff --git a/web/gui/images/packaging-beta-tag.svg b/web/gui/images/packaging-beta-tag.svg
deleted file mode 100644
index cebdc084..00000000
--- a/web/gui/images/packaging-beta-tag.svg
+++ /dev/null
@@ -1,42 +0,0 @@
-<svg width="127" height="16" viewBox="0 0 127 16" fill="none" xmlns="http://www.w3.org/2000/svg">
-<rect width="127" height="16" rx="2" fill="url(#paint0_linear)"/>
-<path d="M86 0H125C126.105 0 127 0.895431 127 2V14C127 15.1046 126.105 16 125 16H86V0Z" fill="url(#paint1_linear)"/>
-<path d="M6.5 4V10" stroke="white" stroke-linecap="round" stroke-linejoin="round"/>
-<path d="M12.5 7C13.3284 7 14 6.32843 14 5.5C14 4.67157 13.3284 4 12.5 4C11.6716 4 11 4.67157 11 5.5C11 6.32843 11.6716 7 12.5 7Z" stroke="white" stroke-linecap="round" stroke-linejoin="round"/>
-<path d="M6.5 13C7.32843 13 8 12.3284 8 11.5C8 10.6716 7.32843 10 6.5 10C5.67157 10 5 10.6716 5 11.5C5 12.3284 5.67157 13 6.5 13Z" stroke="white" stroke-linecap="round" stroke-linejoin="round"/>
-<path d="M12.5 7C12.5 8.19347 12.0259 9.33807 11.182 10.182C10.3381 11.0259 9.19347 11.5 8 11.5" stroke="white" stroke-linecap="round" stroke-linejoin="round"/>
-<g filter="url(#filter0_d)">
-<path d="M23.939 7.93848V11H22.9077V3.17969H25.792C26.6478 3.17969 27.3174 3.39811 27.8008 3.83496C28.2878 4.27181 28.5312 4.8501 28.5312 5.56982C28.5312 6.32894 28.2931 6.91439 27.8169 7.32617C27.3442 7.73438 26.6657 7.93848 25.7812 7.93848H23.939ZM23.939 7.09521H25.792C26.3434 7.09521 26.766 6.96631 27.0596 6.7085C27.3532 6.4471 27.5 6.07113 27.5 5.58057C27.5 5.11507 27.3532 4.74268 27.0596 4.46338C26.766 4.18408 26.3631 4.03906 25.8511 4.02832H23.939V7.09521ZM33.6548 11C33.5976 10.8854 33.551 10.6813 33.5152 10.3877C33.0533 10.8675 32.5018 11.1074 31.8609 11.1074C31.288 11.1074 30.8171 10.9463 30.4483 10.624C30.0831 10.2982 29.9004 9.88639 29.9004 9.38867C29.9004 8.78353 30.1296 8.31445 30.5879 7.98145C31.0499 7.64486 31.698 7.47656 32.5323 7.47656H33.4991V7.02002C33.4991 6.67269 33.3952 6.39697 33.1876 6.19287C32.9799 5.98519 32.6737 5.88135 32.2691 5.88135C31.9146 5.88135 31.6174 5.97087 31.3775 6.1499C31.1376 6.32894 31.0176 6.54557 31.0176 6.7998H30.0186C30.0186 6.50977 30.1207 6.23047 30.3248 5.96191C30.5324 5.68978 30.8117 5.47493 31.1627 5.31738C31.5171 5.15983 31.9057 5.08105 32.3282 5.08105C32.9978 5.08105 33.5224 5.24935 33.9019 5.58594C34.2815 5.91895 34.4784 6.37907 34.4927 6.96631V9.64111C34.4927 10.1746 34.5608 10.599 34.6968 10.9141V11H33.6548ZM32.0059 10.2427C32.3174 10.2427 32.6129 10.1621 32.8921 10.001C33.1714 9.83984 33.3738 9.63037 33.4991 9.37256V8.18018H32.7203C31.5028 8.18018 30.8941 8.53646 30.8941 9.24902C30.8941 9.56055 30.9979 9.80404 31.2056 9.97949C31.4133 10.1549 31.6801 10.2427 32.0059 10.2427ZM38.8214 10.2964C39.1759 10.2964 39.4856 10.189 39.7506 9.97412C40.0156 9.75928 40.1624 9.49072 40.191 9.16846H41.131C41.1131 9.50146 40.9985 9.81836 40.7872 10.1191C40.576 10.4199 40.2931 10.6598 39.9386 10.8389C39.5877 11.0179 39.2153 11.1074 38.8214 11.1074C38.0301 11.1074 37.3999 10.8442 36.9308 10.3179C36.4653 9.78792 36.2325 9.06462 36.2325 8.14795V7.98145C36.2325 7.41569 36.3364 6.9126 36.5441 6.47217C36.7517 6.03174 37.0489 5.68978 37.4357 5.44629C37.826 5.2028 38.2861 5.08105 38.816 5.08105C39.4677 5.08105 40.0084 5.2762 40.4381 5.6665C40.8714 6.0568 41.1023 6.56348 41.131 7.18652H40.191C40.1624 6.81055 40.0192 6.5026 39.7613 6.2627C39.5071 6.01921 39.192 5.89746 38.816 5.89746C38.3112 5.89746 37.9191 6.08008 37.6398 6.44531C37.364 6.80697 37.2262 7.33154 37.2262 8.01904V8.20703C37.2262 8.87663 37.364 9.39225 37.6398 9.75391C37.9155 10.1156 38.3094 10.2964 38.8214 10.2964ZM44.3102 8.30908L43.6872 8.95898V11H42.6935V2.75H43.6872V7.73975L44.2189 7.10059L46.029 5.18848H47.2375L44.9762 7.61621L47.5007 11H46.3351L44.3102 8.30908ZM52.2913 11C52.234 10.8854 52.1874 10.6813 52.1516 10.3877C51.6897 10.8675 51.1383 11.1074 50.4973 11.1074C49.9244 11.1074 49.4535 10.9463 49.0847 10.624C48.7195 10.2982 48.5369 9.88639 48.5369 9.38867C48.5369 8.78353 48.766 8.31445 49.2244 7.98145C49.6863 7.64486 50.3344 7.47656 51.1687 7.47656H52.1355V7.02002C52.1355 6.67269 52.0316 6.39697 51.824 6.19287C51.6163 5.98519 51.3101 5.88135 50.9055 5.88135C50.551 5.88135 50.2538 5.97087 50.0139 6.1499C49.774 6.32894 49.654 6.54557 49.654 6.7998H48.655C48.655 6.50977 48.7571 6.23047 48.9612 5.96191C49.1689 5.68978 49.4482 5.47493 49.7991 5.31738C50.1536 5.15983 50.5421 5.08105 50.9646 5.08105C51.6342 5.08105 52.1588 5.24935 52.5383 5.58594C52.9179 5.91895 53.1148 6.37907 53.1291 6.96631V9.64111C53.1291 10.1746 53.1972 10.599 53.3332 10.9141V11H52.2913ZM50.6423 10.2427C50.9538 10.2427 51.2493 10.1621 51.5286 10.001C51.8079 9.83984 52.0102 9.63037 52.1355 9.37256V8.18018H51.3567C50.1392 
8.18018 49.5305 8.53646 49.5305 9.24902C49.5305 9.56055 49.6343 9.80404 49.842 9.97949C50.0497 10.1549 50.3165 10.2427 50.6423 10.2427ZM54.8904 8.0459C54.8904 7.13997 55.0999 6.42025 55.5188 5.88672C55.9378 5.34961 56.4928 5.08105 57.1839 5.08105C57.8929 5.08105 58.4461 5.33171 58.8436 5.83301L58.8919 5.18848H59.7996V10.8604C59.7996 11.6123 59.5758 12.2049 59.1282 12.6382C58.6842 13.0715 58.0862 13.2881 57.3343 13.2881C56.9153 13.2881 56.5053 13.1986 56.1043 13.0195C55.7033 12.8405 55.3971 12.5952 55.1858 12.2837L55.7015 11.6875C56.1276 12.2139 56.6486 12.4771 57.2645 12.4771C57.7479 12.4771 58.1238 12.341 58.3924 12.0688C58.6645 11.7967 58.8006 11.4136 58.8006 10.9194V10.4199C58.4031 10.8783 57.8606 11.1074 57.1731 11.1074C56.4928 11.1074 55.9414 10.8335 55.5188 10.2856C55.0999 9.73779 54.8904 8.99121 54.8904 8.0459ZM55.8895 8.15869C55.8895 8.81396 56.0237 9.32959 56.2923 9.70557C56.5608 10.078 56.9368 10.2642 57.4202 10.2642C58.0468 10.2642 58.507 9.97949 58.8006 9.41016V6.75684C58.4962 6.20182 58.0397 5.92432 57.431 5.92432C56.9476 5.92432 56.5698 6.1123 56.2977 6.48828C56.0255 6.86426 55.8895 7.42106 55.8895 8.15869ZM62.8231 11H61.8295V5.18848H62.8231V11ZM61.7489 3.64697C61.7489 3.48584 61.7972 3.34977 61.8939 3.23877C61.9942 3.12777 62.141 3.07227 62.3343 3.07227C62.5277 3.07227 62.6745 3.12777 62.7748 3.23877C62.875 3.34977 62.9252 3.48584 62.9252 3.64697C62.9252 3.80811 62.875 3.94238 62.7748 4.0498C62.6745 4.15723 62.5277 4.21094 62.3343 4.21094C62.141 4.21094 61.9942 4.15723 61.8939 4.0498C61.7972 3.94238 61.7489 3.80811 61.7489 3.64697ZM65.7983 5.18848L65.8305 5.91895C66.2745 5.36035 66.8546 5.08105 67.5707 5.08105C68.7989 5.08105 69.4184 5.77393 69.4291 7.15967V11H68.4355V7.1543C68.4319 6.73535 68.3352 6.42562 68.1454 6.2251C67.9592 6.02458 67.6674 5.92432 67.2699 5.92432C66.9477 5.92432 66.6648 6.01025 66.4213 6.18213C66.1778 6.354 65.988 6.57959 65.852 6.85889V11H64.8583V5.18848H65.7983ZM71.1313 8.0459C71.1313 7.13997 71.3408 6.42025 71.7597 5.88672C72.1787 5.34961 72.7337 5.08105 73.4248 5.08105C74.1338 5.08105 74.687 5.33171 75.0845 5.83301L75.1328 5.18848H76.0405V10.8604C76.0405 11.6123 75.8167 12.2049 75.3691 12.6382C74.9251 13.0715 74.3271 13.2881 73.5752 13.2881C73.1562 13.2881 72.7462 13.1986 72.3452 13.0195C71.9442 12.8405 71.638 12.5952 71.4267 12.2837L71.9424 11.6875C72.3685 12.2139 72.8895 12.4771 73.5054 12.4771C73.9888 12.4771 74.3647 12.341 74.6333 12.0688C74.9054 11.7967 75.0415 11.4136 75.0415 10.9194V10.4199C74.644 10.8783 74.1015 11.1074 73.414 11.1074C72.7337 11.1074 72.1823 10.8335 71.7597 10.2856C71.3408 9.73779 71.1313 8.99121 71.1313 8.0459ZM72.1304 8.15869C72.1304 8.81396 72.2646 9.32959 72.5332 9.70557C72.8017 10.078 73.1777 10.2642 73.6611 10.2642C74.2877 10.2642 74.7479 9.97949 75.0415 9.41016V6.75684C74.7371 6.20182 74.2806 5.92432 73.6719 5.92432C73.1885 5.92432 72.8107 6.1123 72.5386 6.48828C72.2664 6.86426 72.1304 7.42106 72.1304 8.15869Z" fill="white"/>
-</g>
-<g filter="url(#filter1_d)">
-<path d="M93.7949 12V4.17969H96.4751C97.3595 4.17969 98.0327 4.35693 98.4946 4.71143C98.9565 5.06592 99.1875 5.59408 99.1875 6.2959C99.1875 6.65397 99.0908 6.97624 98.8975 7.2627C98.7041 7.54915 98.4212 7.77116 98.0488 7.92871C98.4714 8.04329 98.7972 8.25993 99.0264 8.57861C99.2591 8.89372 99.3755 9.27327 99.3755 9.71729C99.3755 10.4513 99.1392 11.0153 98.6665 11.4092C98.1974 11.8031 97.5243 12 96.647 12H93.7949ZM95.1538 8.47119V10.915H96.6631C97.0892 10.915 97.4222 10.8094 97.6621 10.5981C97.902 10.3869 98.022 10.0933 98.022 9.71729C98.022 8.90446 97.6066 8.4891 96.7759 8.47119H95.1538ZM95.1538 7.47217H96.4858C96.9084 7.47217 97.2378 7.37728 97.4741 7.1875C97.714 6.99414 97.834 6.72201 97.834 6.37109C97.834 5.98438 97.723 5.70508 97.501 5.5332C97.2826 5.36133 96.9406 5.27539 96.4751 5.27539H95.1538V7.47217ZM105.745 8.50879H102.533V10.915H106.288V12H101.174V4.17969H106.25V5.27539H102.533V7.43457H105.745V8.50879ZM113.592 5.27539H111.153V12H109.805V5.27539H107.388V4.17969H113.592V5.27539ZM118.796 10.1792H115.767L115.133 12H113.72L116.674 4.17969H117.894L120.853 12H119.435L118.796 10.1792ZM116.148 9.0835H118.415L117.281 5.83936L116.148 9.0835Z" fill="white"/>
-</g>
-<defs>
-<filter id="filter0_d" x="21.9077" y="2.75" width="55.1328" height="12.5381" filterUnits="userSpaceOnUse" color-interpolation-filters="sRGB">
-<feFlood flood-opacity="0" result="BackgroundImageFix"/>
-<feColorMatrix in="SourceAlpha" type="matrix" values="0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 127 0"/>
-<feOffset dy="1"/>
-<feGaussianBlur stdDeviation="0.5"/>
-<feColorMatrix type="matrix" values="0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0.5 0"/>
-<feBlend mode="normal" in2="BackgroundImageFix" result="effect1_dropShadow"/>
-<feBlend mode="normal" in="SourceGraphic" in2="effect1_dropShadow" result="shape"/>
-</filter>
-<filter id="filter1_d" x="92.7949" y="4.17969" width="29.0583" height="9.82031" filterUnits="userSpaceOnUse" color-interpolation-filters="sRGB">
-<feFlood flood-opacity="0" result="BackgroundImageFix"/>
-<feColorMatrix in="SourceAlpha" type="matrix" values="0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 127 0"/>
-<feOffset dy="1"/>
-<feGaussianBlur stdDeviation="0.5"/>
-<feColorMatrix type="matrix" values="0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0.5 0"/>
-<feBlend mode="normal" in2="BackgroundImageFix" result="effect1_dropShadow"/>
-<feBlend mode="normal" in="SourceGraphic" in2="effect1_dropShadow" result="shape"/>
-</filter>
-<linearGradient id="paint0_linear" x1="63.5" y1="0" x2="63.5" y2="16" gradientUnits="userSpaceOnUse">
-<stop stop-color="#606060"/>
-<stop offset="1" stop-color="#4D4D4D"/>
-</linearGradient>
-<linearGradient id="paint1_linear" x1="106.5" y1="0" x2="106.5" y2="16" gradientUnits="userSpaceOnUse">
-<stop stop-color="#BFBFBF"/>
-<stop offset="1" stop-color="#7F7F7F"/>
-</linearGradient>
-</defs>
-</svg>
diff --git a/web/gui/images/seo-performance-128.png b/web/gui/images/seo-performance-128.png
deleted file mode 100644
index 2a212a47..00000000
--- a/web/gui/images/seo-performance-128.png
+++ /dev/null
Binary files differ
diff --git a/web/gui/main.js b/web/gui/main.js
index 1214eba6..6aebb8ed 100644
--- a/web/gui/main.js
+++ b/web/gui/main.js
@@ -1788,7 +1788,19 @@ function renderPage(menus, data) {
sidebar += '<li class="" style="padding-top:15px;"><a href="https://github.com/netdata/netdata/blob/master/docs/Add-more-charts-to-netdata.md#add-more-charts-to-netdata" target="_blank"><i class="fas fa-plus"></i> add more charts</a></li>';
sidebar += '<li class=""><a href="https://github.com/netdata/netdata/tree/master/health#Health-monitoring" target="_blank"><i class="fas fa-plus"></i> add more alarms</a></li>';
- sidebar += '<li class="" style="margin:20px;color:#666;"><small>netdata on <b>' + data.hostname.toString() + '</b>, collects every ' + ((data.update_every === 1) ? 'second' : data.update_every.toString() + ' seconds') + ' <b>' + data.dimensions_count.toLocaleString() + '</b> metrics, presented as <b>' + data.charts_count.toLocaleString() + '</b> charts and monitored by <b>' + data.alarms_count.toLocaleString() + '</b> alarms, using ' + Math.round(data.rrd_memory_bytes / 1024 / 1024).toLocaleString() + ' MB of memory for ' + NETDATA.seconds4human(data.update_every * data.history, { space: '&nbsp;' }) + ' of real-time history.<br/>&nbsp;<br/><b>netdata</b><br/>' + data.version.toString() + '</small></li>';
+ sidebar += '<li class="" style="margin:20px;color:#666;"><small>Every ' +
+ ((data.update_every === 1) ? 'second' : data.update_every.toString() + ' seconds') + ', ' +
+ 'Netdata collects <b>' + data.dimensions_count.toLocaleString() + '</b> metrics, presents them in <b>' +
+ data.charts_count.toLocaleString() + '</b> charts and monitors them with <b>' +
+ data.alarms_count.toLocaleString() + '</b> alarms. Netdata is using ' +
+ Math.round(data.rrd_memory_bytes / 1024 / 1024).toLocaleString() + ' MB of memory on <b>' +
+ data.hostname.toString() + '</b> for ' +
+ NETDATA.seconds4human(data.update_every * data.history, {
+ minute: 'minute', minutes: 'minutes', second: 'second', seconds: 'seconds', space: '&nbsp;',
+ }) +
+ ' of real-time history.<br />&nbsp;<br />' + 'Get more history by ' +
+ '<a href="https://docs.netdata.cloud/docs/configuration-guide/#increase-the-metrics-retention-period" target=_blank>configuring Netdata\'s <b>history</b></a> or using the <a href="https://docs.netdata.cloud/database/engine/" target=_blank>DB engine.</a>' +
+ '<br/>&nbsp;<br/><b>netdata</b><br/>' + data.version.toString() + '</small></li>';
sidebar += '</ul>';
div.innerHTML = html;
document.getElementById('sidebar').innerHTML = sidebar;
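For reference, the rewritten summary above is plain string concatenation over the `data` payload returned by the charts API. Below is a minimal standalone sketch of the same computation, using a hypothetical `data` object and a crude stand-in for `NETDATA.seconds4human` (the real helper lives in dashboard.js and handles more units; it is not reproduced here):

```js
// Sketch only: `data` is a hypothetical /api/v1/charts payload and
// seconds4human() is a simplified stand-in for NETDATA.seconds4human.
function seconds4human(seconds) {
    // the real helper also formats days, mixed hours + minutes, etc.
    if (seconds < 60) return seconds + ' seconds';
    if (seconds < 3600) return Math.round(seconds / 60) + ' minutes';
    return Math.round(seconds / 3600) + ' hours';
}

const data = {
    hostname: 'myhost',
    update_every: 1,                    // seconds between collections
    dimensions_count: 2500,
    charts_count: 180,
    alarms_count: 75,
    rrd_memory_bytes: 50 * 1024 * 1024, // in-memory round-robin database size
    history: 7200                       // entries kept per dimension
};

const summary =
    'Every ' + (data.update_every === 1 ? 'second' : data.update_every + ' seconds') +
    ', Netdata collects ' + data.dimensions_count.toLocaleString() + ' metrics, presents them in ' +
    data.charts_count.toLocaleString() + ' charts and monitors them with ' +
    data.alarms_count.toLocaleString() + ' alarms, using ' +
    Math.round(data.rrd_memory_bytes / 1024 / 1024).toLocaleString() + ' MB of memory on ' +
    data.hostname + ' for ' + seconds4human(data.update_every * data.history) +
    ' of real-time history.';

console.log(summary);
// e.g. (en-US locale): Every second, Netdata collects 2,500 metrics, presents
// them in 180 charts and monitors them with 75 alarms, using 50 MB of memory
// on myhost for 2 hours of real-time history.
```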
diff --git a/web/gui/manifest.json b/web/gui/manifest.json
deleted file mode 100644
index 52cb4831..00000000
--- a/web/gui/manifest.json
+++ /dev/null
@@ -1,41 +0,0 @@
-{
- "name": "App",
- "icons": [
- {
- "src": "images\/android-icon-36x36.png",
- "sizes": "36x36",
- "type": "image\/png",
- "density": "0.75"
- },
- {
- "src": "images\/android-icon-48x48.png",
- "sizes": "48x48",
- "type": "image\/png",
- "density": "1.0"
- },
- {
- "src": "images\/android-icon-72x72.png",
- "sizes": "72x72",
- "type": "image\/png",
- "density": "1.5"
- },
- {
- "src": "images\/android-icon-96x96.png",
- "sizes": "96x96",
- "type": "image\/png",
- "density": "2.0"
- },
- {
- "src": "images\/android-icon-144x144.png",
- "sizes": "144x144",
- "type": "image\/png",
- "density": "3.0"
- },
- {
- "src": "images\/android-icon-192x192.png",
- "sizes": "192x192",
- "type": "image\/png",
- "density": "4.0"
- }
- ]
-}
diff --git a/web/gui/src/dashboard.js/charting/_c3.js b/web/gui/src/dashboard.js/charting/_c3.js
deleted file mode 100644
index 6688bbcc..00000000
--- a/web/gui/src/dashboard.js/charting/_c3.js
+++ /dev/null
@@ -1,114 +0,0 @@
-
-// DEPRECATED: will be removed!
-
-// c3
-
-NETDATA.c3Initialize = function(callback) {
- if (typeof netdataNoC3 === 'undefined' || !netdataNoC3) {
-
- // C3 requires D3
- if (!NETDATA.chartLibraries.d3.initialized) {
- if (NETDATA.chartLibraries.d3.enabled) {
- NETDATA.d3Initialize(function() {
- NETDATA.c3Initialize(callback);
- });
- } else {
- NETDATA.chartLibraries.c3.enabled = false;
- if (typeof callback === "function")
- return callback();
- }
- } else {
- NETDATA._loadCSS(NETDATA.c3_css);
-
- $.ajax({
- url: NETDATA.c3_js,
- cache: true,
- dataType: "script",
- xhrFields: { withCredentials: true } // required for the cookie
- })
- .done(function() {
- NETDATA.registerChartLibrary('c3', NETDATA.c3_js);
- })
- .fail(function() {
- NETDATA.chartLibraries.c3.enabled = false;
- NETDATA.error(100, NETDATA.c3_js);
- })
- .always(function() {
- if (typeof callback === "function")
- return callback();
- });
- }
- } else {
- NETDATA.chartLibraries.c3.enabled = false;
- if (typeof callback === "function")
- return callback();
- }
-};
-
-NETDATA.c3ChartUpdate = function(state, data) {
- state.c3_instance.destroy();
- return NETDATA.c3ChartCreate(state, data);
-
- //state.c3_instance.load({
- // rows: data.result,
- // unload: true
- //});
-
- //return true;
-};
-
-NETDATA.c3ChartCreate = function(state, data) {
-
- state.element_chart.id = 'c3-' + state.uuid;
- // console.log('id = ' + state.element_chart.id);
-
- state.c3_instance = c3.generate({
- bindto: '#' + state.element_chart.id,
- size: {
- width: state.chartWidth(),
- height: state.chartHeight()
- },
- color: {
- pattern: state.chartColors()
- },
- data: {
- x: 'time',
- rows: data.result,
- type: (state.chart.chart_type === 'line')?'spline':'area-spline'
- },
- axis: {
- x: {
- type: 'timeseries',
- tick: {
- format: function(x) {
- return NETDATA.dateTime.xAxisTimeString(x);
- }
- }
- }
- },
- grid: {
- x: {
- show: true
- },
- y: {
- show: true
- }
- },
- point: {
- show: false
- },
- line: {
- connectNull: false
- },
- transition: {
- duration: 0
- },
- interaction: {
- enabled: true
- }
- });
-
- // console.log(state.c3_instance);
-
- return true;
-};
diff --git a/web/gui/src/dashboard.js/charting/_morris.js b/web/gui/src/dashboard.js/charting/_morris.js
deleted file mode 100644
index 30789e4e..00000000
--- a/web/gui/src/dashboard.js/charting/_morris.js
+++ /dev/null
@@ -1,81 +0,0 @@
-
-// DEPRECATED: will be removed!
-
-// morris
-
-NETDATA.morrisInitialize = function(callback) {
- if (typeof netdataNoMorris === 'undefined' || !netdataNoMorris) {
-
- // morris requires raphael
- if (!NETDATA.chartLibraries.raphael.initialized) {
- if (NETDATA.chartLibraries.raphael.enabled) {
- NETDATA.raphaelInitialize(function() {
- NETDATA.morrisInitialize(callback);
- });
- } else {
- NETDATA.chartLibraries.morris.enabled = false;
- if (typeof callback === "function")
- return callback();
- }
- } else {
- NETDATA._loadCSS(NETDATA.morris_css);
-
- $.ajax({
- url: NETDATA.morris_js,
- cache: true,
- dataType: "script",
- xhrFields: { withCredentials: true } // required for the cookie
- })
- .done(function() {
- NETDATA.registerChartLibrary('morris', NETDATA.morris_js);
- })
- .fail(function() {
- NETDATA.chartLibraries.morris.enabled = false;
- NETDATA.error(100, NETDATA.morris_js);
- })
- .always(function() {
- if (typeof callback === "function")
- return callback();
- });
- }
- } else {
- NETDATA.chartLibraries.morris.enabled = false;
- if (typeof callback === "function")
- return callback();
- }
-};
-
-NETDATA.morrisChartUpdate = function(state, data) {
- state.morris_instance.setData(data.result.data);
- return true;
-};
-
-NETDATA.morrisChartCreate = function(state, data) {
-
- state.morris_options = {
- element: state.element_chart.id,
- data: data.result.data,
- xkey: 'time',
- ykeys: data.dimension_names,
- labels: data.dimension_names,
- lineWidth: 2,
- pointSize: 3,
- smooth: true,
- hideHover: 'auto',
- parseTime: true,
- continuousLine: false,
- behaveLikeLine: false
- };
-
- if (state.chart.chart_type === 'line')
- state.morris_instance = new Morris.Line(state.morris_options);
-
- else if (state.chart.chart_type === 'area') {
- state.morris_options.behaveLikeLine = true;
- state.morris_instance = new Morris.Area(state.morris_options);
- }
- else // stacked
- state.morris_instance = new Morris.Area(state.morris_options);
-
- return true;
-};
diff --git a/web/gui/src/dashboard.js/charting/_raphael.js b/web/gui/src/dashboard.js/charting/_raphael.js
deleted file mode 100644
index 2d89a22a..00000000
--- a/web/gui/src/dashboard.js/charting/_raphael.js
+++ /dev/null
@@ -1,48 +0,0 @@
-
-// DEPRECATED: will be removed!
-
-// raphael
-
-NETDATA.raphaelInitialize = function(callback) {
- if (typeof netdataStopRaphael === 'undefined' || !netdataStopRaphael) {
- $.ajax({
- url: NETDATA.raphael_js,
- cache: true,
- dataType: "script",
- xhrFields: { withCredentials: true } // required for the cookie
- })
- .done(function() {
- NETDATA.registerChartLibrary('raphael', NETDATA.raphael_js);
- })
- .fail(function() {
- NETDATA.chartLibraries.raphael.enabled = false;
- NETDATA.error(100, NETDATA.raphael_js);
- })
- .always(function() {
- if (typeof callback === "function")
- return callback();
- });
- } else {
- NETDATA.chartLibraries.raphael.enabled = false;
- if (typeof callback === "function")
- return callback();
- }
-};
-
-NETDATA.raphaelChartUpdate = function(state, data) {
- $(state.element_chart).raphael(data.result, {
- width: state.chartWidth(),
- height: state.chartHeight()
- });
-
- return false;
-};
-
-NETDATA.raphaelChartCreate = function(state, data) {
- $(state.element_chart).raphael(data.result, {
- width: state.chartWidth(),
- height: state.chartHeight()
- });
-
- return false;
-};
diff --git a/web/gui/src/dashboard.js/charting/dygraph.js b/web/gui/src/dashboard.js/charting/dygraph.js
index a60af18b..f34d2f4a 100644
--- a/web/gui/src/dashboard.js/charting/dygraph.js
+++ b/web/gui/src/dashboard.js/charting/dygraph.js
@@ -483,8 +483,28 @@ NETDATA.dygraphChartCreate = function (state, data) {
NETDATA.globalSelectionSync.stop();
},
underlayCallback: function (canvas, area, g) {
-
// the chart is about to be drawn
+
+ // update history_tip_element
+ if (state.tmp.dygraph_history_tip_element) {
+ const xHookRightSide = g.toDomXCoord(state.netdata_first);
+ if (xHookRightSide > area.x) {
+ state.tmp.dygraph_history_tip_element_displayed = true;
+ // group the styles for possible better performance
+ state.tmp.dygraph_history_tip_element.setAttribute(
+ 'style',
+ `display: block; left: ${area.x}px; right: calc(100% - ${xHookRightSide}px);`
+ )
+ } else {
+ if (state.tmp.dygraph_history_tip_element_displayed) {
+ // additional check just for performance
+ // don't update the DOM when it's not needed
+ state.tmp.dygraph_history_tip_element.style.display = 'none';
+ state.tmp.dygraph_history_tip_element_displayed = false;
+ }
+ }
+ }
+
// this function renders global highlighted time-frame
if (NETDATA.globalChartUnderlay.isActive()) {
@@ -953,6 +973,21 @@ NETDATA.dygraphChartCreate = function (state, data) {
state.tmp.dygraph_instance = new Dygraph(state.element_chart,
data.result.data, state.tmp.dygraph_options);
+
+ state.tmp.dygraph_history_tip_element = document.createElement('div');
+ state.tmp.dygraph_history_tip_element.innerHTML = `
+ <span class="dygraph__history-tip-content">
+ Want to extend your history of real-time metrics?
+ <br />
+ <a href="https://docs.netdata.cloud/docs/configuration-guide/#increase-the-metrics-retention-period" target=_blank>
+ Configure Netdata's <b>history</b></a>
+ or use the <a href="https://docs.netdata.cloud/database/engine/" target=_blank>DB engine</a>.
+ </span>
+ `;
+ state.tmp.dygraph_history_tip_element.className = 'dygraph__history-tip';
+ state.element_chart.appendChild(state.tmp.dygraph_history_tip_element);
+
+
state.tmp.dygraph_force_zoom = false;
state.tmp.dygraph_user_action = false;
state.tmp.dygraph_last_rendered = Date.now();
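The hunk above drives the tip's visibility from a single pixel comparison: `g.toDomXCoord(state.netdata_first)` is where the oldest stored sample lands on screen, and while that point sits inside the plot area the tip is stretched from the plot's left edge (`area.x`) up to it. Here is a minimal sketch of that decision, with the Dygraph instance and the tip element replaced by mocks (only the threshold logic mirrors the code above):

```js
// Sketch of the show/hide decision from the underlayCallback above.
// `g`, `area` and `tip` are mocks standing in for the Dygraph instance,
// its plot area and the history-tip <div>.
function updateHistoryTip(g, area, state, tip) {
    // DOM x pixel where the oldest stored sample is drawn
    const xHookRightSide = g.toDomXCoord(state.netdata_first);

    if (xHookRightSide > area.x) {
        // part of the visible range predates the stored history:
        // stretch the tip from the plot's left edge to that point
        state.displayed = true;
        tip.style = 'display: block; left: ' + area.x + 'px; ' +
                    'right: calc(100% - ' + xHookRightSide + 'px);';
    } else if (state.displayed) {
        // only touch the DOM when the tip is currently visible
        tip.style = 'display: none;';
        state.displayed = false;
    }
}

// Mocked usage: the oldest sample maps to x=120px, the plot starts at x=40px.
const g = { toDomXCoord: () => 120 };
const area = { x: 40 };
const state = { netdata_first: 0, displayed: false };
const tip = { style: '' };

updateHistoryTip(g, area, state, tip);
console.log(tip.style); // display: block; left: 40px; right: calc(100% - 120px);
```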
diff --git a/web/gui/version.txt b/web/gui/version.txt
new file mode 100644
index 00000000..3c59cf50
--- /dev/null
+++ b/web/gui/version.txt
@@ -0,0 +1 @@
+588ce5a7b18999dfa66698cd3a2f005f7a3c31cf
diff --git a/web/server/Makefile.in b/web/server/Makefile.in
new file mode 100644
index 00000000..89ab7279
--- /dev/null
+++ b/web/server/Makefile.in
@@ -0,0 +1,698 @@
+# Makefile.in generated by automake 1.15.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2017 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+VPATH = @srcdir@
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = web/server
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(dist_noinst_DATA) \
+ $(am__DIST_COMMON)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \
+ ctags-recursive dvi-recursive html-recursive info-recursive \
+ install-data-recursive install-dvi-recursive \
+ install-exec-recursive install-html-recursive \
+ install-info-recursive install-pdf-recursive \
+ install-ps-recursive install-recursive installcheck-recursive \
+ installdirs-recursive pdf-recursive ps-recursive \
+ tags-recursive uninstall-recursive
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_noinst_DATA)
+RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \
+ distclean-recursive maintainer-clean-recursive
+am__recursive_targets = \
+ $(RECURSIVE_TARGETS) \
+ $(RECURSIVE_CLEAN_TARGETS) \
+ $(am__extra_recursive_targets)
+AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \
+ distdir
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+# Read a list of newline-separated strings from the standard input,
+# and print each of them once, without duplicates. Input order is
+# *not* preserved.
+am__uniquify_input = $(AWK) '\
+ BEGIN { nonempty = 0; } \
+ { items[$$0] = 1; nonempty = 1; } \
+ END { if (nonempty) { for (i in items) print i; }; } \
+'
+# Make sure the list of sources is unique. This is necessary because,
+# e.g., the same source file might be shared among _SOURCES variables
+# for different programs/libraries.
+am__define_uniq_tagged_files = \
+ list='$(am__tagged_files)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | $(am__uniquify_input)`
+ETAGS = etags
+CTAGS = ctags
+DIST_SUBDIRS = $(SUBDIRS)
+am__DIST_COMMON = $(srcdir)/Makefile.in
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+am__relativize = \
+ dir0=`pwd`; \
+ sed_first='s,^\([^/]*\)/.*$$,\1,'; \
+ sed_rest='s,^[^/]*/*,,'; \
+ sed_last='s,^.*/\([^/]*\)$$,\1,'; \
+ sed_butlast='s,/*[^/]*$$,,'; \
+ while test -n "$$dir1"; do \
+ first=`echo "$$dir1" | sed -e "$$sed_first"`; \
+ if test "$$first" != "."; then \
+ if test "$$first" = ".."; then \
+ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \
+ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \
+ else \
+ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \
+ if test "$$first2" = "$$first"; then \
+ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \
+ else \
+ dir2="../$$dir2"; \
+ fi; \
+ dir0="$$dir0"/"$$first"; \
+ fi; \
+ fi; \
+ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \
+ done; \
+ reldir="$$dir2"
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CUPSCONFIG = @CUPSCONFIG@
+CXX = @CXX@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CXX_BINARY = @CXX_BINARY@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+JSON_CFLAGS = @JSON_CFLAGS@
+JSON_LIBS = @JSON_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
+LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
+LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
+LIBCURL_LIBS = @LIBCURL_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
+LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
+LIBSSL_LIBS = @LIBSSL_LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
+OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
+OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
+OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
+OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
+OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
+OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
+OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
+OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
+OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
+OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
+OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
+OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
+PROTOBUF_LIBS = @PROTOBUF_LIBS@
+PROTOC = @PROTOC@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
+XENLIGHT_LIBS = @XENLIGHT_LIBS@
+YAJL_CFLAGS = @YAJL_CFLAGS@
+YAJL_LIBS = @YAJL_LIBS@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+SUBDIRS = \
+ static \
+ $(NULL)
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
+
+all: all-recursive
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu web/server/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu web/server/Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+
+# This directory's subdirectories are mostly independent; you can cd
+# into them and run 'make' without going through this Makefile.
+# To change the values of 'make' variables: instead of editing Makefiles,
+# (1) if the variable is set in 'config.status', edit 'config.status'
+# (which will cause the Makefiles to be regenerated when you run 'make');
+# (2) otherwise, pass the desired values on the 'make' command line.
+$(am__recursive_targets):
+ @fail=; \
+ if $(am__make_keepgoing); then \
+ failcom='fail=yes'; \
+ else \
+ failcom='exit 1'; \
+ fi; \
+ dot_seen=no; \
+ target=`echo $@ | sed s/-recursive//`; \
+ case "$@" in \
+ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \
+ *) list='$(SUBDIRS)' ;; \
+ esac; \
+ for subdir in $$list; do \
+ echo "Making $$target in $$subdir"; \
+ if test "$$subdir" = "."; then \
+ dot_seen=yes; \
+ local_target="$$target-am"; \
+ else \
+ local_target="$$target"; \
+ fi; \
+ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \
+ || eval $$failcom; \
+ done; \
+ if test "$$dot_seen" = "no"; then \
+ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \
+ fi; test -z "$$fail"
+
+ID: $(am__tagged_files)
+ $(am__define_uniq_tagged_files); mkid -fID $$unique
+tags: tags-recursive
+TAGS: tags
+
+tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+ set x; \
+ here=`pwd`; \
+ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \
+ include_option=--etags-include; \
+ empty_fix=.; \
+ else \
+ include_option=--include; \
+ empty_fix=; \
+ fi; \
+ list='$(SUBDIRS)'; for subdir in $$list; do \
+ if test "$$subdir" = .; then :; else \
+ test ! -f $$subdir/TAGS || \
+ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \
+ fi; \
+ done; \
+ $(am__define_uniq_tagged_files); \
+ shift; \
+ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
+ test -n "$$unique" || unique=$$empty_fix; \
+ if test $$# -gt 0; then \
+ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+ "$$@" $$unique; \
+ else \
+ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+ $$unique; \
+ fi; \
+ fi
+ctags: ctags-recursive
+
+CTAGS: ctags
+ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+ $(am__define_uniq_tagged_files); \
+ test -z "$(CTAGS_ARGS)$$unique" \
+ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
+ $$unique
+
+GTAGS:
+ here=`$(am__cd) $(top_builddir) && pwd` \
+ && $(am__cd) $(top_srcdir) \
+ && gtags -i $(GTAGS_ARGS) "$$here"
+cscopelist: cscopelist-recursive
+
+cscopelist-am: $(am__tagged_files)
+ list='$(am__tagged_files)'; \
+ case "$(srcdir)" in \
+ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \
+ *) sdir=$(subdir)/$(srcdir) ;; \
+ esac; \
+ for i in $$list; do \
+ if test -f "$$i"; then \
+ echo "$(subdir)/$$i"; \
+ else \
+ echo "$$sdir/$$i"; \
+ fi; \
+ done >> $(top_builddir)/cscope.files
+
+distclean-tags:
+ -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+ @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \
+ if test "$$subdir" = .; then :; else \
+ $(am__make_dryrun) \
+ || test -d "$(distdir)/$$subdir" \
+ || $(MKDIR_P) "$(distdir)/$$subdir" \
+ || exit 1; \
+ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \
+ $(am__relativize); \
+ new_distdir=$$reldir; \
+ dir1=$$subdir; dir2="$(top_distdir)"; \
+ $(am__relativize); \
+ new_top_distdir=$$reldir; \
+ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \
+ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \
+ ($(am__cd) $$subdir && \
+ $(MAKE) $(AM_MAKEFLAGS) \
+ top_distdir="$$new_top_distdir" \
+ distdir="$$new_distdir" \
+ am__remove_distdir=: \
+ am__skip_length_check=: \
+ am__skip_mode_fix=: \
+ distdir) \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-recursive
+all-am: Makefile $(DATA)
+installdirs: installdirs-recursive
+installdirs-am:
+install: install-recursive
+install-exec: install-exec-recursive
+install-data: install-data-recursive
+uninstall: uninstall-recursive
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-recursive
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-recursive
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-recursive
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic distclean-tags
+
+dvi: dvi-recursive
+
+dvi-am:
+
+html: html-recursive
+
+html-am:
+
+info: info-recursive
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-recursive
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-recursive
+
+install-html-am:
+
+install-info: install-info-recursive
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-recursive
+
+install-pdf-am:
+
+install-ps: install-ps-recursive
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-recursive
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-recursive
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-recursive
+
+pdf-am:
+
+ps: ps-recursive
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: $(am__recursive_targets) install-am install-strip
+
+.PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am check \
+ check-am clean clean-generic cscopelist-am ctags ctags-am \
+ distclean distclean-generic distclean-tags distdir dvi dvi-am \
+ html html-am info info-am install install-am install-data \
+ install-data-am install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs installdirs-am maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags tags-am uninstall uninstall-am
+
+.PRECIOUS: Makefile
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/web/server/README.md b/web/server/README.md
index 173e8959..30cb0069 100644
--- a/web/server/README.md
+++ b/web/server/README.md
@@ -22,9 +22,9 @@ With the web server enabled, you can control the number of threads and sockets w
The default number of processor threads is `min(cpu cores, 6)`.
-The `web server max sockets` setting is automatically adjusted to 50% of the max number of open files netdata is allowed to use (via `/etc/security/limits.conf` or systemd), to allow enough file descriptors to be available for data collection.
+The `web server max sockets` setting is automatically adjusted to 50% of the max number of open files Netdata is allowed to use (via `/etc/security/limits.conf` or systemd), to allow enough file descriptors to be available for data collection.
-### Binding netdata to multiple ports
+### Binding Netdata to multiple ports
Netdata can bind to multiple IPs and ports, offering access to different services on each. Up to 100 sockets can be used (you can increase it at compile time with `CFLAGS="-DMAX_LISTEN_FDS=200" ./netdata-installer.sh ...`).
@@ -36,15 +36,15 @@ The ports to bind are controlled via `[web].bind to`, like this:
bind to = 127.0.0.1=dashboard^SSL=optional 10.1.1.1:19998=management|netdata.conf hostname:19997=badges [::]:19996=streaming^SSL=force localhost:19995=registry *:http=dashboard unix:/tmp/netdata.sock
```
-Using the above, netdata will bind to:
+Using the above, Netdata will bind to:
-- IPv4 127.0.0.1 at port 19999 (port was used from `default port`). Only the UI (dashboard) and the read API will be accessible on this port. Both HTTP and HTTPS requests will be accepted.
-- IPv4 10.1.1.1 at port 19998. The management API and netdata.conf will be accessible on this port.
-- All the IPs `hostname` resolves to (both IPv4 and IPv6 depending on the resolved IPs) at port 19997. Only badges will be accessible on this port.
-- All IPv6 IPs at port 19996. Only metric streaming requests from other netdata agents will be accepted on this port. Only encrypted streams will be allowed (i.e. slaves also need to be [configured for TLS](../../streaming).
-- All the IPs `localhost` resolves to (both IPv4 and IPv6 depending the resolved IPs) at port 19996. This port will only accept registry API requests.
-- All IPv4 and IPv6 IPs at port `http` as set in `/etc/services`. Only the UI (dashboard) and the read API will be accessible on this port.
-- Unix domain socket `/tmp/netdata.sock`. All requests are serviceable on this socket.
+- IPv4 127.0.0.1 at port 19999 (the port is taken from `default port`). Only the UI (dashboard) and the read API will be accessible on this port. Both HTTP and HTTPS requests will be accepted.
+- IPv4 10.1.1.1 at port 19998. The management API and `netdata.conf` will be accessible on this port.
+- All the IPs `hostname` resolves to (both IPv4 and IPv6 depending on the resolved IPs) at port 19997. Only badges will be accessible on this port.
+- All IPv6 IPs at port 19996. Only metric streaming requests from other Netdata agents will be accepted on this port. Only encrypted streams will be allowed (i.e. slaves also need to be [configured for TLS](../../streaming)).
+- All the IPs `localhost` resolves to (both IPv4 and IPv6 depending on the resolved IPs) at port 19995. This port will only accept registry API requests.
+- All IPv4 and IPv6 IPs at port `http` as set in `/etc/services`. Only the UI (dashboard) and the read API will be accessible on this port.
+- Unix domain socket `/tmp/netdata.sock`. All requests are serviceable on this socket.
The option `[web].default port` is used when entries in `[web].bind to` do not specify a port.
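Each space-separated entry in `[web].bind to` therefore follows the shape `address[:port][=services][^SSL=mode]`. The sketch below decomposes one such entry to make that shape concrete; it is a hypothetical JavaScript illustration only, not Netdata's actual parser (which is C code inside the web server):

```js
// Illustrative only: decompose one `bind to` entry of the form
// address[:port][=services][^SSL=mode]. Not Netdata's real parser.
function parseBindEntry(entry, defaultPort) {
    const [spec, ssl] = entry.split('^SSL=');   // optional ^SSL=force|optional
    let [addr, services] = spec.split('=');     // optional =service|service|...
    let port = defaultPort;

    // split off a trailing :port (number or /etc/services name);
    // unix: sockets carry no port and are left untouched
    const m = addr.match(/^(.*):(\d+|[a-z]+)$/);
    if (m && !addr.startsWith('unix:')) {
        addr = m[1];
        port = m[2];
    }

    return {
        address: addr,
        port: port,
        services: services ? services.split('|') : null, // null = all request types
        ssl: ssl || 'default'
    };
}

console.log(parseBindEntry('10.1.1.1:19998=management|netdata.conf', 19999));
// { address: '10.1.1.1', port: '19998',
//   services: [ 'management', 'netdata.conf' ], ssl: 'default' }
```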
@@ -53,21 +53,22 @@ As shown in the example above, these permissions are optional, with the default
The request types are strings identical to the `allow X from` directives of the access lists, i.e. `dashboard`, `streaming`, `registry`, `netdata.conf`, `badges` and `management`.
The access lists themselves and the general setting `allow connections from` in the next section are applied regardless of the ports that are configured to provide these services.
The API requests are serviced as follows:
-- `dashboard` gives access to the UI, the read API and badges API calls.
-- `badges` gives access only to the badges API calls.
-- `management` gives access only to the management API calls.
+
+- `dashboard` gives access to the UI, the read API and badges API calls.
+- `badges` gives access only to the badges API calls.
+- `management` gives access only to the management API calls.
### Enabling TLS support
Since v1.16.0, Netdata supports encrypted HTTP connections to the web server, plus encryption of streaming data between a slave and its master, via the TLS 1.2 protocol.
-Inbound unix socket connections are unaffected, regardless of the TLS settings.
+Inbound unix socket connections are unaffected, regardless of the TLS settings.\
??? info "Differences in TLS and SSL terminology"
While Netdata uses Transport Layer Security (TLS) 1.2 to encrypt communications rather than the obsolete SSL protocol, it's still common practice to refer to encrypted web connections as `SSL`. Many vendors, like Nginx and even Netdata itself, use `SSL` in configuration files, whereas documentation will always refer to encrypted communications as `TLS` or `TLS/SSL`.
To enable TLS, provide the path to your certificate and private key in the `[web]` section of `netdata.conf`:
-``` conf
+```conf
[web]
ssl key = /etc/netdata/ssl/key.pem
ssl certificate = /etc/netdata/ssl/cert.pem
@@ -77,31 +78,31 @@ Both files must be readable by the `netdata` user. If either of these files do n
For test purposes, you can generate self-signed certificates with the following command:
-``` bash
+```bash
$ openssl req -newkey rsa:2048 -nodes -sha512 -x509 -days 365 -keyout key.pem -out cert.pem
```
!!! note
If you use 4096 bits for your key and the certificate, Netdata will need more CPU to process the communication. `rsa4096` can be up to 4 times slower than `rsa2048`, so we recommend using 2048 bits. You can verify the difference by running:
-
- ```
- $ openssl speed rsa2048 rsa4096
- ```
+
+```sh
+$ openssl speed rsa2048 rsa4096
+```
#### TLS/SSL enforcement
When the certificates are defined and unless any other options are provided, a Netdata server will:
-- Redirect all incoming HTTP web server requests to HTTPS. Applies to the dashboard, the API, netdata.conf and badges.
-- Allow incoming slave connections to use both unencrypted and encrypted communications for streaming.
-
+- Redirect all incoming HTTP web server requests to HTTPS. Applies to the dashboard, the API, `netdata.conf` and badges.
+- Allow incoming slave connections to use both unencrypted and encrypted communications for streaming.
+
To change this behavior, you need to modify the `bind to` setting in the `[web]` section of `netdata.conf`. At the end of each port definition, you can append `^SSL=force` or `^SSL=optional`. What happens with these settings differs, depending on whether the port is used for HTTP/S requests, or for streaming.
-SSL setting | HTTP requests | HTTPS requests | Unencrypted Streams | Encrypted Streams
-:------:|:-----:|:-----:|:-----:|:--------
-none | Redirected to HTTPS | Accepted | Accepted | Accepted
-`force` | Redirected to HTTPS | Accepted | Denied | Accepted
-`optional` | Accepted | Accepted | Accepted | Accepted
+|SSL setting|HTTP requests|HTTPS requests|Unencrypted Streams|Encrypted Streams|
+|:---------:|:-----------:|:------------:|:-----------------:|:----------------|
+|none|Redirected to HTTPS|Accepted|Accepted|Accepted|
+|`force`|Redirected to HTTPS|Accepted|Denied|Accepted|
+|`optional`|Accepted|Accepted|Accepted|Accepted|
Example:
@@ -121,10 +122,10 @@ When we define the use of SSL in a Netdata agent for different ports, Netdata w
Netdata will:
-- Force all HTTP requests to the default port to be redirected to HTTPS (same port).
-- Refuse unencrypted streaming connections from slaves on the default port.
-- Allow both HTTP and HTTPS requests to port 20000 for netdata.conf
-- Force HTTP requests to port 20001 to be redirected to HTTPS (same port). Only allow requests for the dashboard, the read API and the registry on port 20001.
+- Force all HTTP requests to the default port to be redirected to HTTPS (same port).
+- Refuse unencrypted streaming connections from slaves on the default port.
+- Allow both HTTP and HTTPS requests to port 20000 for `netdata.conf`.
+- Force HTTP requests to port 20001 to be redirected to HTTPS (same port). Only allow requests for the dashboard, the read API and the registry on port 20001.
#### TLS/SSL errors
@@ -150,51 +151,50 @@ Netdata supports access lists in `netdata.conf`:
`*` does string matches on the IPs of the clients.
-- `allow connections from` matches anyone that connects on the netdata port(s).
- So, if someone is not allowed, it will be connected and disconnected immediately, without reading even
- a single byte from its connection. This is a global settings with higher priority to any of the ones below.
+- `allow connections from` matches anyone that connects on the Netdata port(s).
+  So, if someone is not allowed, the connection is accepted and closed immediately, without even
+  a single byte being read from it. This is a global setting with higher priority than any of the ones below.
-- `allow dashboard from` receives the request and examines if it is a static dashboard file or an API call the
- dashboards do.
+- `allow dashboard from` receives the request and checks whether it is for a static dashboard file or for an API
+  call that the dashboards make.
-- `allow badges from` checks if the API request is for a badge. Badges are not matched by `allow dashboard from`.
+- `allow badges from` checks if the API request is for a badge. Badges are not matched by `allow dashboard from`.
-- `allow streaming from` checks if the slave willing to stream metrics to this netdata is allowed.
- This can be controlled per API KEY and MACHINE GUID in [stream.conf](../../streaming/stream.conf).
- The setting in `netdata.conf` is checked before the ones in [stream.conf](../../streaming/stream.conf).
+- `allow streaming from` checks whether a slave that wants to stream metrics to this Netdata is allowed.
+ This can be controlled per API KEY and MACHINE GUID in [stream.conf](../../streaming/stream.conf).
+ The setting in `netdata.conf` is checked before the ones in [stream.conf](../../streaming/stream.conf).
-- `allow netdata.conf from` checks the IP to allow `http://netdata.host:19999/netdata.conf`.
- The IPs listed are all the private IPv4 addresses, including link local IPv6 addresses. Keep in mind that connections to netdata API ports are filtered by `allow connections from`. So, IPs allowed by `allow netdata.conf from` should also be allowed by `allow connections from`.
+- `allow netdata.conf from` checks the IP to allow `http://netdata.host:19999/netdata.conf`.
+ The IPs listed are all the private IPv4 addresses, including link local IPv6 addresses. Keep in mind that connections to Netdata API ports are filtered by `allow connections from`. So, IPs allowed by `allow netdata.conf from` should also be allowed by `allow connections from`.
-- `allow management from` checks the IPs to allow API management calls. Management via the API is currently supported for [health](../api/health/#health-management-api)
+- `allow management from` checks the IPs to allow API management calls. Management via the API is currently supported for [health](../api/health/#health-management-api).
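Taken together, these lists form a two-stage check: the global `allow connections from` list is evaluated first, and only if it passes is the per-service list for the requested API consulted. A small sketch of that ordering (the simplistic pattern matcher below is a stand-in for Netdata's real netmask/pattern matching):

```js
// Sketch of the two-stage access check described above. The matcher
// supports only `*` and trailing-wildcard patterns, unlike the real one.
function matches(patterns, ip) {
    return patterns.some((p) =>
        p === '*' || ip === p || (p.endsWith('*') && ip.startsWith(p.slice(0, -1)))
    );
}

function isAllowed(conf, ip, service) {
    if (!matches(conf['allow connections from'], ip)) {
        return false; // rejected globally: disconnected before anything is read
    }
    return matches(conf['allow ' + service + ' from'], ip);
}

const conf = {
    'allow connections from': ['localhost', '10.*'],
    'allow dashboard from': ['*'],
    'allow management from': ['localhost']
};

console.log(isAllowed(conf, '10.1.1.9', 'dashboard'));  // true
console.log(isAllowed(conf, '10.1.1.9', 'management')); // false
console.log(isAllowed(conf, '8.8.8.8', 'dashboard'));   // false (global list)
```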
### Other netdata.conf [web] section options
-setting | default | info
-:------:|:-------:|:----
-ses max window | `15` | See [single exponential smoothing](../api/queries/des/)
-des max window | `15` | See [double exponential smoothing](../api/queries/des/)
-listen backlog | `4096` | The port backlog. Check `man 2 listen`.
-web files owner | `netdata` | The user that owns the web static files. Netdata will refuse to serve a file that is not owned by this user, even if it has read access to that file. If the user given is not found, netdata will only serve files owned by user given in `run as user`.
-web files group | `netdata` | If this is set, Netdata will check if the file is owned by this group and refuse to serve the file if it's not.
-disconnect idle clients after seconds | `60` | The time in seconds to disconnect web clients after being totally idle.
-timeout for first request | `60` | How long to wait for a client to send a request before closing the socket. Prevents slow request attacks.
-accept a streaming request every seconds | `0` | Can be used to set a limit on how often a master Netdata server will accept streaming requests from the slaves in a [streaming and replication setup](../../streaming)
-respect do not track policy | `no` | If set to `yes`, will respect the client's browser preferences on storing cookies.
-x-frame-options response header | | [Avoid clickjacking attacks, by ensuring that the content is not embedded into other sites](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Frame-Options).
-enable gzip compression | `yes` | When set to `yes`, netdata web responses will be GZIP compressed, if the web client accepts such responses.
-gzip compression strategy | `default` | Valid strategies are `default`, `filtered`, `huffman only`, `rle` and `fixed`
-gzip compression level | `3` | Valid levels are 1 (fastest) to 9 (best ratio)
+|setting|default|info|
+|:-----:|:-----:|:---|
+|ses max window|`15`|See [single exponential smoothing](../api/queries/ses/)|
+|des max window|`15`|See [double exponential smoothing](../api/queries/des/)|
+|listen backlog|`4096`|The port backlog. Check `man 2 listen`.|
+|web files owner|`netdata`|The user that owns the web static files. Netdata will refuse to serve a file that is not owned by this user, even if it has read access to that file. If the user given is not found, Netdata will only serve files owned by user given in `run as user`.|
+|web files group|`netdata`|If this is set, Netdata will check if the file is owned by this group and refuse to serve the file if it's not.|
+|disconnect idle clients after seconds|`60`|The time in seconds to disconnect web clients after being totally idle.|
+|timeout for first request|`60`|How long to wait for a client to send a request before closing the socket. Prevents slow request attacks.|
+|accept a streaming request every seconds|`0`|Can be used to set a limit on how often a master Netdata server will accept streaming requests from the slaves in a [streaming and replication setup](../../streaming)|
+|respect do not track policy|`no`|If set to `yes`, will respect the client's browser preferences on storing cookies.|
+|x-frame-options response header||[Avoid clickjacking attacks, by ensuring that the content is not embedded into other sites](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Frame-Options).|
+|enable gzip compression|`yes`|When set to `yes`, Netdata web responses will be GZIP compressed, if the web client accepts such responses.|
+|gzip compression strategy|`default`|Valid strategies are `default`, `filtered`, `huffman only`, `rle` and `fixed`|
+|gzip compression level|`3`|Valid levels are 1 (fastest) to 9 (best ratio)|
## DDoS protection
-If you publish your netdata to the internet, you may want to apply some protection against DDoS:
-
-1. Use the `static-threaded` web server (it is the default)
-2. Use reasonable `[web].web server max sockets` (the default is)
-3. Don't use all your cpu cores for netdata (lower `[web].web server threads`)
-4. Run netdata with a low process scheduling priority (the default is the lowest)
-5. If possible, proxy netdata via a full featured web server (nginx, apache, etc)
+If you publish your Netdata to the internet, you may want to apply some protection against DDoS:
+1. Use the `static-threaded` web server (it is the default)
+2. Use a reasonable `[web].web server max sockets` (the default, auto-adjusted to 50% of the allowed open files, is fine)
+3. Don't use all your CPU cores for Netdata (lower `[web].web server threads`)
+4. Run the `netdata` process with a low process scheduling priority (the default is the lowest)
+5. If possible, proxy Netdata via a full-featured web server (nginx, apache, etc.)
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fserver%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fserver%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/web/server/static/Makefile.in b/web/server/static/Makefile.in
new file mode 100644
index 00000000..8078caaf
--- /dev/null
+++ b/web/server/static/Makefile.in
@@ -0,0 +1,697 @@
+# Makefile.in generated by automake 1.15.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2017 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+VPATH = @srcdir@
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = web/server/static
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(dist_noinst_DATA) \
+ $(am__DIST_COMMON)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \
+ ctags-recursive dvi-recursive html-recursive info-recursive \
+ install-data-recursive install-dvi-recursive \
+ install-exec-recursive install-html-recursive \
+ install-info-recursive install-pdf-recursive \
+ install-ps-recursive install-recursive installcheck-recursive \
+ installdirs-recursive pdf-recursive ps-recursive \
+ tags-recursive uninstall-recursive
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_noinst_DATA)
+RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \
+ distclean-recursive maintainer-clean-recursive
+am__recursive_targets = \
+ $(RECURSIVE_TARGETS) \
+ $(RECURSIVE_CLEAN_TARGETS) \
+ $(am__extra_recursive_targets)
+AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \
+ distdir
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+# Read a list of newline-separated strings from the standard input,
+# and print each of them once, without duplicates. Input order is
+# *not* preserved.
+am__uniquify_input = $(AWK) '\
+ BEGIN { nonempty = 0; } \
+ { items[$$0] = 1; nonempty = 1; } \
+ END { if (nonempty) { for (i in items) print i; }; } \
+'
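+# Editorial illustration, not automake output: a simplified equivalent of
+# the filter above can be tried directly in a shell. Given a duplicated list:
+#   printf 'a.c\nb.c\na.c\n' | awk '{ items[$0] = 1 } END { for (i in items) print i }'
+# it prints "a.c" and "b.c" once each, in no guaranteed order.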
+# Make sure the list of sources is unique. This is necessary because,
+# e.g., the same source file might be shared among _SOURCES variables
+# for different programs/libraries.
+am__define_uniq_tagged_files = \
+ list='$(am__tagged_files)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | $(am__uniquify_input)`
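+# Editorial sketch, not automake output: the dedup matters when one file
+# appears in several _SOURCES lists of a hypothetical Makefile.am, e.g.
+#   foo_SOURCES = common.c foo.c
+#   bar_SOURCES = common.c bar.c
+# Without uniquifying, common.c would be handed to etags/ctags twice.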
+ETAGS = etags
+CTAGS = ctags
+DIST_SUBDIRS = $(SUBDIRS)
+am__DIST_COMMON = $(srcdir)/Makefile.in
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+am__relativize = \
+ dir0=`pwd`; \
+ sed_first='s,^\([^/]*\)/.*$$,\1,'; \
+ sed_rest='s,^[^/]*/*,,'; \
+ sed_last='s,^.*/\([^/]*\)$$,\1,'; \
+ sed_butlast='s,/*[^/]*$$,,'; \
+ while test -n "$$dir1"; do \
+ first=`echo "$$dir1" | sed -e "$$sed_first"`; \
+ if test "$$first" != "."; then \
+ if test "$$first" = ".."; then \
+ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \
+ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \
+ else \
+ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \
+ if test "$$first2" = "$$first"; then \
+ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \
+ else \
+ dir2="../$$dir2"; \
+ fi; \
+ dir0="$$dir0"/"$$first"; \
+ fi; \
+ fi; \
+ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \
+ done; \
+ reldir="$$dir2"
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CUPSCONFIG = @CUPSCONFIG@
+CXX = @CXX@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CXX_BINARY = @CXX_BINARY@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+JSON_CFLAGS = @JSON_CFLAGS@
+JSON_LIBS = @JSON_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
+LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
+LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
+LIBCURL_LIBS = @LIBCURL_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
+LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
+LIBSSL_LIBS = @LIBSSL_LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
+OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
+OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
+OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
+OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
+OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
+OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
+OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
+OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
+OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
+OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
+OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
+OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
+OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
+PROTOBUF_LIBS = @PROTOBUF_LIBS@
+PROTOC = @PROTOC@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
+XENLIGHT_LIBS = @XENLIGHT_LIBS@
+YAJL_CFLAGS = @YAJL_CFLAGS@
+YAJL_LIBS = @YAJL_LIBS@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+runstatedir = @runstatedir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+SUBDIRS = \
+ $(NULL)
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
+
+all: all-recursive
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu web/server/static/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu web/server/static/Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+
+# This directory's subdirectories are mostly independent; you can cd
+# into them and run 'make' without going through this Makefile.
+# To change the values of 'make' variables: instead of editing Makefiles,
+# (1) if the variable is set in 'config.status', edit 'config.status'
+# (which will cause the Makefiles to be regenerated when you run 'make');
+# (2) otherwise, pass the desired values on the 'make' command line.
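+# Editorial examples of option (2), not part of the generated file:
+#   make CFLAGS='-g -O0'   # override a configure-time flag for this run only
+#   make V=1               # automake convention: echo full recipes (verbose)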
+$(am__recursive_targets):
+ @fail=; \
+ if $(am__make_keepgoing); then \
+ failcom='fail=yes'; \
+ else \
+ failcom='exit 1'; \
+ fi; \
+ dot_seen=no; \
+ target=`echo $@ | sed s/-recursive//`; \
+ case "$@" in \
+ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \
+ *) list='$(SUBDIRS)' ;; \
+ esac; \
+ for subdir in $$list; do \
+ echo "Making $$target in $$subdir"; \
+ if test "$$subdir" = "."; then \
+ dot_seen=yes; \
+ local_target="$$target-am"; \
+ else \
+ local_target="$$target"; \
+ fi; \
+ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \
+ || eval $$failcom; \
+ done; \
+ if test "$$dot_seen" = "no"; then \
+ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \
+ fi; test -z "$$fail"
+
+ID: $(am__tagged_files)
+ $(am__define_uniq_tagged_files); mkid -fID $$unique
+tags: tags-recursive
+TAGS: tags
+
+tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+ set x; \
+ here=`pwd`; \
+ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \
+ include_option=--etags-include; \
+ empty_fix=.; \
+ else \
+ include_option=--include; \
+ empty_fix=; \
+ fi; \
+ list='$(SUBDIRS)'; for subdir in $$list; do \
+ if test "$$subdir" = .; then :; else \
+ test ! -f $$subdir/TAGS || \
+ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \
+ fi; \
+ done; \
+ $(am__define_uniq_tagged_files); \
+ shift; \
+ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
+ test -n "$$unique" || unique=$$empty_fix; \
+ if test $$# -gt 0; then \
+ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+ "$$@" $$unique; \
+ else \
+ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+ $$unique; \
+ fi; \
+ fi
+ctags: ctags-recursive
+
+CTAGS: ctags
+ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+ $(am__define_uniq_tagged_files); \
+ test -z "$(CTAGS_ARGS)$$unique" \
+ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
+ $$unique
+
+GTAGS:
+ here=`$(am__cd) $(top_builddir) && pwd` \
+ && $(am__cd) $(top_srcdir) \
+ && gtags -i $(GTAGS_ARGS) "$$here"
+cscopelist: cscopelist-recursive
+
+cscopelist-am: $(am__tagged_files)
+ list='$(am__tagged_files)'; \
+ case "$(srcdir)" in \
+ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \
+ *) sdir=$(subdir)/$(srcdir) ;; \
+ esac; \
+ for i in $$list; do \
+ if test -f "$$i"; then \
+ echo "$(subdir)/$$i"; \
+ else \
+ echo "$$sdir/$$i"; \
+ fi; \
+ done >> $(top_builddir)/cscope.files
+
+distclean-tags:
+ -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+ @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \
+ if test "$$subdir" = .; then :; else \
+ $(am__make_dryrun) \
+ || test -d "$(distdir)/$$subdir" \
+ || $(MKDIR_P) "$(distdir)/$$subdir" \
+ || exit 1; \
+ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \
+ $(am__relativize); \
+ new_distdir=$$reldir; \
+ dir1=$$subdir; dir2="$(top_distdir)"; \
+ $(am__relativize); \
+ new_top_distdir=$$reldir; \
+ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \
+ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \
+ ($(am__cd) $$subdir && \
+ $(MAKE) $(AM_MAKEFLAGS) \
+ top_distdir="$$new_top_distdir" \
+ distdir="$$new_distdir" \
+ am__remove_distdir=: \
+ am__skip_length_check=: \
+ am__skip_mode_fix=: \
+ distdir) \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-recursive
+all-am: Makefile $(DATA)
+installdirs: installdirs-recursive
+installdirs-am:
+install: install-recursive
+install-exec: install-exec-recursive
+install-data: install-data-recursive
+uninstall: uninstall-recursive
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-recursive
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+	@echo "This command is intended for maintainers to use;"
+	@echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-recursive
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-recursive
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic distclean-tags
+
+dvi: dvi-recursive
+
+dvi-am:
+
+html: html-recursive
+
+html-am:
+
+info: info-recursive
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-recursive
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-recursive
+
+install-html-am:
+
+install-info: install-info-recursive
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-recursive
+
+install-pdf-am:
+
+install-ps: install-ps-recursive
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-recursive
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-recursive
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-recursive
+
+pdf-am:
+
+ps: ps-recursive
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: $(am__recursive_targets) install-am install-strip
+
+.PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am check \
+ check-am clean clean-generic cscopelist-am ctags ctags-am \
+ distclean distclean-generic distclean-tags distdir dvi dvi-am \
+ html html-am info info-am install install-am install-data \
+ install-data-am install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs installdirs-am maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags tags-am uninstall uninstall-am
+
+.PRECIOUS: Makefile
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/web/server/static/README.md b/web/server/static/README.md
index 653b364b..eeba0e94 100644
--- a/web/server/static/README.md
+++ b/web/server/static/README.md
@@ -7,4 +7,4 @@ The kernel distributes the incoming requests to them.
Each thread uses non-blocking I/O so it can serve any number of web requests in parallel.
This web server respects the `keep-alive` HTTP header to serve multiple HTTP requests via the same connection.
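As an illustration (an editorial example, not text from this patch), you can observe the connection reuse with `curl` against a local agent on the default port; the two API paths here are just examples of agent endpoints. `curl` keeps the connection open when given several URLs in one invocation:

```sh
curl -v http://localhost:19999/api/v1/charts http://localhost:19999/api/v1/alarms
```

For the second request, `curl -v` reports that it is re-using the existing connection, so both responses travel over a single TCP connection.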
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fserver%2Fstatic%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fserver%2Fstatic%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/web/server/web_client.c b/web/server/web_client.c
index 2da6c1de..908e3a6a 100644
--- a/web/server/web_client.c
+++ b/web/server/web_client.c
@@ -929,7 +929,6 @@ void web_client_split_path_query(struct web_client *w, char *s) {
w->separator = 0x00;
w->url_path_length = strlen(s);
- w->url_search_path = NULL;
}
/**
@@ -1035,20 +1034,22 @@ static inline HTTP_VALIDATION http_request_validate(struct web_client *w) {
// a valid complete HTTP request found
*ue = '\0';
+ // Reset url_search_path so the checks below never act on a stale pointer (crash fix)
+ w->url_search_path = NULL;
if(w->mode != WEB_CLIENT_MODE_NORMAL) {
if(!url_decode_r(w->decoded_url, encoded_url, NETDATA_WEB_REQUEST_URL_SIZE + 1))
return HTTP_VALIDATION_MALFORMED_URL;
} else {
web_client_split_path_query(w, encoded_url);
- if (w->separator) {
+ if (w->url_search_path && w->separator) {
*w->url_search_path = 0x00;
}
if(!url_decode_r(w->decoded_url, encoded_url, NETDATA_WEB_REQUEST_URL_SIZE + 1))
return HTTP_VALIDATION_MALFORMED_URL;
- if (w->separator) {
+ if (w->url_search_path && w->separator) {
*w->url_search_path = w->separator;
char *from = (encoded_url + w->url_path_length);
@@ -1064,9 +1065,6 @@ static inline HTTP_VALIDATION http_request_validate(struct web_client *w) {
// copy the URL - we are going to overwrite parts of it
// TODO -- ideally we should avoid copying buffers around
strncpyz(w->last_url, w->decoded_url, NETDATA_WEB_REQUEST_URL_SIZE);
- if (w->separator) {
- *w->url_search_path = 0x00;
- }
#ifdef ENABLE_HTTPS
if ( (!web_client_check_unix(w)) && (netdata_srv_ctx) ) {
if ((w->ssl.conn) && ((w->ssl.flags & NETDATA_SSL_NO_HANDSHAKE) && (web_client_is_using_ssl_force(w) || web_client_is_using_ssl_default(w)) && (w->mode != WEB_CLIENT_MODE_STREAM)) ) {