Diffstat:
-rw-r--r--  .gitignore | 3
-rwxr-xr-x  .travis/deploy-if-have-key | 7
-rw-r--r--  ChangeLog | 52
-rw-r--r--  LICENSE | 674
-rw-r--r--  LICENSE-REDISTRIBUTED.md | 150
-rw-r--r--  LICENSE.md | 153
-rw-r--r--  Makefile.am | 23
-rw-r--r--  Makefile.in | 352
-rw-r--r--  README.md | 19
-rw-r--r--  aclocal.m4 | 706
-rw-r--r--  charts.d/Makefile.in | 114
-rwxr-xr-x  compile | 347
-rw-r--r--  conf.d/Makefile.am | 13
-rw-r--r--  conf.d/Makefile.in | 199
-rw-r--r--  conf.d/apps_groups.conf | 38
-rw-r--r--  conf.d/fping.conf | 2
-rw-r--r--  conf.d/health.d/fping.conf | 2
-rw-r--r--  conf.d/health.d/lighttpd.conf | 14
-rw-r--r--  conf.d/health.d/mongodb.conf | 13
-rw-r--r--  conf.d/health.d/net.conf | 6
-rw-r--r--  conf.d/health.d/ram.conf | 9
-rw-r--r--  conf.d/health.d/tcp_resets.conf | 8
-rw-r--r--  conf.d/health.d/web_log.conf | 3
-rw-r--r--  conf.d/health.d/zfs.conf | 10
-rw-r--r--  conf.d/health_alarm_notify.conf | 72
-rw-r--r--  conf.d/node.d/fronius.conf.md | 67
-rw-r--r--  conf.d/python.d.conf | 9
-rw-r--r--  conf.d/python.d/dns_query_time.conf | 72
-rw-r--r--  conf.d/python.d/elasticsearch.conf | 17
-rw-r--r--  conf.d/python.d/fail2ban.conf | 11
-rw-r--r--  conf.d/python.d/go_expvar.conf | 106
-rw-r--r--  conf.d/python.d/isc_dhcpd.conf | 11
-rw-r--r--  conf.d/python.d/postgres.conf | 1
-rw-r--r--  conf.d/python.d/rabbitmq.conf | 75
-rw-r--r--  conf.d/python.d/samba.conf | 58
-rw-r--r--  conf.d/python.d/smartd_log.conf | 8
-rw-r--r--  conf.d/python.d/web_log.conf | 48
-rw-r--r--  conf.d/statsd.d/example.conf | 65
-rw-r--r--  conf.d/stream.conf | 95
-rwxr-xr-x  config.guess | 184
-rw-r--r--  config.h.in | 3
-rwxr-xr-x  config.sub | 106
-rw-r--r--  configs.signatures | 49
-rwxr-xr-x  configure | 676
-rw-r--r--  configure.ac | 29
-rw-r--r--  contrib/Makefile.am | 1
-rw-r--r--  contrib/Makefile.in | 103
-rw-r--r--  contrib/debian/changelog | 3
-rw-r--r--  contrib/debian/compat | 1
-rw-r--r--  contrib/debian/control | 25
-rw-r--r--  contrib/debian/control.wheezy | 25
-rw-r--r--  contrib/debian/copyright | 10
-rw-r--r--  contrib/debian/netdata.conf | 16
-rw-r--r--  contrib/debian/netdata.default | 5
-rw-r--r--  contrib/debian/netdata.docs | 1
-rwxr-xr-x  contrib/debian/netdata.init | 56
-rw-r--r--  contrib/debian/netdata.install | 1
-rw-r--r--  contrib/debian/netdata.lintian-overrides | 16
-rw-r--r--  contrib/debian/netdata.postinst.in | 41
-rw-r--r--  contrib/debian/netdata.postrm | 43
-rw-r--r--  contrib/debian/netdata.service | 14
-rwxr-xr-x  contrib/debian/rules | 87
-rw-r--r--  contrib/debian/source/format | 1
-rwxr-xr-x  contrib/nc-backend.sh | 151
-rwxr-xr-x  coverity-scan.sh | 5
-rwxr-xr-x  depcomp | 487
-rw-r--r--  diagrams/netdata-overview.xml | 1
-rwxr-xr-x  install-sh | 14
-rw-r--r--  installer/functions.sh | 340
-rwxr-xr-x  kickstart-static64.sh | 232
-rwxr-xr-x  kickstart.sh | 374
-rw-r--r--  m4/ax_c___atomic.m4 | 6
-rwxr-xr-x  makeself/build-x86_64-static.sh | 39
-rwxr-xr-x  makeself/build.sh | 38
-rwxr-xr-x  makeself/functions.sh | 59
-rwxr-xr-x  makeself/install-or-update.sh | 162
-rwxr-xr-x  makeself/jobs/10-prepare-destination.install.sh | 16
-rwxr-xr-x  makeself/jobs/50-bash-4.4.install.sh | 47
-rwxr-xr-x  makeself/jobs/50-curl-7.53.1.install.sh | 30
-rwxr-xr-x  makeself/jobs/50-fping-4.0.install.sh | 25
-rwxr-xr-x  makeself/jobs/70-netdata-git.install.sh | 16
-rwxr-xr-x  makeself/jobs/99-makeself.install.sh | 121
-rwxr-xr-x  makeself/makeself-header.sh | 554
-rw-r--r--  makeself/makeself-help-header.txt | 46
-rw-r--r--  makeself/makeself-license.txt | 46
-rw-r--r--  makeself/makeself.lsm | 16
-rwxr-xr-x  makeself/makeself.sh | 620
-rwxr-xr-x  makeself/post-installer.sh | 10
-rwxr-xr-x  makeself/run-all-jobs.sh | 46
-rwxr-xr-x  makeself/setup-x86_64-static.sh | 26
-rwxr-xr-x  missing | 414
-rwxr-xr-x  netdata-installer.sh | 356
-rw-r--r--  netdata.spec | 25
-rw-r--r--  netdata.spec.in | 21
-rw-r--r--  node.d/Makefile.am | 1
-rw-r--r--  node.d/Makefile.in | 124
-rw-r--r--  node.d/README.md | 63
-rw-r--r--  node.d/fronius.node.js | 317
-rw-r--r--  node.d/node_modules/net-snmp.js | 63
-rw-r--r--  plugins.d/Makefile.in | 117
-rwxr-xr-x  plugins.d/alarm-notify.sh | 147
-rwxr-xr-x  plugins.d/alarm-test.sh | 2
-rwxr-xr-x  plugins.d/cgroup-name.sh | 48
-rwxr-xr-x  plugins.d/charts.d.plugin | 9
-rwxr-xr-x  plugins.d/fping.plugin | 26
-rwxr-xr-x  plugins.d/node.d.plugin | 6
-rwxr-xr-x  plugins.d/python.d.plugin | 19
-rwxr-xr-x  plugins.d/tc-qos-helper.sh | 9
-rw-r--r--  python.d/Makefile.am | 4
-rw-r--r--  python.d/Makefile.in | 144
-rw-r--r--  python.d/README.md | 170
-rw-r--r--  python.d/apache.chart.py | 72
-rw-r--r--  python.d/bind_rndc.chart.py | 319
-rw-r--r--  python.d/cpufreq.chart.py | 2
-rw-r--r--  python.d/dns_query_time.chart.py | 135
-rw-r--r--  python.d/elasticsearch.chart.py | 198
-rw-r--r--  python.d/fail2ban.chart.py | 267
-rw-r--r--  python.d/go_expvar.chart.py | 228
-rw-r--r--  python.d/haproxy.chart.py | 254
-rw-r--r--  python.d/isc_dhcpd.chart.py | 190
-rw-r--r--  python.d/mdstat.chart.py | 167
-rw-r--r--  python.d/mongodb.chart.py | 53
-rw-r--r--  python.d/mysql.chart.py | 10
-rw-r--r--  python.d/ovpn_status_log.chart.py | 94
-rw-r--r--  python.d/postgres.chart.py | 12
-rw-r--r--  python.d/python_modules/base.py | 211
-rw-r--r--  python.d/rabbitmq.chart.py | 187
-rw-r--r--  python.d/redis.chart.py | 5
-rw-r--r--  python.d/samba.chart.py | 124
-rw-r--r--  python.d/smartd_log.chart.py | 13
-rw-r--r--  python.d/web_log.chart.py | 1179
-rw-r--r--  src/Makefile.am | 16
-rw-r--r--  src/Makefile.in | 315
-rw-r--r--  src/appconfig.c | 55
-rw-r--r--  src/appconfig.h | 7
-rw-r--r--  src/apps_plugin.c | 213
-rw-r--r--  src/backend_prometheus.c | 397
-rw-r--r--  src/backend_prometheus.h | 11
-rw-r--r--  src/backends.c | 311
-rw-r--r--  src/backends.h | 26
-rw-r--r--  src/clocks.c | 4
-rw-r--r--  src/clocks.h | 2
-rw-r--r--  src/common.c | 266
-rw-r--r--  src/common.h | 12
-rw-r--r--  src/daemon.c | 56
-rw-r--r--  src/eval.c | 2
-rw-r--r--  src/freebsd_devstat.c | 662
-rw-r--r--  src/freebsd_getifaddrs.c | 494
-rw-r--r--  src/freebsd_getmntinfo.c | 293
-rw-r--r--  src/freebsd_ipfw.c | 360
-rw-r--r--  src/freebsd_kstat_zfs.c | 212
-rw-r--r--  src/freebsd_sysctl.c | 1283
-rw-r--r--  src/freeipmi_plugin.c | 13
-rw-r--r--  src/health.c | 15
-rw-r--r--  src/health_config.c | 55
-rw-r--r--  src/inlined.h | 97
-rw-r--r--  src/log.c | 176
-rw-r--r--  src/log.h | 12
-rw-r--r--  src/main.c | 100
-rw-r--r--  src/plugin_freebsd.c | 6
-rw-r--r--  src/plugin_freebsd.h | 8
-rw-r--r--  src/plugin_idlejitter.c | 72
-rw-r--r--  src/plugin_proc.c | 5
-rw-r--r--  src/plugin_proc.h | 4
-rw-r--r--  src/plugin_proc_diskspace.c | 41
-rw-r--r--  src/plugin_tc.c | 6
-rw-r--r--  src/plugins_d.c | 99
-rw-r--r--  src/plugins_d.h | 10
-rw-r--r--  src/proc_diskstats.c | 173
-rw-r--r--  src/proc_loadavg.c | 5
-rw-r--r--  src/proc_net_dev.c | 14
-rw-r--r--  src/proc_net_netstat.c | 8
-rw-r--r--  src/proc_net_snmp.c | 37
-rw-r--r--  src/proc_net_snmp6.c | 4
-rw-r--r--  src/proc_net_softnet_stat.c | 4
-rw-r--r--  src/proc_spl_kstat_zfs.c | 153
-rw-r--r--  src/proc_vmstat.c | 20
-rw-r--r--  src/registry.c | 2
-rw-r--r--  src/registry.h | 2
-rw-r--r--  src/registry_init.c | 2
-rw-r--r--  src/registry_internals.c | 4
-rw-r--r--  src/rrd.c | 11
-rw-r--r--  src/rrd.h | 98
-rw-r--r--  src/rrd2json.c | 83
-rw-r--r--  src/rrd2json.h | 24
-rw-r--r--  src/rrd2json_api_old.c | 4
-rw-r--r--  src/rrdcalc.c | 2
-rw-r--r--  src/rrdcalctemplate.c | 2
-rw-r--r--  src/rrddim.c | 175
-rw-r--r--  src/rrdhost.c | 154
-rw-r--r--  src/rrdpush.c | 249
-rw-r--r--  src/rrdpush.h | 2
-rw-r--r--  src/rrdset.c | 921
-rw-r--r--  src/socket.c | 1006
-rw-r--r--  src/socket.h | 55
-rw-r--r--  src/statistical.c | 459
-rw-r--r--  src/statistical.h | 19
-rw-r--r--  src/statsd.c | 2041
-rw-r--r--  src/statsd.h | 9
-rw-r--r--  src/storage_number.h | 1
-rw-r--r--  src/sys_fs_cgroup.c | 48
-rw-r--r--  src/unit_test.c | 56
-rw-r--r--  src/unit_test.h | 1
-rw-r--r--  src/web_api_v1.c | 42
-rw-r--r--  src/web_client.c | 54
-rw-r--r--  src/web_client.h | 1
-rw-r--r--  src/web_server.c | 366
-rw-r--r--  src/web_server.h | 26
-rw-r--r--  src/zfs_common.c | 677
-rw-r--r--  src/zfs_common.h | 109
-rw-r--r--  system/Makefile.in | 102
-rw-r--r--  web/Makefile.am | 1
-rw-r--r--  web/Makefile.in | 145
-rw-r--r--  web/dashboard.css | 47
-rw-r--r--  web/dashboard.html | 2
-rw-r--r--  web/dashboard.js | 1899
-rw-r--r--  web/dashboard.slate.css | 47
-rw-r--r--  web/dashboard_info.js | 391
-rw-r--r--  web/index.html | 186
-rw-r--r--  web/infographic.html | 170
-rw-r--r--  web/version.txt | 2
221 files changed, 23143 insertions(+), 7148 deletions(-)
diff --git a/.gitignore b/.gitignore
index 0169e931..00c7d6d6 100644
--- a/.gitignore
+++ b/.gitignore
@@ -30,6 +30,8 @@ netdata.spec
*.tar.*
+cmake-build-debug/
+makeself/tmp/
sitespeed-result/
cov-int/
netdata-coverity-analysis.tgz
@@ -93,6 +95,7 @@ profile/benchmark-dictionary
profile/benchmark-registry
*.pyc
+*.run
diagrams/*.png
diagrams/*.svg
diff --git a/.travis/deploy-if-have-key b/.travis/deploy-if-have-key
index 50e69b93..1933eeb2 100755
--- a/.travis/deploy-if-have-key
+++ b/.travis/deploy-if-have-key
@@ -32,6 +32,12 @@ then
exit 0
fi
+if [ "$TRAVIS_OS_NAME" != "linux" ]
+then
+ echo "Building non-linux version - skipping deployment to website"
+ exit 0
+fi
+
if [ "$CC" != "gcc" ]
then
echo "Building non-gcc version - skipping deployment to website"
@@ -41,4 +47,5 @@ fi
ssh-keyscan -H firehol.org >> ~/.ssh/known_hosts
ssh travis@firehol.org mkdir -p uploads/netdata/$TRAVIS_BRANCH/
scp -p *.tar.* travis@firehol.org:uploads/netdata/$TRAVIS_BRANCH/
+scp -p *.gz.run* travis@firehol.org:uploads/netdata/$TRAVIS_BRANCH/
ssh travis@firehol.org touch uploads/netdata/$TRAVIS_BRANCH/complete.txt
diff --git a/ChangeLog b/ChangeLog
index 1f6c5f27..d3c20b80 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,55 @@
+netdata (1.7.0) - 2017-07-16
+
+ * netdata is still spreading fast
+
+ we are at 320,000 users and 132,000 servers
+
+ Almost 100k new users, 52k new installations and 800k docker pulls
+ since the previous release, four and a half months ago.
+
+ the netdata user base grows by about 1,000 new users and 600 new servers
+ per day. Thank you. You are awesome.
+
+ * The next release (v1.8) will be focused on providing a global health
+ monitoring service, for all netdata users, for free.
+
+ * netdata is now a (very fast) fully featured statsd server, and the
+ only one with automatic visualization: push a statsd metric, hit
+ F5 on the netdata dashboard, and your metric is visualized. It also
+ supports synthetic charts, defined by you, so that you can correlate
+ and visualize your application metrics the way you like.
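+
+ For example (a minimal sketch, assuming netdata's default statsd
+ listener on UDP port 8125; the metric name is illustrative), a
+ counter can be pushed with plain netcat:
+
+   echo "myapp.requests:1|c" | nc -u -w1 localhost 8125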
+
+ * netdata got new installation options.
+ It is now easier than ever to install netdata - we also distribute a
+ statically linked netdata x86_64 binary, including key dependencies
+ (like bash, curl, etc.) that can run anywhere a Linux kernel runs
+ (CoreOS, CirrOS, etc.).
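+
+ For example, the static build can be installed with the kickstart
+ script (a sketch, assuming the my-netdata.io download location
+ documented for this release):
+
+   bash <(curl -Ss https://my-netdata.io/kickstart-static64.sh)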
+
+ * metrics streaming and replication have been improved significantly.
+ All known issues have been solved and key enhancements have been added.
+ Headless collectors and proxies can now send metrics to backends when
+ data source = as collected.
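+
+ A minimal netdata.conf sketch for such a setup (the graphite
+ destination host is hypothetical):
+
+   [backend]
+       enabled = yes
+       type = graphite
+       destination = graphite.example.com:2003
+       data source = as collected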
+
+ * backends have received quite a few enhancements, including host tags
+ and metrics filtering on the netdata side;
+ prometheus support has been re-written to utilize more prometheus
+ features and provide more flexibility and integration options.
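+
+ For example, a prometheus server can scrape a netdata host through
+ the allmetrics API (a sketch, assuming the default port 19999):
+
+   curl 'http://localhost:19999/api/v1/allmetrics?format=prometheus'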
+
+ * netdata now monitors ZFS (on Linux and FreeBSD), ElasticSearch,
+ RabbitMQ, Go applications (via expvar), ipfw (on FreeBSD 11), samba,
+ and squid logs (via the web_log plugin).
+
+ * netdata dashboard loading times have been improved significantly
+ (hit F5 a few times on a netdata dashboard - it is now amazingly
+ fast), so that dashboards with thousands of charts remain usable.
+
+ * netdata alarms now support custom hooks, so you can run whatever
+ you like in parallel with the built-in alarm notifications.
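+
+ For example, a hook is attached to an alarm with an exec line in its
+ health.d configuration (the script path here is hypothetical):
+
+   exec: /usr/local/bin/my-alarm-hook.sh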
+
+ * As usual, this release brings dozens more improvements, enhancements
+ and compatibility fixes.
+
+
netdata (1.6.0) - 2017-03-20
* birthday release: 1 year netdata
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 00000000..9cecc1d4
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,674 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Use with the GNU Affero General Public License.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ {one line to give the program's name and a brief idea of what it does.}
+ Copyright (C) {year} {name of author}
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+ {project} Copyright (C) {year} {fullname}
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<http://www.gnu.org/licenses/>.
+
+ The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<http://www.gnu.org/philosophy/why-not-lgpl.html>.
diff --git a/LICENSE-REDISTRIBUTED.md b/LICENSE-REDISTRIBUTED.md
new file mode 100644
index 00000000..79891e5a
--- /dev/null
+++ b/LICENSE-REDISTRIBUTED.md
@@ -0,0 +1,150 @@
+# Netdata
+
+Copyright 2016-2017, Costa Tsaousis.
+Released under [GPL v3 or later](http://www.gnu.org/licenses/gpl-3.0.en.html).
+
+---
+
+## Re-distributed software
+
+Netdata re-distributes the following third-party software.
+We decided to re-distribute all of these, instead of using them
+through a CDN, to allow netdata to work in cases where internet
+connectivity is not available.
+
+
+- [Dygraphs](http://dygraphs.com/)
+
+ Copyright 2009, Dan Vanderkam
+ [MIT License](http://dygraphs.com/legal.html)
+
+
+- [jQuery Sparklines](http://omnipotent.net/jquery.sparkline/)
+
+ Copyright 2009-2012, Splunk Inc.
+ [New BSD License](http://opensource.org/licenses/BSD-3-Clause)
+
+
+- [Peity](http://benpickles.github.io/peity/)
+
+ Copyright 2009-2015, Ben Pickles
+ [MIT License](https://github.com/benpickles/peity/blob/master/MIT-LICENCE)
+
+
+- [Easy Pie Chart](https://rendro.github.io/easy-pie-chart/)
+
+ Copyright 2013, Robert Fleischmann
+ [MIT License](https://github.com/rendro/easy-pie-chart/blob/master/LICENSE)
+
+
+- [Gauge.js](http://bernii.github.io/gauge.js/)
+
+ Copyright, Bernard Kobos
+ [MIT License](http://bernii.github.io/gauge.js/)
+
+
+- [jQuery](https://jquery.org/)
+
+ Copyright 2015, jQuery Foundation
+ [MIT License](https://jquery.org/license/)
+
+
+- [Bootstrap](http://getbootstrap.com/getting-started/)
+
+ Copyright 2015, Twitter
+ [MIT License](http://getbootstrap.com/getting-started/#license-faqs)
+
+
+- [Bootstrap Toggle](http://www.bootstraptoggle.com/)
+
+ Copyright (c) 2011-2014 Min Hur, The New York Times Company
+ [MIT License](https://github.com/minhur/bootstrap-toggle/blob/master/LICENSE)
+
+
+- [bootstrap-table](http://bootstrap-table.wenzhixin.net.cn/)
+
+ Copyright (c) 2012-2016 Zhixin Wen <wenzhixin2010@gmail.com>
+ [MIT License](https://github.com/wenzhixin/bootstrap-table/blob/master/LICENSE)
+
+
+- [tableExport.jquery.plugin](https://github.com/hhurz/tableExport.jquery.plugin)
+
+ Copyright (c) 2015,2016 hhurz
+ [MIT License](http://rawgit.com/hhurz/tableExport.jquery.plugin/master/tableExport.js)
+
+
+- [perfect-scrollbar](https://jamesflorentino.github.io/nanoScrollerJS/)
+
+ Copyright 2016, Hyunje Alex Jun and other contributors
+ [MIT License](https://github.com/noraesae/perfect-scrollbar/blob/master/LICENSE)
+
+
+- [FontAwesome](https://fortawesome.github.io/Font-Awesome/)
+
+ Created by Dave Gandy
+ Font license: [SIL OFL 1.1](http://scripts.sil.org/OFL)
+ CSS license: [MIT License](http://opensource.org/licenses/mit-license.html)
+
+
+- [IconsDB.com Icons](http://www.iconsdb.com/soylent-red-icons/seo-performance-icon.html)
+
+ Icons provided as CC0 1.0 Universal (CC0 1.0) Public Domain Dedication
+
+
+- [morris.js](http://morrisjs.github.io/morris.js/)
+
+ Copyright 2013, Olly Smith
+ [Simplified BSD License](http://morrisjs.github.io/morris.js/)
+
+
+- [Raphaël](http://raphaeljs.com/)
+
+ Copyright 2008, Dmitry Baranovskiy
+ [MIT License](http://raphaeljs.com/license.html)
+
+
+- [C3](http://c3js.org/)
+
+ Copyright 2013, Masayuki Tanaka
+ [MIT License](https://github.com/masayuki0812/c3/blob/master/LICENSE)
+
+
+- [D3](http://d3js.org/)
+
+ Copyright 2015, Mike Bostock
+ [BSD License](http://opensource.org/licenses/BSD-3-Clause)
+
+
+- [node-extend](https://github.com/justmoon/node-extend)
+
+ Copyright 2014, Stefan Thomas
+ [MIT License](https://github.com/justmoon/node-extend/blob/master/LICENSE)
+
+
+- [node-net-snmp](https://github.com/stephenwvickers/node-net-snmp)
+
+ Copyright 2013, Stephen Vickers
+ [MIT License](https://github.com/stephenwvickers/node-net-snmp)
+
+
+- [node-asn1](https://github.com/mcavage/node-asn1)
+
+ Copyright 2011, Mark Cavage
+ [MIT License](https://github.com/mcavage/node-asn1)
+
+
+- [pixl-xml](https://github.com/jhuckaby/pixl-xml)
+
+ Copyright 2015, Joseph Huckaby
+ [MIT License](https://github.com/jhuckaby/pixl-xml)
+
+- [sensors](https://github.com/paroj/sensors.py)
+
+ Copyright 2014, Pavel Rojtberg
+ [LGPL 2.1 License](http://opensource.org/licenses/LGPL-2.1)
+
+- [PyYAML](https://bitbucket.org/blackjack/pysensors)
+
+ Copyright 2006, Kirill Simonov
+ [MIT License](http://pyyaml.org)
+
diff --git a/LICENSE.md b/LICENSE.md
index 79891e5a..37a09f48 100644
--- a/LICENSE.md
+++ b/LICENSE.md
@@ -1,150 +1,9 @@
-# Netdata
+**netdata**<br/>
+(C) Copyright 2017<br/>
+Costa Tsaousis &lt;costa@tsaousis.gr&gt;
-Copyright 2016-2017, Costa Tsaousis.
-Released under [GPL v3 or later](http://www.gnu.org/licenses/gpl-3.0.en.html).
+For license details refer to the following files:
----
-
-## Re-distributed software
-
-Netdata re-distributes the following third party software.
-We decided to re-distribute all these, instead of using them
-through a CDN, to allow netdata work in cases where internet
-connectivity is not available.
-
-
-- [Dygraphs](http://dygraphs.com/)
-
- Copyright 2009, Dan Vanderkam
- [MIT License](http://dygraphs.com/legal.html)
-
-
-- [jQuery Sparklines](http://omnipotent.net/jquery.sparkline/)
-
- Copyright 2009-2012, Splunk Inc.
- [New BSD License](http://opensource.org/licenses/BSD-3-Clause)
-
-
-- [Peity](http://benpickles.github.io/peity/)
-
- Copyright 2009-2015, Ben Pickles
- [MIT License](https://github.com/benpickles/peity/blob/master/MIT-LICENCE)
-
-
-- [Easy Pie Chart](https://rendro.github.io/easy-pie-chart/)
-
- Copyright 2013, Robert Fleischmann
- [MIT License](https://github.com/rendro/easy-pie-chart/blob/master/LICENSE)
-
-
-- [Guage.js](http://bernii.github.io/gauge.js/)
-
- Copyright, Bernard Kobos
- [MIT License](http://bernii.github.io/gauge.js/)
-
-
-- [jQuery](https://jquery.org/)
-
- Copyright 2015, jQuery Foundation
- [MIT License](https://jquery.org/license/)
-
-
-- [Bootstrap](http://getbootstrap.com/getting-started/)
-
- Copyright 2015, Twitter
- [MIT License](http://getbootstrap.com/getting-started/#license-faqs)
-
-
-- [Bootstrap Toggle](http://www.bootstraptoggle.com/)
-
- Copyright (c) 2011-2014 Min Hur, The New York Times Company
- [MIT License](https://github.com/minhur/bootstrap-toggle/blob/master/LICENSE)
-
-
-- [bootstrap-table](http://bootstrap-table.wenzhixin.net.cn/)
-
- Copyright (c) 2012-2016 Zhixin Wen <wenzhixin2010@gmail.com>
- [MIT License](https://github.com/wenzhixin/bootstrap-table/blob/master/LICENSE)
-
-
-- [tableExport.jquery.plugin](https://github.com/hhurz/tableExport.jquery.plugin)
-
- Copyright (c) 2015,2016 hhurz
- [MIT License](http://rawgit.com/hhurz/tableExport.jquery.plugin/master/tableExport.js)
-
-
-- [perfect-scrollbar](https://jamesflorentino.github.io/nanoScrollerJS/)
-
- Copyright 2016, Hyunje Alex Jun and other contributors
- [MIT License](https://github.com/noraesae/perfect-scrollbar/blob/master/LICENSE)
-
-
-- [FontAwesome](https://fortawesome.github.io/Font-Awesome/)
-
- Created by Dave Gandy
- Font license: [SIL OFL 1.1](http://scripts.sil.org/OFL)
- CSS license: [MIT License](http://opensource.org/licenses/mit-license.html)
-
-
-- [IconsDB.com Icons](http://www.iconsdb.com/soylent-red-icons/seo-performance-icon.html)
-
- Icons provided as CC0 1.0 Universal (CC0 1.0) Public Domain Dedication
-
-
-- [morris.js](http://morrisjs.github.io/morris.js/)
-
- Copyright 2013, Olly Smith
- [Simplified BSD License](http://morrisjs.github.io/morris.js/)
-
-
-- [Raphaël](http://raphaeljs.com/)
-
- Copyright 2008, Dmitry Baranovskiy
- [MIT License](http://raphaeljs.com/license.html)
-
-
-- [C3](http://c3js.org/)
-
- Copyright 2013, Masayuki Tanaka
- [MIT License](https://github.com/masayuki0812/c3/blob/master/LICENSE)
-
-
-- [D3](http://d3js.org/)
-
- Copyright 2015, Mike Bostock
- [BSD License](http://opensource.org/licenses/BSD-3-Clause)
-
-
-- [node-extend](https://github.com/justmoon/node-extend)
-
- Copyright 2014, Stefan Thomas
- [MIT License](https://github.com/justmoon/node-extend/blob/master/LICENSE)
-
-
-- [node-net-snmp](https://github.com/stephenwvickers/node-net-snmp)
-
- Copyright 2013, Stephen Vickers
- [MIT License](https://github.com/stephenwvickers/node-net-snmp)
-
-
-- [node-asn1](https://github.com/mcavage/node-asn1)
-
- Copyright 2011, Mark Cavage
- [MIT License](https://github.com/mcavage/node-asn1)
-
-
-- [pixl-xml](https://github.com/jhuckaby/pixl-xml)
-
- Copyright 2015, Joseph Huckaby
- [MIT License](https://github.com/jhuckaby/pixl-xml)
-
-- [sensors](https://github.com/paroj/sensors.py)
-
- Copyright 2014, Pavel Rojtberg
- [LGPL 2.1 License](http://opensource.org/licenses/LGPL-2.1)
-
-- [PyYAML](https://bitbucket.org/blackjack/pysensors)
-
- Copyright 2006, Kirill Simonov
- [MIT License](http://pyyaml.org)
+- [netdata license](LICENSE) (GPL v3+)
+- [third party licenses](LICENSE-REDISTRIBUTED.md), for packages re-distributed with netdata
diff --git a/Makefile.am b/Makefile.am
index 3ccf82f8..7d5bc57f 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -37,7 +37,9 @@ EXTRA_DIST = \
m4/ax_c__generic.m4 \
autogen.sh \
README.md \
+ LICENSE \
LICENSE.md \
+ LICENSE-REDISTRIBUTED.md \
COPYING \
autogen.sh \
tests/stress.sh \
@@ -60,6 +62,7 @@ dist_noinst_DATA= \
diagrams/registry.puml \
diagrams/netdata-for-ephemeral-nodes.xml \
diagrams/netdata-proxies-example.xml \
+ diagrams/netdata-overview.xml \
configs.signatures \
Dockerfile \
netdata.spec \
@@ -71,6 +74,26 @@ dist_noinst_SCRIPTS= \
diagrams/build.sh \
coverity-scan.sh \
docker-build.sh \
+ kickstart.sh \
+ kickstart-static64.sh \
netdata-installer.sh \
installer/functions.sh \
+ makeself/build.sh \
+ makeself/makeself.sh \
+ makeself/makeself-license.txt \
+ makeself/setup-x86_64-static.sh \
+ makeself/post-installer.sh \
+ makeself/jobs/10-prepare-destination.install.sh \
+ makeself/jobs/50-curl-7.53.1.install.sh \
+ makeself/jobs/50-bash-4.4.install.sh \
+ makeself/jobs/50-fping-4.0.install.sh \
+ makeself/jobs/70-netdata-git.install.sh \
+ makeself/jobs/99-makeself.install.sh \
+ makeself/run-all-jobs.sh \
+ makeself/install-or-update.sh \
+ makeself/build-x86_64-static.sh \
+ makeself/makeself-header.sh \
+ makeself/makeself-help-header.txt \
+ makeself/makeself.lsm \
+ makeself/functions.sh \
$(NULL)
diff --git a/Makefile.in b/Makefile.in
index 492376f5..aae24649 100644
--- a/Makefile.in
+++ b/Makefile.in
@@ -1,9 +1,8 @@
-# Makefile.in generated by automake 1.11.3 from Makefile.am.
+# Makefile.in generated by automake 1.14.1 from Makefile.am.
# @configure_input@
-# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
-# 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 Free Software
-# Foundation, Inc.
+# Copyright (C) 1994-2013 Free Software Foundation, Inc.
+
# This Makefile.in is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
@@ -17,6 +16,51 @@
VPATH = @srcdir@
+am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
pkgdatadir = $(datadir)/@PACKAGE@
pkgincludedir = $(includedir)/@PACKAGE@
pkglibdir = $(libdir)/@PACKAGE@
@@ -36,11 +80,11 @@ POST_UNINSTALL = :
build_triplet = @build@
host_triplet = @host@
subdir = .
-DIST_COMMON = $(am__configure_deps) $(dist_noinst_DATA) \
- $(dist_noinst_SCRIPTS) $(srcdir)/Makefile.am \
- $(srcdir)/Makefile.in $(srcdir)/config.h.in \
- $(srcdir)/netdata.spec.in $(top_srcdir)/configure COPYING \
- ChangeLog config.guess config.sub depcomp install-sh missing
+DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \
+ $(top_srcdir)/configure $(am__configure_deps) \
+ $(srcdir)/config.h.in $(srcdir)/netdata.spec.in \
+ $(dist_noinst_SCRIPTS) $(dist_noinst_DATA) COPYING ChangeLog \
+ compile config.guess config.sub install-sh missing
ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
am__aclocal_m4_deps = $(top_srcdir)/m4/ax_c___atomic.m4 \
$(top_srcdir)/m4/ax_c__generic.m4 $(top_srcdir)/m4/ax_c_lto.m4 \
@@ -59,23 +103,63 @@ CONFIG_HEADER = config.h
CONFIG_CLEAN_FILES = netdata.spec
CONFIG_CLEAN_VPATH_FILES =
SCRIPTS = $(dist_noinst_SCRIPTS)
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
SOURCES =
DIST_SOURCES =
-RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \
- html-recursive info-recursive install-data-recursive \
- install-dvi-recursive install-exec-recursive \
- install-html-recursive install-info-recursive \
- install-pdf-recursive install-ps-recursive install-recursive \
- installcheck-recursive installdirs-recursive pdf-recursive \
- ps-recursive uninstall-recursive
+RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \
+ ctags-recursive dvi-recursive html-recursive info-recursive \
+ install-data-recursive install-dvi-recursive \
+ install-exec-recursive install-html-recursive \
+ install-info-recursive install-pdf-recursive \
+ install-ps-recursive install-recursive installcheck-recursive \
+ installdirs-recursive pdf-recursive ps-recursive \
+ tags-recursive uninstall-recursive
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
DATA = $(dist_noinst_DATA)
RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \
distclean-recursive maintainer-clean-recursive
-AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \
- $(RECURSIVE_CLEAN_TARGETS:-recursive=) tags TAGS ctags CTAGS \
- distdir dist dist-all distcheck
+am__recursive_targets = \
+ $(RECURSIVE_TARGETS) \
+ $(RECURSIVE_CLEAN_TARGETS) \
+ $(am__extra_recursive_targets)
+AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \
+ cscope distdir dist dist-all distcheck
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) \
+ $(LISP)config.h.in
+# Read a list of newline-separated strings from the standard input,
+# and print each of them once, without duplicates. Input order is
+# *not* preserved.
+am__uniquify_input = $(AWK) '\
+ BEGIN { nonempty = 0; } \
+ { items[$$0] = 1; nonempty = 1; } \
+ END { if (nonempty) { for (i in items) print i; }; } \
+'
+# Make sure the list of sources is unique. This is necessary because,
+# e.g., the same source file might be shared among _SOURCES variables
+# for different programs/libraries.
+am__define_uniq_tagged_files = \
+ list='$(am__tagged_files)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | $(am__uniquify_input)`
ETAGS = etags
CTAGS = ctags
+CSCOPE = cscope
DIST_SUBDIRS = $(SUBDIRS)
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
distdir = $(PACKAGE)-$(VERSION)
@@ -86,6 +170,7 @@ am__remove_distdir = \
&& rm -rf "$(distdir)" \
|| { sleep 5 && rm -rf "$(distdir)"; }; \
else :; fi
+am__post_remove_distdir = $(am__remove_distdir)
am__relativize = \
dir0=`pwd`; \
sed_first='s,^\([^/]*\)/.*$$,\1,'; \
@@ -113,12 +198,14 @@ am__relativize = \
reldir="$$dir2"
DIST_ARCHIVES = $(distdir).tar.gz $(distdir).tar.bz2 $(distdir).tar.xz
GZIP_ENV = --best
+DIST_TARGETS = dist-xz dist-bzip2 dist-gzip
distuninstallcheck_listfiles = find . -type f -print
am__distuninstallcheck_listfiles = $(distuninstallcheck_listfiles) \
| sed 's|^\./|$(prefix)/|' | grep -v '$(infodir)/dir$$'
distcleancheck_listfiles = find . -type f -print
ACLOCAL = @ACLOCAL@
AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
AUTOCONF = @AUTOCONF@
AUTOHEADER = @AUTOHEADER@
AUTOMAKE = @AUTOMAKE@
@@ -299,7 +386,9 @@ EXTRA_DIST = \
m4/ax_c__generic.m4 \
autogen.sh \
README.md \
+ LICENSE \
LICENSE.md \
+ LICENSE-REDISTRIBUTED.md \
COPYING \
autogen.sh \
tests/stress.sh \
@@ -322,6 +411,7 @@ dist_noinst_DATA = \
diagrams/registry.puml \
diagrams/netdata-for-ephemeral-nodes.xml \
diagrams/netdata-proxies-example.xml \
+ diagrams/netdata-overview.xml \
configs.signatures \
Dockerfile \
netdata.spec \
@@ -334,8 +424,28 @@ dist_noinst_SCRIPTS = \
diagrams/build.sh \
coverity-scan.sh \
docker-build.sh \
+ kickstart.sh \
+ kickstart-static64.sh \
netdata-installer.sh \
installer/functions.sh \
+ makeself/build.sh \
+ makeself/makeself.sh \
+ makeself/makeself-license.txt \
+ makeself/setup-x86_64-static.sh \
+ makeself/post-installer.sh \
+ makeself/jobs/10-prepare-destination.install.sh \
+ makeself/jobs/50-curl-7.53.1.install.sh \
+ makeself/jobs/50-bash-4.4.install.sh \
+ makeself/jobs/50-fping-4.0.install.sh \
+ makeself/jobs/70-netdata-git.install.sh \
+ makeself/jobs/99-makeself.install.sh \
+ makeself/run-all-jobs.sh \
+ makeself/install-or-update.sh \
+ makeself/build-x86_64-static.sh \
+ makeself/makeself-header.sh \
+ makeself/makeself-help-header.txt \
+ makeself/makeself.lsm \
+ makeself/functions.sh \
$(NULL)
all: config.h
@@ -378,8 +488,8 @@ $(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
$(am__aclocal_m4_deps):
config.h: stamp-h1
- @if test ! -f $@; then rm -f stamp-h1; else :; fi
- @if test ! -f $@; then $(MAKE) $(AM_MAKEFLAGS) stamp-h1; else :; fi
+ @test -f $@ || rm -f stamp-h1
+ @test -f $@ || $(MAKE) $(AM_MAKEFLAGS) stamp-h1
stamp-h1: $(srcdir)/config.h.in $(top_builddir)/config.status
@rm -f stamp-h1
@@ -395,22 +505,25 @@ netdata.spec: $(top_builddir)/config.status $(srcdir)/netdata.spec.in
cd $(top_builddir) && $(SHELL) ./config.status $@
# This directory's subdirectories are mostly independent; you can cd
-# into them and run `make' without going through this Makefile.
-# To change the values of `make' variables: instead of editing Makefiles,
-# (1) if the variable is set in `config.status', edit `config.status'
-# (which will cause the Makefiles to be regenerated when you run `make');
-# (2) otherwise, pass the desired values on the `make' command line.
-$(RECURSIVE_TARGETS):
- @fail= failcom='exit 1'; \
- for f in x $$MAKEFLAGS; do \
- case $$f in \
- *=* | --[!k]*);; \
- *k*) failcom='fail=yes';; \
- esac; \
- done; \
+# into them and run 'make' without going through this Makefile.
+# To change the values of 'make' variables: instead of editing Makefiles,
+# (1) if the variable is set in 'config.status', edit 'config.status'
+# (which will cause the Makefiles to be regenerated when you run 'make');
+# (2) otherwise, pass the desired values on the 'make' command line.
+$(am__recursive_targets):
+ @fail=; \
+ if $(am__make_keepgoing); then \
+ failcom='fail=yes'; \
+ else \
+ failcom='exit 1'; \
+ fi; \
dot_seen=no; \
target=`echo $@ | sed s/-recursive//`; \
- list='$(SUBDIRS)'; for subdir in $$list; do \
+ case "$@" in \
+ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \
+ *) list='$(SUBDIRS)' ;; \
+ esac; \
+ for subdir in $$list; do \
echo "Making $$target in $$subdir"; \
if test "$$subdir" = "."; then \
dot_seen=yes; \
@@ -425,57 +538,12 @@ $(RECURSIVE_TARGETS):
$(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \
fi; test -z "$$fail"
-$(RECURSIVE_CLEAN_TARGETS):
- @fail= failcom='exit 1'; \
- for f in x $$MAKEFLAGS; do \
- case $$f in \
- *=* | --[!k]*);; \
- *k*) failcom='fail=yes';; \
- esac; \
- done; \
- dot_seen=no; \
- case "$@" in \
- distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \
- *) list='$(SUBDIRS)' ;; \
- esac; \
- rev=''; for subdir in $$list; do \
- if test "$$subdir" = "."; then :; else \
- rev="$$subdir $$rev"; \
- fi; \
- done; \
- rev="$$rev ."; \
- target=`echo $@ | sed s/-recursive//`; \
- for subdir in $$rev; do \
- echo "Making $$target in $$subdir"; \
- if test "$$subdir" = "."; then \
- local_target="$$target-am"; \
- else \
- local_target="$$target"; \
- fi; \
- ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \
- || eval $$failcom; \
- done && test -z "$$fail"
-tags-recursive:
- list='$(SUBDIRS)'; for subdir in $$list; do \
- test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \
- done
-ctags-recursive:
- list='$(SUBDIRS)'; for subdir in $$list; do \
- test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \
- done
+ID: $(am__tagged_files)
+ $(am__define_uniq_tagged_files); mkid -fID $$unique
+tags: tags-recursive
+TAGS: tags
-ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES)
- list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
- unique=`for i in $$list; do \
- if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
- done | \
- $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
- END { if (nonempty) { for (i in files) print i; }; }'`; \
- mkid -fID $$unique
-tags: TAGS
-
-TAGS: tags-recursive $(HEADERS) $(SOURCES) config.h.in $(TAGS_DEPENDENCIES) \
- $(TAGS_FILES) $(LISP)
+tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
set x; \
here=`pwd`; \
if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \
@@ -491,12 +559,7 @@ TAGS: tags-recursive $(HEADERS) $(SOURCES) config.h.in $(TAGS_DEPENDENCIES) \
set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \
fi; \
done; \
- list='$(SOURCES) $(HEADERS) config.h.in $(LISP) $(TAGS_FILES)'; \
- unique=`for i in $$list; do \
- if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
- done | \
- $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
- END { if (nonempty) { for (i in files) print i; }; }'`; \
+ $(am__define_uniq_tagged_files); \
shift; \
if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
test -n "$$unique" || unique=$$empty_fix; \
@@ -508,15 +571,11 @@ TAGS: tags-recursive $(HEADERS) $(SOURCES) config.h.in $(TAGS_DEPENDENCIES) \
$$unique; \
fi; \
fi
-ctags: CTAGS
-CTAGS: ctags-recursive $(HEADERS) $(SOURCES) config.h.in $(TAGS_DEPENDENCIES) \
- $(TAGS_FILES) $(LISP)
- list='$(SOURCES) $(HEADERS) config.h.in $(LISP) $(TAGS_FILES)'; \
- unique=`for i in $$list; do \
- if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
- done | \
- $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
- END { if (nonempty) { for (i in files) print i; }; }'`; \
+ctags: ctags-recursive
+
+CTAGS: ctags
+ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+ $(am__define_uniq_tagged_files); \
test -z "$(CTAGS_ARGS)$$unique" \
|| $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
$$unique
@@ -525,9 +584,31 @@ GTAGS:
here=`$(am__cd) $(top_builddir) && pwd` \
&& $(am__cd) $(top_srcdir) \
&& gtags -i $(GTAGS_ARGS) "$$here"
+cscope: cscope.files
+ test ! -s cscope.files \
+ || $(CSCOPE) -b -q $(AM_CSCOPEFLAGS) $(CSCOPEFLAGS) -i cscope.files $(CSCOPE_ARGS)
+clean-cscope:
+ -rm -f cscope.files
+cscope.files: clean-cscope cscopelist
+cscopelist: cscopelist-recursive
+
+cscopelist-am: $(am__tagged_files)
+ list='$(am__tagged_files)'; \
+ case "$(srcdir)" in \
+ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \
+ *) sdir=$(subdir)/$(srcdir) ;; \
+ esac; \
+ for i in $$list; do \
+ if test -f "$$i"; then \
+ echo "$(subdir)/$$i"; \
+ else \
+ echo "$$sdir/$$i"; \
+ fi; \
+ done >> $(top_builddir)/cscope.files
distclean-tags:
-rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
+ -rm -f cscope.out cscope.in.out cscope.po.out cscope.files
distdir: $(DISTFILES)
$(am__remove_distdir)
@@ -563,13 +644,10 @@ distdir: $(DISTFILES)
done
@list='$(DIST_SUBDIRS)'; for subdir in $$list; do \
if test "$$subdir" = .; then :; else \
- test -d "$(distdir)/$$subdir" \
- || $(MKDIR_P) "$(distdir)/$$subdir" \
- || exit 1; \
- fi; \
- done
- @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \
- if test "$$subdir" = .; then :; else \
+ $(am__make_dryrun) \
+ || test -d "$(distdir)/$$subdir" \
+ || $(MKDIR_P) "$(distdir)/$$subdir" \
+ || exit 1; \
dir1=$$subdir; dir2="$(distdir)/$$subdir"; \
$(am__relativize); \
new_distdir=$$reldir; \
@@ -598,40 +676,40 @@ distdir: $(DISTFILES)
|| chmod -R a+r "$(distdir)"
dist-gzip: distdir
tardir=$(distdir) && $(am__tar) | GZIP=$(GZIP_ENV) gzip -c >$(distdir).tar.gz
- $(am__remove_distdir)
+ $(am__post_remove_distdir)
dist-bzip2: distdir
tardir=$(distdir) && $(am__tar) | BZIP2=$${BZIP2--9} bzip2 -c >$(distdir).tar.bz2
- $(am__remove_distdir)
+ $(am__post_remove_distdir)
dist-lzip: distdir
tardir=$(distdir) && $(am__tar) | lzip -c $${LZIP_OPT--9} >$(distdir).tar.lz
- $(am__remove_distdir)
-
-dist-lzma: distdir
- tardir=$(distdir) && $(am__tar) | lzma -9 -c >$(distdir).tar.lzma
- $(am__remove_distdir)
+ $(am__post_remove_distdir)
dist-xz: distdir
tardir=$(distdir) && $(am__tar) | XZ_OPT=$${XZ_OPT--e} xz -c >$(distdir).tar.xz
- $(am__remove_distdir)
+ $(am__post_remove_distdir)
dist-tarZ: distdir
+ @echo WARNING: "Support for shar distribution archives is" \
+ "deprecated." >&2
+ @echo WARNING: "It will be removed altogether in Automake 2.0" >&2
tardir=$(distdir) && $(am__tar) | compress -c >$(distdir).tar.Z
- $(am__remove_distdir)
+ $(am__post_remove_distdir)
dist-shar: distdir
+ @echo WARNING: "Support for distribution archives compressed with" \
+ "legacy program 'compress' is deprecated." >&2
+ @echo WARNING: "It will be removed altogether in Automake 2.0" >&2
shar $(distdir) | GZIP=$(GZIP_ENV) gzip -c >$(distdir).shar.gz
- $(am__remove_distdir)
+ $(am__post_remove_distdir)
dist-zip: distdir
-rm -f $(distdir).zip
zip -rq $(distdir).zip $(distdir)
- $(am__remove_distdir)
+ $(am__post_remove_distdir)
-dist dist-all: distdir
- tardir=$(distdir) && $(am__tar) | GZIP=$(GZIP_ENV) gzip -c >$(distdir).tar.gz
- tardir=$(distdir) && $(am__tar) | BZIP2=$${BZIP2--9} bzip2 -c >$(distdir).tar.bz2
- tardir=$(distdir) && $(am__tar) | XZ_OPT=$${XZ_OPT--e} xz -c >$(distdir).tar.xz
- $(am__remove_distdir)
+dist dist-all:
+ $(MAKE) $(AM_MAKEFLAGS) $(DIST_TARGETS) am__post_remove_distdir='@:'
+ $(am__post_remove_distdir)
# This target untars the dist file and tries a VPATH configuration. Then
# it guarantees that the distribution is self-contained by making another
@@ -642,8 +720,6 @@ distcheck: dist
GZIP=$(GZIP_ENV) gzip -dc $(distdir).tar.gz | $(am__untar) ;;\
*.tar.bz2*) \
bzip2 -dc $(distdir).tar.bz2 | $(am__untar) ;;\
- *.tar.lzma*) \
- lzma -dc $(distdir).tar.lzma | $(am__untar) ;;\
*.tar.lz*) \
lzip -dc $(distdir).tar.lz | $(am__untar) ;;\
*.tar.xz*) \
@@ -655,18 +731,19 @@ distcheck: dist
*.zip*) \
unzip $(distdir).zip ;;\
esac
- chmod -R a-w $(distdir); chmod a+w $(distdir)
- mkdir $(distdir)/_build
- mkdir $(distdir)/_inst
+ chmod -R a-w $(distdir)
+ chmod u+w $(distdir)
+ mkdir $(distdir)/_build $(distdir)/_inst
chmod a-w $(distdir)
test -d $(distdir)/_build || exit 0; \
dc_install_base=`$(am__cd) $(distdir)/_inst && pwd | sed -e 's,^[^:\\/]:[\\/],/,'` \
&& dc_destdir="$${TMPDIR-/tmp}/am-dc-$$$$/" \
&& am__cwd=`pwd` \
&& $(am__cd) $(distdir)/_build \
- && ../configure --srcdir=.. --prefix="$$dc_install_base" \
+ && ../configure \
$(AM_DISTCHECK_CONFIGURE_FLAGS) \
$(DISTCHECK_CONFIGURE_FLAGS) \
+ --srcdir=.. --prefix="$$dc_install_base" \
&& $(MAKE) $(AM_MAKEFLAGS) \
&& $(MAKE) $(AM_MAKEFLAGS) dvi \
&& $(MAKE) $(AM_MAKEFLAGS) check \
@@ -689,7 +766,7 @@ distcheck: dist
&& $(MAKE) $(AM_MAKEFLAGS) distcleancheck \
&& cd "$$am__cwd" \
|| exit 1
- $(am__remove_distdir)
+ $(am__post_remove_distdir)
@(echo "$(distdir) archives ready for distribution: "; \
list='$(DIST_ARCHIVES)'; for i in $$list; do echo $$i; done) | \
sed -e 1h -e 1s/./=/g -e 1p -e 1x -e '$$p' -e '$$x'
@@ -824,13 +901,12 @@ ps-am:
uninstall-am:
-.MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) all \
- ctags-recursive install-am install-strip tags-recursive
+.MAKE: $(am__recursive_targets) all install-am install-strip
-.PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \
- all all-am am--refresh check check-am clean clean-generic \
- ctags ctags-recursive dist dist-all dist-bzip2 dist-gzip \
- dist-lzip dist-lzma dist-shar dist-tarZ dist-xz dist-zip \
+.PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am \
+ am--refresh check check-am clean clean-cscope clean-generic \
+ cscope cscopelist-am ctags ctags-am dist dist-all dist-bzip2 \
+ dist-gzip dist-lzip dist-shar dist-tarZ dist-xz dist-zip \
distcheck distclean distclean-generic distclean-hdr \
distclean-tags distcleancheck distdir distuninstallcheck dvi \
dvi-am html html-am info info-am install install-am \
@@ -840,8 +916,8 @@ uninstall-am:
install-pdf-am install-ps install-ps-am install-strip \
installcheck installcheck-am installdirs installdirs-am \
maintainer-clean maintainer-clean-generic mostlyclean \
- mostlyclean-generic pdf pdf-am ps ps-am tags tags-recursive \
- uninstall uninstall-am
+ mostlyclean-generic pdf pdf-am ps ps-am tags tags-am uninstall \
+ uninstall-am
# Tell versions [3.59,3.63) of GNU make to not export all variables.
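
Most of the Makefile.in churn above is mechanical automake 1.14.1 regeneration; the one genuinely new mechanism is am__make_running_with_option, which scans MAKEFLAGS for a short option (n for dry runs, k for keep-going) while skipping variable overrides, long options, and option arguments. A simplified POSIX-sh sketch of that scan — the real macro also strips -I/-O/-l option arguments and prefers MFLAGS under GNU make:

    is_dry_run() {
        for flg in ${MAKEFLAGS-}; do
            case $flg in
                *=*|--*) continue ;;  # skip FOO=bar overrides and long options
                *n*)     return 0 ;;  # a short-option cluster containing n
            esac
        done
        return 1
    }

    MAKEFLAGS='--no-print-directory n'   # '--no-print-directory' must not match
    is_dry_run && echo 'dry run detected'

This matters for recipes that still execute under 'make -n' (those containing $(MAKE)), such as the distdir rule above, which guards its mkdir side effects with $(am__make_dryrun).
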
diff --git a/README.md b/README.md
index cff7f31b..15102b1d 100644
--- a/README.md
+++ b/README.md
@@ -1,4 +1,4 @@
-# netdata [![Build Status](https://travis-ci.org/firehol/netdata.svg?branch=master)](https://travis-ci.org/firehol/netdata) [![Coverity Scan Build Status](https://scan.coverity.com/projects/9140/badge.svg)](https://scan.coverity.com/projects/firehol-netdata) [![Codacy Badge](https://api.codacy.com/project/badge/Grade/a994873f30d045b9b4b83606c3eb3498)](https://www.codacy.com/app/netdata/netdata?utm_source=github.com&amp;utm_medium=referral&amp;utm_content=firehol/netdata&amp;utm_campaign=Badge_Grade) [![Code Climate](https://codeclimate.com/github/firehol/netdata/badges/gpa.svg)](https://codeclimate.com/github/firehol/netdata)
+# netdata [![Build Status](https://travis-ci.org/firehol/netdata.svg?branch=master)](https://travis-ci.org/firehol/netdata) [![Coverity Scan Build Status](https://scan.coverity.com/projects/9140/badge.svg)](https://scan.coverity.com/projects/firehol-netdata) [![Codacy Badge](https://api.codacy.com/project/badge/Grade/a994873f30d045b9b4b83606c3eb3498)](https://www.codacy.com/app/netdata/netdata?utm_source=github.com&amp;utm_medium=referral&amp;utm_content=firehol/netdata&amp;utm_campaign=Badge_Grade) [![Code Climate](https://codeclimate.com/github/firehol/netdata/badges/gpa.svg)](https://codeclimate.com/github/firehol/netdata) [![license](https://img.shields.io/github/license/firehol/netdata.svg)](LICENSE)
> *New to netdata? Here is a live demo: [http://my-netdata.io](http://my-netdata.io)*
**netdata** is a system for **distributed real-time performance and health monitoring**.
@@ -13,6 +13,7 @@ disrupting their core function._
netdata runs on **Linux**, **FreeBSD**, and **MacOS**.
[![Twitter Follow](https://img.shields.io/twitter/follow/linuxnetdata.svg?style=social&label=New%20-%20stay%20in%20touch%20-%20follow%20netdata%20on%20twitter)](https://twitter.com/linuxnetdata)
+[![analytics](http://www.google-analytics.com/collect?v=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Ffirehol%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Freadme&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
---
@@ -273,10 +274,22 @@ This is a list of what it currently monitors:
- **SNMP devices**<br/>
can be monitored too (although you will need to configure these)
+- **statsd**<br/>
+ [netdata is a fully featured statsd server](https://github.com/firehol/netdata/wiki/statsd)
+
And you can extend it, by writing plugins that collect data from any source, using any computer language.
---
+## netdata infographic
+
+This is a high-level overview of the netdata feature set and architecture.
+Click it to interact with it (it has direct links to documentation).
+
+[![netdata-overview](https://cloud.githubusercontent.com/assets/2662304/26529478/104652ac-43c9-11e7-903f-edb9bb2ced24.png)](https://my-netdata.io/infographic.html)
+
+---
+
## Installation
Use our **[automatic installer](https://github.com/firehol/netdata/wiki/Installation)** to build and install it on your system.
@@ -303,6 +316,6 @@ Check the **[netdata wiki](https://github.com/firehol/netdata/wiki)**.
## License
-netdata is GPLv3+.
+netdata is [GPLv3+](LICENSE).
-It re-distributes other open-source tools and libraries. Please check its [License Statement](https://github.com/firehol/netdata/blob/master/LICENSE.md).
+It re-distributes other open-source tools and libraries. Please check the [third party licenses](https://github.com/firehol/netdata/blob/master/LICENSE-REDISTRIBUTED.md).
diff --git a/aclocal.m4 b/aclocal.m4
index 2bb8c79e..58b64dc7 100644
--- a/aclocal.m4
+++ b/aclocal.m4
@@ -1,8 +1,7 @@
-# generated automatically by aclocal 1.11.3 -*- Autoconf -*-
+# generated automatically by aclocal 1.14.1 -*- Autoconf -*-
+
+# Copyright (C) 1996-2013 Free Software Foundation, Inc.
-# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004,
-# 2005, 2006, 2007, 2008, 2009, 2010, 2011 Free Software Foundation,
-# Inc.
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
@@ -12,13 +11,14 @@
# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE.
+m4_ifndef([AC_CONFIG_MACRO_DIRS], [m4_defun([_AM_CONFIG_MACRO_DIRS], [])m4_defun([AC_CONFIG_MACRO_DIRS], [_AM_CONFIG_MACRO_DIRS($@)])])
m4_ifndef([AC_AUTOCONF_VERSION],
[m4_copy([m4_PACKAGE_VERSION], [AC_AUTOCONF_VERSION])])dnl
-m4_if(m4_defn([AC_AUTOCONF_VERSION]), [2.68],,
-[m4_warning([this file was generated for autoconf 2.68.
+m4_if(m4_defn([AC_AUTOCONF_VERSION]), [2.69],,
+[m4_warning([this file was generated for autoconf 2.69.
You have another version of autoconf. It may work, but is not guaranteed to.
If you have problems, you may need to regenerate the build system entirely.
-To do so, use the procedure documented by the package, typically `autoreconf'.])])
+To do so, use the procedure documented by the package, typically 'autoreconf'.])])
# pkg.m4 - Macros to locate and utilise pkg-config. -*- Autoconf -*-
# serial 1 (pkg-config-0.24)
@@ -180,25 +180,22 @@ else
fi[]dnl
])# PKG_CHECK_MODULES
-# Copyright (C) 2002, 2003, 2005, 2006, 2007, 2008, 2011 Free Software
-# Foundation, Inc.
+# Copyright (C) 2002-2013 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
-# serial 1
-
# AM_AUTOMAKE_VERSION(VERSION)
# ----------------------------
# Automake X.Y traces this macro to ensure aclocal.m4 has been
# generated from the m4 files accompanying Automake X.Y.
# (This private macro should not be called outside this file.)
AC_DEFUN([AM_AUTOMAKE_VERSION],
-[am__api_version='1.11'
+[am__api_version='1.14'
dnl Some users find AM_AUTOMAKE_VERSION and mistake it for a way to
dnl require some minimum version. Point them to the right macro.
-m4_if([$1], [1.11.3], [],
+m4_if([$1], [1.14.1], [],
[AC_FATAL([Do not call $0, use AM_INIT_AUTOMAKE([$1]).])])dnl
])
@@ -214,24 +211,22 @@ m4_define([_AM_AUTOCONF_VERSION], [])
# Call AM_AUTOMAKE_VERSION and AM_AUTOMAKE_VERSION so they can be traced.
# This function is AC_REQUIREd by AM_INIT_AUTOMAKE.
AC_DEFUN([AM_SET_CURRENT_AUTOMAKE_VERSION],
-[AM_AUTOMAKE_VERSION([1.11.3])dnl
+[AM_AUTOMAKE_VERSION([1.14.1])dnl
m4_ifndef([AC_AUTOCONF_VERSION],
[m4_copy([m4_PACKAGE_VERSION], [AC_AUTOCONF_VERSION])])dnl
_AM_AUTOCONF_VERSION(m4_defn([AC_AUTOCONF_VERSION]))])
# AM_AUX_DIR_EXPAND -*- Autoconf -*-
-# Copyright (C) 2001, 2003, 2005, 2011 Free Software Foundation, Inc.
+# Copyright (C) 2001-2013 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
-# serial 1
-
# For projects using AC_CONFIG_AUX_DIR([foo]), Autoconf sets
-# $ac_aux_dir to `$srcdir/foo'. In other projects, it is set to
-# `$srcdir', `$srcdir/..', or `$srcdir/../..'.
+# $ac_aux_dir to '$srcdir/foo'. In other projects, it is set to
+# '$srcdir', '$srcdir/..', or '$srcdir/../..'.
#
# Of course, Automake must honor this variable whenever it calls a
# tool from the auxiliary directory. The problem is that $srcdir (and
@@ -250,7 +245,7 @@ _AM_AUTOCONF_VERSION(m4_defn([AC_AUTOCONF_VERSION]))])
#
# The reason of the latter failure is that $top_srcdir and $ac_aux_dir
# are both prefixed by $srcdir. In an in-source build this is usually
-# harmless because $srcdir is `.', but things will broke when you
+# harmless because $srcdir is '.', but things will break when you
# start a VPATH build or use an absolute $srcdir.
#
# So we could use something similar to $top_srcdir/$ac_aux_dir/missing,
@@ -276,22 +271,19 @@ am_aux_dir=`cd $ac_aux_dir && pwd`
# AM_CONDITIONAL -*- Autoconf -*-
-# Copyright (C) 1997, 2000, 2001, 2003, 2004, 2005, 2006, 2008
-# Free Software Foundation, Inc.
+# Copyright (C) 1997-2013 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
-# serial 9
-
# AM_CONDITIONAL(NAME, SHELL-CONDITION)
# -------------------------------------
# Define a conditional.
AC_DEFUN([AM_CONDITIONAL],
-[AC_PREREQ(2.52)dnl
- ifelse([$1], [TRUE], [AC_FATAL([$0: invalid condition: $1])],
- [$1], [FALSE], [AC_FATAL([$0: invalid condition: $1])])dnl
+[AC_PREREQ([2.52])dnl
+ m4_if([$1], [TRUE], [AC_FATAL([$0: invalid condition: $1])],
+ [$1], [FALSE], [AC_FATAL([$0: invalid condition: $1])])dnl
AC_SUBST([$1_TRUE])dnl
AC_SUBST([$1_FALSE])dnl
_AM_SUBST_NOTMAKE([$1_TRUE])dnl
@@ -310,16 +302,14 @@ AC_CONFIG_COMMANDS_PRE(
Usually this means the macro was only invoked conditionally.]])
fi])])
-# Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2009,
-# 2010, 2011 Free Software Foundation, Inc.
+# Copyright (C) 1999-2013 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
-# serial 12
-# There are a few dirty hacks below to avoid letting `AC_PROG_CC' be
+# There are a few dirty hacks below to avoid letting 'AC_PROG_CC' be
# written in clear, in which case automake, when reading aclocal.m4,
# will think it sees a *use*, and therefore will trigger all it's
# C support machinery. Also note that it means that autoscan, seeing
@@ -329,7 +319,7 @@ fi])])
# _AM_DEPENDENCIES(NAME)
# ----------------------
# See how the compiler implements dependency checking.
-# NAME is "CC", "CXX", "GCJ", or "OBJC".
+# NAME is "CC", "CXX", "OBJC", "OBJCXX", "UPC", or "GJC".
# We try a few techniques and use that to set a single cache variable.
#
# We don't AC_REQUIRE the corresponding AC_PROG_CC since the latter was
@@ -342,12 +332,13 @@ AC_REQUIRE([AM_OUTPUT_DEPENDENCY_COMMANDS])dnl
AC_REQUIRE([AM_MAKE_INCLUDE])dnl
AC_REQUIRE([AM_DEP_TRACK])dnl
-ifelse([$1], CC, [depcc="$CC" am_compiler_list=],
- [$1], CXX, [depcc="$CXX" am_compiler_list=],
- [$1], OBJC, [depcc="$OBJC" am_compiler_list='gcc3 gcc'],
- [$1], UPC, [depcc="$UPC" am_compiler_list=],
- [$1], GCJ, [depcc="$GCJ" am_compiler_list='gcc3 gcc'],
- [depcc="$$1" am_compiler_list=])
+m4_if([$1], [CC], [depcc="$CC" am_compiler_list=],
+ [$1], [CXX], [depcc="$CXX" am_compiler_list=],
+ [$1], [OBJC], [depcc="$OBJC" am_compiler_list='gcc3 gcc'],
+ [$1], [OBJCXX], [depcc="$OBJCXX" am_compiler_list='gcc3 gcc'],
+ [$1], [UPC], [depcc="$UPC" am_compiler_list=],
+ [$1], [GCJ], [depcc="$GCJ" am_compiler_list='gcc3 gcc'],
+ [depcc="$$1" am_compiler_list=])
AC_CACHE_CHECK([dependency style of $depcc],
[am_cv_$1_dependencies_compiler_type],
@@ -355,8 +346,8 @@ AC_CACHE_CHECK([dependency style of $depcc],
# We make a subdir and do the tests there. Otherwise we can end up
# making bogus files that we don't know about and never remove. For
# instance it was reported that on HP-UX the gcc test will end up
- # making a dummy file named `D' -- because `-MD' means `put the output
- # in D'.
+ # making a dummy file named 'D' -- because '-MD' means "put the output
+ # in D".
rm -rf conftest.dir
mkdir conftest.dir
# Copy depcomp to subdir because otherwise we won't find it if we're
@@ -396,16 +387,16 @@ AC_CACHE_CHECK([dependency style of $depcc],
: > sub/conftest.c
for i in 1 2 3 4 5 6; do
echo '#include "conftst'$i'.h"' >> sub/conftest.c
- # Using `: > sub/conftst$i.h' creates only sub/conftst1.h with
- # Solaris 8's {/usr,}/bin/sh.
- touch sub/conftst$i.h
+ # Using ": > sub/conftst$i.h" creates only sub/conftst1.h with
+ # Solaris 10 /bin/sh.
+ echo '/* dummy */' > sub/conftst$i.h
done
echo "${am__include} ${am__quote}sub/conftest.Po${am__quote}" > confmf
- # We check with `-c' and `-o' for the sake of the "dashmstdout"
+ # We check with '-c' and '-o' for the sake of the "dashmstdout"
# mode. It turns out that the SunPro C++ compiler does not properly
- # handle `-M -o', and we need to detect this. Also, some Intel
- # versions had trouble with output in subdirs
+ # handle '-M -o', and we need to detect this. Also, some Intel
+ # versions had trouble with output in subdirs.
am__obj=sub/conftest.${OBJEXT-o}
am__minus_obj="-o $am__obj"
case $depmode in
@@ -414,8 +405,8 @@ AC_CACHE_CHECK([dependency style of $depcc],
test "$am__universal" = false || continue
;;
nosideeffect)
- # after this tag, mechanisms are not by side-effect, so they'll
- # only be used when explicitly requested
+ # After this tag, mechanisms are not by side-effect, so they'll
+ # only be used when explicitly requested.
if test "x$enable_dependency_tracking" = xyes; then
continue
else
@@ -423,7 +414,7 @@ AC_CACHE_CHECK([dependency style of $depcc],
fi
;;
msvc7 | msvc7msys | msvisualcpp | msvcmsys)
- # This compiler won't grok `-c -o', but also, the minuso test has
+ # This compiler won't grok '-c -o', but also, the minuso test has
# not run yet. These depmodes are late enough in the game, and
# so weak that their functioning should not be impacted.
am__obj=conftest.${OBJEXT-o}
@@ -471,7 +462,7 @@ AM_CONDITIONAL([am__fastdep$1], [
# AM_SET_DEPDIR
# -------------
# Choose a directory name for dependency files.
-# This macro is AC_REQUIREd in _AM_DEPENDENCIES
+# This macro is AC_REQUIREd in _AM_DEPENDENCIES.
AC_DEFUN([AM_SET_DEPDIR],
[AC_REQUIRE([AM_SET_LEADING_DOT])dnl
AC_SUBST([DEPDIR], ["${am__leading_dot}deps"])dnl
@@ -481,9 +472,13 @@ AC_SUBST([DEPDIR], ["${am__leading_dot}deps"])dnl
# AM_DEP_TRACK
# ------------
AC_DEFUN([AM_DEP_TRACK],
-[AC_ARG_ENABLE(dependency-tracking,
-[ --disable-dependency-tracking speeds up one-time build
- --enable-dependency-tracking do not reject slow dependency extractors])
+[AC_ARG_ENABLE([dependency-tracking], [dnl
+AS_HELP_STRING(
+ [--enable-dependency-tracking],
+ [do not reject slow dependency extractors])
+AS_HELP_STRING(
+ [--disable-dependency-tracking],
+ [speeds up one-time build])])
if test "x$enable_dependency_tracking" != xno; then
am_depcomp="$ac_aux_dir/depcomp"
AMDEPBACKSLASH='\'
@@ -498,20 +493,18 @@ _AM_SUBST_NOTMAKE([am__nodep])dnl
# Generate code to set up dependency tracking. -*- Autoconf -*-
-# Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2008
-# Free Software Foundation, Inc.
+# Copyright (C) 1999-2013 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
-#serial 5
# _AM_OUTPUT_DEPENDENCY_COMMANDS
# ------------------------------
AC_DEFUN([_AM_OUTPUT_DEPENDENCY_COMMANDS],
[{
- # Autoconf 2.62 quotes --file arguments for eval, but not when files
+ # Older Autoconf quotes --file arguments for eval, but not when files
# are listed without --file. Let's play safe and only enable the eval
# if we detect the quoting.
case $CONFIG_FILES in
@@ -524,7 +517,7 @@ AC_DEFUN([_AM_OUTPUT_DEPENDENCY_COMMANDS],
# Strip MF so we end up with the name of the file.
mf=`echo "$mf" | sed -e 's/:.*$//'`
# Check whether this is an Automake generated Makefile or not.
- # We used to match only the files named `Makefile.in', but
+ # We used to match only the files named 'Makefile.in', but
# some people rename them; so instead we look at the file content.
# Grep'ing the first line is not enough: some people post-process
# each Makefile.in and add a new line on top of each file to say so.
@@ -536,21 +529,19 @@ AC_DEFUN([_AM_OUTPUT_DEPENDENCY_COMMANDS],
continue
fi
# Extract the definition of DEPDIR, am__include, and am__quote
- # from the Makefile without running `make'.
+ # from the Makefile without running 'make'.
DEPDIR=`sed -n 's/^DEPDIR = //p' < "$mf"`
test -z "$DEPDIR" && continue
am__include=`sed -n 's/^am__include = //p' < "$mf"`
- test -z "am__include" && continue
+ test -z "$am__include" && continue
am__quote=`sed -n 's/^am__quote = //p' < "$mf"`
- # When using ansi2knr, U may be empty or an underscore; expand it
- U=`sed -n 's/^U = //p' < "$mf"`
# Find all dependency output files, they are included files with
# $(DEPDIR) in their names. We invoke sed twice because it is the
# simplest approach to changing $(DEPDIR) to its actual value in the
# expansion.
for file in `sed -n "
s/^$am__include $am__quote\(.*(DEPDIR).*\)$am__quote"'$/\1/p' <"$mf" | \
- sed -e 's/\$(DEPDIR)/'"$DEPDIR"'/g' -e 's/\$U/'"$U"'/g'`; do
+ sed -e 's/\$(DEPDIR)/'"$DEPDIR"'/g'`; do
# Make sure the directory exists.
test -f "$dirpart/$file" && continue
fdir=`AS_DIRNAME(["$file"])`
@@ -568,7 +559,7 @@ AC_DEFUN([_AM_OUTPUT_DEPENDENCY_COMMANDS],
# This macro should only be invoked once -- use via AC_REQUIRE.
#
# This code is only required when automatic dependency tracking
-# is enabled. FIXME. This creates each `.P' file that we will
+# is enabled. FIXME. This creates each '.P' file that we will
# need in order to bootstrap the dependency handling code.
AC_DEFUN([AM_OUTPUT_DEPENDENCY_COMMANDS],
[AC_CONFIG_COMMANDS([depfiles],
@@ -578,18 +569,21 @@ AC_DEFUN([AM_OUTPUT_DEPENDENCY_COMMANDS],
# Do all the work for Automake. -*- Autoconf -*-
-# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004,
-# 2005, 2006, 2008, 2009 Free Software Foundation, Inc.
+# Copyright (C) 1996-2013 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
-# serial 16
-
# This macro actually does too much. Some checks are only needed if
# your package does certain things. But this isn't really a big deal.
+dnl Redefine AC_PROG_CC to automatically invoke _AM_PROG_CC_C_O.
+m4_define([AC_PROG_CC],
+m4_defn([AC_PROG_CC])
+[_AM_PROG_CC_C_O
+])
+
# AM_INIT_AUTOMAKE(PACKAGE, VERSION, [NO-DEFINE])
# AM_INIT_AUTOMAKE([OPTIONS])
# -----------------------------------------------
@@ -602,7 +596,7 @@ AC_DEFUN([AM_OUTPUT_DEPENDENCY_COMMANDS],
# arguments mandatory, and then we can depend on a new Autoconf
# release and drop the old call support.
AC_DEFUN([AM_INIT_AUTOMAKE],
-[AC_PREREQ([2.62])dnl
+[AC_PREREQ([2.65])dnl
dnl Autoconf wants to disallow AM_ names. We explicitly allow
dnl the ones we care about.
m4_pattern_allow([^AM_[A-Z]+FLAGS$])dnl
@@ -631,31 +625,40 @@ AC_SUBST([CYGPATH_W])
# Define the identity of the package.
dnl Distinguish between old-style and new-style calls.
m4_ifval([$2],
-[m4_ifval([$3], [_AM_SET_OPTION([no-define])])dnl
+[AC_DIAGNOSE([obsolete],
+ [$0: two- and three-arguments forms are deprecated.])
+m4_ifval([$3], [_AM_SET_OPTION([no-define])])dnl
AC_SUBST([PACKAGE], [$1])dnl
AC_SUBST([VERSION], [$2])],
[_AM_SET_OPTIONS([$1])dnl
dnl Diagnose old-style AC_INIT with new-style AM_AUTOMAKE_INIT.
-m4_if(m4_ifdef([AC_PACKAGE_NAME], 1)m4_ifdef([AC_PACKAGE_VERSION], 1), 11,,
+m4_if(
+ m4_ifdef([AC_PACKAGE_NAME], [ok]):m4_ifdef([AC_PACKAGE_VERSION], [ok]),
+ [ok:ok],,
[m4_fatal([AC_INIT should be called with package and version arguments])])dnl
AC_SUBST([PACKAGE], ['AC_PACKAGE_TARNAME'])dnl
AC_SUBST([VERSION], ['AC_PACKAGE_VERSION'])])dnl
_AM_IF_OPTION([no-define],,
-[AC_DEFINE_UNQUOTED(PACKAGE, "$PACKAGE", [Name of package])
- AC_DEFINE_UNQUOTED(VERSION, "$VERSION", [Version number of package])])dnl
+[AC_DEFINE_UNQUOTED([PACKAGE], ["$PACKAGE"], [Name of package])
+ AC_DEFINE_UNQUOTED([VERSION], ["$VERSION"], [Version number of package])])dnl
# Some tools Automake needs.
AC_REQUIRE([AM_SANITY_CHECK])dnl
AC_REQUIRE([AC_ARG_PROGRAM])dnl
-AM_MISSING_PROG(ACLOCAL, aclocal-${am__api_version})
-AM_MISSING_PROG(AUTOCONF, autoconf)
-AM_MISSING_PROG(AUTOMAKE, automake-${am__api_version})
-AM_MISSING_PROG(AUTOHEADER, autoheader)
-AM_MISSING_PROG(MAKEINFO, makeinfo)
+AM_MISSING_PROG([ACLOCAL], [aclocal-${am__api_version}])
+AM_MISSING_PROG([AUTOCONF], [autoconf])
+AM_MISSING_PROG([AUTOMAKE], [automake-${am__api_version}])
+AM_MISSING_PROG([AUTOHEADER], [autoheader])
+AM_MISSING_PROG([MAKEINFO], [makeinfo])
AC_REQUIRE([AM_PROG_INSTALL_SH])dnl
AC_REQUIRE([AM_PROG_INSTALL_STRIP])dnl
-AC_REQUIRE([AM_PROG_MKDIR_P])dnl
+AC_REQUIRE([AC_PROG_MKDIR_P])dnl
+# For better backward compatibility. To be removed once Automake 1.9.x
+# dies out for good. For more background, see:
+# <http://lists.gnu.org/archive/html/automake/2012-07/msg00001.html>
+# <http://lists.gnu.org/archive/html/automake/2012-07/msg00014.html>
+AC_SUBST([mkdir_p], ['$(MKDIR_P)'])
# We need awk for the "check" target. The system "awk" is bad on
# some platforms.
AC_REQUIRE([AC_PROG_AWK])dnl
@@ -666,34 +669,78 @@ _AM_IF_OPTION([tar-ustar], [_AM_PROG_TAR([ustar])],
[_AM_PROG_TAR([v7])])])
_AM_IF_OPTION([no-dependencies],,
[AC_PROVIDE_IFELSE([AC_PROG_CC],
- [_AM_DEPENDENCIES(CC)],
- [define([AC_PROG_CC],
- defn([AC_PROG_CC])[_AM_DEPENDENCIES(CC)])])dnl
+ [_AM_DEPENDENCIES([CC])],
+ [m4_define([AC_PROG_CC],
+ m4_defn([AC_PROG_CC])[_AM_DEPENDENCIES([CC])])])dnl
AC_PROVIDE_IFELSE([AC_PROG_CXX],
- [_AM_DEPENDENCIES(CXX)],
- [define([AC_PROG_CXX],
- defn([AC_PROG_CXX])[_AM_DEPENDENCIES(CXX)])])dnl
+ [_AM_DEPENDENCIES([CXX])],
+ [m4_define([AC_PROG_CXX],
+ m4_defn([AC_PROG_CXX])[_AM_DEPENDENCIES([CXX])])])dnl
AC_PROVIDE_IFELSE([AC_PROG_OBJC],
- [_AM_DEPENDENCIES(OBJC)],
- [define([AC_PROG_OBJC],
- defn([AC_PROG_OBJC])[_AM_DEPENDENCIES(OBJC)])])dnl
+ [_AM_DEPENDENCIES([OBJC])],
+ [m4_define([AC_PROG_OBJC],
+ m4_defn([AC_PROG_OBJC])[_AM_DEPENDENCIES([OBJC])])])dnl
+AC_PROVIDE_IFELSE([AC_PROG_OBJCXX],
+ [_AM_DEPENDENCIES([OBJCXX])],
+ [m4_define([AC_PROG_OBJCXX],
+ m4_defn([AC_PROG_OBJCXX])[_AM_DEPENDENCIES([OBJCXX])])])dnl
])
-_AM_IF_OPTION([silent-rules], [AC_REQUIRE([AM_SILENT_RULES])])dnl
-dnl The `parallel-tests' driver may need to know about EXEEXT, so add the
-dnl `am__EXEEXT' conditional if _AM_COMPILER_EXEEXT was seen. This macro
-dnl is hooked onto _AC_COMPILER_EXEEXT early, see below.
+AC_REQUIRE([AM_SILENT_RULES])dnl
+dnl The testsuite driver may need to know about EXEEXT, so add the
+dnl 'am__EXEEXT' conditional if _AM_COMPILER_EXEEXT was seen. This
+dnl macro is hooked onto _AC_COMPILER_EXEEXT early, see below.
AC_CONFIG_COMMANDS_PRE(dnl
[m4_provide_if([_AM_COMPILER_EXEEXT],
[AM_CONDITIONAL([am__EXEEXT], [test -n "$EXEEXT"])])])dnl
-])
-dnl Hook into `_AC_COMPILER_EXEEXT' early to learn its expansion. Do not
+# POSIX will say in a future version that running "rm -f" with no argument
+# is OK; and we want to be able to make that assumption in our Makefile
+# recipes. So use an aggressive probe to check that the usage we want is
+# actually supported "in the wild" to an acceptable degree.
+# See automake bug#10828.
+# To make any issue more visible, cause the running configure to be aborted
+# by default if the 'rm' program in use doesn't match our expectations; the
+# user can still override this though.
+if rm -f && rm -fr && rm -rf; then : OK; else
+ cat >&2 <<'END'
+Oops!
+
+Your 'rm' program seems unable to run without file operands specified
+on the command line, even when the '-f' option is present. This is contrary
+to the behaviour of most rm programs out there, and not conforming with
+the upcoming POSIX standard: <http://austingroupbugs.net/view.php?id=542>
+
+Please tell bug-automake@gnu.org about your system, including the value
+of your $PATH and any error possibly output before this message. This
+can help us improve future automake versions.
+
+END
+ if test x"$ACCEPT_INFERIOR_RM_PROGRAM" = x"yes"; then
+ echo 'Configuration will proceed anyway, since you have set the' >&2
+ echo 'ACCEPT_INFERIOR_RM_PROGRAM variable to "yes"' >&2
+ echo >&2
+ else
+ cat >&2 <<'END'
+Aborting the configuration process, to ensure you take notice of the issue.
+
+You can download and install GNU coreutils to get an 'rm' implementation
+that behaves properly: <http://www.gnu.org/software/coreutils/>.
+
+If you want to complete the configuration process using your problematic
+'rm' anyway, export the environment variable ACCEPT_INFERIOR_RM_PROGRAM
+to "yes", and re-run configure.
+
+END
+ AC_MSG_ERROR([Your 'rm' program is bad, sorry.])
+ fi
+fi])
+
+dnl Hook into '_AC_COMPILER_EXEEXT' early to learn its expansion. Do not
dnl add the conditional right here, as _AC_COMPILER_EXEEXT may be further
dnl mangled by Autoconf and run in a shell conditional statement.
m4_define([_AC_COMPILER_EXEEXT],
m4_defn([_AC_COMPILER_EXEEXT])[m4_provide([_AM_COMPILER_EXEEXT])])
-
# When config.status generates a header, we must update the stamp-h file.
# This file resides in the same directory as the config header
# that is generated. The stamp files are numbered to have different names.
@@ -715,15 +762,12 @@ for _am_header in $config_headers :; do
done
echo "timestamp for $_am_arg" >`AS_DIRNAME(["$_am_arg"])`/stamp-h[]$_am_stamp_count])
-# Copyright (C) 2001, 2003, 2005, 2008, 2011 Free Software Foundation,
-# Inc.
+# Copyright (C) 2001-2013 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
-# serial 1
-
# AM_PROG_INSTALL_SH
# ------------------
# Define $install_sh.
@@ -737,16 +781,14 @@ if test x"${install_sh}" != xset; then
install_sh="\${SHELL} $am_aux_dir/install-sh"
esac
fi
-AC_SUBST(install_sh)])
+AC_SUBST([install_sh])])
-# Copyright (C) 2003, 2005 Free Software Foundation, Inc.
+# Copyright (C) 2003-2013 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
-# serial 2
-
# Check whether the underlying file-system supports filenames
# with a leading dot. For instance MS-DOS doesn't.
AC_DEFUN([AM_SET_LEADING_DOT],
@@ -763,20 +805,17 @@ AC_SUBST([am__leading_dot])])
# Add --enable-maintainer-mode option to configure. -*- Autoconf -*-
# From Jim Meyering
-# Copyright (C) 1996, 1998, 2000, 2001, 2002, 2003, 2004, 2005, 2008,
-# 2011 Free Software Foundation, Inc.
+# Copyright (C) 1996-2013 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
-# serial 5
-
# AM_MAINTAINER_MODE([DEFAULT-MODE])
# ----------------------------------
# Control maintainer-specific portions of Makefiles.
-# Default is to disable them, unless `enable' is passed literally.
-# For symmetry, `disable' may be passed as well. Anyway, the user
+# Default is to disable them, unless 'enable' is passed literally.
+# For symmetry, 'disable' may be passed as well. Anyway, the user
# can override the default with the --enable/--disable switch.
AC_DEFUN([AM_MAINTAINER_MODE],
[m4_case(m4_default([$1], [disable]),
@@ -787,10 +826,11 @@ AC_DEFUN([AM_MAINTAINER_MODE],
AC_MSG_CHECKING([whether to enable maintainer-specific portions of Makefiles])
dnl maintainer-mode's default is 'disable' unless 'enable' is passed
AC_ARG_ENABLE([maintainer-mode],
-[ --][am_maintainer_other][-maintainer-mode am_maintainer_other make rules and dependencies not useful
- (and sometimes confusing) to the casual installer],
- [USE_MAINTAINER_MODE=$enableval],
- [USE_MAINTAINER_MODE=]m4_if(am_maintainer_other, [enable], [no], [yes]))
+ [AS_HELP_STRING([--]am_maintainer_other[-maintainer-mode],
+ am_maintainer_other[ make rules and dependencies not useful
+ (and sometimes confusing) to the casual installer])],
+ [USE_MAINTAINER_MODE=$enableval],
+ [USE_MAINTAINER_MODE=]m4_if(am_maintainer_other, [enable], [no], [yes]))
AC_MSG_RESULT([$USE_MAINTAINER_MODE])
AM_CONDITIONAL([MAINTAINER_MODE], [test $USE_MAINTAINER_MODE = yes])
MAINT=$MAINTAINER_MODE_TRUE
@@ -798,18 +838,14 @@ AC_MSG_CHECKING([whether to enable maintainer-specific portions of Makefiles])
]
)
-AU_DEFUN([jm_MAINTAINER_MODE], [AM_MAINTAINER_MODE])
-
# Check to see how 'make' treats includes. -*- Autoconf -*-
-# Copyright (C) 2001, 2002, 2003, 2005, 2009 Free Software Foundation, Inc.
+# Copyright (C) 2001-2013 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
-# serial 4
-
# AM_MAKE_INCLUDE()
# -----------------
# Check to see how make treats includes.
@@ -827,7 +863,7 @@ am__quote=
_am_result=none
# First try GNU make style include.
echo "include confinc" > confmf
-# Ignore all kinds of additional output from `make'.
+# Ignore all kinds of additional output from 'make'.
case `$am_make -s -f confmf 2> /dev/null` in #(
*the\ am__doit\ target*)
am__include=include
@@ -854,15 +890,12 @@ rm -f confinc confmf
# Fake the existence of programs that GNU maintainers use. -*- Autoconf -*-
-# Copyright (C) 1997, 1999, 2000, 2001, 2003, 2004, 2005, 2008
-# Free Software Foundation, Inc.
+# Copyright (C) 1997-2013 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
-# serial 6
-
# AM_MISSING_PROG(NAME, PROGRAM)
# ------------------------------
AC_DEFUN([AM_MISSING_PROG],
@@ -870,11 +903,10 @@ AC_DEFUN([AM_MISSING_PROG],
$1=${$1-"${am_missing_run}$2"}
AC_SUBST($1)])
-
# AM_MISSING_HAS_RUN
# ------------------
-# Define MISSING if not defined so far and test if it supports --run.
-# If it does, set am_missing_run to use it, otherwise, to nothing.
+# Define MISSING if not defined so far and test if it is modern enough.
+# If it is, set am_missing_run to use it, otherwise, to nothing.
AC_DEFUN([AM_MISSING_HAS_RUN],
[AC_REQUIRE([AM_AUX_DIR_EXPAND])dnl
AC_REQUIRE_AUX_FILE([missing])dnl
@@ -887,54 +919,22 @@ if test x"${MISSING+set}" != xset; then
esac
fi
# Use eval to expand $SHELL
-if eval "$MISSING --run true"; then
- am_missing_run="$MISSING --run "
+if eval "$MISSING --is-lightweight"; then
+ am_missing_run="$MISSING "
else
am_missing_run=
- AC_MSG_WARN([`missing' script is too old or missing])
+ AC_MSG_WARN(['missing' script is too old or missing])
fi
])
-# Copyright (C) 2003, 2004, 2005, 2006, 2011 Free Software Foundation,
-# Inc.
-#
-# This file is free software; the Free Software Foundation
-# gives unlimited permission to copy and/or distribute it,
-# with or without modifications, as long as this notice is preserved.
-
-# serial 1
-
-# AM_PROG_MKDIR_P
-# ---------------
-# Check for `mkdir -p'.
-AC_DEFUN([AM_PROG_MKDIR_P],
-[AC_PREREQ([2.60])dnl
-AC_REQUIRE([AC_PROG_MKDIR_P])dnl
-dnl Automake 1.8 to 1.9.6 used to define mkdir_p. We now use MKDIR_P,
-dnl while keeping a definition of mkdir_p for backward compatibility.
-dnl @MKDIR_P@ is magic: AC_OUTPUT adjusts its value for each Makefile.
-dnl However we cannot define mkdir_p as $(MKDIR_P) for the sake of
-dnl Makefile.ins that do not define MKDIR_P, so we do our own
-dnl adjustment using top_builddir (which is defined more often than
-dnl MKDIR_P).
-AC_SUBST([mkdir_p], ["$MKDIR_P"])dnl
-case $mkdir_p in
- [[\\/$]]* | ?:[[\\/]]*) ;;
- */*) mkdir_p="\$(top_builddir)/$mkdir_p" ;;
-esac
-])
-
# Helper functions for option handling. -*- Autoconf -*-
-# Copyright (C) 2001, 2002, 2003, 2005, 2008, 2010 Free Software
-# Foundation, Inc.
+# Copyright (C) 2001-2013 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
-# serial 5
-
# _AM_MANGLE_OPTION(NAME)
# -----------------------
AC_DEFUN([_AM_MANGLE_OPTION],
@@ -944,7 +944,7 @@ AC_DEFUN([_AM_MANGLE_OPTION],
# --------------------
# Set option NAME. Presently that only means defining a flag for this option.
AC_DEFUN([_AM_SET_OPTION],
-[m4_define(_AM_MANGLE_OPTION([$1]), 1)])
+[m4_define(_AM_MANGLE_OPTION([$1]), [1])])
# _AM_SET_OPTIONS(OPTIONS)
# ------------------------
@@ -958,24 +958,82 @@ AC_DEFUN([_AM_SET_OPTIONS],
AC_DEFUN([_AM_IF_OPTION],
[m4_ifset(_AM_MANGLE_OPTION([$1]), [$2], [$3])])
-# Check to make sure that the build environment is sane. -*- Autoconf -*-
+# Copyright (C) 1999-2013 Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# _AM_PROG_CC_C_O
+# ---------------
+# Like AC_PROG_CC_C_O, but changed for automake. We rewrite AC_PROG_CC
+# to automatically call this.
+AC_DEFUN([_AM_PROG_CC_C_O],
+[AC_REQUIRE([AM_AUX_DIR_EXPAND])dnl
+AC_REQUIRE_AUX_FILE([compile])dnl
+AC_LANG_PUSH([C])dnl
+AC_CACHE_CHECK(
+ [whether $CC understands -c and -o together],
+ [am_cv_prog_cc_c_o],
+ [AC_LANG_CONFTEST([AC_LANG_PROGRAM([])])
+ # Make sure it works both with $CC and with simple cc.
+ # Following AC_PROG_CC_C_O, we do the test twice because some
+ # compilers refuse to overwrite an existing .o file with -o,
+ # though they will create one.
+ am_cv_prog_cc_c_o=yes
+ for am_i in 1 2; do
+ if AM_RUN_LOG([$CC -c conftest.$ac_ext -o conftest2.$ac_objext]) \
+ && test -f conftest2.$ac_objext; then
+ : OK
+ else
+ am_cv_prog_cc_c_o=no
+ break
+ fi
+ done
+ rm -f core conftest*
+ unset am_i])
+if test "$am_cv_prog_cc_c_o" != yes; then
+ # Losing compiler, so override with the script.
+ # FIXME: It is wrong to rewrite CC.
+ # But if we don't then we get into trouble of one sort or another.
+ # A longer-term fix would be to have automake use am__CC in this case,
+ # and then we could set am__CC="\$(top_srcdir)/compile \$(CC)"
+ CC="$am_aux_dir/compile $CC"
+fi
+AC_LANG_POP([C])])
+
+# For backward compatibility.
+AC_DEFUN_ONCE([AM_PROG_CC_C_O], [AC_REQUIRE([AC_PROG_CC])])
-# Copyright (C) 1996, 1997, 2000, 2001, 2003, 2005, 2008
-# Free Software Foundation, Inc.
+# Copyright (C) 2001-2013 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
-# serial 5
+# AM_RUN_LOG(COMMAND)
+# -------------------
+# Run COMMAND, save the exit status in ac_status, and log it.
+# (This has been adapted from Autoconf's _AC_RUN_LOG macro.)
+AC_DEFUN([AM_RUN_LOG],
+[{ echo "$as_me:$LINENO: $1" >&AS_MESSAGE_LOG_FD
+ ($1) >&AS_MESSAGE_LOG_FD 2>&AS_MESSAGE_LOG_FD
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&AS_MESSAGE_LOG_FD
+ (exit $ac_status); }])
+
+# Check to make sure that the build environment is sane. -*- Autoconf -*-
+
+# Copyright (C) 1996-2013 Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
# AM_SANITY_CHECK
# ---------------
AC_DEFUN([AM_SANITY_CHECK],
[AC_MSG_CHECKING([whether build environment is sane])
-# Just in case
-sleep 1
-echo timestamp > conftest.file
# Reject unsafe characters in $srcdir or the absolute working directory
# name. Accept space and tab only in the latter.
am_lf='
@@ -986,32 +1044,40 @@ case `pwd` in
esac
case $srcdir in
*[[\\\"\#\$\&\'\`$am_lf\ \ ]]*)
- AC_MSG_ERROR([unsafe srcdir value: `$srcdir']);;
+ AC_MSG_ERROR([unsafe srcdir value: '$srcdir']);;
esac
-# Do `set' in a subshell so we don't clobber the current shell's
+# Do 'set' in a subshell so we don't clobber the current shell's
# arguments. Must try -L first in case configure is actually a
# symlink; some systems play weird games with the mod time of symlinks
# (eg FreeBSD returns the mod time of the symlink's containing
# directory).
if (
- set X `ls -Lt "$srcdir/configure" conftest.file 2> /dev/null`
- if test "$[*]" = "X"; then
- # -L didn't work.
- set X `ls -t "$srcdir/configure" conftest.file`
- fi
- rm -f conftest.file
- if test "$[*]" != "X $srcdir/configure conftest.file" \
- && test "$[*]" != "X conftest.file $srcdir/configure"; then
-
- # If neither matched, then we have a broken ls. This can happen
- # if, for instance, CONFIG_SHELL is bash and it inherits a
- # broken ls alias from the environment. This has actually
- # happened. Such a system could not be considered "sane".
- AC_MSG_ERROR([ls -t appears to fail. Make sure there is not a broken
-alias in your environment])
- fi
-
+ am_has_slept=no
+ for am_try in 1 2; do
+ echo "timestamp, slept: $am_has_slept" > conftest.file
+ set X `ls -Lt "$srcdir/configure" conftest.file 2> /dev/null`
+ if test "$[*]" = "X"; then
+ # -L didn't work.
+ set X `ls -t "$srcdir/configure" conftest.file`
+ fi
+ if test "$[*]" != "X $srcdir/configure conftest.file" \
+ && test "$[*]" != "X conftest.file $srcdir/configure"; then
+
+ # If neither matched, then we have a broken ls. This can happen
+ # if, for instance, CONFIG_SHELL is bash and it inherits a
+ # broken ls alias from the environment. This has actually
+ # happened. Such a system could not be considered "sane".
+ AC_MSG_ERROR([ls -t appears to fail. Make sure there is not a broken
+ alias in your environment])
+ fi
+ if test "$[2]" = conftest.file || test $am_try -eq 2; then
+ break
+ fi
+ # Just in case.
+ sleep 1
+ am_has_slept=yes
+ done
test "$[2]" = conftest.file
)
then
@@ -1021,46 +1087,118 @@ else
AC_MSG_ERROR([newly created file is older than distributed files!
Check your system clock])
fi
-AC_MSG_RESULT(yes)])
+AC_MSG_RESULT([yes])
+# If we didn't sleep, we still need to ensure time stamps of config.status and
+# generated files are strictly newer.
+am_sleep_pid=
+if grep 'slept: no' conftest.file >/dev/null 2>&1; then
+ ( sleep 1 ) &
+ am_sleep_pid=$!
+fi
+AC_CONFIG_COMMANDS_PRE(
+ [AC_MSG_CHECKING([that generated files are newer than configure])
+ if test -n "$am_sleep_pid"; then
+ # Hide warnings about reused PIDs.
+ wait $am_sleep_pid 2>/dev/null
+ fi
+ AC_MSG_RESULT([done])])
+rm -f conftest.file
+])
-# Copyright (C) 2001, 2003, 2005, 2011 Free Software Foundation, Inc.
+# Copyright (C) 2009-2013 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
-# serial 1
+# AM_SILENT_RULES([DEFAULT])
+# --------------------------
+# Enable less verbose build rules; with the default set to DEFAULT
+# ("yes" being less verbose, "no" or empty being verbose).
+AC_DEFUN([AM_SILENT_RULES],
+[AC_ARG_ENABLE([silent-rules], [dnl
+AS_HELP_STRING(
+ [--enable-silent-rules],
+ [less verbose build output (undo: "make V=1")])
+AS_HELP_STRING(
+ [--disable-silent-rules],
+ [verbose build output (undo: "make V=0")])dnl
+])
+case $enable_silent_rules in @%:@ (((
+ yes) AM_DEFAULT_VERBOSITY=0;;
+ no) AM_DEFAULT_VERBOSITY=1;;
+ *) AM_DEFAULT_VERBOSITY=m4_if([$1], [yes], [0], [1]);;
+esac
+dnl
+dnl A few 'make' implementations (e.g., NonStop OS and NextStep)
+dnl do not support nested variable expansions.
+dnl See automake bug#9928 and bug#10237.
+am_make=${MAKE-make}
+AC_CACHE_CHECK([whether $am_make supports nested variables],
+ [am_cv_make_support_nested_variables],
+ [if AS_ECHO([['TRUE=$(BAR$(V))
+BAR0=false
+BAR1=true
+V=1
+am__doit:
+ @$(TRUE)
+.PHONY: am__doit']]) | $am_make -f - >/dev/null 2>&1; then
+ am_cv_make_support_nested_variables=yes
+else
+ am_cv_make_support_nested_variables=no
+fi])
+if test $am_cv_make_support_nested_variables = yes; then
+ dnl Using '$V' instead of '$(V)' breaks IRIX make.
+ AM_V='$(V)'
+ AM_DEFAULT_V='$(AM_DEFAULT_VERBOSITY)'
+else
+ AM_V=$AM_DEFAULT_VERBOSITY
+ AM_DEFAULT_V=$AM_DEFAULT_VERBOSITY
+fi
+AC_SUBST([AM_V])dnl
+AM_SUBST_NOTMAKE([AM_V])dnl
+AC_SUBST([AM_DEFAULT_V])dnl
+AM_SUBST_NOTMAKE([AM_DEFAULT_V])dnl
+AC_SUBST([AM_DEFAULT_VERBOSITY])dnl
+AM_BACKSLASH='\'
+AC_SUBST([AM_BACKSLASH])dnl
+_AM_SUBST_NOTMAKE([AM_BACKSLASH])dnl
+])
+
+# Copyright (C) 2001-2013 Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
# AM_PROG_INSTALL_STRIP
# ---------------------
-# One issue with vendor `install' (even GNU) is that you can't
+# One issue with vendor 'install' (even GNU) is that you can't
# specify the program used to strip binaries. This is especially
# annoying in cross-compiling environments, where the build's strip
# is unlikely to handle the host's binaries.
# Fortunately install-sh will honor a STRIPPROG variable, so we
-# always use install-sh in `make install-strip', and initialize
+# always use install-sh in "make install-strip", and initialize
# STRIPPROG with the value of the STRIP variable (set by the user).
AC_DEFUN([AM_PROG_INSTALL_STRIP],
[AC_REQUIRE([AM_PROG_INSTALL_SH])dnl
-# Installed binaries are usually stripped using `strip' when the user
-# run `make install-strip'. However `strip' might not be the right
+# Installed binaries are usually stripped using 'strip' when the user
+# run "make install-strip". However 'strip' might not be the right
# tool to use in cross-compilation environments, therefore Automake
-# will honor the `STRIP' environment variable to overrule this program.
-dnl Don't test for $cross_compiling = yes, because it might be `maybe'.
+# will honor the 'STRIP' environment variable to overrule this program.
+dnl Don't test for $cross_compiling = yes, because it might be 'maybe'.
if test "$cross_compiling" != no; then
AC_CHECK_TOOL([STRIP], [strip], :)
fi
INSTALL_STRIP_PROGRAM="\$(install_sh) -c -s"
AC_SUBST([INSTALL_STRIP_PROGRAM])])
-# Copyright (C) 2006, 2008, 2010 Free Software Foundation, Inc.
+# Copyright (C) 2006-2013 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
-# serial 3
-
# _AM_SUBST_NOTMAKE(VARIABLE)
# ---------------------------
# Prevent Automake from outputting VARIABLE = @VARIABLE@ in Makefile.in.
@@ -1074,18 +1212,16 @@ AC_DEFUN([AM_SUBST_NOTMAKE], [_AM_SUBST_NOTMAKE($@)])
# Check how to create a tarball. -*- Autoconf -*-
-# Copyright (C) 2004, 2005, 2012 Free Software Foundation, Inc.
+# Copyright (C) 2004-2013 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
-# serial 2
-
# _AM_PROG_TAR(FORMAT)
# --------------------
# Check how to create a tarball in format FORMAT.
-# FORMAT should be one of `v7', `ustar', or `pax'.
+# FORMAT should be one of 'v7', 'ustar', or 'pax'.
#
# Substitute a variable $(am__tar) that is a command
# writing to stdout a FORMAT-tarball containing the directory
@@ -1095,76 +1231,114 @@ AC_DEFUN([AM_SUBST_NOTMAKE], [_AM_SUBST_NOTMAKE($@)])
# Substitute a variable $(am__untar) that extract such
# a tarball read from stdin.
# $(am__untar) < result.tar
+#
AC_DEFUN([_AM_PROG_TAR],
[# Always define AMTAR for backward compatibility. Yes, it's still used
# in the wild :-( We should find a proper way to deprecate it ...
AC_SUBST([AMTAR], ['$${TAR-tar}'])
-m4_if([$1], [v7],
- [am__tar='$${TAR-tar} chof - "$$tardir"' am__untar='$${TAR-tar} xf -'],
- [m4_case([$1], [ustar],, [pax],,
- [m4_fatal([Unknown tar format])])
-AC_MSG_CHECKING([how to create a $1 tar archive])
-# Loop over all known methods to create a tar archive until one works.
+
+# We'll loop over all known methods to create a tar archive until one works.
_am_tools='gnutar m4_if([$1], [ustar], [plaintar]) pax cpio none'
-_am_tools=${am_cv_prog_tar_$1-$_am_tools}
-# Do not fold the above two line into one, because Tru64 sh and
-# Solaris sh will not grok spaces in the rhs of `-'.
-for _am_tool in $_am_tools
-do
- case $_am_tool in
- gnutar)
- for _am_tar in tar gnutar gtar;
- do
- AM_RUN_LOG([$_am_tar --version]) && break
- done
- am__tar="$_am_tar --format=m4_if([$1], [pax], [posix], [$1]) -chf - "'"$$tardir"'
- am__tar_="$_am_tar --format=m4_if([$1], [pax], [posix], [$1]) -chf - "'"$tardir"'
- am__untar="$_am_tar -xf -"
- ;;
- plaintar)
- # Must skip GNU tar: if it does not support --format= it doesn't create
- # ustar tarball either.
- (tar --version) >/dev/null 2>&1 && continue
- am__tar='tar chf - "$$tardir"'
- am__tar_='tar chf - "$tardir"'
- am__untar='tar xf -'
- ;;
- pax)
- am__tar='pax -L -x $1 -w "$$tardir"'
- am__tar_='pax -L -x $1 -w "$tardir"'
- am__untar='pax -r'
- ;;
- cpio)
- am__tar='find "$$tardir" -print | cpio -o -H $1 -L'
- am__tar_='find "$tardir" -print | cpio -o -H $1 -L'
- am__untar='cpio -i -H $1 -d'
- ;;
- none)
- am__tar=false
- am__tar_=false
- am__untar=false
- ;;
- esac
- # If the value was cached, stop now. We just wanted to have am__tar
- # and am__untar set.
- test -n "${am_cv_prog_tar_$1}" && break
+m4_if([$1], [v7],
+ [am__tar='$${TAR-tar} chof - "$$tardir"' am__untar='$${TAR-tar} xf -'],
+
+ [m4_case([$1],
+ [ustar],
+ [# The POSIX 1988 'ustar' format is defined with fixed-size fields.
+ # There is notably a 21-bit limit for the UID and the GID. In fact,
+ # the 'pax' utility can hang on bigger UID/GID (see automake bug#8343
+ # and bug#13588).
+ am_max_uid=2097151 # 2^21 - 1
+ am_max_gid=$am_max_uid
+ # The $UID and $GID variables are not portable, so we need to resort
+ # to the POSIX-mandated id(1) utility. Errors in the 'id' calls
+ # below are definitely unexpected, so allow the users to see them
+ # (that is, avoid stderr redirection).
+ am_uid=`id -u || echo unknown`
+ am_gid=`id -g || echo unknown`
+ AC_MSG_CHECKING([whether UID '$am_uid' is supported by ustar format])
+ if test $am_uid -le $am_max_uid; then
+ AC_MSG_RESULT([yes])
+ else
+ AC_MSG_RESULT([no])
+ _am_tools=none
+ fi
+ AC_MSG_CHECKING([whether GID '$am_gid' is supported by ustar format])
+ if test $am_gid -le $am_max_gid; then
+ AC_MSG_RESULT([yes])
+ else
+ AC_MSG_RESULT([no])
+ _am_tools=none
+ fi],
+
+ [pax],
+ [],
+
+ [m4_fatal([Unknown tar format])])
+
+ AC_MSG_CHECKING([how to create a $1 tar archive])
+
+ # Go ahead even if we have the value already cached. We do so because we
+ # need to set the values for the 'am__tar' and 'am__untar' variables.
+ _am_tools=${am_cv_prog_tar_$1-$_am_tools}
+
+ for _am_tool in $_am_tools; do
+ case $_am_tool in
+ gnutar)
+ for _am_tar in tar gnutar gtar; do
+ AM_RUN_LOG([$_am_tar --version]) && break
+ done
+ am__tar="$_am_tar --format=m4_if([$1], [pax], [posix], [$1]) -chf - "'"$$tardir"'
+ am__tar_="$_am_tar --format=m4_if([$1], [pax], [posix], [$1]) -chf - "'"$tardir"'
+ am__untar="$_am_tar -xf -"
+ ;;
+ plaintar)
+ # Must skip GNU tar: if it does not support --format= it doesn't create
+ # ustar tarball either.
+ (tar --version) >/dev/null 2>&1 && continue
+ am__tar='tar chf - "$$tardir"'
+ am__tar_='tar chf - "$tardir"'
+ am__untar='tar xf -'
+ ;;
+ pax)
+ am__tar='pax -L -x $1 -w "$$tardir"'
+ am__tar_='pax -L -x $1 -w "$tardir"'
+ am__untar='pax -r'
+ ;;
+ cpio)
+ am__tar='find "$$tardir" -print | cpio -o -H $1 -L'
+ am__tar_='find "$tardir" -print | cpio -o -H $1 -L'
+ am__untar='cpio -i -H $1 -d'
+ ;;
+ none)
+ am__tar=false
+ am__tar_=false
+ am__untar=false
+ ;;
+ esac
- # tar/untar a dummy directory, and stop if the command works
- rm -rf conftest.dir
- mkdir conftest.dir
- echo GrepMe > conftest.dir/file
- AM_RUN_LOG([tardir=conftest.dir && eval $am__tar_ >conftest.tar])
+ # If the value was cached, stop now. We just wanted to have am__tar
+ # and am__untar set.
+ test -n "${am_cv_prog_tar_$1}" && break
+
+ # tar/untar a dummy directory, and stop if the command works.
+ rm -rf conftest.dir
+ mkdir conftest.dir
+ echo GrepMe > conftest.dir/file
+ AM_RUN_LOG([tardir=conftest.dir && eval $am__tar_ >conftest.tar])
+ rm -rf conftest.dir
+ if test -s conftest.tar; then
+ AM_RUN_LOG([$am__untar <conftest.tar])
+ AM_RUN_LOG([cat conftest.dir/file])
+ grep GrepMe conftest.dir/file >/dev/null 2>&1 && break
+ fi
+ done
rm -rf conftest.dir
- if test -s conftest.tar; then
- AM_RUN_LOG([$am__untar <conftest.tar])
- grep GrepMe conftest.dir/file >/dev/null 2>&1 && break
- fi
-done
-rm -rf conftest.dir
-AC_CACHE_VAL([am_cv_prog_tar_$1], [am_cv_prog_tar_$1=$_am_tool])
-AC_MSG_RESULT([$am_cv_prog_tar_$1])])
+ AC_CACHE_VAL([am_cv_prog_tar_$1], [am_cv_prog_tar_$1=$_am_tool])
+ AC_MSG_RESULT([$am_cv_prog_tar_$1])])
+
AC_SUBST([am__tar])
AC_SUBST([am__untar])
]) # _AM_PROG_TAR
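Worth noting: _AM_PROG_TAR is not called from configure.ac directly; it is pulled in through AM_INIT_AUTOMAKE's tar options. A minimal sketch (project name and version are placeholders):

```m4
dnl requesting the ustar format here expands, via AM_INIT_AUTOMAKE,
dnl into _AM_PROG_TAR([ustar]) as defined above
AC_INIT([myproject], [1.0])
AM_INIT_AUTOMAKE([tar-ustar])
```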
diff --git a/charts.d/Makefile.in b/charts.d/Makefile.in
index a613e1b3..5d17f4d2 100644
--- a/charts.d/Makefile.in
+++ b/charts.d/Makefile.in
@@ -1,9 +1,8 @@
-# Makefile.in generated by automake 1.11.3 from Makefile.am.
+# Makefile.in generated by automake 1.14.1 from Makefile.am.
# @configure_input@
-# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
-# 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 Free Software
-# Foundation, Inc.
+# Copyright (C) 1994-2013 Free Software Foundation, Inc.
+
# This Makefile.in is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
@@ -17,6 +16,51 @@
VPATH = @srcdir@
+am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
pkgdatadir = $(datadir)/@PACKAGE@
pkgincludedir = $(includedir)/@PACKAGE@
pkglibdir = $(libdir)/@PACKAGE@
@@ -36,8 +80,8 @@ POST_UNINSTALL = :
build_triplet = @build@
host_triplet = @host@
subdir = charts.d
-DIST_COMMON = $(dist_charts_DATA) $(dist_charts_SCRIPTS) \
- $(srcdir)/Makefile.am $(srcdir)/Makefile.in
+DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \
+ $(dist_charts_SCRIPTS) $(dist_charts_DATA)
ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
am__aclocal_m4_deps = $(top_srcdir)/m4/ax_c___atomic.m4 \
$(top_srcdir)/m4/ax_c__generic.m4 $(top_srcdir)/m4/ax_c_lto.m4 \
@@ -82,12 +126,31 @@ am__uninstall_files_from_dir = { \
}
am__installdirs = "$(DESTDIR)$(chartsdir)" "$(DESTDIR)$(chartsdir)"
SCRIPTS = $(dist_charts_SCRIPTS)
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
SOURCES =
DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
DATA = $(dist_charts_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
AUTOCONF = @AUTOCONF@
AUTOHEADER = @AUTOHEADER@
AUTOMAKE = @AUTOMAKE@
@@ -295,8 +358,11 @@ $(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
$(am__aclocal_m4_deps):
install-dist_chartsSCRIPTS: $(dist_charts_SCRIPTS)
@$(NORMAL_INSTALL)
- test -z "$(chartsdir)" || $(MKDIR_P) "$(DESTDIR)$(chartsdir)"
@list='$(dist_charts_SCRIPTS)'; test -n "$(chartsdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(chartsdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(chartsdir)" || exit 1; \
+ fi; \
for p in $$list; do \
if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \
@@ -327,8 +393,11 @@ uninstall-dist_chartsSCRIPTS:
dir='$(DESTDIR)$(chartsdir)'; $(am__uninstall_files_from_dir)
install-dist_chartsDATA: $(dist_charts_DATA)
@$(NORMAL_INSTALL)
- test -z "$(chartsdir)" || $(MKDIR_P) "$(DESTDIR)$(chartsdir)"
@list='$(dist_charts_DATA)'; test -n "$(chartsdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(chartsdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(chartsdir)" || exit 1; \
+ fi; \
for p in $$list; do \
if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
echo "$$d$$p"; \
@@ -343,11 +412,11 @@ uninstall-dist_chartsDATA:
@list='$(dist_charts_DATA)'; test -n "$(chartsdir)" || list=; \
files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
dir='$(DESTDIR)$(chartsdir)'; $(am__uninstall_files_from_dir)
-tags: TAGS
-TAGS:
+tags TAGS:
+
+ctags CTAGS:
-ctags: CTAGS
-CTAGS:
+cscope cscopelist:
distdir: $(DISTFILES)
@@ -486,16 +555,17 @@ uninstall-am: uninstall-dist_chartsDATA uninstall-dist_chartsSCRIPTS
.MAKE: install-am install-strip
-.PHONY: all all-am check check-am clean clean-generic distclean \
- distclean-generic distdir dvi dvi-am html html-am info info-am \
- install install-am install-data install-data-am \
- install-dist_chartsDATA install-dist_chartsSCRIPTS install-dvi \
- install-dvi-am install-exec install-exec-am install-html \
- install-html-am install-info install-info-am install-man \
- install-pdf install-pdf-am install-ps install-ps-am \
- install-strip installcheck installcheck-am installdirs \
- maintainer-clean maintainer-clean-generic mostlyclean \
- mostlyclean-generic pdf pdf-am ps ps-am uninstall uninstall-am \
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dist_chartsDATA \
+ install-dist_chartsSCRIPTS install-dvi install-dvi-am \
+ install-exec install-exec-am install-html install-html-am \
+ install-info install-info-am install-man install-pdf \
+ install-pdf-am install-ps install-ps-am install-strip \
+ installcheck installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags-am uninstall uninstall-am \
uninstall-dist_chartsDATA uninstall-dist_chartsSCRIPTS
diff --git a/compile b/compile
new file mode 100755
index 00000000..531136b0
--- /dev/null
+++ b/compile
@@ -0,0 +1,347 @@
+#! /bin/sh
+# Wrapper for compilers which do not understand '-c -o'.
+
+scriptversion=2012-10-14.11; # UTC
+
+# Copyright (C) 1999-2013 Free Software Foundation, Inc.
+# Written by Tom Tromey <tromey@cygnus.com>.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2, or (at your option)
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+# As a special exception to the GNU General Public License, if you
+# distribute this file as part of a program that contains a
+# configuration script generated by Autoconf, you may include it under
+# the same distribution terms that you use for the rest of that program.
+
+# This file is maintained in Automake, please report
+# bugs to <bug-automake@gnu.org> or send patches to
+# <automake-patches@gnu.org>.
+
+nl='
+'
+
+# We need space, tab and new line, in precisely that order. Quoting is
+# there to prevent tools from complaining about whitespace usage.
+IFS=" "" $nl"
+
+file_conv=
+
+# func_file_conv build_file lazy
+# Convert a $build file to $host form and store it in $file
+# Currently only supports Windows hosts. If the determined conversion
+# type is listed in (the comma separated) LAZY, no conversion will
+# take place.
+func_file_conv ()
+{
+ file=$1
+ case $file in
+ / | /[!/]*) # absolute file, and not a UNC file
+ if test -z "$file_conv"; then
+ # lazily determine how to convert abs files
+ case `uname -s` in
+ MINGW*)
+ file_conv=mingw
+ ;;
+ CYGWIN*)
+ file_conv=cygwin
+ ;;
+ *)
+ file_conv=wine
+ ;;
+ esac
+ fi
+ case $file_conv/,$2, in
+ *,$file_conv,*)
+ ;;
+ mingw/*)
+ file=`cmd //C echo "$file " | sed -e 's/"\(.*\) " *$/\1/'`
+ ;;
+ cygwin/*)
+ file=`cygpath -m "$file" || echo "$file"`
+ ;;
+ wine/*)
+ file=`winepath -w "$file" || echo "$file"`
+ ;;
+ esac
+ ;;
+ esac
+}
+
+# func_cl_dashL linkdir
+# Make cl look for libraries in LINKDIR
+func_cl_dashL ()
+{
+ func_file_conv "$1"
+ if test -z "$lib_path"; then
+ lib_path=$file
+ else
+ lib_path="$lib_path;$file"
+ fi
+ linker_opts="$linker_opts -LIBPATH:$file"
+}
+
+# func_cl_dashl library
+# Do a library search-path lookup for cl
+func_cl_dashl ()
+{
+ lib=$1
+ found=no
+ save_IFS=$IFS
+ IFS=';'
+ for dir in $lib_path $LIB
+ do
+ IFS=$save_IFS
+ if $shared && test -f "$dir/$lib.dll.lib"; then
+ found=yes
+ lib=$dir/$lib.dll.lib
+ break
+ fi
+ if test -f "$dir/$lib.lib"; then
+ found=yes
+ lib=$dir/$lib.lib
+ break
+ fi
+ if test -f "$dir/lib$lib.a"; then
+ found=yes
+ lib=$dir/lib$lib.a
+ break
+ fi
+ done
+ IFS=$save_IFS
+
+ if test "$found" != yes; then
+ lib=$lib.lib
+ fi
+}
+
+# func_cl_wrapper cl arg...
+# Adjust compile command to suit cl
+func_cl_wrapper ()
+{
+ # Assume a capable shell
+ lib_path=
+ shared=:
+ linker_opts=
+ for arg
+ do
+ if test -n "$eat"; then
+ eat=
+ else
+ case $1 in
+ -o)
+ # configure might choose to run compile as 'compile cc -o foo foo.c'.
+ eat=1
+ case $2 in
+ *.o | *.[oO][bB][jJ])
+ func_file_conv "$2"
+ set x "$@" -Fo"$file"
+ shift
+ ;;
+ *)
+ func_file_conv "$2"
+ set x "$@" -Fe"$file"
+ shift
+ ;;
+ esac
+ ;;
+ -I)
+ eat=1
+ func_file_conv "$2" mingw
+ set x "$@" -I"$file"
+ shift
+ ;;
+ -I*)
+ func_file_conv "${1#-I}" mingw
+ set x "$@" -I"$file"
+ shift
+ ;;
+ -l)
+ eat=1
+ func_cl_dashl "$2"
+ set x "$@" "$lib"
+ shift
+ ;;
+ -l*)
+ func_cl_dashl "${1#-l}"
+ set x "$@" "$lib"
+ shift
+ ;;
+ -L)
+ eat=1
+ func_cl_dashL "$2"
+ ;;
+ -L*)
+ func_cl_dashL "${1#-L}"
+ ;;
+ -static)
+ shared=false
+ ;;
+ -Wl,*)
+ arg=${1#-Wl,}
+ save_ifs="$IFS"; IFS=','
+ for flag in $arg; do
+ IFS="$save_ifs"
+ linker_opts="$linker_opts $flag"
+ done
+ IFS="$save_ifs"
+ ;;
+ -Xlinker)
+ eat=1
+ linker_opts="$linker_opts $2"
+ ;;
+ -*)
+ set x "$@" "$1"
+ shift
+ ;;
+ *.cc | *.CC | *.cxx | *.CXX | *.[cC]++)
+ func_file_conv "$1"
+ set x "$@" -Tp"$file"
+ shift
+ ;;
+ *.c | *.cpp | *.CPP | *.lib | *.LIB | *.Lib | *.OBJ | *.obj | *.[oO])
+ func_file_conv "$1" mingw
+ set x "$@" "$file"
+ shift
+ ;;
+ *)
+ set x "$@" "$1"
+ shift
+ ;;
+ esac
+ fi
+ shift
+ done
+ if test -n "$linker_opts"; then
+ linker_opts="-link$linker_opts"
+ fi
+ exec "$@" $linker_opts
+ exit 1
+}
+
+eat=
+
+case $1 in
+ '')
+ echo "$0: No command. Try '$0 --help' for more information." 1>&2
+ exit 1;
+ ;;
+ -h | --h*)
+ cat <<\EOF
+Usage: compile [--help] [--version] PROGRAM [ARGS]
+
+Wrapper for compilers which do not understand '-c -o'.
+Remove '-o dest.o' from ARGS, run PROGRAM with the remaining
+arguments, and rename the output as expected.
+
+If you are trying to build a whole package this is not the
+right script to run: please start by reading the file 'INSTALL'.
+
+Report bugs to <bug-automake@gnu.org>.
+EOF
+ exit $?
+ ;;
+ -v | --v*)
+ echo "compile $scriptversion"
+ exit $?
+ ;;
+ cl | *[/\\]cl | cl.exe | *[/\\]cl.exe )
+ func_cl_wrapper "$@" # Doesn't return...
+ ;;
+esac
+
+ofile=
+cfile=
+
+for arg
+do
+ if test -n "$eat"; then
+ eat=
+ else
+ case $1 in
+ -o)
+ # configure might choose to run compile as 'compile cc -o foo foo.c'.
+ # So we strip '-o arg' only if arg is an object.
+ eat=1
+ case $2 in
+ *.o | *.obj)
+ ofile=$2
+ ;;
+ *)
+ set x "$@" -o "$2"
+ shift
+ ;;
+ esac
+ ;;
+ *.c)
+ cfile=$1
+ set x "$@" "$1"
+ shift
+ ;;
+ *)
+ set x "$@" "$1"
+ shift
+ ;;
+ esac
+ fi
+ shift
+done
+
+if test -z "$ofile" || test -z "$cfile"; then
+ # If no '-o' option was seen then we might have been invoked from a
+ # pattern rule where we don't need one. That is ok -- this is a
+ # normal compilation that the losing compiler can handle. If no
+ # '.c' file was seen then we are probably linking. That is also
+ # ok.
+ exec "$@"
+fi
+
+# Name of file we expect compiler to create.
+cofile=`echo "$cfile" | sed 's|^.*[\\/]||; s|^[a-zA-Z]:||; s/\.c$/.o/'`
+
+# Create the lock directory.
+# Note: use '[/\\:.-]' here to ensure that we don't use the same name
+# that we are using for the .o file. Also, base the name on the expected
+# object file name, since that is what matters with a parallel build.
+lockdir=`echo "$cofile" | sed -e 's|[/\\:.-]|_|g'`.d
+while true; do
+ if mkdir "$lockdir" >/dev/null 2>&1; then
+ break
+ fi
+ sleep 1
+done
+# FIXME: race condition here if user kills between mkdir and trap.
+trap "rmdir '$lockdir'; exit 1" 1 2 15
+
+# Run the compile.
+"$@"
+ret=$?
+
+if test -f "$cofile"; then
+ test "$cofile" = "$ofile" || mv "$cofile" "$ofile"
+elif test -f "${cofile}bj"; then
+ test "${cofile}bj" = "$ofile" || mv "${cofile}bj" "$ofile"
+fi
+
+rmdir "$lockdir"
+exit $ret
+
+# Local Variables:
+# mode: shell-script
+# sh-indentation: 2
+# eval: (add-hook 'write-file-hooks 'time-stamp)
+# time-stamp-start: "scriptversion="
+# time-stamp-format: "%:y-%02m-%02d.%02H"
+# time-stamp-time-zone: "UTC"
+# time-stamp-end: "; # UTC"
+# End:
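The locking in the script above relies on mkdir being atomic: exactly one process can create the directory, so parallel compiles serialize on the same object file. A standalone sketch of the idiom:

```sh
# mkdir-as-mutex: mkdir either creates the lock directory or fails, atomically
lockdir=mylock.d
until mkdir "$lockdir" 2>/dev/null; do
  sleep 1   # another process holds the lock; wait and retry
done
trap 'rmdir "$lockdir"' EXIT   # release the lock on exit
# ... critical section ...
```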
diff --git a/conf.d/Makefile.am b/conf.d/Makefile.am
index efe1f2a6..4cbecb56 100644
--- a/conf.d/Makefile.am
+++ b/conf.d/Makefile.am
@@ -17,6 +17,7 @@ dist_config_DATA = \
nodeconfigdir=$(configdir)/node.d
dist_nodeconfig_DATA = \
node.d/README.md \
+ node.d/fronius.conf.md \
node.d/named.conf.md \
node.d/sma_webbox.conf.md \
node.d/snmp.conf.md \
@@ -28,12 +29,14 @@ dist_pythonconfig_DATA = \
python.d/apache_cache.conf \
python.d/bind_rndc.conf \
python.d/cpufreq.conf \
+ python.d/dns_query_time.conf \
python.d/dovecot.conf \
python.d/elasticsearch.conf \
python.d/example.conf \
python.d/exim.conf \
python.d/fail2ban.conf \
python.d/freeradius.conf \
+ python.d/go_expvar.conf \
python.d/haproxy.conf \
python.d/hddtemp.conf \
python.d/ipfs.conf \
@@ -48,8 +51,10 @@ dist_pythonconfig_DATA = \
python.d/phpfpm.conf \
python.d/postfix.conf \
python.d/postgres.conf \
+ python.d/rabbitmq.conf \
python.d/redis.conf \
python.d/retroshare.conf \
+ python.d/samba.conf \
python.d/sensors.conf \
python.d/squid.conf \
python.d/smartd_log.conf \
@@ -70,10 +75,12 @@ dist_healthconfig_DATA = \
health.d/ipfs.conf \
health.d/ipmi.conf \
health.d/isc_dhcpd.conf \
+ health.d/lighttpd.conf \
health.d/mdstat.conf \
health.d/memcached.conf \
health.d/mysql.conf \
health.d/named.conf \
+ health.d/mongodb.conf \
health.d/nginx.conf \
health.d/postgres.conf \
health.d/redis.conf \
@@ -81,6 +88,7 @@ dist_healthconfig_DATA = \
health.d/squid.conf \
health.d/varnish.conf \
health.d/web_log.conf \
+ health.d/zfs.conf \
$(NULL)
if LINUX
@@ -123,3 +131,8 @@ dist_chartsconfig_DATA = \
charts.d/postfix.conf \
charts.d/squid.conf \
$(NULL)
+
+statsdconfigdir=$(configdir)/statsd.d
+dist_statsdconfig_DATA = \
+ statsd.d/example.conf \
+ $(NULL)
diff --git a/conf.d/Makefile.in b/conf.d/Makefile.in
index fb05396f..7a1e300e 100644
--- a/conf.d/Makefile.in
+++ b/conf.d/Makefile.in
@@ -1,9 +1,8 @@
-# Makefile.in generated by automake 1.11.3 from Makefile.am.
+# Makefile.in generated by automake 1.14.1 from Makefile.am.
# @configure_input@
-# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
-# 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 Free Software
-# Foundation, Inc.
+# Copyright (C) 1994-2013 Free Software Foundation, Inc.
+
# This Makefile.in is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
@@ -16,6 +15,51 @@
@SET_MAKE@
VPATH = @srcdir@
+am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
pkgdatadir = $(datadir)/@PACKAGE@
pkgincludedir = $(includedir)/@PACKAGE@
pkglibdir = $(libdir)/@PACKAGE@
@@ -51,10 +95,10 @@ host_triplet = @host@
@LINUX_TRUE@ $(NULL)
subdir = conf.d
-DIST_COMMON = $(am__dist_healthconfig_DATA_DIST) \
+DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \
$(dist_chartsconfig_DATA) $(dist_config_DATA) \
- $(dist_nodeconfig_DATA) $(dist_pythonconfig_DATA) \
- $(srcdir)/Makefile.am $(srcdir)/Makefile.in
+ $(am__dist_healthconfig_DATA_DIST) $(dist_nodeconfig_DATA) \
+ $(dist_pythonconfig_DATA) $(dist_statsdconfig_DATA)
ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
am__aclocal_m4_deps = $(top_srcdir)/m4/ax_c___atomic.m4 \
$(top_srcdir)/m4/ax_c__generic.m4 $(top_srcdir)/m4/ax_c_lto.m4 \
@@ -70,8 +114,25 @@ mkinstalldirs = $(install_sh) -d
CONFIG_HEADER = $(top_builddir)/config.h
CONFIG_CLEAN_FILES =
CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
SOURCES =
DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
am__vpath_adj = case $$p in \
$(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
@@ -101,27 +162,31 @@ am__uninstall_files_from_dir = { \
}
am__installdirs = "$(DESTDIR)$(chartsconfigdir)" \
"$(DESTDIR)$(configdir)" "$(DESTDIR)$(healthconfigdir)" \
- "$(DESTDIR)$(nodeconfigdir)" "$(DESTDIR)$(pythonconfigdir)"
+ "$(DESTDIR)$(nodeconfigdir)" "$(DESTDIR)$(pythonconfigdir)" \
+ "$(DESTDIR)$(statsdconfigdir)"
am__dist_healthconfig_DATA_DIST = health.d/apache.conf \
health.d/backend.conf health.d/bind_rndc.conf \
health.d/elasticsearch.conf health.d/fping.conf \
health.d/haproxy.conf health.d/ipfs.conf health.d/ipmi.conf \
- health.d/isc_dhcpd.conf health.d/mdstat.conf \
- health.d/memcached.conf health.d/mysql.conf \
- health.d/named.conf health.d/nginx.conf health.d/postgres.conf \
- health.d/redis.conf health.d/retroshare.conf \
- health.d/squid.conf health.d/varnish.conf \
- health.d/web_log.conf health.d/cpu.conf health.d/disks.conf \
- health.d/entropy.conf health.d/ipc.conf health.d/memory.conf \
- health.d/net.conf health.d/netfilter.conf health.d/qos.conf \
- health.d/ram.conf health.d/softnet.conf health.d/swap.conf \
+ health.d/isc_dhcpd.conf health.d/lighttpd.conf \
+ health.d/mdstat.conf health.d/memcached.conf \
+ health.d/mysql.conf health.d/named.conf health.d/mongodb.conf \
+ health.d/nginx.conf health.d/postgres.conf health.d/redis.conf \
+ health.d/retroshare.conf health.d/squid.conf \
+ health.d/varnish.conf health.d/web_log.conf health.d/zfs.conf \
+ health.d/cpu.conf health.d/disks.conf health.d/entropy.conf \
+ health.d/ipc.conf health.d/memory.conf health.d/net.conf \
+ health.d/netfilter.conf health.d/qos.conf health.d/ram.conf \
+ health.d/softnet.conf health.d/swap.conf \
health.d/tcp_resets.conf health.d/udp_errors.conf
DATA = $(dist_chartsconfig_DATA) $(dist_config_DATA) \
$(dist_healthconfig_DATA) $(dist_nodeconfig_DATA) \
- $(dist_pythonconfig_DATA)
+ $(dist_pythonconfig_DATA) $(dist_statsdconfig_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
AUTOCONF = @AUTOCONF@
AUTOHEADER = @AUTOHEADER@
AUTOMAKE = @AUTOMAKE@
@@ -282,6 +347,7 @@ dist_config_DATA = \
nodeconfigdir = $(configdir)/node.d
dist_nodeconfig_DATA = \
node.d/README.md \
+ node.d/fronius.conf.md \
node.d/named.conf.md \
node.d/sma_webbox.conf.md \
node.d/snmp.conf.md \
@@ -293,12 +359,14 @@ dist_pythonconfig_DATA = \
python.d/apache_cache.conf \
python.d/bind_rndc.conf \
python.d/cpufreq.conf \
+ python.d/dns_query_time.conf \
python.d/dovecot.conf \
python.d/elasticsearch.conf \
python.d/example.conf \
python.d/exim.conf \
python.d/fail2ban.conf \
python.d/freeradius.conf \
+ python.d/go_expvar.conf \
python.d/haproxy.conf \
python.d/hddtemp.conf \
python.d/ipfs.conf \
@@ -313,8 +381,10 @@ dist_pythonconfig_DATA = \
python.d/phpfpm.conf \
python.d/postfix.conf \
python.d/postgres.conf \
+ python.d/rabbitmq.conf \
python.d/redis.conf \
python.d/retroshare.conf \
+ python.d/samba.conf \
python.d/sensors.conf \
python.d/squid.conf \
python.d/smartd_log.conf \
@@ -328,12 +398,13 @@ dist_healthconfig_DATA = health.d/apache.conf health.d/backend.conf \
health.d/bind_rndc.conf health.d/elasticsearch.conf \
health.d/fping.conf health.d/haproxy.conf health.d/ipfs.conf \
health.d/ipmi.conf health.d/isc_dhcpd.conf \
- health.d/mdstat.conf health.d/memcached.conf \
- health.d/mysql.conf health.d/named.conf health.d/nginx.conf \
+ health.d/lighttpd.conf health.d/mdstat.conf \
+ health.d/memcached.conf health.d/mysql.conf \
+ health.d/named.conf health.d/mongodb.conf health.d/nginx.conf \
health.d/postgres.conf health.d/redis.conf \
health.d/retroshare.conf health.d/squid.conf \
- health.d/varnish.conf health.d/web_log.conf $(NULL) \
- $(am__append_1)
+ health.d/varnish.conf health.d/web_log.conf health.d/zfs.conf \
+ $(NULL) $(am__append_1)
chartsconfigdir = $(configdir)/charts.d
dist_chartsconfig_DATA = \
charts.d/apache.conf \
@@ -357,6 +428,11 @@ dist_chartsconfig_DATA = \
charts.d/squid.conf \
$(NULL)
+statsdconfigdir = $(configdir)/statsd.d
+dist_statsdconfig_DATA = \
+ statsd.d/example.conf \
+ $(NULL)
+
all: all-am
.SUFFIXES:
@@ -392,8 +468,11 @@ $(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
$(am__aclocal_m4_deps):
install-dist_chartsconfigDATA: $(dist_chartsconfig_DATA)
@$(NORMAL_INSTALL)
- test -z "$(chartsconfigdir)" || $(MKDIR_P) "$(DESTDIR)$(chartsconfigdir)"
@list='$(dist_chartsconfig_DATA)'; test -n "$(chartsconfigdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(chartsconfigdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(chartsconfigdir)" || exit 1; \
+ fi; \
for p in $$list; do \
if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
echo "$$d$$p"; \
@@ -410,8 +489,11 @@ uninstall-dist_chartsconfigDATA:
dir='$(DESTDIR)$(chartsconfigdir)'; $(am__uninstall_files_from_dir)
install-dist_configDATA: $(dist_config_DATA)
@$(NORMAL_INSTALL)
- test -z "$(configdir)" || $(MKDIR_P) "$(DESTDIR)$(configdir)"
@list='$(dist_config_DATA)'; test -n "$(configdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(configdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(configdir)" || exit 1; \
+ fi; \
for p in $$list; do \
if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
echo "$$d$$p"; \
@@ -428,8 +510,11 @@ uninstall-dist_configDATA:
dir='$(DESTDIR)$(configdir)'; $(am__uninstall_files_from_dir)
install-dist_healthconfigDATA: $(dist_healthconfig_DATA)
@$(NORMAL_INSTALL)
- test -z "$(healthconfigdir)" || $(MKDIR_P) "$(DESTDIR)$(healthconfigdir)"
@list='$(dist_healthconfig_DATA)'; test -n "$(healthconfigdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(healthconfigdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(healthconfigdir)" || exit 1; \
+ fi; \
for p in $$list; do \
if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
echo "$$d$$p"; \
@@ -446,8 +531,11 @@ uninstall-dist_healthconfigDATA:
dir='$(DESTDIR)$(healthconfigdir)'; $(am__uninstall_files_from_dir)
install-dist_nodeconfigDATA: $(dist_nodeconfig_DATA)
@$(NORMAL_INSTALL)
- test -z "$(nodeconfigdir)" || $(MKDIR_P) "$(DESTDIR)$(nodeconfigdir)"
@list='$(dist_nodeconfig_DATA)'; test -n "$(nodeconfigdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(nodeconfigdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(nodeconfigdir)" || exit 1; \
+ fi; \
for p in $$list; do \
if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
echo "$$d$$p"; \
@@ -464,8 +552,11 @@ uninstall-dist_nodeconfigDATA:
dir='$(DESTDIR)$(nodeconfigdir)'; $(am__uninstall_files_from_dir)
install-dist_pythonconfigDATA: $(dist_pythonconfig_DATA)
@$(NORMAL_INSTALL)
- test -z "$(pythonconfigdir)" || $(MKDIR_P) "$(DESTDIR)$(pythonconfigdir)"
@list='$(dist_pythonconfig_DATA)'; test -n "$(pythonconfigdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(pythonconfigdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(pythonconfigdir)" || exit 1; \
+ fi; \
for p in $$list; do \
if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
echo "$$d$$p"; \
@@ -480,11 +571,32 @@ uninstall-dist_pythonconfigDATA:
@list='$(dist_pythonconfig_DATA)'; test -n "$(pythonconfigdir)" || list=; \
files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
dir='$(DESTDIR)$(pythonconfigdir)'; $(am__uninstall_files_from_dir)
-tags: TAGS
-TAGS:
+install-dist_statsdconfigDATA: $(dist_statsdconfig_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_statsdconfig_DATA)'; test -n "$(statsdconfigdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(statsdconfigdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(statsdconfigdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(statsdconfigdir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(statsdconfigdir)" || exit $$?; \
+ done
-ctags: CTAGS
-CTAGS:
+uninstall-dist_statsdconfigDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_statsdconfig_DATA)'; test -n "$(statsdconfigdir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(statsdconfigdir)'; $(am__uninstall_files_from_dir)
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
distdir: $(DISTFILES)
@@ -521,7 +633,7 @@ check-am: all-am
check: check-am
all-am: Makefile $(DATA)
installdirs:
- for dir in "$(DESTDIR)$(chartsconfigdir)" "$(DESTDIR)$(configdir)" "$(DESTDIR)$(healthconfigdir)" "$(DESTDIR)$(nodeconfigdir)" "$(DESTDIR)$(pythonconfigdir)"; do \
+ for dir in "$(DESTDIR)$(chartsconfigdir)" "$(DESTDIR)$(configdir)" "$(DESTDIR)$(healthconfigdir)" "$(DESTDIR)$(nodeconfigdir)" "$(DESTDIR)$(pythonconfigdir)" "$(DESTDIR)$(statsdconfigdir)"; do \
test -z "$$dir" || $(MKDIR_P) "$$dir"; \
done
install: install-am
@@ -577,7 +689,7 @@ info-am:
install-data-am: install-dist_chartsconfigDATA install-dist_configDATA \
install-dist_healthconfigDATA install-dist_nodeconfigDATA \
- install-dist_pythonconfigDATA
+ install-dist_pythonconfigDATA install-dist_statsdconfigDATA
install-dvi: install-dvi-am
@@ -623,25 +735,28 @@ ps-am:
uninstall-am: uninstall-dist_chartsconfigDATA \
uninstall-dist_configDATA uninstall-dist_healthconfigDATA \
- uninstall-dist_nodeconfigDATA uninstall-dist_pythonconfigDATA
+ uninstall-dist_nodeconfigDATA uninstall-dist_pythonconfigDATA \
+ uninstall-dist_statsdconfigDATA
.MAKE: install-am install-strip
-.PHONY: all all-am check check-am clean clean-generic distclean \
- distclean-generic distdir dvi dvi-am html html-am info info-am \
- install install-am install-data install-data-am \
- install-dist_chartsconfigDATA install-dist_configDATA \
- install-dist_healthconfigDATA install-dist_nodeconfigDATA \
- install-dist_pythonconfigDATA install-dvi install-dvi-am \
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dist_chartsconfigDATA \
+ install-dist_configDATA install-dist_healthconfigDATA \
+ install-dist_nodeconfigDATA install-dist_pythonconfigDATA \
+ install-dist_statsdconfigDATA install-dvi install-dvi-am \
install-exec install-exec-am install-html install-html-am \
install-info install-info-am install-man install-pdf \
install-pdf-am install-ps install-ps-am install-strip \
installcheck installcheck-am installdirs maintainer-clean \
maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
- pdf-am ps ps-am uninstall uninstall-am \
+ pdf-am ps ps-am tags-am uninstall uninstall-am \
uninstall-dist_chartsconfigDATA uninstall-dist_configDATA \
uninstall-dist_healthconfigDATA uninstall-dist_nodeconfigDATA \
- uninstall-dist_pythonconfigDATA
+ uninstall-dist_pythonconfigDATA \
+ uninstall-dist_statsdconfigDATA
# Tell versions [3.59,3.63) of GNU make to not export all variables.
diff --git a/conf.d/apps_groups.conf b/conf.d/apps_groups.conf
index 4c5171b3..43bf1352 100644
--- a/conf.d/apps_groups.conf
+++ b/conf.d/apps_groups.conf
@@ -95,12 +95,14 @@ php: php*
ftpd: proftpd in.tftpd vsftpd
uwsgi: uwsgi
unicorn: *unicorn*
+puma: *puma*
# -----------------------------------------------------------------------------
# database servers
sql: mysqld* mariad* postgres* oracle_* ora_*
nosql: mongod redis* memcached
+timedb: prometheus *carbon-cache.py* *carbon-aggregator.py* *graphite/manage.py* *net.opentsdb.tools.TSDMain*
# -----------------------------------------------------------------------------
# email servers
@@ -111,7 +113,7 @@ email: dovecot imapd pop3d amavis* master zmstat* zmmailboxdmgr qmgr oqmgr
# network, routing, VPN
ppp: ppp*
-vpn: openvpn pptp* cjdroute
+vpn: openvpn pptp* cjdroute gvpe tincd
wifi: hostapd wpa_supplicant
routing: ospfd* ospf6d* bgpd isisd ripd ripngd pimd ldpd zebra vtysh bird*
@@ -219,11 +221,40 @@ media: mplayer vlc xine mediatomb omxplayer* kodi* xbmc* mediacenter eventlircd
media: mpd minidlnad mt-daapd avahi* Plex*
# -----------------------------------------------------------------------------
+# java applications
+
+hdfsdatanode: *org.apache.hadoop.hdfs.server.datanode.DataNode*
+hdfsnamenode: *org.apache.hadoop.hdfs.server.namenode.NameNode*
+hdfsjournalnode: *org.apache.hadoop.hdfs.qjournal.server.JournalNode*
+hdfszkfc: *org.apache.hadoop.hdfs.tools.DFSZKFailoverController*
+
+yarnnode: *org.apache.hadoop.yarn.server.nodemanager.NodeManager*
+yarnmgr: *org.apache.hadoop.yarn.server.resourcemanager.ResourceManager*
+yarnproxy: *org.apache.hadoop.yarn.server.webproxy.WebAppProxyServer*
+
+sparkworker: *org.apache.spark.deploy.worker.Worker*
+sparkmaster: *org.apache.spark.deploy.master.Master*
+
+hbaseregion: *org.apache.hadoop.hbase.regionserver.HRegionServer*
+hbaserest: *org.apache.hadoop.hbase.rest.RESTServer*
+hbasethrift: *org.apache.hadoop.hbase.thrift.ThriftServer*
+hbasemaster: *org.apache.hadoop.hbase.master.HMaster*
+
+zookeeper: *org.apache.zookeeper.server.quorum.QuorumPeerMain*
+
+hive2: *org.apache.hive.service.server.HiveServer2*
+hivemetastore: *org.apache.hadoop.hive.metastore.HiveMetaStore*
+
+solr: *solr.install.dir*
+
+airflow: *airflow*
+
+# -----------------------------------------------------------------------------
# X
X: X Xorg xinit lightdm xdm pulseaudio gkrellm xfwm4 xfdesktop xfce* Thunar
X: xfsettingsd xfconfd gnome-* gdm gconf* dconf* xfconf* *gvfs gvfs* kdm slim
-X: evolution-* firefox chromium opera epiphany WebKit*
+X: evolution-* firefox chromium opera vivaldi-bin epiphany WebKit*
# -----------------------------------------------------------------------------
# Kernel / System
@@ -240,7 +271,8 @@ kernel: fsnotify_mark kthrotld deferwq scsi_*
# -----------------------------------------------------------------------------
# other application servers
-crsproxy: crsproxy
+kafka: *kafka.Kafka*
+
sidekiq: *sidekiq*
java: java
ipfs: ipfs
diff --git a/conf.d/fping.conf b/conf.d/fping.conf
index 82ee2332..63a7f7ac 100644
--- a/conf.d/fping.conf
+++ b/conf.d/fping.conf
@@ -29,7 +29,7 @@ hosts=""
# The time in milliseconds (1 sec = 1000 ms) to ping the hosts
# by default 5 pings per host per iteration
-# fping will now allow this to be below 20ms
+# fping will not allow this to be below 20ms
#ping_every="200"
diff --git a/conf.d/health.d/fping.conf b/conf.d/health.d/fping.conf
index 69251b18..43658fef 100644
--- a/conf.d/health.d/fping.conf
+++ b/conf.d/health.d/fping.conf
@@ -28,7 +28,7 @@ families: *
lookup: average -10s unaligned of average
units: ms
every: 10s
- green: 300
+ green: 500
red: 1000
warn: $this > $green OR $max > $red
crit: $this > $red
diff --git a/conf.d/health.d/lighttpd.conf b/conf.d/health.d/lighttpd.conf
new file mode 100644
index 00000000..915907a4
--- /dev/null
+++ b/conf.d/health.d/lighttpd.conf
@@ -0,0 +1,14 @@
+
+# make sure lighttpd is running
+
+template: lighttpd_last_collected_secs
+ on: lighttpd.requests
+ calc: $now - $last_collected_t
+ units: seconds ago
+ every: 10s
+ warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
+ crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every))
+ delay: down 5m multiplier 1.5 max 1h
+ info: number of seconds since the last successful data collection
+ to: webmaster
+
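A worked reading of the two thresholds above, assuming the default update_every of 1 second (the mongodb template that follows uses the same pattern):

```
# status CLEAR:    warn fires once the last collection is > 5s old  (5 * update_every)
#                  crit fires once it is > 60s old                 (60 * update_every)
# status WARNING:  the warn threshold tightens to update_every (1s),
#                  so the alarm holds until data actually arrives again
# status CRITICAL: the crit threshold tightens to update_every the same way
```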
diff --git a/conf.d/health.d/mongodb.conf b/conf.d/health.d/mongodb.conf
new file mode 100644
index 00000000..a80cb311
--- /dev/null
+++ b/conf.d/health.d/mongodb.conf
@@ -0,0 +1,13 @@
+
+# make sure mongodb is running
+
+template: mongodb_last_collected_secs
+ on: mongodb.read_operations
+ calc: $now - $last_collected_t
+ units: seconds ago
+ every: 10s
+ warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
+ crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every))
+ delay: down 5m multiplier 1.5 max 1h
+ info: number of seconds since the last successful data collection
+ to: dba
diff --git a/conf.d/health.d/net.conf b/conf.d/health.d/net.conf
index 0232395a..bd288817 100644
--- a/conf.d/health.d/net.conf
+++ b/conf.d/health.d/net.conf
@@ -99,9 +99,9 @@ families: *
calc: $this * 100 / (($1m_received_packets_rate < 1000)?(1000):($1m_received_packets_rate))
every: 10s
units: %
- warn: $this > (($status >= $WARNING)?(200):(1000))
- crit: $this > (($status >= $WARNING)?(1000):(2000))
+ warn: $this > (($status >= $WARNING)?(200):(5000))
+ crit: $this > (($status >= $WARNING)?(5000):(6000))
options: no-clear-notification
- info: the % of the rate of received packets in the last 10 seconds, compared to the rate of the last minute
+ info: the % of the rate of received packets in the last 10 seconds, compared to the rate of the last minute (clear notification for this alarm will not be sent)
to: sysadmin
diff --git a/conf.d/health.d/ram.conf b/conf.d/health.d/ram.conf
index d60df75b..b99e5e22 100644
--- a/conf.d/health.d/ram.conf
+++ b/conf.d/health.d/ram.conf
@@ -1,7 +1,14 @@
+ alarm: used_ram_to_ignore
+ on: system.ram
+ calc: ($zfs.arc_size.arcsz = nan)?(0):($zfs.arc_size.arcsz)
+ every: 10s
+ info: the amount of memory that is reported as used, but is actually capable of resizing itself based on the system's needs (e.g. the ZFS ARC)
+
alarm: ram_in_use
on: system.ram
- calc: $used * 100 / ($used + $cached + $free)
+# calc: $used * 100 / ($used + $cached + $free)
+ calc: ($used - $used_ram_to_ignore) * 100 / ($used - $used_ram_to_ignore + $cached + $free)
units: %
every: 10s
warn: $this > (($status >= $WARNING) ? (80) : (90))
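A quick worked example of the revised calc, with hypothetical numbers:

```
# used = 8 GiB, of which the ZFS ARC ($used_ram_to_ignore) = 2 GiB,
# cached = 4 GiB, free = 4 GiB:
#   (8 - 2) * 100 / (8 - 2 + 4 + 4) = 600 / 14 ≈ 42.9%
# the ARC is excluded because it shrinks on demand, so it no longer
# pushes this alarm toward its 80%/90% thresholds
```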
diff --git a/conf.d/health.d/tcp_resets.conf b/conf.d/health.d/tcp_resets.conf
index 49fb1b92..803c88a8 100644
--- a/conf.d/health.d/tcp_resets.conf
+++ b/conf.d/health.d/tcp_resets.conf
@@ -26,10 +26,10 @@
lookup: average -10s unaligned absolute of OutRsts
units: tcp resets/s
every: 10s
- warn: $this > ((($1m_ipv4_tcp_resets_sent < 5)?(5):($1m_ipv4_tcp_resets_sent)) * (($status >= $WARNING) ? (1) : (4)))
+ warn: $this > ((($1m_ipv4_tcp_resets_sent < 5)?(5):($1m_ipv4_tcp_resets_sent)) * (($status >= $WARNING) ? (1) : (20)))
delay: up 0 down 60m multiplier 1.2 max 2h
options: no-clear-notification
- info: average TCP RESETS this host is sending, over the last 10 seconds (this can be an indication that a port scan is made, or that a service running on this host has crashed)
+ info: average TCP RESETS this host is sending, over the last 10 seconds (this can be an indication that a port scan is underway, or that a service running on this host has crashed; clear notification for this alarm will not be sent)
to: sysadmin
# -----------------------------------------------------------------------------
@@ -47,8 +47,8 @@ options: no-clear-notification
lookup: average -10s unaligned absolute of AttemptFails
units: tcp resets/s
every: 10s
- warn: $this > ((($1m_ipv4_tcp_resets_received < 5)?(5):($1m_ipv4_tcp_resets_received)) * (($status >= $WARNING) ? (1) : (4)))
+ warn: $this > ((($1m_ipv4_tcp_resets_received < 5)?(5):($1m_ipv4_tcp_resets_received)) * (($status >= $WARNING) ? (1) : (10)))
delay: up 0 down 60m multiplier 1.2 max 2h
options: no-clear-notification
- info: average TCP RESETS this host is receiving, over the last 10 seconds (this can be an indication that a service this host needs, has crashed)
+ info: average TCP RESETS this host is receiving, over the last 10 seconds (this can be an indication that a service this host depends on has crashed; clear notification for this alarm will not be sent)
to: sysadmin
diff --git a/conf.d/health.d/web_log.conf b/conf.d/health.d/web_log.conf
index c668959f..d1808817 100644
--- a/conf.d/health.d/web_log.conf
+++ b/conf.d/health.d/web_log.conf
@@ -156,6 +156,7 @@ families: *
delay: down 15m multiplier 1.5 max 1h
options: no-clear-notification
info: the percentage of successful web requests over the last 5 minutes, \
- compared with the previous 5 minutes
+ compared with the previous 5 minutes \
+ (clear notification for this alarm will not be sent)
to: webmaster
diff --git a/conf.d/health.d/zfs.conf b/conf.d/health.d/zfs.conf
new file mode 100644
index 00000000..af73824e
--- /dev/null
+++ b/conf.d/health.d/zfs.conf
@@ -0,0 +1,10 @@
+
+ alarm: zfs_memory_throttle
+ on: zfs.memory_ops
+ lookup: sum -10m unaligned absolute of throttled
+ units: events
+ every: 1m
+ warn: $this > 0
+ delay: down 1h multiplier 1.5 max 2h
+ info: the number of times ZFS had to limit the ARC growth in the last 10 minutes
+ to: sysadmin
diff --git a/conf.d/health_alarm_notify.conf b/conf.d/health_alarm_notify.conf
index 23776b96..4d8444ed 100644
--- a/conf.d/health_alarm_notify.conf
+++ b/conf.d/health_alarm_notify.conf
@@ -303,6 +303,68 @@ SEND_PD="YES"
DEFAULT_RECIPIENT_PD=""
+#------------------------------------------------------------------------------
+# custom notifications
+#
+
+# enable/disable sending custom notifications
+SEND_CUSTOM="YES"
+
+# if a role's recipients are not configured, use the following.
+# (empty = do not send a notification for unconfigured roles)
+DEFAULT_RECIPIENT_CUSTOM=""
+
+# custom_sender() is a function you can redefine to do whatever you need
+custom_sender() {
+ # variables you can use:
+ # ${host} the host generated this event
+ # ${url_host} same as ${host} but URL encoded
+ # ${unique_id} the unique id of this event
+ # ${alarm_id} the unique id of the alarm that generated this event
+ # ${event_id} the incremental id of the event, for this alarm id
+ # ${when} the timestamp this event occurred
+ # ${name} the name of the alarm, as given in netdata health.d entries
+ # ${url_name} same as ${name} but URL encoded
+ # ${chart} the name of the chart (type.id)
+ # ${url_chart} same as ${chart} but URL encoded
+ # ${family} the family of the chart
+ # ${url_family} same as ${family} but URL encoded
+ # ${status} the current status : REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL
+ # ${old_status} the previous status: REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL
+ # ${value} the current value of the alarm
+ # ${old_value} the previous value of the alarm
+ # ${src} the line number and file where the alarm has been configured
+ # ${duration} the duration in seconds of the previous alarm state
+ # ${duration_txt} same as ${duration} for humans
+ # ${non_clear_duration} the total duration in seconds this is/was non-clear
+ # ${non_clear_duration_txt} same as ${non_clear_duration} for humans
+ # ${units} the units of the value
+ # ${info} a short description of the alarm
+ # ${value_string} friendly value (with units)
+ # ${old_value_string} friendly old value (with units)
+ # ${image} the URL of an image to represent the status of the alarm
+ # ${color} a color in #AABBCC format for the alarm
+ # ${goto_url} the URL the user can click to see the netdata dashboard
+
+ # these are more human friendly:
+ # ${alarm} like "name = value units"
+ # ${status_message} like "needs attention", "recovered", "is critical"
+ # ${severity} like "Escalated to CRITICAL", "Recovered from WARNING"
+ # ${raised_for} like "(alarm was raised for 10 minutes)"
+
+ # example human readable SMS
+ local msg="${host} ${status_message}: ${alarm} ${raised_for}"
+
+ # limit it to 160 characters and encode it for use in a URL
+ urlencode "${msg:0:160}" >/dev/null; msg="${REPLY}"
+
+ # a space separated list of the recipients to send alarms to
+ to="${1}"
+
+ info "not sending custom notification to ${to}, for ${status} of '${host}.${chart}.${name}' - custom_sender() is not configured."
+}
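Since the stub above only logs, here is a minimal sketch of a filled-in custom_sender(); the webhook URL and payload shape are hypothetical, everything else reuses the variables and helpers documented above:

```sh
# minimal sketch, assuming a hypothetical webhook at example.com
custom_sender() {
    local to="${1}" httpcode
    local payload="host=${url_host}&chart=${url_chart}&alarm=${url_name}&status=${status}"

    httpcode=$(curl --silent --output /dev/null --write-out "%{http_code}" \
        --request POST --data "${payload}" \
        "https://example.com/netdata-webhook")

    if [ "${httpcode}" = "200" ]; then
        info "sent custom notification to ${to} for ${status} of '${host}.${chart}.${name}'"
    else
        error "failed to send custom notification to ${to} (got HTTP ${httpcode})"
    fi
}
```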
+
+
###############################################################################
# RECIPIENTS PER ROLE
@@ -330,6 +392,8 @@ role_recipients_messagebird[sysadmin]="${DEFAULT_RECIPIENT_MESSAGEBIRD}"
role_recipients_pd[sysadmin]="${DEFAULT_RECIPIENT_PD}"
+role_recipients_custom[sysadmin]="${DEFAULT_RECIPIENT_CUSTOM}"
+
# -----------------------------------------------------------------------------
# DNS related alarms
@@ -353,6 +417,8 @@ role_recipients_messagebird[domainadmin]="${DEFAULT_RECIPIENT_MESSAGEBIRD}"
role_recipients_pd[domainadmin]="${DEFAULT_RECIPIENT_PD}"
+role_recipients_custom[domainadmin]="${DEFAULT_RECIPIENT_CUSTOM}"
+
# -----------------------------------------------------------------------------
# database servers alarms
# mysql, redis, memcached, postgres, etc
@@ -377,6 +443,8 @@ role_recipients_messagebird[dba]="${DEFAULT_RECIPIENT_MESSAGEBIRD}"
role_recipients_pd[dba]="${DEFAULT_RECIPIENT_PD}"
+role_recipients_custom[dba]="${DEFAULT_RECIPIENT_CUSTOM}"
+
# -----------------------------------------------------------------------------
# web servers alarms
# apache, nginx, lighttpd, etc
@@ -401,6 +469,8 @@ role_recipients_messagebird[webmaster]="${DEFAULT_RECIPIENT_MESSAGEBIRD}"
role_recipients_pd[webmaster]="${DEFAULT_RECIPIENT_PD}"
+role_recipients_custom[webmaster]="${DEFAULT_RECIPIENT_CUSTOM}"
+
# -----------------------------------------------------------------------------
# proxy servers alarms
# squid, etc
@@ -424,3 +494,5 @@ role_recipients_twilio[proxyadmin]="${DEFAULT_RECIPIENT_TWILIO}"
role_recipients_messagebird[proxyadmin]="${DEFAULT_RECIPIENT_MESSAGEBIRD}"
role_recipients_pd[proxyadmin]="${DEFAULT_RECIPIENT_PD}"
+
+role_recipients_custom[proxyadmin]="${DEFAULT_RECIPIENT_CUSTOM}"
diff --git a/conf.d/node.d/fronius.conf.md b/conf.d/node.d/fronius.conf.md
new file mode 100644
index 00000000..c80afa0b
--- /dev/null
+++ b/conf.d/node.d/fronius.conf.md
@@ -0,0 +1,67 @@
+[Fronius Symo 8.2](https://www.fronius.com/en/photovoltaics/products/all-products/inverters/fronius-symo/fronius-symo-8-2-3-m)
+
+The plugin has been tested with a single inverter, namely Fronius Symo 8.2-3-M:
+
+- Datalogger version: 240.162630
+- Software version: 3.7.4-6
+- Hardware version: 2.4D
+
+Other products and versions may work, but without any guarantees.
+
+Example netdata configuration for node.d/fronius.conf. Copy this section to fronius.conf and change the name and IP/hostname.
+The module supports any number of servers. Collecting every 3 seconds can sometimes lag, so an update_every of 5 seconds is a safe choice; it can also be set per server.
+```json
+{
+ "enable_autodetect": false,
+ "update_every": 5,
+ "servers": [
+ {
+ "name": "Solar",
+ "hostname": "symo.ip.or.dns",
+ "update_every": 5,
+ "api_path": "/solar_api/v1/GetPowerFlowRealtimeData.fcgi"
+ }
+ ]
+}
+```
+
+The output of /solar_api/v1/GetPowerFlowRealtimeData.fcgi looks like this:
+```json
+{
+ "Head" : {
+ "RequestArguments" : {},
+ "Status" : {
+ "Code" : 0,
+ "Reason" : "",
+ "UserMessage" : ""
+ },
+ "Timestamp" : "2017-07-05T12:35:12+02:00"
+ },
+ "Body" : {
+ "Data" : {
+ "Site" : {
+ "Mode" : "meter",
+ "P_Grid" : -6834.549847,
+ "P_Load" : -1271.450153,
+ "P_Akku" : null,
+ "P_PV" : 8106,
+ "rel_SelfConsumption" : 15.685297,
+ "rel_Autonomy" : 100,
+ "E_Day" : 35020,
+ "E_Year" : 5826076,
+ "E_Total" : 14788870,
+ "Meter_Location" : "grid"
+ },
+ "Inverters" : {
+ "1" : {
+ "DT" : 123,
+ "P" : 8106,
+ "E_Day" : 35020,
+ "E_Year" : 5826076,
+ "E_Total" : 14788870
+ }
+ }
+ }
+ }
+}
+```
diff --git a/conf.d/python.d.conf b/conf.d/python.d.conf
index 9ed346cd..0a37e40a 100644
--- a/conf.d/python.d.conf
+++ b/conf.d/python.d.conf
@@ -26,11 +26,13 @@ log_interval: 3600
# If "default_run" = "no" the default for all modules is disabled (no).
# Setting any of these to "yes" will enable it.
-# apache_cache: yes
+# apache_cache has been replaced by web_log
+apache_cache: no
# apache: yes
# bind_rndc: yes
# cpufreq: yes
# cpuidle: yes
+# dns_query_time: yes
# dovecot: yes
# elasticsearch: yes
@@ -43,7 +45,7 @@ example: no
# gunicorn_log has been replaced by web_log
gunicorn_log: no
-
+go_expvar: no
# haproxy: yes
# hddtemp: yes
# ipfs: yes
@@ -52,6 +54,7 @@ gunicorn_log: no
# memcached: yes
# mysql: yes
# nginx: yes
+# nsd: yes
# nginx_log has been replaced by web_log
nginx_log: no
@@ -60,9 +63,11 @@ nginx_log: no
# phpfpm: yes
# postfix: yes
# postgres: yes
+# rabbitmq: yes
# redis: yes
# retroshare: yes
# sensors: yes
+# samba: yes
# smartd_log: yes
# squid: yes
# tomcat: yes
diff --git a/conf.d/python.d/dns_query_time.conf b/conf.d/python.d/dns_query_time.conf
new file mode 100644
index 00000000..f4d4dbf9
--- /dev/null
+++ b/conf.d/python.d/dns_query_time.conf
@@ -0,0 +1,72 @@
+# netdata python.d.plugin configuration for dns_query_time
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 5
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 5 # the JOB's number of restoration attempts
+#
+# In addition to the above, dns_query_time also supports the following:
+#
+# dns_servers: 'dns servers' # list of dns servers to query
+# domains: 'domains' # list of domains
+# aggregate: yes/no # Default: yes. Aggregate all servers in one chart or not
+# response_timeout: 4 # Default: 4. DNS query response timeout (query = -100 if response time > response_timeout)
+#
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+#
+#
+#aggregate: yes
+#dns_servers: '8.8.8.8 8.8.4.4'
+#domains: 'python.org distrowatch.com linuxmint.com linux.com rsyslog.com liblognorm.com archlinux.org cisco.com debian.org kernel.org gns3.com opera.com github.com youtube.com amazon.co.uk kde.org netdata.firehol.org ubuntu.com redhat.com opensuse.org wireshark.org vmware.com microsoft.com elastic.co'
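+#
+# The same parameters can also be set in a named job - a hypothetical
+# sketch, kept disabled (server and domain lists are placeholders):
+#
+#my_dns:
+#  aggregate: yes
+#  response_timeout: 4
+#  dns_servers: '8.8.8.8 8.8.4.4'
+#  domains: 'kernel.org debian.org python.org'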
diff --git a/conf.d/python.d/elasticsearch.conf b/conf.d/python.d/elasticsearch.conf
index f98aaece..7c35df22 100644
--- a/conf.d/python.d/elasticsearch.conf
+++ b/conf.d/python.d/elasticsearch.conf
@@ -61,19 +61,16 @@
# cluster_health: False/True # Calls to cluster health elasticsearch API. Enabled by default.
# cluster_stats: False/True # Calls to cluster stats elasticsearch API. Enabled by default.
#
-# ----------------------------------------------------------------------
-# IMPORTANT Information
-#
-# Module uses python `requests` package
#
-# You need to install it manually. (python-requests or python3-requests depending on the version of python).
+# if the URL is password protected, the following are supported:
#
+# user: 'username'
+# pass: 'password'
#
+# ----------------------------------------------------------------------
# AUTO-DETECTION JOBS
# only one of them will run (they have the same name)
#
-#local:
-# host: '127.0.0.1'
-# port: '9200'
-# cluster_health: True
-# cluster_stats: True
+local:
+ host: '127.0.0.1'
+ port: '9200'
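+
+# a hypothetical job for a password-protected cluster - a sketch only,
+# kept disabled (credentials are placeholders):
+#
+#secured:
+#  host: '127.0.0.1'
+#  port: '9200'
+#  user: 'netdata'
+#  pass: 'changeme'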
diff --git a/conf.d/python.d/fail2ban.conf b/conf.d/python.d/fail2ban.conf
index d9664e35..76277108 100644
--- a/conf.d/python.d/fail2ban.conf
+++ b/conf.d/python.d/fail2ban.conf
@@ -58,15 +58,6 @@
#
# log_path: 'path to fail2ban.log' # Default: '/var/log/fail2ban.log'
# conf_path: 'path to jail.local/jail.conf' # Default: '/etc/fail2ban/jail.local'
-# conf_dir: 'path to jail.d/' # Default: '' empty
+# conf_dir: 'path to jail.d/' # Default: '/etc/fail2ban/jail.d/'
# exclude: 'jails you want to exclude from autodetection' # Default: '[]' empty list
#------------------------------------------------------------------------------------------------------------------
-# ------------------------------------------------------------------------------------------------------------------
-# AUTO-DETECTION JOBS
-# only one of them will run (they have the same name)
-
-local:
- log_path: '/var/log/fail2ban.log'
- conf_path: '/etc/fail2ban/jail.local'
-# conf_dir: '/etc/fail2ban/jail.d/'
-# exclude: 'dropbear apache'
diff --git a/conf.d/python.d/go_expvar.conf b/conf.d/python.d/go_expvar.conf
new file mode 100644
index 00000000..5be4890d
--- /dev/null
+++ b/conf.d/python.d/go_expvar.conf
@@ -0,0 +1,106 @@
+# netdata python.d.plugin configuration for go_expvar
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 5
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: my_name # the JOB's name as it will appear at the
+# # dashboard. If name: is not supplied the
+# # job_name: will be used (use _ for spaces)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 5 # the JOB's number of restoration attempts
+#
+# In addition to the above, this plugin also supports the following:
+#
+# url: 'http://127.0.0.1/debug/vars' # the URL of the expvar endpoint
+# ss_cert: # ignore HTTPS self-signed certificate
+# proxy: # use HTTP proxy
+#
+# As the plugin cannot possibly know the port your application listens on, there is no default value. Please include
+# the whole path of the endpoint, as the expvar handler can be installed in a non-standard location.
+#
+# if the URL is password protected, the following are supported:
+#
+# user: 'username'
+# pass: 'password'
+#
+# collect_memstats: true # enables charts for Go runtime's memory statistics
+# extra_charts: {} # defines extra data/charts to monitor, please see the example below
+#
+# If collect_memstats is disabled and no extra charts are defined, this module will disable itself, as it has no data to
+# collect.
+#
+# Please visit the module wiki page for more information on how to use the extra_charts variable:
+#
+# https://github.com/firehol/netdata/wiki/Monitoring-Go-Applications#monitoring-custom-vars-with-go_expvar
+#
+# Configuration example
+# ---------------------
+
+#app1:
+# name : 'app1'
+# url : 'http://127.0.0.1:8080/debug/vars'
+# collect_memstats: true
+# extra_charts:
+# - id: "runtime_goroutines"
+# options:
+# name: num_goroutines
+# title: "runtime: number of goroutines"
+# units: goroutines
+# family: runtime
+# context: expvar.runtime.goroutines
+# chart_type: line
+# lines:
+# - {expvar_key: 'runtime.goroutines', expvar_type: int, id: runtime_goroutines}
+# - id: "foo_counters"
+# options:
+# name: counters
+# title: "some random counters"
+# units: awesomeness
+# family: counters
+# context: expvar.foo.counters
+# chart_type: line
+# lines:
+# - {expvar_key: 'counters.cnt1', expvar_type: int, id: counters_cnt1}
+# - {expvar_key: 'counters.cnt2', expvar_type: float, id: counters_cnt2}
+
diff --git a/conf.d/python.d/isc_dhcpd.conf b/conf.d/python.d/isc_dhcpd.conf
index 7c8fe3ce..938ca6e7 100644
--- a/conf.d/python.d/isc_dhcpd.conf
+++ b/conf.d/python.d/isc_dhcpd.conf
@@ -56,8 +56,11 @@
#
# Additionally to the above, isc_dhcpd supports the following:
#
-# leases_path: 'PATH' # the path to dhcpd.leases file
-# pools: 'dhcpd pools list' # Pools in CIDR format
+# leases_path: 'PATH' # the path to dhcpd.leases file
+# pools:
+# office: '192.168.2.0/24' # name(dimension): pool in CIDR format
+# wifi: '192.168.3.0/24' # name(dimension): pool in CIDR format
+# 192.168.4.0/24: '192.168.4.0/24' # name(dimension): pool in CIDR format
#
#-----------------------------------------------------------------------
# IMPORTANT notes
@@ -75,4 +78,6 @@
#
#leases:
# leases_path : '/var/lib/dhcp/dhcpd.leases'
-# pools : '192.168.3.0/24 192.168.4.0/24 192.168.5.0/24'
+# pools:
+# office: '192.168.2.0/24'
+# wifi: '192.168.3.0/24'
diff --git a/conf.d/python.d/postgres.conf b/conf.d/python.d/postgres.conf
index d4d2bafc..12dddae6 100644
--- a/conf.d/python.d/postgres.conf
+++ b/conf.d/python.d/postgres.conf
@@ -68,6 +68,7 @@
#
# table_stats : false
# index_stats : false
+# database_poll : 'dbase_name1 dbase_name2' # poll only specified databases (all others will be excluded from charts)
#
# Postfix permissions are configured at its pg_hba.conf file. You can
# "trust" local clients to allow netdata to connect, or you can create
diff --git a/conf.d/python.d/rabbitmq.conf b/conf.d/python.d/rabbitmq.conf
new file mode 100644
index 00000000..eccf65df
--- /dev/null
+++ b/conf.d/python.d/rabbitmq.conf
@@ -0,0 +1,75 @@
+# netdata python.d.plugin configuration for rabbitmq
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 5
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 5 # the JOB's number of restoration attempts
+#
+# In addition to the above, the rabbitmq plugin also supports the following:
+#
+# host: 'ipaddress' # Server IP address or hostname. Default: 127.0.0.1
+# port: 'port' # RabbitMQ port. Default: 15672
+# scheme: 'scheme' # http or https. Default: http
+#
+# if the URL is password protected, the following are supported:
+#
+# user: 'username'
+# pass: 'password'
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+#
+local:
+ host: '127.0.0.1'
+ user: 'guest'
+ pass: 'guest'
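+
+# a hypothetical job for an https-enabled management API - a sketch only,
+# kept disabled (hostname and credentials are placeholders):
+#
+#secure:
+#  host: 'rabbit.example.com'
+#  port: '15672'
+#  scheme: 'https'
+#  user: 'monitor'
+#  pass: 'secret'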
diff --git a/conf.d/python.d/samba.conf b/conf.d/python.d/samba.conf
new file mode 100644
index 00000000..865281cd
--- /dev/null
+++ b/conf.d/python.d/samba.conf
@@ -0,0 +1,58 @@
+# netdata python.d.plugin configuration for samba
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+update_every: 5
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 5
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 5 # the JOB's number of restoration attempts
+#
+#
+
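+# ----------------------------------------------------------------------
+# A job sketch using only the predefined parameters above - hypothetical,
+# kept disabled; the module is expected to autodetect the local samba
+# instance, so no module-specific options are documented here:
+#
+#local:
+#  name: 'samba'
+#  update_every: 5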
diff --git a/conf.d/python.d/smartd_log.conf b/conf.d/python.d/smartd_log.conf
index e16454df..8764ffd3 100644
--- a/conf.d/python.d/smartd_log.conf
+++ b/conf.d/python.d/smartd_log.conf
@@ -58,11 +58,11 @@
#
# log_path: '/path/to/smartdlogs' # path to smartd log files. Default is /var/log/smartd
# raw_values: no # raw or normalized values on charts. Default is normalized.
-# smart_attributes: '1 2 3 4 44' # add additional smart attributes charts. Default are ['1', '4', '5', '7', '9', '12', '193', '194', '197', '198', '200'].
+# smart_attributes: '1 2 3 4 44' # smart attributes charts. Defaults are ['1', '4', '5', '7', '9', '12', '193', '194', '197', '198', '200'].
#
# ----------------------------------------------------------------------
# Additional information
-# Plugin reads smartd log files (-A option).
+# Plugin reads smartd log files (-A option).
# You need to add (man smartd) to /etc/default/smartmontools '-i 600 -A /var/log/smartd/' to pass additional options to smartd on startup
# Then restart smartd service and check /path/log/smartdlogs
# ls /var/log/smartd/
@@ -74,12 +74,12 @@
# RAW vs NORMALIZED values
# "Normalized value", commonly referred to as just "value". This is a most universal measurement, on the scale from 0 (bad) to some maximum (good) value.
# Maximum values are typically 100, 200 or 253. Rule of thumb is: high values are good, low values are bad.
-#
+#
# "Raw value" - the value of the attribute as it is tracked by the device, before any normalization takes place.
# Some raw numbers provide valuable insight when properly interpreted. These cases will be discussed later on.
# Raw values are typically listed in hexadecimal numbers. The raw value has different structure for different vendors and is often not meaningful as a decimal number.
#
#
# JOB configuration
-#
+#
log_path: '/var/log/smartd'
diff --git a/conf.d/python.d/web_log.conf b/conf.d/python.d/web_log.conf
index 06656285..e51b565d 100644
--- a/conf.d/python.d/web_log.conf
+++ b/conf.d/python.d/web_log.conf
@@ -60,13 +60,21 @@
# Additionally to the above, web_log also supports the following:
#
# path: 'PATH' # the path to web server log file
-# detailed_response_codes: yes/no # Default: yes. Additional chart where response codes are not grouped
-# detailed_response_aggregate: yes/no # Default: yes. Not aggregated detailed response codes charts
-# all_time : yes/no # Default: yes. All time unique client IPs chart (50000 addresses ~ 400KB)
+# path: 'PATH[0-9]*[0-9]' # log files with a date suffix are also supported
+# detailed_response_codes: yes/no # default: yes. Additional chart where response codes are not grouped
+# detailed_response_aggregate: yes/no # default: yes. Not aggregated detailed response codes charts
+# all_time : yes/no # default: yes. All time unique client IPs chart (50000 addresses ~ 400KB)
+# filter: # filter with regex
+#  include: 'REGEX' # only those rows that match the regex
+#  exclude: 'REGEX' # all rows except those that match the regex
# categories: # requests per url chart configuration
# cacti: 'cacti.*' # name(dimension): REGEX to match
# observium: 'observium.*' # name(dimension): REGEX to match
# stub_status: 'stub_status' # name(dimension): REGEX to match
+# user_defined: # requests per pattern in <user_defined> field (custom_log_format)
+# cacti: 'cacti.*' # name(dimension): REGEX to match
+# observium: 'observium.*' # name(dimension): REGEX to match
+# stub_status: 'stub_status' # name(dimension): REGEX to match
# custom_log_format: # define a custom log format
# pattern: '(?P<address>[\da-f.:]+) -.*?"(?P<method>[A-Z]+) (?P<url>.*?)" (?P<code>[1-9]\d{2}) (?P<bytes_sent>\d+) (?P<resp_length>\d+) (?P<resp_time>\d\.\d+) '
# time_multiplier: 1000000 # type <int> - convert time to microseconds
@@ -83,7 +91,7 @@
# nginx:
# log_format netdata '$remote_addr - $remote_user [$time_local] '
# '"$request" $status $body_bytes_sent '
-# '$request_length $request_time '
+# '$request_length $request_time $upstream_response_time '
# '"$http_referer" "$http_user_agent"';
# access_log /var/log/nginx/access.log netdata;
#
@@ -145,3 +153,35 @@ gunicorn_log:
gunicorn_log2:
name: 'gunicorn'
path: '/var/log/gunicorn/gunicorn-access.log'
+
+# -------------------------------------------
+# Apache Cache
+apache_cache:
+ name: 'apache_cache'
+ type: 'apache_cache'
+ path: '/var/log/apache/cache.log'
+
+apache2_cache:
+ name: 'apache_cache'
+ type: 'apache_cache'
+ path: '/var/log/apache2/cache.log'
+
+httpd_cache:
+ name: 'apache_cache'
+ type: 'apache_cache'
+ path: '/var/log/httpd/cache.log'
+
+# -------------------------------------------
+# Squid
+
+# debian/ubuntu
+squid_log1:
+ name: 'squid'
+ type: 'squid'
+ path: '/var/log/squid3/access.log'
+
+#gentoo
+squid_log2:
+ name: 'squid'
+ type: 'squid'
+ path: '/var/log/squid/access.log'
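+
+# -------------------------------------------
+# a hypothetical job using the filter option documented above - a sketch
+# only, kept disabled (path and regex are placeholders):
+
+#nginx_api:
+#  name: 'nginx_api'
+#  path: '/var/log/nginx/access.log'
+#  filter:
+#    include: '.*/api/.*'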
diff --git a/conf.d/statsd.d/example.conf b/conf.d/statsd.d/example.conf
new file mode 100644
index 00000000..0af9dd27
--- /dev/null
+++ b/conf.d/statsd.d/example.conf
@@ -0,0 +1,65 @@
+# statsd synthetic charts configuration
+
+# You can add many .conf files, one for each of your apps
+
+# start a new app - you can add many apps in the same file
+[app]
+ # give a name for this app
+ # this controls the main menu on the dashboard
+ # and will be the prefix for all charts of the app
+ name = myapp
+
+ # match all the metrics of the app
+ metrics = myapp.*
+
+ # shall private charts of these metrics be created?
+ private charts = no
+
+ # shall gaps be shown when metrics are not collected?
+ gaps when not collected = no
+
+ # the memory mode for the charts of this app: none|map|save
+ # the default is to use the global memory mode
+ #memory mode = ram
+
+ # the history size for the charts of this app, in seconds
+ # the default is to use the global history
+ #history = 3600
+
+
+
+# create a chart
+# this is its id - the chart will be named myapp.mychart
+[mychart]
+ # a name for the chart, similar to the id (2 names for each chart)
+ name = mychart
+
+ # the chart title
+ title = my chart title
+
+ # the submenu of the dashboard
+ family = my family
+
+ # the context for alarm templates
+ context = chart.context
+
+ # the units of the chart
+ units = tests/s
+
+ # the sorting priority of the chart on the dashboard
+ priority = 91000
+
+ # the type of chart to create: line | area | stacked
+ type = area
+
+ # one or more dimensions for the chart
+ # type = events | last | min | max | sum | average | percentile | median | stddev
+ # events = the number of events for this metric
+ # last = the last value collected
+ # all the others are only valid for histograms and timers
+ dimension = myapp.metric1 avg average 1 1
+ dimension = myapp.metric1 lower min 1 1
+ dimension = myapp.metric1 upper max 1 1
+ dimension = myapp.metric2 other last 1 1
+
+# You can add as many charts as needed
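+
+# To test this app you can send a matching metric to netdata's statsd
+# listener - a sketch, assuming the default UDP port 8125 and that the
+# nc (netcat) tool is available:
+#
+# echo "myapp.metric1:250|ms" | nc -u -w 1 localhost 8125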
diff --git a/conf.d/stream.conf b/conf.d/stream.conf
index 0ebdccb8..0ae5ba67 100644
--- a/conf.d/stream.conf
+++ b/conf.d/stream.conf
@@ -5,59 +5,64 @@
# number of hosts.
#
# You can generate API keys, with the linux command: uuidgen
-#
+
+
# -----------------------------------------------------------------------------
# 1. ON SLAVE NETDATA - THE ONE THAT WILL BE SENDING METRICS
[stream]
# Enable this on slaves, to have them send metrics.
- enabled = no
-
- # The destination to send metrics to.
- # A space separated list of:
- # [PROTOCOL:]HOST[%INTERFACE][:PORT]
- # The first available will get the metrics.
- # PROTOCOL = tcp or udp (only tcp is supported by masters)
- # HOST = an IPv4, IPv6 IP, or a hostname.
- # IPv6 IPs should be given with brackets [ip:address]
- # INTERFACE = the network interface to use
- # PORT = the port number or service name (/etc/services)
- # This communication is not HTTP (cannot be proxied by web proxies).
- destination =
-
- # The API_KEY to use (as the sender)
- api key =
+ enabled = no
+
+ # Where is the receiving netdata?
+ # A space separated list of:
+ #
+ # [PROTOCOL:]HOST[%INTERFACE][:PORT]
+ #
+ # If many are given, the first available will get the metrics.
+ #
+ # PROTOCOL = tcp or udp (only tcp is supported by masters)
+ # HOST = an IPv4, IPv6 IP, or a hostname.
+ # IPv6 IPs should be given with brackets [ip:address]
+ # INTERFACE = the network interface to use
+ # PORT = the port number or service name (/etc/services)
+ #
+ # This communication is not HTTP (cannot be proxied by web proxies).
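+ # Example (hypothetical addresses):
+ # destination = tcp:10.11.12.1:19999 10.11.12.2 [2001:db8::2]:19999
+ #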
+ destination =
+
+ # The API_KEY to use (as the sender)
+ api key =
# The timeout to connect and send metrics
- timeout seconds = 60
+ timeout seconds = 60
- # If the destination line above does specify a port, use this
- default port = 19999
+ # If the destination line above does not specify a port, use this
+ default port = 19999
- # The buffer to use for sending metrics.
- # 1MB by default is good for 2-3 seconds of data, so increase this
- # if you expect latencies.
- buffer size bytes = 1048576
+ # The buffer to use for sending metrics.
+ # 1MB is good for 10-20 seconds of data, so increase this
+ # if you expect latencies.
+ buffer size bytes = 1048576
- # If the connection fails, or it disconnects,
- # retry after that many seconds.
- reconnect delay seconds = 5
+ # If the connection fails, or it disconnects,
+ # retry after that many seconds.
+ reconnect delay seconds = 5
- # Attempt to sync the clock the of the master with the clock of the
- # slave for that many iterations, when starting.
- initial clock resync iterations = 60
+ # Attempt to sync the clock of the master with the clock of the
+ # slave for that many iterations, when starting.
+ initial clock resync iterations = 60
# -----------------------------------------------------------------------------
# 2. ON MASTER NETDATA - THE ONE THAT WILL BE RECEIVING METRICS
-#
+
# You can have one API key per slave, or the same API key for all slaves.
#
-# All options below are used in this order:
+# netdata searches for options in this order:
#
-# a) MACHINE_GUID (settings for each machine)
-# b) API_KEY (settings for the API key)
-# c) this netdata defaults (as in netdata.conf)
+# a) [MACHINE_GUID] section (settings for each machine)
+# b) [API_KEY] section (settings for the API key)
+# c) master netdata settings (netdata.conf)
#
# You can combine the above (the more specific setting will be used).
@@ -68,13 +73,13 @@
# Default settings for the API key
# You can disable the API key, by setting this to: no
- # The default (for unknown API keys) is also: no
+ # The default (for unknown API keys) is: no
enabled = no
# The default history in entries, for all hosts using this API key.
# You can also set it per host below.
# If you don't set it here, the history size of the central netdata
- # will be used
+ # will be used.
default history = 3600
# The default memory mode to be used for all hosts using this API key.
@@ -84,11 +89,11 @@
# save save on exit, load on start
# map like swap (continuously syncing to disks)
# ram keep it in RAM, don't touch the disk
- # none no database (passing through this netdata)
+ # none no database at all (use this on headless proxies)
default memory mode = ram
# Shall we enable health monitoring for the hosts using this API key?
- # 3 values:
+ # 3 possible values:
# yes enable alarms
# no do not enable alarms
# auto enable alarms, only when the sending netdata is connected
@@ -107,16 +112,18 @@
# -----------------------------------------------------------------------------
-# 3. ON MASTER NETDATA - THE ONE THAT WILL BE RECEIVING METRICS
-#
-# THIS IS OPTIONAL - YOU DON'T NEED IT BY DEFAULT
-# It only exists to give you finer control of the master settings for each
+# 3. PER SENDING HOST SETTINGS, ON MASTER NETDATA
+# THIS IS OPTIONAL - YOU DON'T NEED IT
+
+# This section exists to give you finer control of the master settings for each
# slave host, when the same API key is used by many netdata slaves / proxies.
#
# Each netdata has a unique GUID - generated the first time netdata starts.
# You can find it at /var/lib/netdata/registry/netdata.public.unique.id
+# (at the slave).
+#
# The host sending data will have one. If the host is not ephemeral,
-# you can give settings for each specific host here.
+# you can give settings for each sending host here.
[MACHINE_GUID]
# enable this host: yes | no
diff --git a/config.guess b/config.guess
index d622a44e..b79252d6 100755
--- a/config.guess
+++ b/config.guess
@@ -1,14 +1,12 @@
#! /bin/sh
# Attempt to guess a canonical system name.
-# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
-# 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
-# 2011, 2012 Free Software Foundation, Inc.
+# Copyright 1992-2013 Free Software Foundation, Inc.
-timestamp='2012-02-10'
+timestamp='2013-06-10'
# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
+# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
@@ -22,19 +20,17 @@ timestamp='2012-02-10'
# As a special exception to the GNU General Public License, if you
# distribute this file as part of a program that contains a
# configuration script generated by Autoconf, you may include it under
-# the same distribution terms that you use for the rest of that program.
-
-
-# Originally written by Per Bothner. Please send patches (context
-# diff format) to <config-patches@gnu.org> and include a ChangeLog
-# entry.
+# the same distribution terms that you use for the rest of that
+# program. This Exception is an additional permission under section 7
+# of the GNU General Public License, version 3 ("GPLv3").
#
-# This script attempts to guess a canonical system name similar to
-# config.sub. If it succeeds, it prints the system name on stdout, and
-# exits with 0. Otherwise, it exits with 1.
+# Originally written by Per Bothner.
#
# You can get the latest version of this script from:
# http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess;hb=HEAD
+#
+# Please send patches with a ChangeLog entry to config-patches@gnu.org.
+
me=`echo "$0" | sed -e 's,.*/,,'`
@@ -54,9 +50,7 @@ version="\
GNU config.guess ($timestamp)
Originally written by Per Bothner.
-Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
-2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012
-Free Software Foundation, Inc.
+Copyright 1992-2013 Free Software Foundation, Inc.
This is free software; see the source for copying conditions. There is NO
warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."
@@ -138,6 +132,27 @@ UNAME_RELEASE=`(uname -r) 2>/dev/null` || UNAME_RELEASE=unknown
UNAME_SYSTEM=`(uname -s) 2>/dev/null` || UNAME_SYSTEM=unknown
UNAME_VERSION=`(uname -v) 2>/dev/null` || UNAME_VERSION=unknown
+case "${UNAME_SYSTEM}" in
+Linux|GNU|GNU/*)
+ # If the system lacks a compiler, then just pick glibc.
+ # We could probably try harder.
+ LIBC=gnu
+
+ eval $set_cc_for_build
+ cat <<-EOF > $dummy.c
+ #include <features.h>
+ #if defined(__UCLIBC__)
+ LIBC=uclibc
+ #elif defined(__dietlibc__)
+ LIBC=dietlibc
+ #else
+ LIBC=gnu
+ #endif
+ EOF
+ eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^LIBC'`
+ ;;
+esac
+
# Note: order is significant - the case branches are not exclusive.
case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
@@ -200,6 +215,10 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
# CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM is used.
echo "${machine}-${os}${release}"
exit ;;
+ *:Bitrig:*:*)
+ UNAME_MACHINE_ARCH=`arch | sed 's/Bitrig.//'`
+ echo ${UNAME_MACHINE_ARCH}-unknown-bitrig${UNAME_RELEASE}
+ exit ;;
*:OpenBSD:*:*)
UNAME_MACHINE_ARCH=`arch | sed 's/OpenBSD.//'`
echo ${UNAME_MACHINE_ARCH}-unknown-openbsd${UNAME_RELEASE}
@@ -302,7 +321,7 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
arm:RISC*:1.[012]*:*|arm:riscix:1.[012]*:*)
echo arm-acorn-riscix${UNAME_RELEASE}
exit ;;
- arm:riscos:*:*|arm:RISCOS:*:*)
+ arm*:riscos:*:*|arm*:RISCOS:*:*)
echo arm-unknown-riscos
exit ;;
SR2?01:HI-UX/MPP:*:* | SR8000:HI-UX/MPP:*:*)
@@ -801,6 +820,9 @@ EOF
i*:CYGWIN*:*)
echo ${UNAME_MACHINE}-pc-cygwin
exit ;;
+ *:MINGW64*:*)
+ echo ${UNAME_MACHINE}-pc-mingw64
+ exit ;;
*:MINGW*:*)
echo ${UNAME_MACHINE}-pc-mingw32
exit ;;
@@ -852,21 +874,21 @@ EOF
exit ;;
*:GNU:*:*)
# the GNU system
- echo `echo ${UNAME_MACHINE}|sed -e 's,[-/].*$,,'`-unknown-gnu`echo ${UNAME_RELEASE}|sed -e 's,/.*$,,'`
+ echo `echo ${UNAME_MACHINE}|sed -e 's,[-/].*$,,'`-unknown-${LIBC}`echo ${UNAME_RELEASE}|sed -e 's,/.*$,,'`
exit ;;
*:GNU/*:*:*)
# other systems with GNU libc and userland
- echo ${UNAME_MACHINE}-unknown-`echo ${UNAME_SYSTEM} | sed 's,^[^/]*/,,' | tr '[A-Z]' '[a-z]'``echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`-gnu
+ echo ${UNAME_MACHINE}-unknown-`echo ${UNAME_SYSTEM} | sed 's,^[^/]*/,,' | tr '[A-Z]' '[a-z]'``echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`-${LIBC}
exit ;;
i*86:Minix:*:*)
echo ${UNAME_MACHINE}-pc-minix
exit ;;
aarch64:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-gnu
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
exit ;;
aarch64_be:Linux:*:*)
UNAME_MACHINE=aarch64_be
- echo ${UNAME_MACHINE}-unknown-linux-gnu
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
exit ;;
alpha:Linux:*:*)
case `sed -n '/^cpu model/s/^.*: \(.*\)/\1/p' < /proc/cpuinfo` in
@@ -879,59 +901,54 @@ EOF
EV68*) UNAME_MACHINE=alphaev68 ;;
esac
objdump --private-headers /bin/sh | grep -q ld.so.1
- if test "$?" = 0 ; then LIBC="libc1" ; else LIBC="" ; fi
- echo ${UNAME_MACHINE}-unknown-linux-gnu${LIBC}
+ if test "$?" = 0 ; then LIBC="gnulibc1" ; fi
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ exit ;;
+ arc:Linux:*:* | arceb:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
exit ;;
arm*:Linux:*:*)
eval $set_cc_for_build
if echo __ARM_EABI__ | $CC_FOR_BUILD -E - 2>/dev/null \
| grep -q __ARM_EABI__
then
- echo ${UNAME_MACHINE}-unknown-linux-gnu
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
else
if echo __ARM_PCS_VFP | $CC_FOR_BUILD -E - 2>/dev/null \
| grep -q __ARM_PCS_VFP
then
- echo ${UNAME_MACHINE}-unknown-linux-gnueabi
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}eabi
else
- echo ${UNAME_MACHINE}-unknown-linux-gnueabihf
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}eabihf
fi
fi
exit ;;
avr32*:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-gnu
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
exit ;;
cris:Linux:*:*)
- echo ${UNAME_MACHINE}-axis-linux-gnu
+ echo ${UNAME_MACHINE}-axis-linux-${LIBC}
exit ;;
crisv32:Linux:*:*)
- echo ${UNAME_MACHINE}-axis-linux-gnu
+ echo ${UNAME_MACHINE}-axis-linux-${LIBC}
exit ;;
frv:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-gnu
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
exit ;;
hexagon:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-gnu
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
exit ;;
i*86:Linux:*:*)
- LIBC=gnu
- eval $set_cc_for_build
- sed 's/^ //' << EOF >$dummy.c
- #ifdef __dietlibc__
- LIBC=dietlibc
- #endif
-EOF
- eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^LIBC'`
- echo "${UNAME_MACHINE}-pc-linux-${LIBC}"
+ echo ${UNAME_MACHINE}-pc-linux-${LIBC}
exit ;;
ia64:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-gnu
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
exit ;;
m32r*:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-gnu
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
exit ;;
m68*:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-gnu
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
exit ;;
mips:Linux:*:* | mips64:Linux:*:*)
eval $set_cc_for_build
@@ -950,54 +967,63 @@ EOF
#endif
EOF
eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^CPU'`
- test x"${CPU}" != x && { echo "${CPU}-unknown-linux-gnu"; exit; }
+ test x"${CPU}" != x && { echo "${CPU}-unknown-linux-${LIBC}"; exit; }
;;
+ or1k:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ exit ;;
or32:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-gnu
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
exit ;;
padre:Linux:*:*)
- echo sparc-unknown-linux-gnu
+ echo sparc-unknown-linux-${LIBC}
exit ;;
parisc64:Linux:*:* | hppa64:Linux:*:*)
- echo hppa64-unknown-linux-gnu
+ echo hppa64-unknown-linux-${LIBC}
exit ;;
parisc:Linux:*:* | hppa:Linux:*:*)
# Look for CPU level
case `grep '^cpu[^a-z]*:' /proc/cpuinfo 2>/dev/null | cut -d' ' -f2` in
- PA7*) echo hppa1.1-unknown-linux-gnu ;;
- PA8*) echo hppa2.0-unknown-linux-gnu ;;
- *) echo hppa-unknown-linux-gnu ;;
+ PA7*) echo hppa1.1-unknown-linux-${LIBC} ;;
+ PA8*) echo hppa2.0-unknown-linux-${LIBC} ;;
+ *) echo hppa-unknown-linux-${LIBC} ;;
esac
exit ;;
ppc64:Linux:*:*)
- echo powerpc64-unknown-linux-gnu
+ echo powerpc64-unknown-linux-${LIBC}
exit ;;
ppc:Linux:*:*)
- echo powerpc-unknown-linux-gnu
+ echo powerpc-unknown-linux-${LIBC}
+ exit ;;
+ ppc64le:Linux:*:*)
+ echo powerpc64le-unknown-linux-${LIBC}
+ exit ;;
+ ppcle:Linux:*:*)
+ echo powerpcle-unknown-linux-${LIBC}
exit ;;
s390:Linux:*:* | s390x:Linux:*:*)
- echo ${UNAME_MACHINE}-ibm-linux
+ echo ${UNAME_MACHINE}-ibm-linux-${LIBC}
exit ;;
sh64*:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-gnu
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
exit ;;
sh*:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-gnu
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
exit ;;
sparc:Linux:*:* | sparc64:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-gnu
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
exit ;;
tile*:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-gnu
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
exit ;;
vax:Linux:*:*)
- echo ${UNAME_MACHINE}-dec-linux-gnu
+ echo ${UNAME_MACHINE}-dec-linux-${LIBC}
exit ;;
x86_64:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-gnu
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
exit ;;
xtensa*:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-gnu
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
exit ;;
i*86:DYNIX/ptx:4*:*)
# ptx 4.0 does uname -s correctly, with DYNIX/ptx in there.
@@ -1201,6 +1227,9 @@ EOF
BePC:Haiku:*:*) # Haiku running on Intel PC compatible.
echo i586-pc-haiku
exit ;;
+ x86_64:Haiku:*:*)
+ echo x86_64-unknown-haiku
+ exit ;;
SX-4:SUPER-UX:*:*)
echo sx4-nec-superux${UNAME_RELEASE}
exit ;;
@@ -1227,19 +1256,21 @@ EOF
exit ;;
*:Darwin:*:*)
UNAME_PROCESSOR=`uname -p` || UNAME_PROCESSOR=unknown
- case $UNAME_PROCESSOR in
- i386)
- eval $set_cc_for_build
- if [ "$CC_FOR_BUILD" != 'no_compiler_found' ]; then
- if (echo '#ifdef __LP64__'; echo IS_64BIT_ARCH; echo '#endif') | \
- (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | \
- grep IS_64BIT_ARCH >/dev/null
- then
- UNAME_PROCESSOR="x86_64"
- fi
- fi ;;
- unknown) UNAME_PROCESSOR=powerpc ;;
- esac
+ eval $set_cc_for_build
+ if test "$UNAME_PROCESSOR" = unknown ; then
+ UNAME_PROCESSOR=powerpc
+ fi
+ if [ "$CC_FOR_BUILD" != 'no_compiler_found' ]; then
+ if (echo '#ifdef __LP64__'; echo IS_64BIT_ARCH; echo '#endif') | \
+ (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | \
+ grep IS_64BIT_ARCH >/dev/null
+ then
+ case $UNAME_PROCESSOR in
+ i386) UNAME_PROCESSOR=x86_64 ;;
+ powerpc) UNAME_PROCESSOR=powerpc64 ;;
+ esac
+ fi
+ fi
echo ${UNAME_PROCESSOR}-apple-darwin${UNAME_RELEASE}
exit ;;
*:procnto*:*:* | *:QNX:[0123456789]*:*)
@@ -1256,7 +1287,7 @@ EOF
NEO-?:NONSTOP_KERNEL:*:*)
echo neo-tandem-nsk${UNAME_RELEASE}
exit ;;
- NSE-?:NONSTOP_KERNEL:*:*)
+ NSE-*:NONSTOP_KERNEL:*:*)
echo nse-tandem-nsk${UNAME_RELEASE}
exit ;;
NSR-?:NONSTOP_KERNEL:*:*)
@@ -1330,9 +1361,6 @@ EOF
exit ;;
esac
-#echo '(No uname command or uname output not recognized.)' 1>&2
-#echo "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" 1>&2
-
eval $set_cc_for_build
cat >$dummy.c <<EOF
#ifdef _SEQUENT_
diff --git a/config.h.in b/config.h.in
index 8feeefb5..156f9a72 100644
--- a/config.h.in
+++ b/config.h.in
@@ -93,6 +93,9 @@
/* Have PTHREAD_PRIO_INHERIT. */
#undef HAVE_PTHREAD_PRIO_INHERIT
+/* Define to 1 if you have the `recvmmsg' function. */
+#undef HAVE_RECVMMSG
+
/* Define to 1 if you have the <resolv.h> header file. */
#undef HAVE_RESOLV_H
diff --git a/config.sub b/config.sub
index c894da45..9633db70 100755
--- a/config.sub
+++ b/config.sub
@@ -1,24 +1,18 @@
#! /bin/sh
# Configuration validation subroutine script.
-# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
-# 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
-# 2011, 2012 Free Software Foundation, Inc.
+# Copyright 1992-2013 Free Software Foundation, Inc.
-timestamp='2012-02-10'
+timestamp='2013-08-10'
-# This file is (in principle) common to ALL GNU software.
-# The presence of a machine in this file suggests that SOME GNU software
-# can handle that machine. It does not imply ALL GNU software can.
-#
-# This file is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
+# This file is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
@@ -26,11 +20,12 @@ timestamp='2012-02-10'
# As a special exception to the GNU General Public License, if you
# distribute this file as part of a program that contains a
# configuration script generated by Autoconf, you may include it under
-# the same distribution terms that you use for the rest of that program.
+# the same distribution terms that you use for the rest of that
+# program. This Exception is an additional permission under section 7
+# of the GNU General Public License, version 3 ("GPLv3").
-# Please send patches to <config-patches@gnu.org>. Submit a context
-# diff and a properly formatted GNU ChangeLog entry.
+# Please send patches with a ChangeLog entry to config-patches@gnu.org.
#
# Configuration subroutine to validate and canonicalize a configuration type.
# Supply the specified configuration type as an argument.
@@ -73,9 +68,7 @@ Report bugs and patches to <config-patches@gnu.org>."
version="\
GNU config.sub ($timestamp)
-Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
-2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012
-Free Software Foundation, Inc.
+Copyright 1992-2013 Free Software Foundation, Inc.
This is free software; see the source for copying conditions. There is NO
warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."
@@ -123,7 +116,7 @@ esac
maybe_os=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\2/'`
case $maybe_os in
nto-qnx* | linux-gnu* | linux-android* | linux-dietlibc | linux-newlib* | \
- linux-uclibc* | uclinux-uclibc* | uclinux-gnu* | kfreebsd*-gnu* | \
+ linux-musl* | linux-uclibc* | uclinux-uclibc* | uclinux-gnu* | kfreebsd*-gnu* | \
knetbsd*-gnu* | netbsd*-gnu* | \
kopensolaris*-gnu* | \
storm-chaos* | os2-emx* | rtmk-nova*)
@@ -156,7 +149,7 @@ case $os in
-convergent* | -ncr* | -news | -32* | -3600* | -3100* | -hitachi* |\
-c[123]* | -convex* | -sun | -crds | -omron* | -dg | -ultra | -tti* | \
-harris | -dolphin | -highlevel | -gould | -cbm | -ns | -masscomp | \
- -apple | -axis | -knuth | -cray | -microblaze)
+ -apple | -axis | -knuth | -cray | -microblaze*)
os=
basic_machine=$1
;;
@@ -225,6 +218,12 @@ case $os in
-isc*)
basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
;;
+ -lynx*178)
+ os=-lynxos178
+ ;;
+ -lynx*5)
+ os=-lynxos5
+ ;;
-lynx*)
os=-lynxos
;;
@@ -253,10 +252,12 @@ case $basic_machine in
| alpha | alphaev[4-8] | alphaev56 | alphaev6[78] | alphapca5[67] \
| alpha64 | alpha64ev[4-8] | alpha64ev56 | alpha64ev6[78] | alpha64pca5[67] \
| am33_2.0 \
- | arc | arm | arm[bl]e | arme[lb] | armv[2345] | armv[345][lb] | avr | avr32 \
- | be32 | be64 \
+ | arc | arceb \
+ | arm | arm[bl]e | arme[lb] | armv[2-8] | armv[3-8][lb] | armv7[arm] \
+ | avr | avr32 \
+ | be32 | be64 \
| bfin \
- | c4x | clipper \
+ | c4x | c8051 | clipper \
| d10v | d30v | dlx | dsp16xx \
| epiphany \
| fido | fr30 | frv \
@@ -267,7 +268,7 @@ case $basic_machine in
| le32 | le64 \
| lm32 \
| m32c | m32r | m32rle | m68000 | m68k | m88k \
- | maxq | mb | microblaze | mcore | mep | metag \
+ | maxq | mb | microblaze | microblazeel | mcore | mep | metag \
| mips | mipsbe | mipseb | mipsel | mipsle \
| mips16 \
| mips64 | mips64el \
@@ -285,16 +286,17 @@ case $basic_machine in
| mipsisa64r2 | mipsisa64r2el \
| mipsisa64sb1 | mipsisa64sb1el \
| mipsisa64sr71k | mipsisa64sr71kel \
+ | mipsr5900 | mipsr5900el \
| mipstx39 | mipstx39el \
| mn10200 | mn10300 \
| moxie \
| mt \
| msp430 \
| nds32 | nds32le | nds32be \
- | nios | nios2 \
+ | nios | nios2 | nios2eb | nios2el \
| ns16k | ns32k \
| open8 \
- | or32 \
+ | or1k | or32 \
| pdp10 | pdp11 | pj | pjl \
| powerpc | powerpc64 | powerpc64le | powerpcle \
| pyramid \
@@ -364,13 +366,13 @@ case $basic_machine in
| aarch64-* | aarch64_be-* \
| alpha-* | alphaev[4-8]-* | alphaev56-* | alphaev6[78]-* \
| alpha64-* | alpha64ev[4-8]-* | alpha64ev56-* | alpha64ev6[78]-* \
- | alphapca5[67]-* | alpha64pca5[67]-* | arc-* \
+ | alphapca5[67]-* | alpha64pca5[67]-* | arc-* | arceb-* \
| arm-* | armbe-* | armle-* | armeb-* | armv*-* \
| avr-* | avr32-* \
| be32-* | be64-* \
| bfin-* | bs2000-* \
| c[123]* | c30-* | [cjt]90-* | c4x-* \
- | clipper-* | craynv-* | cydra-* \
+ | c8051-* | clipper-* | craynv-* | cydra-* \
| d10v-* | d30v-* | dlx-* \
| elxsi-* \
| f30[01]-* | f700-* | fido-* | fr30-* | frv-* | fx80-* \
@@ -383,7 +385,8 @@ case $basic_machine in
| lm32-* \
| m32c-* | m32r-* | m32rle-* \
| m68000-* | m680[012346]0-* | m68360-* | m683?2-* | m68k-* \
- | m88110-* | m88k-* | maxq-* | mcore-* | metag-* | microblaze-* \
+ | m88110-* | m88k-* | maxq-* | mcore-* | metag-* \
+ | microblaze-* | microblazeel-* \
| mips-* | mipsbe-* | mipseb-* | mipsel-* | mipsle-* \
| mips16-* \
| mips64-* | mips64el-* \
@@ -401,12 +404,13 @@ case $basic_machine in
| mipsisa64r2-* | mipsisa64r2el-* \
| mipsisa64sb1-* | mipsisa64sb1el-* \
| mipsisa64sr71k-* | mipsisa64sr71kel-* \
+ | mipsr5900-* | mipsr5900el-* \
| mipstx39-* | mipstx39el-* \
| mmix-* \
| mt-* \
| msp430-* \
| nds32-* | nds32le-* | nds32be-* \
- | nios-* | nios2-* \
+ | nios-* | nios2-* | nios2eb-* | nios2el-* \
| none-* | np1-* | ns16k-* | ns32k-* \
| open8-* \
| orion-* \
@@ -782,11 +786,15 @@ case $basic_machine in
basic_machine=ns32k-utek
os=-sysv
;;
- microblaze)
+ microblaze*)
basic_machine=microblaze-xilinx
;;
+ mingw64)
+ basic_machine=x86_64-pc
+ os=-mingw64
+ ;;
mingw32)
- basic_machine=i386-pc
+ basic_machine=i686-pc
os=-mingw32
;;
mingw32ce)
@@ -822,7 +830,7 @@ case $basic_machine in
basic_machine=`echo $basic_machine | sed -e 's/ms1-/mt-/'`
;;
msys)
- basic_machine=i386-pc
+ basic_machine=i686-pc
os=-msys
;;
mvs)
@@ -1013,7 +1021,11 @@ case $basic_machine in
basic_machine=i586-unknown
os=-pw32
;;
- rdos)
+ rdos | rdos64)
+ basic_machine=x86_64-pc
+ os=-rdos
+ ;;
+ rdos32)
basic_machine=i386-pc
os=-rdos
;;
@@ -1340,21 +1352,21 @@ case $os in
-gnu* | -bsd* | -mach* | -minix* | -genix* | -ultrix* | -irix* \
| -*vms* | -sco* | -esix* | -isc* | -aix* | -cnk* | -sunos | -sunos[34]*\
| -hpux* | -unos* | -osf* | -luna* | -dgux* | -auroraux* | -solaris* \
- | -sym* | -kopensolaris* \
+ | -sym* | -kopensolaris* | -plan9* \
| -amigaos* | -amigados* | -msdos* | -newsos* | -unicos* | -aof* \
| -aos* | -aros* \
| -nindy* | -vxsim* | -vxworks* | -ebmon* | -hms* | -mvs* \
| -clix* | -riscos* | -uniplus* | -iris* | -rtu* | -xenix* \
| -hiux* | -386bsd* | -knetbsd* | -mirbsd* | -netbsd* \
- | -openbsd* | -solidbsd* \
+ | -bitrig* | -openbsd* | -solidbsd* \
| -ekkobsd* | -kfreebsd* | -freebsd* | -riscix* | -lynxos* \
| -bosx* | -nextstep* | -cxux* | -aout* | -elf* | -oabi* \
| -ptx* | -coff* | -ecoff* | -winnt* | -domain* | -vsta* \
| -udi* | -eabi* | -lites* | -ieee* | -go32* | -aux* \
| -chorusos* | -chorusrdb* | -cegcc* \
| -cygwin* | -msys* | -pe* | -psos* | -moss* | -proelf* | -rtems* \
- | -mingw32* | -linux-gnu* | -linux-android* \
- | -linux-newlib* | -linux-uclibc* \
+ | -mingw32* | -mingw64* | -linux-gnu* | -linux-android* \
+ | -linux-newlib* | -linux-musl* | -linux-uclibc* \
| -uxpv* | -beos* | -mpeix* | -udk* \
| -interix* | -uwin* | -mks* | -rhapsody* | -darwin* | -opened* \
| -openstep* | -oskit* | -conix* | -pw32* | -nonstopux* \
@@ -1486,9 +1498,6 @@ case $os in
-aros*)
os=-aros
;;
- -kaos*)
- os=-kaos
- ;;
-zvmoe)
os=-zvmoe
;;
@@ -1537,6 +1546,12 @@ case $basic_machine in
c4x-* | tic4x-*)
os=-coff
;;
+ c8051-*)
+ os=-elf
+ ;;
+ hexagon-*)
+ os=-elf
+ ;;
tic54x-*)
os=-coff
;;
@@ -1577,6 +1592,9 @@ case $basic_machine in
mips*-*)
os=-elf
;;
+ or1k-*)
+ os=-elf
+ ;;
or32-*)
os=-coff
;;
diff --git a/configs.signatures b/configs.signatures
index 713a1d32..d1308a88 100644
--- a/configs.signatures
+++ b/configs.signatures
@@ -2,6 +2,7 @@ declare -A configs_signatures=(
['0056936ce99788ed9ae1c611c87aa6d8']='apps_groups.conf'
['0102351817595a85d01ebd54a5f2f36b']='python.d/ovpn_status_log.conf'
['01302e01162d465614276de43fad7546']='python.d.conf'
+ ['01c54057e0ca55b5bb49df1662d6b8c3']='python.d/web_log.conf'
['02fa10fa85ab88e9723998de48d1aca0']='health.d/disks.conf'
['036dc300bd7b0e0ef229b9822686d63e']='python.d/isc_dhcpd.conf'
['0388b873d0d7e47c19005b7241db77d8']='python.d/tomcat.conf'
@@ -20,8 +21,10 @@ declare -A configs_signatures=(
['0856124b1eecf01681b4fdf4e21efb3f']='health.d/net.conf'
['08ff5218f938fc48e09e718821169d14']='health.d/redis.conf'
['091572888425bc3b8b559c3c53367ec7']='apps_groups.conf'
+ ['09225283977a6584f8063016091cc4f2']='health.d/tcp_resets.conf'
['09264cec953ae1c4c2985e6446abb386']='health.d/mysql.conf'
['093540fdc2a228e976ce5d48a3adf9fc']='health.d/disks.conf'
+ ['09e030d26be08a13fa3560e47fa27825']='apps_groups.conf'
['0ad10fa896346202aee99384b0ec968d']='health.d/cpu.conf'
['0c5e0fa364d7bdf7c16e8459a0544572']='health.d/netfilter.conf'
['0cd4e1fb57497e4d4c2451a9e58f724d']='python.d/redis.conf'
@@ -29,6 +32,8 @@ declare -A configs_signatures=(
['0dd38dcd2473ddb9f8b1b41147432d10']='health_alarm_notify.conf'
['0e59bc11d0a869ea0247c04c08c8d72e']='python.d/ipfs.conf'
['0ef8af1f358741afa7fd5d0ffabefaac']='charts.d/mysql.conf'
+ ['0f65b08edebedd06e376274021196a6b']='health.d/lighttpd.conf'
+ ['107e6ac69b30fb9837ac64c35f891ec7']='health.d/tcp_resets.conf'
['10c3b525850a1cb9de760a8ee96fbc6e']='charts.d/opensips.conf'
['1112c848ef91ebb9c622020d09712d67']='health.d/net.conf'
['13141998a5d71308d9c119834c27bfd3']='python.d.conf'
@@ -44,7 +49,10 @@ declare -A configs_signatures=(
['1972e48345e6c3f0d65f94a03317622b']='health_alarm_notify.conf'
['1c12b678ab65f271a96da1bbd0a1ab1c']='health.d/softnet.conf'
['1c3168c95b53e999df3d45162b3f50b8']='health.d/fping.conf'
+ ['1d6efba856acaaaf3b50bc6d66611b92']='python.d/web_log.conf'
+ ['1e09f326178acf07d361c08a44d8b1f3']='python.d/rabbitmq.conf'
['1ea8e8ef1fa8a3a0fcdfba236f4cb195']='python.d/mysql.conf'
+ ['1eb0bc80934a3166fcde4d153c476d14']='health.d/fping.conf'
['1ef0fd38e7969c023bc3fa6d89eaf6d6']='python.d/mdstat.conf'
['1f5545b3ff52b3eb75ee05401f67a9bc']='fping.conf'
['1fa47f32ab52a22f8e7087cae85dd25e']='health.d/net.conf'
@@ -56,6 +64,7 @@ declare -A configs_signatures=(
['22ceb822983134a7ca67343241f30341']='health.d/disks.conf'
['2385e5d35b440619621c4af62492d91b']='health.d/disks.conf'
['23ae815aefa221b1929f96752a1f7556']='health.d/squid.conf'
+ ['243503ceee1d5b4e1e55a28768a116ae']='health.d/net.conf'
['2472e49550326f7142e2c425ccbca005']='health.d/softnet.conf'
['254de8ec49602bea2da3631676d7cfec']='health.d/cpu.conf'
['256a7f06f7e579a61752fc64418cffe5']='charts.d/nut.conf'
@@ -72,6 +81,7 @@ declare -A configs_signatures=(
['2f3a8e33df83f14e0af8ca2465697215']='python.d/exim.conf'
['2f4a85fedecce1bf425fa1039f6b021e']='apps_groups.conf'
['2fa8fb929fd597f2ab97b6efc540a043']='health_alarm_notify.conf'
+ ['307ac41f6c67fcf007d6f7135fac314c']='stream.conf'
['312b4b8e2805e19cf9be554b319567d6']='health.d/softnet.conf'
['318bb45755726a25120bb33413d4b582']='health.d/net.conf'
['325617412a628e3bc776e3fbb777a2a6']='health.d/redis.conf'
@@ -121,9 +131,11 @@ declare -A configs_signatures=(
['47180421d580baeaedf8c0ef3d647fb5']='python.d/hddtemp.conf'
['48195c5c8c0476a49b714b4c76bdb570']='python.d/squid.conf'
['48eef63bcf744bae114b502b6dacb4a1']='charts.d/phpfpm.conf'
+ ['4960852f8951b54ca2fe10065752143e']='python.d.conf'
['4a448831776de8acf2e0bdc4cc994cb4']='apps_groups.conf'
['4b775fb31342f1478b3773d041a72911']='python.d.conf'
['4ccb06fff1ce06dc5bc80e0a9f568f6e']='charts.d.conf'
+ ['4cd585f5dfdacaf287413ad037b4e60a']='apps_groups.conf'
['4d13684cadfa90e73ab465409bf7263b']='health.d/mysql.conf'
['4d91ee6fe4c887ea3865ef36ac63da3c']='health.d/mysql.conf'
['4e995acb0d6fd77403a2a9dca984b55b']='charts.d.conf'
@@ -132,6 +144,7 @@ declare -A configs_signatures=(
['4fdf72784296326e0b46cb526a5d77a1']='python.d.conf'
['4fef19afccd9a591165b72f0b1a2ac2e']='python.d/freeradius.conf'
['501eb2484b459b410b3f792c2dbaa955']='health.d/swap.conf'
+ ['5050b5963599f13ad5dc0263fa39a906']='python.d/fail2ban.conf'
['508771d8e4611a058991a1bc11039dea']='health.d/disks.conf'
['5120492fa26be3749192607f62dc05f8']='health.d/mdstat.conf'
['5271cf9fc0fd10915a9759add70f7d78']='health.d/swap.conf'
@@ -140,6 +153,7 @@ declare -A configs_signatures=(
['53160707fdc6ce46c195b1b55bb0bcb1']='health.d/swap.conf'
['535e5113b07b0fc6f3abd59546c276f6']='charts.d.conf'
['5379cdc26d7725e2b0d688d785816cef']='python.d/mysql.conf'
+ ['5452eccad2f220d1191411737f6f4b2b']='python.d/isc_dhcpd.conf'
['54614490a14e1a4b7b3d9fecb6b4cfa5']='python.d/exim.conf'
['547779cdc460a926980de1590294b96b']='health.d/softnet.conf'
['55608bdd908a3806df1468f6ee318b2b']='health.d/qos.conf'
@@ -151,6 +165,7 @@ declare -A configs_signatures=(
['5829812db29598db5857c9f433e96fef']='python.d/apache.conf'
['58e835b7176865ec5a6f59f7aba832bf']='health.d/named.conf'
['598f9814966a9e2fe48e8218151d3fa6']='stream.conf'
+ ['59dded33e3adfe622f36c557a4f4bed7']='health.d/net.conf'
['5b917d894bb6a755d59264e9d48e9d56']='fping.conf'
['5bbef0708f5eff4d4a53aaf35fc48a62']='health.d/disks.conf'
['5bf51bb24fb41db9b1e448bd060d3f8c']='apps_groups.conf'
@@ -174,6 +189,7 @@ declare -A configs_signatures=(
['650b5fc9da23b25ee7ee1481e4aa2851']='health_alarm_notify.conf'
['653e0c014c8fcfb4db6cd3351d87d720']='python.d.conf'
['6546909d10cc5efcef9dd873bea85956']='python.d/mysql.conf'
+ ['65a59d96c039d0180603ffd945a8968c']='apps_groups.conf'
['65c6933a17fb6b7f8e6baeab73431c17']='charts.d/apcupsd.conf'
['6608c6546b3c6bde084fc1d34b1163c1']='health.d/retroshare.conf'
['669ebef43ee341f6889d382e86d0e200']='health.d/named.conf'
@@ -181,6 +197,8 @@ declare -A configs_signatures=(
['6814b9bc84483db428f6a479ba221855']='python.d/mysql.conf'
['6a18f61a595c0d48c3363bcc0dbfa6b9']='health_alarm_notify.conf'
['6a47af861ad3dd112124c37fbf09672b']='apps_groups.conf'
+ ['6aa4507f86657383917a0407f2a9cc0d']='python.d.conf'
+ ['6acad8ce5c33e642742825db0eb9bb56']='python.d/web_log.conf'
['6b39de5d85db45115db236347a6896d4']='health.d/named.conf'
['6b917300747e7e8314844237e2462261']='python.d/apache_cache.conf'
['6bb278bd9e171c4cb5c0fe639231288b']='python.d/web_log.conf'
@@ -220,7 +238,9 @@ declare -A configs_signatures=(
['7d8bd884ec26cb35d16c4fc05f969799']='python.d/squid.conf'
['7deb236ec68a512b9bdd18e6a51d76f7']='python.d/mysql.conf'
['7e5fc1644aa7a54f9dbb1bd102521b09']='health.d/memcached.conf'
+ ['7f13631183fbdf79c21c8e5a171e9b34']='health.d/zfs.conf'
['80266bddd3df374923c750a6de91d120']='health.d/apache.conf'
+ ['803a7f9dcb942eeac0fd764b9e3e38ca']='fping.conf'
['80d242d619eb7e91cebfdbf58d79b0f8']='health.d/disks.conf'
['80df37b89e852d585209b8c02bb94312']='python.d/bind_rndc.conf'
['80f109ff293ac94222bf3959432751bd']='health.d/qos.conf'
@@ -228,6 +248,7 @@ declare -A configs_signatures=(
['8213d921b6a8382e27052fb42d81db3d']='python.d/freeradius.conf'
['8214bb8f4b005aa4691fcd38f7331e8f']='health.d/swap.conf'
['837480f77ba1a85677a36747fbc2cd2e']='python.d/sensors.conf'
+ ['8422e71761d22e817e3cfcb1befc6080']='health.d/mongodb.conf'
['8425a60ea3d28ed40bb0bac4c3f182e8']='python.d/sensors.conf'
['842b1ad5b89bfa5f421d9c5b72e001a4']='health.d/apache.conf'
['845023f9b4a526aa0e6493756dbe6034']='health.d/squid.conf'
@@ -235,6 +256,7 @@ declare -A configs_signatures=(
['8490f690d97adacc4e2096df82e7e8a5']='charts.d/cpufreq.conf'
['871bbeea33b83ea9755600b6d574919c']='python.d/web_log.conf'
['87224d2f2b87646f3c0d38cc1eb30112']='python.d/nsd.conf'
+ ['87642c568093daf3b2c30c5beffe2225']='python.d/elasticsearch.conf'
['8810140ce9c09af1d18b9602c4003904']='health_alarm_notify.conf'
['88f77865f75c9fb61c97d700bd4561ee']='python.d/mysql.conf'
['8989b5e2f4ef9cd278ef58be0fae4074']='health.d/disks.conf'
@@ -256,6 +278,7 @@ declare -A configs_signatures=(
['97eee7a30e6419df4537242e9d4a719d']='health.d/mysql.conf'
['97f337eb96213f3ede05e522e3743a6c']='python.d/memcached.conf'
['99a3de85d1e7826ed64a5f8576712e5d']='python.d.conf'
+ ['99b06e68f1da5917ae4cf60e901439f6']='health.d/ram.conf'
['99b6030ce25c8fee4598179c0f95fb0b']='health.d/redis.conf'
['99c1617448abbdc493976ab9bda5ce02']='apps_groups.conf'
['9a8a459a3841b78d4c6ef07428ad2fe1']='health.d/entropy.conf'
@@ -270,6 +293,7 @@ declare -A configs_signatures=(
['a09714b5942cf25a89ec3da1dbc18063']='health.d/ram.conf'
['a0b3a12389c9c56dfe35964b20b59836']='health.d/bind_rndc.conf'
['a0ee8f351f213c0e8af9eb7a4a09cb95']='apps_groups.conf'
+ ['a1b6dfe312b896b0b1ba471e8ac07f95']='python.d/isc_dhcpd.conf'
['a2944a309f8ce1a3195451856478d6ae']='python.d.conf'
['a2a647dc492dc2d6ed1f5c0fdc97a96e']='python.d/mongodb.conf'
['a305b400378d6492efd15f9940c2779b']='health.d/softnet.conf'
@@ -278,6 +302,7 @@ declare -A configs_signatures=(
['a4a8660728c6afcb528cc6b378897d6b']='health.d/squid.conf'
['a4be524cc5b7192878c292a17c767c28']='health.d/redis.conf'
['a5114d5b0d3816dba75024b9444f4b40']='health.d/disks.conf'
+ ['a5134d7cfbe27f5791e788c2add51abb']='apps_groups.conf'
['a55133f1b0be0a4255057849dd451b09']='health_alarm_notify.conf'
['a6d5ce2572bf7a1dce9e545fcd29273e']='health.d/apache.conf'
['a71d9082410200bf92e823675d78121c']='python.d/retroshare.conf'
@@ -294,17 +319,21 @@ declare -A configs_signatures=(
['a9ab68845db2fb695b7060273a6ac68e']='health_alarm_notify.conf'
['a9cd91675467c5426f5b51c47602c889']='apps_groups.conf'
['aa4bee249bfc0c4a88ac8c2ffb97aa0d']='health.d/squid.conf'
+ ['aa620b7017c8b864d80aa6c8acab01cf']='python.d/smartd_log.conf'
+ ['aa6c4a270e6276f2deddf127ee1a24f6']='statsd.d/example.conf'
['aa8b57a733c2035917acf81a8ebdfbe7']='health.d/haproxy.conf'
['aac44691a1cf95fa8f8990a79bab4ce1']='python.d/web_log.conf'
['abaf2e021f9f6ee5d1c4e4726f47348e']='health.d/ipc.conf'
['acaa6731a272f6d251afb357e99b518f']='apps_groups.conf'
['ade389c1b6efe0cff47c33e662731f0a']='python.d/squid.conf'
['ae5ac0a3521e50aa6f6eda2a330b4075']='python.d/example.conf'
+ ['af14667ee7993acea810f6d50923bdc9']='health.d/web_log.conf'
['af44cc53aa2bc5cc8935667119567522']='python.d.conf'
['afdae4646c755ff2d117527fbf761c8e']='health.d/disks.conf'
['b07eebc6f58d19721ac069171b911d2a']='health_alarm_notify.conf'
['b0c59b2bd7a10f6a3f2be6b4b27857db']='health.d/haproxy.conf'
['b0f0a0ac415e4b1a82187b80d211e83b']='python.d/mysql.conf'
+ ['b181dcca01a258d9792ad703583baed2']='statsd.d/example.conf'
['b185914d4f795e1732273dc4c7a35845']='health.d/memory.conf'
['b27f10a38a95edbbec20f44a4728b7c4']='python.d.conf'
['b32164929eda7449a9677044e11151bf']='python.d.conf'
@@ -312,6 +341,7 @@ declare -A configs_signatures=(
['b5b5a8d6d991fb1cef8d80afa23ba114']='python.d/cpufreq.conf'
['b636e5e603f9d93e52c7577ac8c6bf0c']='health.d/entropy.conf'
['b68706bb8101ef85192db92f865a5d80']='health_alarm_notify.conf'
+ ['b6ee82968de8fbf974c0d35b55fe6fae']='python.d/web_log.conf'
['b735732fbe993d8191d6b3317082efa2']='health.d/qos.conf'
['b7d769ce86a7aebba01315da5c0799e6']='health.d/ram.conf'
['b81b8f331161b0d48e03f6fbf6b6d062']='health.d/memcached.conf'
@@ -322,19 +352,26 @@ declare -A configs_signatures=(
['ba11ea2d2f632b2de4b1224bcdc54f07']='python.d/smartd_log.conf'
['bb51112d01ff20053196a57632df8962']='apps_groups.conf'
['bba2f3886587f137ea08a6e63dd3d376']='python.d.conf'
+ ['bcd94c4fa2f89c710ff807de061ab11c']='health.d/net.conf'
['bda5517ea01640cfdfa0a27549619d6a']='health.d/memcached.conf'
['bf66f113b2dd8d8fb444cbd5650f284c']='health_alarm_notify.conf'
['c004430f55310ae9ed489c4905ed02cb']='charts.d/apache.conf'
['c080e006f544c949baca33cc24a9c126']='health_alarm_notify.conf'
['c1a7e634b5b8aad523a0d115a93379cd']='health.d/memcached.conf'
['c3296c08260bcd556e74711c820817be']='health.d/cpu.conf'
+ ['c3661b68232e06de90bb5e63e725b8b6']='health_alarm_notify.conf'
['c61948101e0e6846679682794ee48c5b']='python.d/nginx.conf'
+ ['c6403d8b1bcfa52d3abb941be155fc03']='python.d.conf'
+ ['c84fd3292710091802e443c8e688dee1']='health_alarm_notify.conf'
['c88fb430f35b7d8f08775d84debffbd2']='python.d/phpfpm.conf'
+ ['c94cb4f4eeaa13c1dcee6248deb01829']='python.d/postgres.conf'
+ ['c9a16df512b4a9ce7fa65f5a69bda20a']='python.d/web_log.conf'
['c9b792755de59d842ba95f8c315d94c8']='health.d/swap.conf'
['ca026d7c779f0a7cb7787713c5be5c47']='charts.d.conf'
['ca08a9b18d38ae0a0f5081a7cdc96863']='health.d/swap.conf'
['ca0eb92bdd3de67582ea6db37462895f']='health.d/tcp_resets.conf'
['ca249db7a0637d55abb938d969f9b486']='python.d/postfix.conf'
+ ['ca9e52b3ee3c71d3d042dc531753a1fd']='apps_groups.conf'
['cb178b15427274d7def5b14bc4c09441']='health.d/net.conf'
['cb60badf376d246ad8ec9d3f524db430']='health.d/disks.conf'
['cb7f80cd2768c649d7448e01f8aa6579']='python.d.conf'
@@ -376,8 +413,12 @@ declare -A configs_signatures=(
['e0242003fd2e3f9ac1b9314e802ada79']='python.d/hddtemp.conf'
['e0ba3bc216ffc9933b4741dbb6b1f8c8']='health.d/web_log.conf'
['e0e96cc47ed61d6492416be5236cd4d3']='python.d/apache_cache.conf'
+ ['e0ffc0c34424b35666fddf7f61e05def']='health.d/tcp_resets.conf'
+ ['e1a8bf99d36683c10225100f207a2b59']='python.d/web_log.conf'
['e2f3388c06726154c10ec22bad5bc7ec']='fping.conf'
['e3023092e3b2bbb5351e0fe6682f4fe9']='health_alarm_notify.conf'
+ ['e3112d8e06fa77888aab02e8fcd22e25']='apps_groups.conf'
+ ['e3996f70a4b09315b4a64e3df7d34d43']='python.d/rabbitmq.conf'
['e3d100c2d0347c08efbf6245e05620c6']='python.d/fail2ban.conf'
['e3e5bc57335c489f01b8559f5c70e112']='python.d/squid.conf'
['e40947d22f7ed5359f12fc89e3512963']='python.d/dovecot.conf'
@@ -390,12 +431,18 @@ declare -A configs_signatures=(
['eaa7beb935cae9c48a40fb934eb105a7']='health.d/web_log.conf'
['eb5168f0b516bc982aac45e59da6e52e']='health.d/nginx.conf'
['eb748d6fb69d11b0d29c5794657e206c']='health.d/qos.conf'
+ ['eb9fedc3c1dface77312d9bf48f673a8']='stream.conf'
['ebd0612ccc5807524ebb2b647e3e56c9']='apps_groups.conf'
['ecd3aa97e2581f88eb466d6612690ef2']='charts.d/nginx.conf'
+ ['ed43efac299c31f8fd5e2abccff30071']='python.d/samba.conf'
+ ['ed80e6b2cfc8b08adea7027fc03daa68']='python.d.conf'
['ee5343881744e6a97e6ee5cdd329cfb8']='health.d/retroshare.conf'
['ef1861bf5725d91e773cbdba05687597']='python.d.conf'
['ef9916ea144878a9f37cbb6b1b29da10']='health.d/squid.conf'
+ ['f075be84c5bfac7e34de2a091841360c']='statsd.d/example.conf'
+ ['f0a86c5bae3c4b32b266dacbf74ca4a3']='python.d/web_log.conf'
['f1446cb3f1a905ee06defa2aa15ee806']='python.d/web_log.conf'
+ ['f1682835e3414f60284c13bf1662e50f']='health.d/web_log.conf'
['f2f1b8656f5011e965ac45b818cf668d']='apps_groups.conf'
['f42df9f13abfae2426519c6728b34882']='charts.d/example.conf'
['f4c5d88c34d3fb853498124177cc77f1']='python.d.conf'
@@ -410,6 +457,8 @@ declare -A configs_signatures=(
['f96acba4b14b0c1b50d0187a04416151']='health_alarm_notify.conf'
['f9be549a849d023595d19d5d74263e0f']='health.d/tcp_resets.conf'
['fa4396513b358d6ec6a7f5bfb08439b8']='health.d/net.conf'
+ ['fbdb6f5d3906d3d8ea4e28f6ba6965a6']='python.d/go_expvar.conf'
+ ['fc40b83f173bc4676d686867a8369a62']='python.d/dns_query_time.conf'
['fd3164e6e8cb6726706267eae49aa082']='health_alarm_notify.conf'
['fdd11640ba626cc2064c2fe3ea3eee4c']='health.d/cpu.conf'
['fde44f62c8d7e52f09705cd273fae6b1']='charts.d/tomcat.conf'
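
The entries added above extend configs_signatures, the bash associative array that maps the MD5 sum of every stock configuration file netdata has shipped to its file name; an installer consulting it can tell whether an existing file is an untouched stock copy (safe to replace on upgrade) or carries user edits (must be preserved). A minimal sketch of how such a table can be consulted — the function name and the md5sum invocation here are illustrative, not the installer's actual code:

    # assumes configs_signatures has been declared, as in the file above
    config_is_stock() {
        local file="$1" sum
        sum="$(md5sum <"${file}" | cut -d ' ' -f 1)"
        # a hit means the file matches some shipped version verbatim
        [ -n "${configs_signatures[${sum}]}" ]
    }

    config_is_stock /etc/netdata/health.d/net.conf && echo "unmodified stock file"
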
diff --git a/configure b/configure
index f638780b..6a9d15b9 100755
--- a/configure
+++ b/configure
@@ -1,11 +1,9 @@
#! /bin/sh
# Guess values for system-dependent variables and create Makefiles.
-# Generated by GNU Autoconf 2.68 for netdata 1.6.0.
+# Generated by GNU Autoconf 2.69 for netdata 1.7.0.
#
#
-# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1998, 1999, 2000, 2001,
-# 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software
-# Foundation, Inc.
+# Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc.
#
#
# This configure script is free software; the Free Software Foundation
@@ -134,6 +132,31 @@ export LANGUAGE
# CDPATH.
(unset CDPATH) >/dev/null 2>&1 && unset CDPATH
+# Use a proper internal environment variable to ensure we don't fall
+ # into an infinite loop, continuously re-executing ourselves.
+ if test x"${_as_can_reexec}" != xno && test "x$CONFIG_SHELL" != x; then
+ _as_can_reexec=no; export _as_can_reexec;
+ # We cannot yet assume a decent shell, so we have to provide a
+# neutralization value for shells without unset; and this also
+# works around shells that cannot unset nonexistent variables.
+# Preserve -v and -x to the replacement shell.
+BASH_ENV=/dev/null
+ENV=/dev/null
+(unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV
+case $- in # ((((
+ *v*x* | *x*v* ) as_opts=-vx ;;
+ *v* ) as_opts=-v ;;
+ *x* ) as_opts=-x ;;
+ * ) as_opts= ;;
+esac
+exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"}
+# Admittedly, this is quite paranoid, since all the known shells bail
+# out after a failed `exec'.
+$as_echo "$0: could not re-execute with $CONFIG_SHELL" >&2
+as_fn_exit 255
+ fi
+ # We don't want this to propagate to other subprocesses.
+ { _as_can_reexec=; unset _as_can_reexec;}
if test "x$CONFIG_SHELL" = x; then
as_bourne_compatible="if test -n \"\${ZSH_VERSION+set}\" && (emulate sh) >/dev/null 2>&1; then :
emulate sh
@@ -167,7 +190,8 @@ if ( set x; as_fn_ret_success y && test x = \"\$1\" ); then :
else
exitcode=1; echo positional parameters were not saved.
fi
-test x\$exitcode = x0 || exit 1"
+test x\$exitcode = x0 || exit 1
+test -x / || exit 1"
as_suggested=" as_lineno_1=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_1a=\$LINENO
as_lineno_2=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_2a=\$LINENO
eval 'test \"x\$as_lineno_1'\$as_run'\" != \"x\$as_lineno_2'\$as_run'\" &&
@@ -212,21 +236,25 @@ IFS=$as_save_IFS
if test "x$CONFIG_SHELL" != x; then :
- # We cannot yet assume a decent shell, so we have to provide a
- # neutralization value for shells without unset; and this also
- # works around shells that cannot unset nonexistent variables.
- # Preserve -v and -x to the replacement shell.
- BASH_ENV=/dev/null
- ENV=/dev/null
- (unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV
- export CONFIG_SHELL
- case $- in # ((((
- *v*x* | *x*v* ) as_opts=-vx ;;
- *v* ) as_opts=-v ;;
- *x* ) as_opts=-x ;;
- * ) as_opts= ;;
- esac
- exec "$CONFIG_SHELL" $as_opts "$as_myself" ${1+"$@"}
+ export CONFIG_SHELL
+ # We cannot yet assume a decent shell, so we have to provide a
+# neutralization value for shells without unset; and this also
+# works around shells that cannot unset nonexistent variables.
+# Preserve -v and -x to the replacement shell.
+BASH_ENV=/dev/null
+ENV=/dev/null
+(unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV
+case $- in # ((((
+ *v*x* | *x*v* ) as_opts=-vx ;;
+ *v* ) as_opts=-v ;;
+ *x* ) as_opts=-x ;;
+ * ) as_opts= ;;
+esac
+exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"}
+# Admittedly, this is quite paranoid, since all the known shells bail
+# out after a failed `exec'.
+$as_echo "$0: could not re-execute with $CONFIG_SHELL" >&2
+exit 255
fi
if test x$as_have_required = xno; then :
@@ -328,6 +356,14 @@ $as_echo X"$as_dir" |
} # as_fn_mkdir_p
+
+# as_fn_executable_p FILE
+# -----------------------
+# Test if FILE is an executable regular file.
+as_fn_executable_p ()
+{
+ test -f "$1" && test -x "$1"
+} # as_fn_executable_p
# as_fn_append VAR VALUE
# ----------------------
# Append the text in VALUE to the end of the definition contained in VAR. Take
@@ -449,6 +485,10 @@ as_cr_alnum=$as_cr_Letters$as_cr_digits
chmod +x "$as_me.lineno" ||
{ $as_echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2; as_fn_exit 1; }
+ # If we had to re-execute with $CONFIG_SHELL, we're ensured to have
+ # already done that, so ensure we don't try to do so again and fall
+ # in an infinite loop. This has already happened in practice.
+ _as_can_reexec=no; export _as_can_reexec
# Don't try to exec as it changes $[0], causing all sort of problems
# (the dirname of $[0] is not the place where we might find the
# original and so on. Autoconf is especially sensitive to this).
@@ -483,16 +523,16 @@ if (echo >conf$$.file) 2>/dev/null; then
# ... but there are two gotchas:
# 1) On MSYS, both `ln -s file dir' and `ln file dir' fail.
# 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable.
- # In both cases, we have to default to `cp -p'.
+ # In both cases, we have to default to `cp -pR'.
ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe ||
- as_ln_s='cp -p'
+ as_ln_s='cp -pR'
elif ln conf$$.file conf$$ 2>/dev/null; then
as_ln_s=ln
else
- as_ln_s='cp -p'
+ as_ln_s='cp -pR'
fi
else
- as_ln_s='cp -p'
+ as_ln_s='cp -pR'
fi
rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file
rmdir conf$$.dir 2>/dev/null
@@ -504,28 +544,8 @@ else
as_mkdir_p=false
fi
-if test -x / >/dev/null 2>&1; then
- as_test_x='test -x'
-else
- if ls -dL / >/dev/null 2>&1; then
- as_ls_L_option=L
- else
- as_ls_L_option=
- fi
- as_test_x='
- eval sh -c '\''
- if test -d "$1"; then
- test -d "$1/.";
- else
- case $1 in #(
- -*)set "./$1";;
- esac;
- case `ls -ld'$as_ls_L_option' "$1" 2>/dev/null` in #((
- ???[sx]*):;;*)false;;esac;fi
- '\'' sh
- '
-fi
-as_executable_p=$as_test_x
+as_test_x='test -x'
+as_executable_p=as_fn_executable_p
# Sed expression to map a string onto a valid CPP name.
as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'"
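
The hunk above is part of the Autoconf 2.68 → 2.69 regeneration: the elaborate `ls -dL` fallback that emulated `test -x` on ancient shells is dropped, replaced by the new as_fn_executable_p helper, and every $as_test_x call site later in the script changes to match. The helper is small enough to try interactively:

    as_fn_executable_p () { test -f "$1" && test -x "$1"; }   # executable *regular file* only
    as_fn_executable_p /bin/sh && echo "found"
    as_fn_executable_p /tmp    || echo "directories are rejected"   # test -f fails on them
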
@@ -557,8 +577,8 @@ MAKEFLAGS=
# Identity of this package.
PACKAGE_NAME='netdata'
PACKAGE_TARNAME='netdata'
-PACKAGE_VERSION='1.6.0'
-PACKAGE_STRING='netdata 1.6.0'
+PACKAGE_VERSION='1.7.0'
+PACKAGE_STRING='netdata 1.7.0'
PACKAGE_BUGREPORT=''
PACKAGE_URL=''
@@ -693,6 +713,10 @@ build_os
build_vendor
build_cpu
build
+AM_BACKSLASH
+AM_DEFAULT_VERBOSITY
+AM_DEFAULT_V
+AM_V
am__untar
am__tar
AMTAR
@@ -763,6 +787,7 @@ ac_subst_files=''
ac_user_opts='
enable_option_checking
enable_maintainer_mode
+enable_silent_rules
enable_dependency_tracking
enable_plugin_nfacct
enable_plugin_freeipmi
@@ -1261,8 +1286,6 @@ target=$target_alias
if test "x$host_alias" != x; then
if test "x$build_alias" = x; then
cross_compiling=maybe
- $as_echo "$as_me: WARNING: if you wanted to set the --build type, don't use --host.
- If a cross compiler is detected then cross compile mode will be used" >&2
elif test "x$build_alias" != "x$host_alias"; then
cross_compiling=yes
fi
@@ -1348,7 +1371,7 @@ if test "$ac_init_help" = "long"; then
# Omit some internal or obsolete options to make the list less imposing.
# This message is too long to be a string in the A/UX 3.1 sh.
cat <<_ACEOF
-\`configure' configures netdata 1.6.0 to adapt to many kinds of systems.
+\`configure' configures netdata 1.7.0 to adapt to many kinds of systems.
Usage: $0 [OPTION]... [VAR=VALUE]...
@@ -1418,7 +1441,7 @@ fi
if test -n "$ac_init_help"; then
case $ac_init_help in
- short | recursive ) echo "Configuration of netdata 1.6.0:";;
+ short | recursive ) echo "Configuration of netdata 1.7.0:";;
esac
cat <<\_ACEOF
@@ -1426,10 +1449,15 @@ Optional Features:
--disable-option-checking ignore unrecognized --enable/--with options
--disable-FEATURE do not include FEATURE (same as --enable-FEATURE=no)
--enable-FEATURE[=ARG] include FEATURE [ARG=yes]
- --enable-maintainer-mode enable make rules and dependencies not useful
- (and sometimes confusing) to the casual installer
- --disable-dependency-tracking speeds up one-time build
- --enable-dependency-tracking do not reject slow dependency extractors
+ --enable-maintainer-mode
+ enable make rules and dependencies not useful (and
+ sometimes confusing) to the casual installer
+ --enable-silent-rules less verbose build output (undo: "make V=1")
+ --disable-silent-rules verbose build output (undo: "make V=0")
+ --enable-dependency-tracking
+ do not reject slow dependency extractors
+ --disable-dependency-tracking
+ speeds up one-time build
--enable-plugin-nfacct enable nfacct plugin, requires root
--enable-plugin-freeipmi
enable freeipmi plugin
@@ -1554,10 +1582,10 @@ fi
test -n "$ac_init_help" && exit $ac_status
if $ac_init_version; then
cat <<\_ACEOF
-netdata configure 1.6.0
-generated by GNU Autoconf 2.68
+netdata configure 1.7.0
+generated by GNU Autoconf 2.69
-Copyright (C) 2010 Free Software Foundation, Inc.
+Copyright (C) 2012 Free Software Foundation, Inc.
This configure script is free software; the Free Software Foundation
gives unlimited permission to copy, distribute and modify it.
_ACEOF
@@ -1830,7 +1858,7 @@ $as_echo "$ac_try_echo"; } >&5
test ! -s conftest.err
} && test -s conftest$ac_exeext && {
test "$cross_compiling" = yes ||
- $as_test_x conftest$ac_exeext
+ test -x conftest$ac_exeext
}; then :
ac_retval=0
else
@@ -1995,7 +2023,8 @@ int
main ()
{
static int test_array [1 - 2 * !(0 < ($ac_type) ((((($ac_type) 1 << N) << N) - 1) * 2 + 1))];
-test_array [0] = 0
+test_array [0] = 0;
+return test_array [0];
;
return 0;
@@ -2011,7 +2040,8 @@ main ()
{
static int test_array [1 - 2 * !(($ac_type) ((((($ac_type) 1 << N) << N) - 1) * 2 + 1)
< ($ac_type) ((((($ac_type) 1 << N) << N) - 1) * 2 + 2))];
-test_array [0] = 0
+test_array [0] = 0;
+return test_array [0];
;
return 0;
@@ -2068,7 +2098,8 @@ int
main ()
{
static int test_array [1 - 2 * !((($ac_type) -1 >> ($2 / 2 - 1)) >> ($2 / 2 - 1) == 3)];
-test_array [0] = 0
+test_array [0] = 0;
+return test_array [0];
;
return 0;
@@ -2160,7 +2191,8 @@ int
main ()
{
static int test_array [1 - 2 * !(($2) >= 0)];
-test_array [0] = 0
+test_array [0] = 0;
+return test_array [0];
;
return 0;
@@ -2176,7 +2208,8 @@ int
main ()
{
static int test_array [1 - 2 * !(($2) <= $ac_mid)];
-test_array [0] = 0
+test_array [0] = 0;
+return test_array [0];
;
return 0;
@@ -2202,7 +2235,8 @@ int
main ()
{
static int test_array [1 - 2 * !(($2) < 0)];
-test_array [0] = 0
+test_array [0] = 0;
+return test_array [0];
;
return 0;
@@ -2218,7 +2252,8 @@ int
main ()
{
static int test_array [1 - 2 * !(($2) >= $ac_mid)];
-test_array [0] = 0
+test_array [0] = 0;
+return test_array [0];
;
return 0;
@@ -2252,7 +2287,8 @@ int
main ()
{
static int test_array [1 - 2 * !(($2) <= $ac_mid)];
-test_array [0] = 0
+test_array [0] = 0;
+return test_array [0];
;
return 0;
@@ -2324,8 +2360,8 @@ cat >config.log <<_ACEOF
This file contains any messages produced by compilers while
running configure, to aid debugging if configure makes a mistake.
-It was created by netdata $as_me 1.6.0, which was
-generated by GNU Autoconf 2.68. Invocation command line was
+It was created by netdata $as_me 1.7.0, which was
+generated by GNU Autoconf 2.69. Invocation command line was
$ $0 $@
@@ -2705,7 +2741,7 @@ $as_echo "$as_me: ***************** MAINTAINER MODE *****************" >&6;}
PACKAGE_BUILT_DATE=$(date '+%d %b %Y')
fi
-PACKAGE_RPM_VERSION="1.6.0"
+PACKAGE_RPM_VERSION="1.7.0"
@@ -2746,7 +2782,7 @@ ac_config_headers="$ac_config_headers config.h"
-am__api_version='1.11'
+am__api_version='1.14'
# Find a good install program. We prefer a C program (faster),
# so one script is as good as another. But avoid the broken or
@@ -2785,7 +2821,7 @@ case $as_dir/ in #((
# by default.
for ac_prog in ginstall scoinst install; do
for ac_exec_ext in '' $ac_executable_extensions; do
- if { test -f "$as_dir/$ac_prog$ac_exec_ext" && $as_test_x "$as_dir/$ac_prog$ac_exec_ext"; }; then
+ if as_fn_executable_p "$as_dir/$ac_prog$ac_exec_ext"; then
if test $ac_prog = install &&
grep dspmsg "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then
# AIX install. It has an incompatible calling convention.
@@ -2843,9 +2879,6 @@ test -z "$INSTALL_DATA" && INSTALL_DATA='${INSTALL} -m 644'
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether build environment is sane" >&5
$as_echo_n "checking whether build environment is sane... " >&6; }
-# Just in case
-sleep 1
-echo timestamp > conftest.file
# Reject unsafe characters in $srcdir or the absolute working directory
# name. Accept space and tab only in the latter.
am_lf='
@@ -2856,32 +2889,40 @@ case `pwd` in
esac
case $srcdir in
*[\\\"\#\$\&\'\`$am_lf\ \ ]*)
- as_fn_error $? "unsafe srcdir value: \`$srcdir'" "$LINENO" 5;;
+ as_fn_error $? "unsafe srcdir value: '$srcdir'" "$LINENO" 5;;
esac
-# Do `set' in a subshell so we don't clobber the current shell's
+# Do 'set' in a subshell so we don't clobber the current shell's
# arguments. Must try -L first in case configure is actually a
# symlink; some systems play weird games with the mod time of symlinks
# (eg FreeBSD returns the mod time of the symlink's containing
# directory).
if (
- set X `ls -Lt "$srcdir/configure" conftest.file 2> /dev/null`
- if test "$*" = "X"; then
- # -L didn't work.
- set X `ls -t "$srcdir/configure" conftest.file`
- fi
- rm -f conftest.file
- if test "$*" != "X $srcdir/configure conftest.file" \
- && test "$*" != "X conftest.file $srcdir/configure"; then
-
- # If neither matched, then we have a broken ls. This can happen
- # if, for instance, CONFIG_SHELL is bash and it inherits a
- # broken ls alias from the environment. This has actually
- # happened. Such a system could not be considered "sane".
- as_fn_error $? "ls -t appears to fail. Make sure there is not a broken
-alias in your environment" "$LINENO" 5
- fi
-
+ am_has_slept=no
+ for am_try in 1 2; do
+ echo "timestamp, slept: $am_has_slept" > conftest.file
+ set X `ls -Lt "$srcdir/configure" conftest.file 2> /dev/null`
+ if test "$*" = "X"; then
+ # -L didn't work.
+ set X `ls -t "$srcdir/configure" conftest.file`
+ fi
+ if test "$*" != "X $srcdir/configure conftest.file" \
+ && test "$*" != "X conftest.file $srcdir/configure"; then
+
+ # If neither matched, then we have a broken ls. This can happen
+ # if, for instance, CONFIG_SHELL is bash and it inherits a
+ # broken ls alias from the environment. This has actually
+ # happened. Such a system could not be considered "sane".
+ as_fn_error $? "ls -t appears to fail. Make sure there is not a broken
+ alias in your environment" "$LINENO" 5
+ fi
+ if test "$2" = conftest.file || test $am_try -eq 2; then
+ break
+ fi
+ # Just in case.
+ sleep 1
+ am_has_slept=yes
+ done
test "$2" = conftest.file
)
then
@@ -2893,6 +2934,16 @@ Check your system clock" "$LINENO" 5
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
$as_echo "yes" >&6; }
+# If we didn't sleep, we still need to ensure time stamps of config.status and
+# generated files are strictly newer.
+am_sleep_pid=
+if grep 'slept: no' conftest.file >/dev/null 2>&1; then
+ ( sleep 1 ) &
+ am_sleep_pid=$!
+fi
+
+rm -f conftest.file
+
test "$program_prefix" != NONE &&
program_transform_name="s&^&$program_prefix&;$program_transform_name"
# Use a double $ so make ignores it.
@@ -2915,12 +2966,12 @@ if test x"${MISSING+set}" != xset; then
esac
fi
# Use eval to expand $SHELL
-if eval "$MISSING --run true"; then
- am_missing_run="$MISSING --run "
+if eval "$MISSING --is-lightweight"; then
+ am_missing_run="$MISSING "
else
am_missing_run=
- { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: \`missing' script is too old or missing" >&5
-$as_echo "$as_me: WARNING: \`missing' script is too old or missing" >&2;}
+ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: 'missing' script is too old or missing" >&5
+$as_echo "$as_me: WARNING: 'missing' script is too old or missing" >&2;}
fi
if test x"${install_sh}" != xset; then
@@ -2932,10 +2983,10 @@ if test x"${install_sh}" != xset; then
esac
fi
-# Installed binaries are usually stripped using `strip' when the user
-# run `make install-strip'. However `strip' might not be the right
+# Installed binaries are usually stripped using 'strip' when the user
+# run "make install-strip". However 'strip' might not be the right
# tool to use in cross-compilation environments, therefore Automake
-# will honor the `STRIP' environment variable to overrule this program.
+# will honor the 'STRIP' environment variable to overrule this program.
if test "$cross_compiling" != no; then
if test -n "$ac_tool_prefix"; then
# Extract the first word of "${ac_tool_prefix}strip", so it can be a program name with args.
@@ -2954,7 +3005,7 @@ do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
for ac_exec_ext in '' $ac_executable_extensions; do
- if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
ac_cv_prog_STRIP="${ac_tool_prefix}strip"
$as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
break 2
@@ -2994,7 +3045,7 @@ do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
for ac_exec_ext in '' $ac_executable_extensions; do
- if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
ac_cv_prog_ac_ct_STRIP="strip"
$as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
break 2
@@ -3045,7 +3096,7 @@ do
test -z "$as_dir" && as_dir=.
for ac_prog in mkdir gmkdir; do
for ac_exec_ext in '' $ac_executable_extensions; do
- { test -f "$as_dir/$ac_prog$ac_exec_ext" && $as_test_x "$as_dir/$ac_prog$ac_exec_ext"; } || continue
+ as_fn_executable_p "$as_dir/$ac_prog$ac_exec_ext" || continue
case `"$as_dir/$ac_prog$ac_exec_ext" --version 2>&1` in #(
'mkdir (GNU coreutils) '* | \
'mkdir (coreutils) '* | \
@@ -3074,12 +3125,6 @@ fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $MKDIR_P" >&5
$as_echo "$MKDIR_P" >&6; }
-mkdir_p="$MKDIR_P"
-case $mkdir_p in
- [\\/$]* | ?:[\\/]*) ;;
- */*) mkdir_p="\$(top_builddir)/$mkdir_p" ;;
-esac
-
for ac_prog in gawk mawk nawk awk
do
# Extract the first word of "$ac_prog", so it can be a program name with args.
@@ -3098,7 +3143,7 @@ do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
for ac_exec_ext in '' $ac_executable_extensions; do
- if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
ac_cv_prog_AWK="$ac_prog"
$as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
break 2
@@ -3162,6 +3207,45 @@ else
fi
rmdir .tst 2>/dev/null
+# Check whether --enable-silent-rules was given.
+if test "${enable_silent_rules+set}" = set; then :
+ enableval=$enable_silent_rules;
+fi
+
+case $enable_silent_rules in # (((
+ yes) AM_DEFAULT_VERBOSITY=0;;
+ no) AM_DEFAULT_VERBOSITY=1;;
+ *) AM_DEFAULT_VERBOSITY=1;;
+esac
+am_make=${MAKE-make}
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $am_make supports nested variables" >&5
+$as_echo_n "checking whether $am_make supports nested variables... " >&6; }
+if ${am_cv_make_support_nested_variables+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if $as_echo 'TRUE=$(BAR$(V))
+BAR0=false
+BAR1=true
+V=1
+am__doit:
+ @$(TRUE)
+.PHONY: am__doit' | $am_make -f - >/dev/null 2>&1; then
+ am_cv_make_support_nested_variables=yes
+else
+ am_cv_make_support_nested_variables=no
+fi
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_make_support_nested_variables" >&5
+$as_echo "$am_cv_make_support_nested_variables" >&6; }
+if test $am_cv_make_support_nested_variables = yes; then
+ AM_V='$(V)'
+ AM_DEFAULT_V='$(AM_DEFAULT_VERBOSITY)'
+else
+ AM_V=$AM_DEFAULT_VERBOSITY
+ AM_DEFAULT_V=$AM_DEFAULT_VERBOSITY
+fi
+AM_BACKSLASH='\'
+
if test "`cd $srcdir && pwd`" != "`pwd`"; then
# Use -I$(srcdir) only when $(srcdir) != ., so that make's output
# is not polluted with repeated "-I."
@@ -3184,7 +3268,7 @@ fi
# Define the identity of the package.
PACKAGE='netdata'
- VERSION='1.6.0'
+ VERSION='1.7.0'
cat >>confdefs.h <<_ACEOF
@@ -3212,18 +3296,70 @@ AUTOHEADER=${AUTOHEADER-"${am_missing_run}autoheader"}
MAKEINFO=${MAKEINFO-"${am_missing_run}makeinfo"}
+# For better backward compatibility. To be removed once Automake 1.9.x
+# dies out for good. For more background, see:
+# <http://lists.gnu.org/archive/html/automake/2012-07/msg00001.html>
+# <http://lists.gnu.org/archive/html/automake/2012-07/msg00014.html>
+mkdir_p='$(MKDIR_P)'
+
# We need awk for the "check" target. The system "awk" is bad on
# some platforms.
# Always define AMTAR for backward compatibility. Yes, it's still used
# in the wild :-( We should find a proper way to deprecate it ...
AMTAR='$${TAR-tar}'
+
+# We'll loop over all known methods to create a tar archive until one works.
+_am_tools='gnutar pax cpio none'
+
am__tar='$${TAR-tar} chof - "$$tardir"' am__untar='$${TAR-tar} xf -'
+
+# POSIX will say in a future version that running "rm -f" with no argument
+# is OK; and we want to be able to make that assumption in our Makefile
+# recipes. So use an aggressive probe to check that the usage we want is
+# actually supported "in the wild" to an acceptable degree.
+# See automake bug#10828.
+# To make any issue more visible, cause the running configure to be aborted
+# by default if the 'rm' program in use doesn't match our expectations; the
+# user can still override this though.
+if rm -f && rm -fr && rm -rf; then : OK; else
+ cat >&2 <<'END'
+Oops!
+
+Your 'rm' program seems unable to run without file operands specified
+on the command line, even when the '-f' option is present. This is contrary
+to the behaviour of most rm programs out there, and not conforming with
+the upcoming POSIX standard: <http://austingroupbugs.net/view.php?id=542>
+
+Please tell bug-automake@gnu.org about your system, including the value
+of your $PATH and any error possibly output before this message. This
+can help us improve future automake versions.
+
+END
+ if test x"$ACCEPT_INFERIOR_RM_PROGRAM" = x"yes"; then
+ echo 'Configuration will proceed anyway, since you have set the' >&2
+ echo 'ACCEPT_INFERIOR_RM_PROGRAM variable to "yes"' >&2
+ echo >&2
+ else
+ cat >&2 <<'END'
+Aborting the configuration process, to ensure you take notice of the issue.
+
+You can download and install GNU coreutils to get an 'rm' implementation
+that behaves properly: <http://www.gnu.org/software/coreutils/>.
+
+If you want to complete the configuration process using your problematic
+'rm' anyway, export the environment variable ACCEPT_INFERIOR_RM_PROGRAM
+to "yes", and re-run configure.
+
+END
+ as_fn_error $? "Your 'rm' program is bad, sorry." "$LINENO" 5
+ fi
+fi
# Make sure we can run config.sub.
$SHELL "$ac_aux_dir/config.sub" sun4 >/dev/null 2>&1 ||
as_fn_error $? "cannot run $SHELL $ac_aux_dir/config.sub" "$LINENO" 5
@@ -3317,7 +3453,7 @@ do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
for ac_exec_ext in '' $ac_executable_extensions; do
- if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
ac_cv_prog_CC="${ac_tool_prefix}gcc"
$as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
break 2
@@ -3357,7 +3493,7 @@ do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
for ac_exec_ext in '' $ac_executable_extensions; do
- if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
ac_cv_prog_ac_ct_CC="gcc"
$as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
break 2
@@ -3410,7 +3546,7 @@ do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
for ac_exec_ext in '' $ac_executable_extensions; do
- if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
ac_cv_prog_CC="${ac_tool_prefix}cc"
$as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
break 2
@@ -3451,7 +3587,7 @@ do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
for ac_exec_ext in '' $ac_executable_extensions; do
- if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
if test "$as_dir/$ac_word$ac_exec_ext" = "/usr/ucb/cc"; then
ac_prog_rejected=yes
continue
@@ -3509,7 +3645,7 @@ do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
for ac_exec_ext in '' $ac_executable_extensions; do
- if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
ac_cv_prog_CC="$ac_tool_prefix$ac_prog"
$as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
break 2
@@ -3553,7 +3689,7 @@ do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
for ac_exec_ext in '' $ac_executable_extensions; do
- if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
ac_cv_prog_ac_ct_CC="$ac_prog"
$as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
break 2
@@ -3999,8 +4135,7 @@ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h. */
#include <stdarg.h>
#include <stdio.h>
-#include <sys/types.h>
-#include <sys/stat.h>
+struct stat;
/* Most of the following tests are stolen from RCS 5.7's src/conf.sh. */
struct buf { int x; };
FILE * (*rcsopen) (struct buf *, struct stat *, int);
@@ -4084,6 +4219,65 @@ ac_cpp='$CPP $CPPFLAGS'
ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC understands -c and -o together" >&5
+$as_echo_n "checking whether $CC understands -c and -o together... " >&6; }
+if ${am_cv_prog_cc_c_o+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+ # Make sure it works both with $CC and with simple cc.
+ # Following AC_PROG_CC_C_O, we do the test twice because some
+ # compilers refuse to overwrite an existing .o file with -o,
+ # though they will create one.
+ am_cv_prog_cc_c_o=yes
+ for am_i in 1 2; do
+ if { echo "$as_me:$LINENO: $CC -c conftest.$ac_ext -o conftest2.$ac_objext" >&5
+ ($CC -c conftest.$ac_ext -o conftest2.$ac_objext) >&5 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } \
+ && test -f conftest2.$ac_objext; then
+ : OK
+ else
+ am_cv_prog_cc_c_o=no
+ break
+ fi
+ done
+ rm -f core conftest*
+ unset am_i
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_prog_cc_c_o" >&5
+$as_echo "$am_cv_prog_cc_c_o" >&6; }
+if test "$am_cv_prog_cc_c_o" != yes; then
+ # Losing compiler, so override with the script.
+ # FIXME: It is wrong to rewrite CC.
+ # But if we don't then we get into trouble of one sort or another.
+ # A longer-term fix would be to have automake use am__CC in this case,
+ # and then we could set am__CC="\$(top_srcdir)/compile \$(CC)"
+ CC="$am_aux_dir/compile $CC"
+fi
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
DEPDIR="${am__leading_dot}deps"
ac_config_commands="$ac_config_commands depfiles"
@@ -4103,7 +4297,7 @@ am__quote=
_am_result=none
# First try GNU make style include.
echo "include confinc" > confmf
-# Ignore all kinds of additional output from `make'.
+# Ignore all kinds of additional output from 'make'.
case `$am_make -s -f confmf 2> /dev/null` in #(
*the\ am__doit\ target*)
am__include=include
@@ -4159,8 +4353,8 @@ else
# We make a subdir and do the tests there. Otherwise we can end up
# making bogus files that we don't know about and never remove. For
# instance it was reported that on HP-UX the gcc test will end up
- # making a dummy file named `D' -- because `-MD' means `put the output
- # in D'.
+ # making a dummy file named 'D' -- because '-MD' means "put the output
+ # in D".
rm -rf conftest.dir
mkdir conftest.dir
# Copy depcomp to subdir because otherwise we won't find it if we're
@@ -4195,16 +4389,16 @@ else
: > sub/conftest.c
for i in 1 2 3 4 5 6; do
echo '#include "conftst'$i'.h"' >> sub/conftest.c
- # Using `: > sub/conftst$i.h' creates only sub/conftst1.h with
- # Solaris 8's {/usr,}/bin/sh.
- touch sub/conftst$i.h
+ # Using ": > sub/conftst$i.h" creates only sub/conftst1.h with
+ # Solaris 10 /bin/sh.
+ echo '/* dummy */' > sub/conftst$i.h
done
echo "${am__include} ${am__quote}sub/conftest.Po${am__quote}" > confmf
- # We check with `-c' and `-o' for the sake of the "dashmstdout"
+ # We check with '-c' and '-o' for the sake of the "dashmstdout"
# mode. It turns out that the SunPro C++ compiler does not properly
- # handle `-M -o', and we need to detect this. Also, some Intel
- # versions had trouble with output in subdirs
+ # handle '-M -o', and we need to detect this. Also, some Intel
+ # versions had trouble with output in subdirs.
am__obj=sub/conftest.${OBJEXT-o}
am__minus_obj="-o $am__obj"
case $depmode in
@@ -4213,8 +4407,8 @@ else
test "$am__universal" = false || continue
;;
nosideeffect)
- # after this tag, mechanisms are not by side-effect, so they'll
- # only be used when explicitly requested
+ # After this tag, mechanisms are not by side-effect, so they'll
+ # only be used when explicitly requested.
if test "x$enable_dependency_tracking" = xyes; then
continue
else
@@ -4222,7 +4416,7 @@ else
fi
;;
msvc7 | msvc7msys | msvisualcpp | msvcmsys)
- # This compiler won't grok `-c -o', but also, the minuso test has
+ # This compiler won't grok '-c -o', but also, the minuso test has
# not run yet. These depmodes are late enough in the game, and
# so weak that their functioning should not be impacted.
am__obj=conftest.${OBJEXT-o}
@@ -4304,7 +4498,7 @@ do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
for ac_exec_ext in '' $ac_executable_extensions; do
- if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
ac_cv_path_PKG_CONFIG="$as_dir/$ac_word$ac_exec_ext"
$as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
break 2
@@ -4347,7 +4541,7 @@ do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
for ac_exec_ext in '' $ac_executable_extensions; do
- if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
ac_cv_path_ac_pt_PKG_CONFIG="$as_dir/$ac_word$ac_exec_ext"
$as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
break 2
@@ -4552,7 +4746,7 @@ do
for ac_prog in grep ggrep; do
for ac_exec_ext in '' $ac_executable_extensions; do
ac_path_GREP="$as_dir/$ac_prog$ac_exec_ext"
- { test -f "$ac_path_GREP" && $as_test_x "$ac_path_GREP"; } || continue
+ as_fn_executable_p "$ac_path_GREP" || continue
# Check for GNU ac_path_GREP and select it if it is found.
# Check for GNU $ac_path_GREP
case `"$ac_path_GREP" --version 2>&1` in
@@ -4618,7 +4812,7 @@ do
for ac_prog in egrep; do
for ac_exec_ext in '' $ac_executable_extensions; do
ac_path_EGREP="$as_dir/$ac_prog$ac_exec_ext"
- { test -f "$ac_path_EGREP" && $as_test_x "$ac_path_EGREP"; } || continue
+ as_fn_executable_p "$ac_path_EGREP" || continue
# Check for GNU ac_path_EGREP and select it if it is found.
# Check for GNU $ac_path_EGREP
case `"$ac_path_EGREP" --version 2>&1` in
@@ -4825,8 +5019,8 @@ else
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h. */
-# define __EXTENSIONS__ 1
- $ac_includes_default
+# define __EXTENSIONS__ 1
+ $ac_includes_default
int
main ()
{
@@ -5323,6 +5517,17 @@ _ACEOF
fi
done
+for ac_func in recvmmsg
+do :
+ ac_fn_c_check_func "$LINENO" "recvmmsg" "ac_cv_func_recvmmsg"
+if test "x$ac_cv_func_recvmmsg" = xyes; then :
+ cat >>confdefs.h <<_ACEOF
+#define HAVE_RECVMMSG 1
+_ACEOF
+
+fi
+done
+
ac_fn_c_find_intX_t "$LINENO" "8" "ac_cv_c_int8_t"
case $ac_cv_c_int8_t in #(
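
A functional addition amid the mechanical churn: configure now probes for recvmmsg(2) and defines HAVE_RECVMMSG when the libc provides it (mirrored by the AC_CHECK_FUNCS change in configure.ac further down). Batch-receiving UDP datagrams is most plausibly for the new statsd listener this release introduces — the statsd.d/example.conf signatures above hint at it — though that is an inference, not something this hunk states. Verifying the probe result after a build:

    ./configure >configure.log 2>&1
    grep HAVE_RECVMMSG config.h   # "#define HAVE_RECVMMSG 1" when the libc has it
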
@@ -5601,13 +5806,19 @@ int
main (int argc, char **argv)
{
volatile unsigned long ul1 = 1, ul2 = 0, ul3 = 2;
+ __atomic_load_n(&ul1, __ATOMIC_SEQ_CST);
__atomic_compare_exchange(&ul1, &ul2, &ul3, 1, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
__atomic_fetch_add(&ul1, 1, __ATOMIC_SEQ_CST);
__atomic_fetch_sub(&ul3, 1, __ATOMIC_SEQ_CST);
+ __atomic_or_fetch(&ul1, ul2, __ATOMIC_SEQ_CST);
+ __atomic_and_fetch(&ul1, ul2, __ATOMIC_SEQ_CST);
volatile unsigned long long ull1 = 1, ull2 = 0, ull3 = 2;
+ __atomic_load_n(&ull1, __ATOMIC_SEQ_CST);
__atomic_compare_exchange(&ull1, &ull2, &ull3, 1, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
__atomic_fetch_add(&ull1, 1, __ATOMIC_SEQ_CST);
__atomic_fetch_sub(&ull3, 1, __ATOMIC_SEQ_CST);
+ __atomic_or_fetch(&ull1, ull2, __ATOMIC_SEQ_CST);
+ __atomic_and_fetch(&ull1, ull2, __ATOMIC_SEQ_CST);
return 0;
}
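
The __atomic probe is extended here to also exercise __atomic_load_n, __atomic_or_fetch and __atomic_and_fetch, on both unsigned long and unsigned long long, so the builtins are only reported usable when the full set compiles and links. The probe can be reproduced by hand with a reduced version of the test program above:

    cat >probe.c <<'EOF'
    int main(void) {
        volatile unsigned long ul1 = 1, ul2 = 0;
        __atomic_load_n(&ul1, __ATOMIC_SEQ_CST);
        __atomic_or_fetch(&ul1, ul2, __ATOMIC_SEQ_CST);
        __atomic_and_fetch(&ul1, ul2, __ATOMIC_SEQ_CST);
        return 0;
    }
    EOF
    cc probe.c -o probe && echo "__atomic builtins usable"
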
@@ -5770,6 +5981,7 @@ $as_echo_n "checking operating system... " >&6; }
case "$host_os" in
freebsd*)
build_target=freebsd
+ CFLAGS="${CFLAGS} -I/usr/local/include"
;;
darwin*)
build_target=macos
@@ -5982,7 +6194,7 @@ do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
for ac_exec_ext in '' $ac_executable_extensions; do
- if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
ac_cv_prog_ax_pthread_config="yes"
$as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
break 2
@@ -6174,7 +6386,7 @@ fi
#handle absolute path differently from PATH based program lookup
case "x$CC" in #(
x/*) :
- if { test -f ${CC}_r && $as_test_x ${CC}_r; }; then :
+ if as_fn_executable_p ${CC}_r; then :
PTHREAD_CC="${CC}_r"
fi ;; #(
*) :
@@ -6196,7 +6408,7 @@ do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
for ac_exec_ext in '' $ac_executable_extensions; do
- if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
ac_cv_prog_PTHREAD_CC="$ac_prog"
$as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
break 2
@@ -7149,7 +7361,113 @@ else
IPMIMONITORING_LIBS=$pkg_cv_IPMIMONITORING_LIBS
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
$as_echo "yes" >&6; }
- have_ipmimonitoring=yes
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for
+ ipmi_monitoring_sensor_readings_by_record_id,
+ ipmi_monitoring_sensor_readings_by_sensor_type,
+ ipmi_monitoring_sensor_read_sensor_number,
+ ipmi_monitoring_sensor_read_sensor_name,
+ ipmi_monitoring_sensor_read_sensor_state,
+ ipmi_monitoring_sensor_read_sensor_units,
+ ipmi_monitoring_sensor_iterator_next,
+ ipmi_monitoring_ctx_sensor_config_file,
+ ipmi_monitoring_ctx_sdr_cache_directory,
+ ipmi_monitoring_ctx_errormsg,
+ ipmi_monitoring_ctx_create
+ in -lipmimonitoring" >&5
+$as_echo_n "checking for
+ ipmi_monitoring_sensor_readings_by_record_id,
+ ipmi_monitoring_sensor_readings_by_sensor_type,
+ ipmi_monitoring_sensor_read_sensor_number,
+ ipmi_monitoring_sensor_read_sensor_name,
+ ipmi_monitoring_sensor_read_sensor_state,
+ ipmi_monitoring_sensor_read_sensor_units,
+ ipmi_monitoring_sensor_iterator_next,
+ ipmi_monitoring_ctx_sensor_config_file,
+ ipmi_monitoring_ctx_sdr_cache_directory,
+ ipmi_monitoring_ctx_errormsg,
+ ipmi_monitoring_ctx_create
+ in -lipmimonitoring... " >&6; }
+if ${ac_cv_lib_ipmimonitoring__________ipmi_monitoring_sensor_readings_by_record_id__________ipmi_monitoring_sensor_readings_by_sensor_type__________ipmi_monitoring_sensor_read_sensor_number__________ipmi_monitoring_sensor_read_sensor_name__________ipmi_monitoring_sensor_read_sensor_state__________ipmi_monitoring_sensor_read_sensor_units__________ipmi_monitoring_sensor_iterator_next__________ipmi_monitoring_ctx_sensor_config_file__________ipmi_monitoring_ctx_sdr_cache_directory__________ipmi_monitoring_ctx_errormsg__________ipmi_monitoring_ctx_create_____+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ ac_check_lib_save_LIBS=$LIBS
+LIBS="-lipmimonitoring $LIBS"
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+/* Override any GCC internal prototype to avoid an error.
+ Use char because int might match the return type of a GCC
+ builtin and then its argument prototype would still apply. */
+#ifdef __cplusplus
+extern "C"
+#endif
+char
+ ipmi_monitoring_sensor_readings_by_record_id,
+ ipmi_monitoring_sensor_readings_by_sensor_type,
+ ipmi_monitoring_sensor_read_sensor_number,
+ ipmi_monitoring_sensor_read_sensor_name,
+ ipmi_monitoring_sensor_read_sensor_state,
+ ipmi_monitoring_sensor_read_sensor_units,
+ ipmi_monitoring_sensor_iterator_next,
+ ipmi_monitoring_ctx_sensor_config_file,
+ ipmi_monitoring_ctx_sdr_cache_directory,
+ ipmi_monitoring_ctx_errormsg,
+ ipmi_monitoring_ctx_create
+ ();
+int
+main ()
+{
+return
+ ipmi_monitoring_sensor_readings_by_record_id,
+ ipmi_monitoring_sensor_readings_by_sensor_type,
+ ipmi_monitoring_sensor_read_sensor_number,
+ ipmi_monitoring_sensor_read_sensor_name,
+ ipmi_monitoring_sensor_read_sensor_state,
+ ipmi_monitoring_sensor_read_sensor_units,
+ ipmi_monitoring_sensor_iterator_next,
+ ipmi_monitoring_ctx_sensor_config_file,
+ ipmi_monitoring_ctx_sdr_cache_directory,
+ ipmi_monitoring_ctx_errormsg,
+ ipmi_monitoring_ctx_create
+ ();
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ ac_cv_lib_ipmimonitoring__________ipmi_monitoring_sensor_readings_by_record_id__________ipmi_monitoring_sensor_readings_by_sensor_type__________ipmi_monitoring_sensor_read_sensor_number__________ipmi_monitoring_sensor_read_sensor_name__________ipmi_monitoring_sensor_read_sensor_state__________ipmi_monitoring_sensor_read_sensor_units__________ipmi_monitoring_sensor_iterator_next__________ipmi_monitoring_ctx_sensor_config_file__________ipmi_monitoring_ctx_sdr_cache_directory__________ipmi_monitoring_ctx_errormsg__________ipmi_monitoring_ctx_create_____=yes
+else
+ ac_cv_lib_ipmimonitoring__________ipmi_monitoring_sensor_readings_by_record_id__________ipmi_monitoring_sensor_readings_by_sensor_type__________ipmi_monitoring_sensor_read_sensor_number__________ipmi_monitoring_sensor_read_sensor_name__________ipmi_monitoring_sensor_read_sensor_state__________ipmi_monitoring_sensor_read_sensor_units__________ipmi_monitoring_sensor_iterator_next__________ipmi_monitoring_ctx_sensor_config_file__________ipmi_monitoring_ctx_sdr_cache_directory__________ipmi_monitoring_ctx_errormsg__________ipmi_monitoring_ctx_create_____=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+LIBS=$ac_check_lib_save_LIBS
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_ipmimonitoring__________ipmi_monitoring_sensor_readings_by_record_id__________ipmi_monitoring_sensor_readings_by_sensor_type__________ipmi_monitoring_sensor_read_sensor_number__________ipmi_monitoring_sensor_read_sensor_name__________ipmi_monitoring_sensor_read_sensor_state__________ipmi_monitoring_sensor_read_sensor_units__________ipmi_monitoring_sensor_iterator_next__________ipmi_monitoring_ctx_sensor_config_file__________ipmi_monitoring_ctx_sdr_cache_directory__________ipmi_monitoring_ctx_errormsg__________ipmi_monitoring_ctx_create_____" >&5
+$as_echo "$ac_cv_lib_ipmimonitoring__________ipmi_monitoring_sensor_readings_by_record_id__________ipmi_monitoring_sensor_readings_by_sensor_type__________ipmi_monitoring_sensor_read_sensor_number__________ipmi_monitoring_sensor_read_sensor_name__________ipmi_monitoring_sensor_read_sensor_state__________ipmi_monitoring_sensor_read_sensor_units__________ipmi_monitoring_sensor_iterator_next__________ipmi_monitoring_ctx_sensor_config_file__________ipmi_monitoring_ctx_sdr_cache_directory__________ipmi_monitoring_ctx_errormsg__________ipmi_monitoring_ctx_create_____" >&6; }
+if test "x$ac_cv_lib_ipmimonitoring__________ipmi_monitoring_sensor_readings_by_record_id__________ipmi_monitoring_sensor_readings_by_sensor_type__________ipmi_monitoring_sensor_read_sensor_number__________ipmi_monitoring_sensor_read_sensor_name__________ipmi_monitoring_sensor_read_sensor_state__________ipmi_monitoring_sensor_read_sensor_units__________ipmi_monitoring_sensor_iterator_next__________ipmi_monitoring_ctx_sensor_config_file__________ipmi_monitoring_ctx_sdr_cache_directory__________ipmi_monitoring_ctx_errormsg__________ipmi_monitoring_ctx_create_____" = xyes; then :
+ ac_fn_c_check_header_mongrel "$LINENO" "ipmi_monitoring.h" "ac_cv_header_ipmi_monitoring_h" "$ac_includes_default"
+if test "x$ac_cv_header_ipmi_monitoring_h" = xyes; then :
+ ac_fn_c_check_header_mongrel "$LINENO" "ipmi_monitoring_bitmasks.h" "ac_cv_header_ipmi_monitoring_bitmasks_h" "$ac_includes_default"
+if test "x$ac_cv_header_ipmi_monitoring_bitmasks_h" = xyes; then :
+ have_ipmimonitoring=yes
+else
+ have_ipmimonitoring=no
+
+fi
+
+
+else
+ have_ipmimonitoring=no
+
+fi
+
+
+else
+ have_ipmimonitoring=no
+
+fi
+
fi
test "${enable_plugin_freeipmi}" = "yes" -a "${have_ipmimonitoring}" != "yes" && \
as_fn_error $? "ipmimonitoring required but not found. Try installing 'libipmimonitoring-dev' or 'libipmimonitoring-devel'" "$LINENO" 5
@@ -7624,6 +7942,14 @@ if test -z "${MAINTAINER_MODE_TRUE}" && test -z "${MAINTAINER_MODE_FALSE}"; then
as_fn_error $? "conditional \"MAINTAINER_MODE\" was never defined.
Usually this means the macro was only invoked conditionally." "$LINENO" 5
fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking that generated files are newer than configure" >&5
+$as_echo_n "checking that generated files are newer than configure... " >&6; }
+ if test -n "$am_sleep_pid"; then
+ # Hide warnings about reused PIDs.
+ wait $am_sleep_pid 2>/dev/null
+ fi
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: done" >&5
+$as_echo "done" >&6; }
if test -n "$EXEEXT"; then
am__EXEEXT_TRUE=
am__EXEEXT_FALSE='#'
@@ -7966,16 +8292,16 @@ if (echo >conf$$.file) 2>/dev/null; then
# ... but there are two gotchas:
# 1) On MSYS, both `ln -s file dir' and `ln file dir' fail.
# 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable.
- # In both cases, we have to default to `cp -p'.
+ # In both cases, we have to default to `cp -pR'.
ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe ||
- as_ln_s='cp -p'
+ as_ln_s='cp -pR'
elif ln conf$$.file conf$$ 2>/dev/null; then
as_ln_s=ln
else
- as_ln_s='cp -p'
+ as_ln_s='cp -pR'
fi
else
- as_ln_s='cp -p'
+ as_ln_s='cp -pR'
fi
rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file
rmdir conf$$.dir 2>/dev/null
@@ -8035,28 +8361,16 @@ else
as_mkdir_p=false
fi
-if test -x / >/dev/null 2>&1; then
- as_test_x='test -x'
-else
- if ls -dL / >/dev/null 2>&1; then
- as_ls_L_option=L
- else
- as_ls_L_option=
- fi
- as_test_x='
- eval sh -c '\''
- if test -d "$1"; then
- test -d "$1/.";
- else
- case $1 in #(
- -*)set "./$1";;
- esac;
- case `ls -ld'$as_ls_L_option' "$1" 2>/dev/null` in #((
- ???[sx]*):;;*)false;;esac;fi
- '\'' sh
- '
-fi
-as_executable_p=$as_test_x
+
+# as_fn_executable_p FILE
+# -----------------------
+# Test if FILE is an executable regular file.
+as_fn_executable_p ()
+{
+ test -f "$1" && test -x "$1"
+} # as_fn_executable_p
+as_test_x='test -x'
+as_executable_p=as_fn_executable_p
# Sed expression to map a string onto a valid CPP name.
as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'"
@@ -8077,8 +8391,8 @@ cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
# report actual input values of CONFIG_FILES etc. instead of their
# values after options handling.
ac_log="
-This file was extended by netdata $as_me 1.6.0, which was
-generated by GNU Autoconf 2.68. Invocation command line was
+This file was extended by netdata $as_me 1.7.0, which was
+generated by GNU Autoconf 2.69. Invocation command line was
CONFIG_FILES = $CONFIG_FILES
CONFIG_HEADERS = $CONFIG_HEADERS
@@ -8143,11 +8457,11 @@ _ACEOF
cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`"
ac_cs_version="\\
-netdata config.status 1.6.0
-configured by $0, generated by GNU Autoconf 2.68,
+netdata config.status 1.7.0
+configured by $0, generated by GNU Autoconf 2.69,
with options \\"\$ac_cs_config\\"
-Copyright (C) 2010 Free Software Foundation, Inc.
+Copyright (C) 2012 Free Software Foundation, Inc.
This config.status script is free software; the Free Software Foundation
gives unlimited permission to copy, distribute and modify it."
@@ -8238,7 +8552,7 @@ fi
_ACEOF
cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
if \$ac_cs_recheck; then
- set X '$SHELL' '$0' $ac_configure_args \$ac_configure_extra_args --no-create --no-recursion
+ set X $SHELL '$0' $ac_configure_args \$ac_configure_extra_args --no-create --no-recursion
shift
\$as_echo "running CONFIG_SHELL=$SHELL \$*" >&6
CONFIG_SHELL='$SHELL'
@@ -8881,7 +9195,7 @@ $as_echo "$as_me: executing $ac_file commands" >&6;}
case $ac_file$ac_mode in
"depfiles":C) test x"$AMDEP_TRUE" != x"" || {
- # Autoconf 2.62 quotes --file arguments for eval, but not when files
+ # Older Autoconf quotes --file arguments for eval, but not when files
# are listed without --file. Let's play safe and only enable the eval
# if we detect the quoting.
case $CONFIG_FILES in
@@ -8894,7 +9208,7 @@ $as_echo "$as_me: executing $ac_file commands" >&6;}
# Strip MF so we end up with the name of the file.
mf=`echo "$mf" | sed -e 's/:.*$//'`
# Check whether this is an Automake generated Makefile or not.
- # We used to match only the files named `Makefile.in', but
+ # We used to match only the files named 'Makefile.in', but
# some people rename them; so instead we look at the file content.
# Grep'ing the first line is not enough: some people post-process
# each Makefile.in and add a new line on top of each file to say so.
@@ -8928,21 +9242,19 @@ $as_echo X"$mf" |
continue
fi
# Extract the definition of DEPDIR, am__include, and am__quote
- # from the Makefile without running `make'.
+ # from the Makefile without running 'make'.
DEPDIR=`sed -n 's/^DEPDIR = //p' < "$mf"`
test -z "$DEPDIR" && continue
am__include=`sed -n 's/^am__include = //p' < "$mf"`
- test -z "am__include" && continue
+ test -z "$am__include" && continue
am__quote=`sed -n 's/^am__quote = //p' < "$mf"`
- # When using ansi2knr, U may be empty or an underscore; expand it
- U=`sed -n 's/^U = //p' < "$mf"`
# Find all dependency output files, they are included files with
# $(DEPDIR) in their names. We invoke sed twice because it is the
# simplest approach to changing $(DEPDIR) to its actual value in the
# expansion.
for file in `sed -n "
s/^$am__include $am__quote\(.*(DEPDIR).*\)$am__quote"'$/\1/p' <"$mf" | \
- sed -e 's/\$(DEPDIR)/'"$DEPDIR"'/g' -e 's/\$U/'"$U"'/g'`; do
+ sed -e 's/\$(DEPDIR)/'"$DEPDIR"'/g'`; do
# Make sure the directory exists.
test -f "$dirpart/$file" && continue
fdir=`$as_dirname -- "$file" ||
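
Taken together, nearly all of the configure churn above is mechanical regeneration with Autoconf 2.69 / Automake 1.14 rather than hand-written change: the version bump to 1.7.0, as_fn_executable_p replacing $as_test_x, `cp -p` fallbacks becoming `cp -pR`, the ansi2knr $U expansion dropped from depfile handling, backtick quoting in comments modernized, and the new silent-rules machinery (AM_V, AM_DEFAULT_VERBOSITY). The one user-visible knob from all of this is silent rules, used exactly as the new help text describes:

    ./configure --enable-silent-rules   # default to terse "  CC  foo.o" build lines
    make                                # quiet build
    make V=1                            # per-invocation override ("undo"), per the help text
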
diff --git a/configure.ac b/configure.ac
index 2c83830c..010e0e93 100644
--- a/configure.ac
+++ b/configure.ac
@@ -4,7 +4,7 @@
AC_PREREQ(2.60)
define([VERSION_MAJOR], [1])
-define([VERSION_MINOR], [6])
+define([VERSION_MINOR], [7])
define([VERSION_FIX], [0])
define([VERSION_NUMBER], VERSION_MAJOR[.]VERSION_MINOR[.]VERSION_FIX)
define([VERSION_SUFFIX], [])
@@ -126,6 +126,7 @@ AC_CHECK_TYPES([struct timespec, clockid_t], [], [], [[#include <time.h>]])
AC_SEARCH_LIBS([clock_gettime], [rt posix4])
AC_CHECK_FUNCS([clock_gettime])
AC_CHECK_FUNCS([sched_setscheduler sched_get_priority_min sched_get_priority_max nice])
+AC_CHECK_FUNCS([recvmmsg])
AC_TYPE_INT8_T
AC_TYPE_INT16_T
@@ -155,6 +156,7 @@ AC_MSG_CHECKING([operating system])
case "$host_os" in
freebsd*)
build_target=freebsd
+ CFLAGS="${CFLAGS} -I/usr/local/include"
;;
darwin*)
build_target=macos
@@ -341,7 +343,30 @@ AM_CONDITIONAL([ENABLE_PLUGIN_APPS], [test "${enable_plugin_apps}" = "yes"])
PKG_CHECK_MODULES(
[IPMIMONITORING],
[libipmimonitoring],
- [have_ipmimonitoring=yes],
+ [AC_CHECK_LIB([ipmimonitoring], [
+ ipmi_monitoring_sensor_readings_by_record_id,
+ ipmi_monitoring_sensor_readings_by_sensor_type,
+ ipmi_monitoring_sensor_read_sensor_number,
+ ipmi_monitoring_sensor_read_sensor_name,
+ ipmi_monitoring_sensor_read_sensor_state,
+ ipmi_monitoring_sensor_read_sensor_units,
+ ipmi_monitoring_sensor_iterator_next,
+ ipmi_monitoring_ctx_sensor_config_file,
+ ipmi_monitoring_ctx_sdr_cache_directory,
+ ipmi_monitoring_ctx_errormsg,
+ ipmi_monitoring_ctx_create
+ ],
+ [AC_CHECK_HEADER(
+ [ipmi_monitoring.h],
+ [AC_CHECK_HEADER(
+ [ipmi_monitoring_bitmasks.h],
+ [have_ipmimonitoring=yes],
+ [have_ipmimonitoring=no]
+ )],
+ [have_ipmimonitoring=no]
+ )],
+ [have_ipmimonitoring=no]
+ )],
[have_ipmimonitoring=no]
)
test "${enable_plugin_freeipmi}" = "yes" -a "${have_ipmimonitoring}" != "yes" && \
diff --git a/contrib/Makefile.am b/contrib/Makefile.am
index b58b40e2..15e9c000 100644
--- a/contrib/Makefile.am
+++ b/contrib/Makefile.am
@@ -21,6 +21,7 @@ dist_noinst_DATA = \
dist_noinst_SCRIPTS = \
debian/netdata.init \
+ nc-backend.sh \
$(NULL)
debian/changelog:
diff --git a/contrib/Makefile.in b/contrib/Makefile.in
index 29db296a..a10dfa97 100644
--- a/contrib/Makefile.in
+++ b/contrib/Makefile.in
@@ -1,9 +1,8 @@
-# Makefile.in generated by automake 1.11.3 from Makefile.am.
+# Makefile.in generated by automake 1.14.1 from Makefile.am.
# @configure_input@
-# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
-# 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 Free Software
-# Foundation, Inc.
+# Copyright (C) 1994-2013 Free Software Foundation, Inc.
+
# This Makefile.in is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
@@ -17,6 +16,51 @@
VPATH = @srcdir@
+am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
pkgdatadir = $(datadir)/@PACKAGE@
pkgincludedir = $(includedir)/@PACKAGE@
pkglibdir = $(libdir)/@PACKAGE@
@@ -36,8 +80,8 @@ POST_UNINSTALL = :
build_triplet = @build@
host_triplet = @host@
subdir = contrib
-DIST_COMMON = $(dist_noinst_DATA) $(dist_noinst_SCRIPTS) \
- $(srcdir)/Makefile.am $(srcdir)/Makefile.in
+DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \
+ $(dist_noinst_SCRIPTS) $(dist_noinst_DATA)
ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
am__aclocal_m4_deps = $(top_srcdir)/m4/ax_c___atomic.m4 \
$(top_srcdir)/m4/ax_c__generic.m4 $(top_srcdir)/m4/ax_c_lto.m4 \
@@ -54,12 +98,31 @@ CONFIG_HEADER = $(top_builddir)/config.h
CONFIG_CLEAN_FILES =
CONFIG_CLEAN_VPATH_FILES =
SCRIPTS = $(dist_noinst_SCRIPTS)
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
SOURCES =
DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
DATA = $(dist_noinst_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
AUTOCONF = @AUTOCONF@
AUTOHEADER = @AUTOHEADER@
AUTOMAKE = @AUTOMAKE@
@@ -223,6 +286,7 @@ dist_noinst_DATA = \
dist_noinst_SCRIPTS = \
debian/netdata.init \
+ nc-backend.sh \
$(NULL)
all: all-am
@@ -258,11 +322,11 @@ $(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
$(am__aclocal_m4_deps):
-tags: TAGS
-TAGS:
+tags TAGS:
+
+ctags CTAGS:
-ctags: CTAGS
-CTAGS:
+cscope cscopelist:
distdir: $(DISTFILES)
@@ -398,15 +462,16 @@ uninstall-am:
.MAKE: install-am install-strip
-.PHONY: all all-am check check-am clean clean-generic distclean \
- distclean-generic distdir dvi dvi-am html html-am info info-am \
- install install-am install-data install-data-am install-dvi \
- install-dvi-am install-exec install-exec-am install-html \
- install-html-am install-info install-info-am install-man \
- install-pdf install-pdf-am install-ps install-ps-am \
- install-strip installcheck installcheck-am installdirs \
- maintainer-clean maintainer-clean-generic mostlyclean \
- mostlyclean-generic pdf pdf-am ps ps-am uninstall uninstall-am
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags-am uninstall uninstall-am
debian/changelog:
diff --git a/contrib/debian/changelog b/contrib/debian/changelog
new file mode 100644
index 00000000..bfb07054
--- /dev/null
+++ b/contrib/debian/changelog
@@ -0,0 +1,3 @@
+netdata (1.7.0) UNRELEASED; urgency=medium
+ * Latest release
+ -- Netdata Team <> Sun, 16 Jul 2017 19:28:33 +0000
diff --git a/contrib/debian/compat b/contrib/debian/compat
new file mode 100644
index 00000000..ec635144
--- /dev/null
+++ b/contrib/debian/compat
@@ -0,0 +1 @@
+9
diff --git a/contrib/debian/control b/contrib/debian/control
new file mode 100644
index 00000000..24c5f4c4
--- /dev/null
+++ b/contrib/debian/control
@@ -0,0 +1,25 @@
+Source: netdata
+Build-Depends: debhelper (>= 9),
+ dh-autoreconf,
+ dh-systemd (>= 1.5),
+ dpkg-dev (>= 1.13.19),
+ zlib1g-dev,
+ uuid-dev
+Section: net
+Priority: optional
+Maintainer: Costa Tsaousis <costa@tsaousis.gr>
+Standards-Version: 3.9.6
+Homepage: https://github.com/firehol/netdata/wiki
+
+Package: netdata
+Architecture: any
+Depends: adduser,
+ libcap2-bin (>= 1:2.0),
+ lsb-base (>= 3.1-23.2),
+ ${misc:Depends},
+ ${shlibs:Depends}
+Description: real-time charts for system monitoring
+ Netdata is a daemon that collects data in realtime (per second)
+ and presents a web site to view and analyze them. The presentation
+ is also real-time and full of interactive charts that precisely
+ render all collected values.
diff --git a/contrib/debian/control.wheezy b/contrib/debian/control.wheezy
new file mode 100644
index 00000000..4103908a
--- /dev/null
+++ b/contrib/debian/control.wheezy
@@ -0,0 +1,25 @@
+Source: netdata
+Build-Depends: debhelper (>= 9),
+ dh-autoreconf,
+ pkg-config,
+ dpkg-dev (>= 1.13.19),
+ zlib1g-dev,
+ uuid-dev
+Section: net
+Priority: optional
+Maintainer: Costa Tsaousis <costa@tsaousis.gr>
+Standards-Version: 3.9.6
+Homepage: https://github.com/firehol/netdata/wiki
+
+Package: netdata
+Architecture: any
+Depends: adduser,
+ libcap2-bin (>= 1:2.0),
+ lsb-base (>= 3.1-23.2),
+ ${misc:Depends},
+ ${shlibs:Depends}
+Description: real-time charts for system monitoring
+ Netdata is a daemon that collects data in realtime (per second)
+ and presents a web site to view and analyze them. The presentation
+ is also real-time and full of interactive charts that precisely
+ render all collected values.
diff --git a/contrib/debian/copyright b/contrib/debian/copyright
new file mode 100644
index 00000000..11a3d639
--- /dev/null
+++ b/contrib/debian/copyright
@@ -0,0 +1,10 @@
+Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
+Upstream-Name: Netdata
+Upstream-Contact: Costa Tsaousis <costa@tsaousis.gr>
+Source: https://github.com/firehol/netdata
+
+Files: *
+Copyright: 2014-2016, Costa Tsaousis
+License: GPL-3+
+ On Debian systems, the complete text of the GNU General Public
+ License version 3 can be found in /usr/share/common-licenses/GPL-3.
diff --git a/contrib/debian/netdata.conf b/contrib/debian/netdata.conf
new file mode 100644
index 00000000..a963d80b
--- /dev/null
+++ b/contrib/debian/netdata.conf
@@ -0,0 +1,16 @@
+# NetData Configuration
+
+# The current full configuration can be retrieved from the running
+# server at the URL
+#
+# http://localhost:19999/netdata.conf
+#
+# for example:
+#
+# wget -O /etc/netdata/netdata.conf http://localhost:19999/netdata.conf
+#
+
+[global]
+ run as user = netdata
+ web files owner = root
+ web files group = netdata
diff --git a/contrib/debian/netdata.default b/contrib/debian/netdata.default
new file mode 100644
index 00000000..9e7f8ae6
--- /dev/null
+++ b/contrib/debian/netdata.default
@@ -0,0 +1,5 @@
+# Extra arguments to pass to netdata
+#
+#EXTRA_OPTS=""
+# Uncomment the following line if you are building a wheezy package
+#EXTRA_OPTS="-P /var/run/netdata.pid"
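EXTRA_OPTS set here flows into both launchers: the init script sources /etc/default/netdata and appends it to start_daemon, and the systemd unit pulls the same file in through EnvironmentFile and expands $EXTRA_OPTS on its ExecStart line. A sketch of the wheezy case from the comment above:

    # /etc/default/netdata
    EXTRA_OPTS="-P /var/run/netdata.pid"
    # init script effect: start_daemon -p $PIDFILE $DAEMON -P /var/run/netdata.pid
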
diff --git a/contrib/debian/netdata.docs b/contrib/debian/netdata.docs
new file mode 100644
index 00000000..56631abf
--- /dev/null
+++ b/contrib/debian/netdata.docs
@@ -0,0 +1 @@
+ChangeLog
diff --git a/contrib/debian/netdata.init b/contrib/debian/netdata.init
new file mode 100755
index 00000000..c1b2b74d
--- /dev/null
+++ b/contrib/debian/netdata.init
@@ -0,0 +1,56 @@
+#!/bin/sh
+# Start/stop the netdata daemon.
+#
+### BEGIN INIT INFO
+# Provides: netdata
+# Required-Start: $remote_fs
+# Required-Stop: $remote_fs
+# Should-Start: $network
+# Should-Stop: $network
+# Default-Start: 2 3 4 5
+# Default-Stop:
+# Short-Description: Real-time charts for system monitoring
+# Description: Netdata is a daemon that collects data in realtime (per second)
+# and presents a web site to view and analyze them. The presentation
+# is also real-time and full of interactive charts that precisely
+# render all collected values.
+### END INIT INFO
+
+PATH=/bin:/usr/bin:/sbin:/usr/sbin
+DESC="netdata daemon"
+NAME=netdata
+DAEMON=/usr/sbin/netdata
+PIDFILE=/var/run/netdata/netdata.pid
+SCRIPTNAME=/etc/init.d/"$NAME"
+
+test -f $DAEMON || exit 0
+
+. /lib/lsb/init-functions
+
+[ -r /etc/default/netdata ] && . /etc/default/netdata
+
+case "$1" in
+start) log_daemon_msg "Starting real-time system monitoring" "netdata"
+ start_daemon -p $PIDFILE $DAEMON $EXTRA_OPTS
+ log_end_msg $?
+ ;;
+stop) log_daemon_msg "Stopping real-time system monitoring" "netdata"
+ killproc -p $PIDFILE $DAEMON
+ RETVAL=$?
+ [ $RETVAL -eq 0 ] && [ -e "$PIDFILE" ] && rm -f $PIDFILE
+ log_end_msg $RETVAL
+ # wait for plugins to exit
+ sleep 1
+ ;;
+restart|force-reload) log_daemon_msg "Restarting real-time system monitoring" "netdata"
+ $0 stop
+ $0 start
+ ;;
+status)
+ status_of_proc -p $PIDFILE $DAEMON $NAME && exit 0 || exit $?
+ ;;
+*) log_action_msg "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload}"
+ exit 2
+ ;;
+esac
+exit 0
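Once installed, the script is driven through the usual sysvinit tooling; the LSB header above supplies the default runlevels. A minimal sketch (Debian's update-rc.d and service wrappers assumed):

    update-rc.d netdata defaults   # register at the default runlevels
    service netdata start
    service netdata status
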
diff --git a/contrib/debian/netdata.install b/contrib/debian/netdata.install
new file mode 100644
index 00000000..45d42b63
--- /dev/null
+++ b/contrib/debian/netdata.install
@@ -0,0 +1 @@
+debian/netdata.conf /etc/netdata/
diff --git a/contrib/debian/netdata.lintian-overrides b/contrib/debian/netdata.lintian-overrides
new file mode 100644
index 00000000..45b2d868
--- /dev/null
+++ b/contrib/debian/netdata.lintian-overrides
@@ -0,0 +1,16 @@
+
+# See Debian policy 10.9. apps.plugin has extra capabilities, so don't let
+# normal users run it.
+netdata: non-standard-executable-perm usr/lib/*/netdata/plugins.d/apps.plugin 0754 != 0755
+
+
+# FontAwesome is at least in the fonts-font-awesome package, but this is
+# not available in wheezy. glyphicons-halflings-regular isn't currently in
+# a Debian package. Therefore don't complain about shipping them with netdata
+# for the time being.
+netdata: duplicate-font-file usr/share/netdata/fonts/*
+netdata: font-in-non-font-package usr/share/netdata/fonts/*
+
+# Files here are marked as conffiles so that local updates to the html files
+# aren't clobbered on upgrade.
+netdata: non-etc-file-marked-as-conffile var/lib/netdata/www/*
diff --git a/contrib/debian/netdata.postinst.in b/contrib/debian/netdata.postinst.in
new file mode 100644
index 00000000..29615f54
--- /dev/null
+++ b/contrib/debian/netdata.postinst.in
@@ -0,0 +1,41 @@
+#! /bin/sh
+
+set -e
+
+case "$1" in
+ configure)
+ if [ -z "$2" ]; then
+ if ! getent group netdata >/dev/null; then
+ addgroup --quiet --system netdata
+ fi
+
+ if ! getent passwd netdata >/dev/null; then
+ adduser --quiet --system --ingroup netdata --home /var/lib/netdata --no-create-home netdata
+ fi
+
+ if ! dpkg-statoverride --list /var/lib/netdata >/dev/null 2>&1; then
+ dpkg-statoverride --update --add netdata netdata 0755 /var/lib/netdata
+ fi
+
+ if ! dpkg-statoverride --list /var/lib/netdata/www >/dev/null 2>&1; then
+ dpkg-statoverride --update --add root netdata 0755 /var/lib/netdata/www
+ fi
+
+ if ! dpkg-statoverride --list /var/cache/netdata >/dev/null 2>&1; then
+ dpkg-statoverride --update --add netdata netdata 0755 /var/cache/netdata
+ fi
+
+ fi
+
+ dpkg-statoverride --update --add --force root netdata 0775 /var/lib/netdata/registry
+ chown -R root:netdata /usr/share/netdata/*
+ chown -R root:netdata /usr/lib/@DEB_HOST_MULTIARCH@/netdata/plugins.d
+ setcap cap_dac_read_search,cap_sys_ptrace+ep /usr/lib/@DEB_HOST_MULTIARCH@/netdata/plugins.d/apps.plugin
+
+#PERMS#
+ ;;
+esac
+
+#DEBHELPER#
+
+exit 0
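dpkg-statoverride records ownership and permission overrides in the dpkg database so they survive upgrades, which is why the postinst prefers it over bare chown/chmod for the netdata directories. Inspecting and undoing one by hand (a sketch; the postrm that follows performs these removals on purge):

    dpkg-statoverride --list /var/lib/netdata
    # netdata netdata 0755 /var/lib/netdata
    dpkg-statoverride --remove /var/lib/netdata
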
diff --git a/contrib/debian/netdata.postrm b/contrib/debian/netdata.postrm
new file mode 100644
index 00000000..4ab4eead
--- /dev/null
+++ b/contrib/debian/netdata.postrm
@@ -0,0 +1,43 @@
+#! /bin/sh
+
+set -e
+
+case "$1" in
+ remove)
+ ;;
+
+ purge)
+ if dpkg-statoverride --list | grep -qw /var/cache/netdata; then
+ dpkg-statoverride --remove /var/cache/netdata
+ fi
+
+ if dpkg-statoverride --list | grep -qw /var/lib/netdata/www; then
+ dpkg-statoverride --remove /var/lib/netdata/www
+ fi
+
+ if dpkg-statoverride --list | grep -qw /var/lib/netdata; then
+ dpkg-statoverride --remove /var/lib/netdata
+ fi
+
+ if getent passwd netdata >/dev/null; then
+ if [ -x /usr/sbin/deluser ]; then
+ deluser --quiet --system netdata || echo "Unable to remove netdata user"
+ fi
+ fi
+
+ if getent group netdata >/dev/null; then
+ if [ -x /usr/sbin/delgroup ]; then
+ delgroup --quiet --system netdata || echo "Unable to remove netdata group"
+ fi
+ fi
+
+ ;;
+
+ *)
+ ;;
+esac
+
+#DEBHELPER#
+
+exit 0
+
diff --git a/contrib/debian/netdata.service b/contrib/debian/netdata.service
new file mode 100644
index 00000000..e5d3a386
--- /dev/null
+++ b/contrib/debian/netdata.service
@@ -0,0 +1,14 @@
+[Unit]
+Description=netdata real-time system monitoring
+After=network.target
+
+[Service]
+Type=simple
+EnvironmentFile=-/etc/default/netdata
+ExecStart=/usr/sbin/netdata -D $EXTRA_OPTS
+TimeoutStopSec=30
+Restart=always
+RestartSec=5
+
+[Install]
+WantedBy=multi-user.target
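With the unit installed, the standard systemd workflow applies; the leading "-" in EnvironmentFile makes /etc/default/netdata optional rather than an error when absent. A minimal sketch:

    systemctl daemon-reload
    systemctl enable netdata
    systemctl start netdata
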
diff --git a/contrib/debian/rules b/contrib/debian/rules
new file mode 100755
index 00000000..ec4ec418
--- /dev/null
+++ b/contrib/debian/rules
@@ -0,0 +1,87 @@
+#!/usr/bin/make -f
+
+# Find the arch we are building for, as this determines
+# the location of plugins in /usr/lib
+DEB_HOST_MULTIARCH ?= $(shell dpkg-architecture -qDEB_HOST_MULTIARCH)
+TOP = $(CURDIR)/debian/netdata
+
+%:
+ # For jessie and beyond
+ #
+ dh $@ --with autoreconf,systemd
+
+ # For wheezy or other non-systemd distributions use the following. You
+ # should also see contrib/README.md which gives details of updates to
+ # make to debian/control.
+ #
+ #dh $@ --with autoreconf
+
+override_dh_auto_configure:
+ dh_auto_configure -- --with-math --with-webdir=/var/lib/netdata/www
+
+debian/%.postinst: debian/%.postinst.in
+ sed 's/@DEB_HOST_MULTIARCH@/$(DEB_HOST_MULTIARCH)/g' $< > $@
+
+override_dh_install: debian/netdata.postinst
+ dh_install
+
+ # Remove unneeded .keep files
+ #
+ find "$(TOP)" -name .keep -exec rm '{}' ';'
+
+	# Move files that the local user shouldn't be editing to /usr/share/netdata
+ #
+ mkdir -p "$(TOP)/usr/share/netdata"
+ for D in $$(find "$(TOP)/var/lib/netdata/www/" -maxdepth 1 -type d -printf '%f '); do \
+ echo Relocating $$D; \
+ mv "$(TOP)/var/lib/netdata/www/$$D" "$(TOP)/usr/share/netdata/$$D"; \
+ ln -s "/usr/share/netdata/$$D" "$(TOP)/var/lib/netdata/www/$$D"; \
+ done
+
+ # Update postinst to set correct group for www files on installation.
+ # Should probably be dpkg-statoverride really, but that gets *really*
+ # messy. We also set all web files in /var as conffiles so an upgrade
+ # doesn't splat them.
+ #
+ for D in $$(find "$(TOP)/var/lib/netdata/www/" -maxdepth 1 -type f -printf '%f '); do \
+ echo Updating postinst for $$D; \
+ sed -i "s/^#PERMS#/chgrp netdata \/var\/lib\/netdata\/www\/$$D\n#PERMS#/g" \
+ $(CURDIR)/debian/netdata.postinst; \
+ echo "/var/lib/netdata/www/$$D" >> $(CURDIR)/debian/netdata.conffiles; \
+ done
+ sed -i "/^#PERMS#/d" $(CURDIR)/debian/netdata.postinst
+
+override_dh_installdocs:
+ dh_installdocs
+
+ # Docs should not be under /usr/lib
+ #
+ mv $(TOP)/usr/lib/$(DEB_HOST_MULTIARCH)/netdata/plugins.d/README.md \
+ $(TOP)/usr/share/doc/netdata/README.plugins.md
+ mv $(TOP)/usr/lib/$(DEB_HOST_MULTIARCH)/netdata/charts.d/README.md \
+ $(TOP)/usr/share/doc/netdata/README.charts.md
+
+ # This doc is currently empty, so no point installing it.
+ #
+ rm $(TOP)/usr/lib/$(DEB_HOST_MULTIARCH)/netdata/node.d/README.md
+
+override_dh_fixperms:
+ dh_fixperms
+
+ # apps.plugin should only be runnable by the netdata user. It will be
+ # given extra capabilities in the postinst script.
+ #
+ chmod 0754 $(TOP)/usr/lib/$(DEB_HOST_MULTIARCH)/netdata/plugins.d/apps.plugin
+
+override_dh_installlogrotate:
+ cp system/netdata.logrotate debian/netdata.logrotate
+ dh_installlogrotate
+
+override_dh_clean:
+ dh_clean
+
+ # Tidy up copied/generated files
+ #
+ -[ -r $(CURDIR)/debian/netdata.logrotate ] && rm $(CURDIR)/debian/netdata.logrotate
+ -[ -r $(CURDIR)/debian/netdata.postinst ] && rm $(CURDIR)/debian/netdata.postinst
+ -[ -r $(CURDIR)/debian/netdata.conffiles ] && rm $(CURDIR)/debian/netdata.conffiles
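With these files in place, a binary package is built by copying contrib/debian into the source tree as debian/ and invoking the standard toolchain; per the comments in the rules file, contrib/README.md covers the extra control/rules tweaks needed for wheezy. A sketch:

    cp -a contrib/debian debian
    dpkg-buildpackage -us -uc -b   # unsigned, binary-only build
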
diff --git a/contrib/debian/source/format b/contrib/debian/source/format
new file mode 100644
index 00000000..89ae9db8
--- /dev/null
+++ b/contrib/debian/source/format
@@ -0,0 +1 @@
+3.0 (native)
diff --git a/contrib/nc-backend.sh b/contrib/nc-backend.sh
new file mode 100755
index 00000000..aac5c20b
--- /dev/null
+++ b/contrib/nc-backend.sh
@@ -0,0 +1,151 @@
+#!/usr/bin/env bash
+
+MODE="${1}"
+MY_PORT="${2}"
+BACKEND_HOST="${3}"
+BACKEND_PORT="${4}"
+FILE="${NETDATA_NC_BACKEND_DIR-/tmp}/netdata-nc-backend-${MY_PORT}"
+
+log() {
+ logger --stderr --id=$$ --tag "netdata-nc-backend" "${*}"
+}
+
+mync() {
+ local ret
+
+ log "Running: nc ${*}"
+ nc "${@}"
+ ret=$?
+
+ log "nc stopped with return code ${ret}."
+
+ return ${ret}
+}
+
+listen_save_replay_forever() {
+ local file="${1}" port="${2}" real_backend_host="${3}" real_backend_port="${4}" ret delay=1 started ended
+
+	while true
+ do
+ log "Starting nc to listen on port ${port} and save metrics to ${file}"
+
+ started=$(date +%s)
+ mync -l -p "${port}" | tee -a -p --output-error=exit "${file}"
+ ended=$(date +%s)
+
+ if [ -s "${file}" ]
+ then
+ if [ ! -z "${real_backend_host}" -a ! -z "${real_backend_port}" ]
+ then
+ log "Attempting to send the metrics to the real backend at ${real_backend_host}:${real_backend_port}"
+
+ mync "${real_backend_host}" "${real_backend_port}" <"${file}"
+ ret=$?
+
+ if [ ${ret} -eq 0 ]
+ then
+					log "Successfully sent the metrics to ${real_backend_host}:${real_backend_port}"
+ mv "${file}" "${file}.old"
+ touch "${file}"
+ else
+ log "Failed to send the metrics to ${real_backend_host}:${real_backend_port} (nc returned ${ret}) - appending more data to ${file}"
+ fi
+ else
+ log "No backend configured - appending more data to ${file}"
+ fi
+ fi
+
+		# prevent a CPU-hungry infinite loop
+		# if nc cannot listen on the port
+ if [ $((ended - started)) -lt 5 ]
+ then
+ log "nc has been stopped too fast."
+ delay=30
+ else
+ delay=1
+ fi
+
+ log "Waiting ${delay} seconds before listening again for data."
+ sleep ${delay}
+ done
+}
+
+if [ "${MODE}" = "start" ]
+ then
+
+ # start the listener, in exclusive mode
+ # only one can use the same file/port at a time
+ {
+ flock -n 9
+ if [ $? -ne 0 ]
+ then
+ log "Cannot get exclusive lock on file ${FILE}.lock - Am I running multiple times?"
+ exit 2
+ fi
+
+ # save our PID to the lock file
+ echo "$$" >"${FILE}.lock"
+
+ listen_save_replay_forever "${FILE}" ${MY_PORT} ${BACKEND_HOST} ${BACKEND_PORT}
+ ret=$?
+
+ log "listener exited."
+ exit ${ret}
+
+ } 9>>"${FILE}.lock"
+
+ # we can only get here if ${FILE}.lock cannot be created
+	log "Cannot create file ${FILE}.lock."
+ exit 3
+
+elif [ "${MODE}" = "stop" ]
+ then
+
+ {
+ flock -n 9
+ if [ $? -ne 0 ]
+ then
+ pid=$(<${FILE}.lock)
+ log "Killing process ${pid}..."
+ kill -TERM -${pid}
+ exit 0
+ fi
+
+	log "File ${FILE}.lock has been locked by me, but it shouldn't have been. Is a collector running?"
+ exit 4
+
+ } 9<"${FILE}.lock"
+
+ log "File ${FILE}.lock does not exist. Is a collector running?"
+ exit 5
+
+else
+
+ cat <<EOF
+Usage:
+
+ "${0}" start|stop PORT [BACKEND_HOST BACKEND_PORT]
+
+	PORT		The port this script will listen on
+ (configure netdata to use this as a second backend)
+
+ BACKEND_HOST The real backend host
+ BACKEND_PORT The real backend port
+
+	This script can act as a fallback backend for netdata.
+	It will receive metrics from netdata and save them to
+	${FILE}
+	and once the real backend is reachable again, this script
+	will push all collected metrics to it too, then wait for
+	the next failure to happen.
+
+	Only one netdata instance can connect to this script at a time.
+	If you need a fallback for multiple netdata instances, run this
+	script multiple times with different ports.
+
+ You can run me in the background with this:
+
+ screen -d -m "${0}" start PORT [BACKEND_HOST BACKEND_PORT]
+EOF
+ exit 1
+fi
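A concrete session matching the usage text above (the port and backend address are illustrative):

    # listen on 24242, relaying to the real backend whenever it accepts data
    screen -d -m ./contrib/nc-backend.sh start 24242 graphite.example.com 2003

    # later, terminate the listener bound to that port
    ./contrib/nc-backend.sh stop 24242
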
diff --git a/coverity-scan.sh b/coverity-scan.sh
index 8ea43345..46a0c809 100755
--- a/coverity-scan.sh
+++ b/coverity-scan.sh
@@ -1,5 +1,8 @@
#!/bin/bash
+cpus=$(grep ^processor </proc/cpuinfo| wc -l)
+[ -z "${cpus}" ] && cpus=1
+
token=
[ -f .coverity-token ] && token="$(<.coverity-token)"
[ -z "${token}" ] && \
@@ -30,7 +33,7 @@ make clean || exit 1
[ -f netdata-coverity-analysis.tgz ] && \
rm netdata-coverity-analysis.tgz
-"${covbuild}" --dir cov-int make -j4 || exit 1
+"${covbuild}" --dir cov-int make -j${cpus} || exit 1
tar czvf netdata-coverity-analysis.tgz cov-int || exit 1
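The CPU count scraped from /proc/cpuinfo feeds make -j so the Coverity build parallelizes to the machine it runs on. Equivalent, slightly terser forms (a sketch, not part of the patch; nproc is GNU coreutils):

    cpus=$(nproc 2>/dev/null || grep -c ^processor /proc/cpuinfo)
    [ -z "${cpus}" ] && cpus=1
    make -j"${cpus}"
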
diff --git a/depcomp b/depcomp
index bd0ac089..4ebd5b3a 100755
--- a/depcomp
+++ b/depcomp
@@ -1,10 +1,9 @@
#! /bin/sh
# depcomp - compile a program generating dependencies as side-effects
-scriptversion=2011-12-04.11; # UTC
+scriptversion=2013-05-30.07; # UTC
-# Copyright (C) 1999, 2000, 2003, 2004, 2005, 2006, 2007, 2009, 2010,
-# 2011 Free Software Foundation, Inc.
+# Copyright (C) 1999-2013 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -28,9 +27,9 @@ scriptversion=2011-12-04.11; # UTC
case $1 in
'')
- echo "$0: No command. Try \`$0 --help' for more information." 1>&2
- exit 1;
- ;;
+ echo "$0: No command. Try '$0 --help' for more information." 1>&2
+ exit 1;
+ ;;
-h | --h*)
cat <<\EOF
Usage: depcomp [--help] [--version] PROGRAM [ARGS]
@@ -40,8 +39,8 @@ as side-effects.
Environment variables:
depmode Dependency tracking mode.
- source Source file read by `PROGRAMS ARGS'.
- object Object file output by `PROGRAMS ARGS'.
+ source Source file read by 'PROGRAMS ARGS'.
+ object Object file output by 'PROGRAMS ARGS'.
DEPDIR directory where to store dependencies.
depfile Dependency file to output.
tmpdepfile Temporary file to use when outputting dependencies.
@@ -57,6 +56,66 @@ EOF
;;
esac
+# Get the directory component of the given path, and save it in the
+# global variables '$dir'. Note that this directory component will
+# be either empty or ending with a '/' character. This is deliberate.
+set_dir_from ()
+{
+ case $1 in
+ */*) dir=`echo "$1" | sed -e 's|/[^/]*$|/|'`;;
+ *) dir=;;
+ esac
+}
+
+# Get the suffix-stripped basename of the given path, and save it the
+# global variable '$base'.
+set_base_from ()
+{
+ base=`echo "$1" | sed -e 's|^.*/||' -e 's/\.[^.]*$//'`
+}
+
+# If no dependency file was actually created by the compiler invocation,
+# we still have to create a dummy depfile, to avoid errors with the
+# Makefile "include basename.Plo" scheme.
+make_dummy_depfile ()
+{
+ echo "#dummy" > "$depfile"
+}
+
+# Factor out some common post-processing of the generated depfile.
+# Requires the auxiliary global variable '$tmpdepfile' to be set.
+aix_post_process_depfile ()
+{
+ # If the compiler actually managed to produce a dependency file,
+ # post-process it.
+ if test -f "$tmpdepfile"; then
+ # Each line is of the form 'foo.o: dependency.h'.
+ # Do two passes, one to just change these to
+ # $object: dependency.h
+ # and one to simply output
+ # dependency.h:
+ # which is needed to avoid the deleted-header problem.
+ { sed -e "s,^.*\.[$lower]*:,$object:," < "$tmpdepfile"
+ sed -e "s,^.*\.[$lower]*:[$tab ]*,," -e 's,$,:,' < "$tmpdepfile"
+ } > "$depfile"
+ rm -f "$tmpdepfile"
+ else
+ make_dummy_depfile
+ fi
+}
+
+# A tabulation character.
+tab=' '
+# A newline character.
+nl='
+'
+# Character ranges might be problematic outside the C locale.
+# These definitions help.
+upper=ABCDEFGHIJKLMNOPQRSTUVWXYZ
+lower=abcdefghijklmnopqrstuvwxyz
+digits=0123456789
+alpha=${upper}${lower}
+
if test -z "$depmode" || test -z "$source" || test -z "$object"; then
echo "depcomp: Variables source, object and depmode must be set" 1>&2
exit 1
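Several depmode branches below refer to the "deleted-header problem": if a header recorded in a stored dependency file is later deleted, make dies because it has no rule to rebuild it. The cure used throughout this script is to emit a dummy "header.h:" rule per dependency. The transformation in miniature (a sketch relying on GNU sed's \n in replacements):

    printf 'foo.o: a.h b.h\n' \
        | sed -e 's/^[^:]*:[ ]*//' -e 's/ /:\n/g' -e 's/$/:/'
    # a.h:
    # b.h:
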
@@ -69,6 +128,9 @@ tmpdepfile=${tmpdepfile-`echo "$depfile" | sed 's/\.\([^.]*\)$/.T\1/'`}
rm -f "$tmpdepfile"
+# Avoid interferences from the environment.
+gccflag= dashmflag=
+
# Some modes work just like other modes, but use different flags. We
# parameterize here, but still list the modes in the big case below,
# to make depend.m4 easier to write. Note that we *cannot* use a case
@@ -80,26 +142,32 @@ if test "$depmode" = hp; then
fi
if test "$depmode" = dashXmstdout; then
- # This is just like dashmstdout with a different argument.
- dashmflag=-xM
- depmode=dashmstdout
+ # This is just like dashmstdout with a different argument.
+ dashmflag=-xM
+ depmode=dashmstdout
fi
cygpath_u="cygpath -u -f -"
if test "$depmode" = msvcmsys; then
- # This is just like msvisualcpp but w/o cygpath translation.
- # Just convert the backslash-escaped backslashes to single forward
- # slashes to satisfy depend.m4
- cygpath_u='sed s,\\\\,/,g'
- depmode=msvisualcpp
+ # This is just like msvisualcpp but w/o cygpath translation.
+ # Just convert the backslash-escaped backslashes to single forward
+ # slashes to satisfy depend.m4
+ cygpath_u='sed s,\\\\,/,g'
+ depmode=msvisualcpp
fi
if test "$depmode" = msvc7msys; then
- # This is just like msvc7 but w/o cygpath translation.
- # Just convert the backslash-escaped backslashes to single forward
- # slashes to satisfy depend.m4
- cygpath_u='sed s,\\\\,/,g'
- depmode=msvc7
+ # This is just like msvc7 but w/o cygpath translation.
+ # Just convert the backslash-escaped backslashes to single forward
+ # slashes to satisfy depend.m4
+ cygpath_u='sed s,\\\\,/,g'
+ depmode=msvc7
+fi
+
+if test "$depmode" = xlc; then
+ # IBM C/C++ Compilers xlc/xlC can output gcc-like dependency information.
+ gccflag=-qmakedep=gcc,-MF
+ depmode=gcc
fi
case "$depmode" in
@@ -122,8 +190,7 @@ gcc3)
done
"$@"
stat=$?
- if test $stat -eq 0; then :
- else
+ if test $stat -ne 0; then
rm -f "$tmpdepfile"
exit $stat
fi
@@ -131,13 +198,17 @@ gcc3)
;;
gcc)
+## Note that this doesn't just cater to obsolete pre-3.x GCC compilers,
+## but also to in-use compilers like IBM xlc/xlC and the HP C compiler
+## (see the conditional assignment to $gccflag above).
## There are various ways to get dependency output from gcc. Here's
## why we pick this rather obscure method:
## - Don't want to use -MD because we'd like the dependencies to end
## up in a subdir. Having to rename by hand is ugly.
## (We might end up doing this anyway to support other compilers.)
## - The DEPENDENCIES_OUTPUT environment variable makes gcc act like
-## -MM, not -M (despite what the docs say).
+## -MM, not -M (despite what the docs say). Also, it might not be
+## supported by the other compilers which use the 'gcc' depmode.
## - Using -M directly means running the compiler twice (even worse
## than renaming).
if test -z "$gccflag"; then
@@ -145,33 +216,31 @@ gcc)
fi
"$@" -Wp,"$gccflag$tmpdepfile"
stat=$?
- if test $stat -eq 0; then :
- else
+ if test $stat -ne 0; then
rm -f "$tmpdepfile"
exit $stat
fi
rm -f "$depfile"
echo "$object : \\" > "$depfile"
- alpha=ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz
-## The second -e expression handles DOS-style file names with drive letters.
+ # The second -e expression handles DOS-style file names with drive
+ # letters.
sed -e 's/^[^:]*: / /' \
-e 's/^['$alpha']:\/[^:]*: / /' < "$tmpdepfile" >> "$depfile"
-## This next piece of magic avoids the `deleted header file' problem.
+## This next piece of magic avoids the "deleted header file" problem.
## The problem is that when a header file which appears in a .P file
## is deleted, the dependency causes make to die (because there is
## typically no way to rebuild the header). We avoid this by adding
## dummy dependencies for each header file. Too bad gcc doesn't do
## this for us directly.
- tr ' ' '
-' < "$tmpdepfile" |
-## Some versions of gcc put a space before the `:'. On the theory
+## Some versions of gcc put a space before the ':'. On the theory
## that the space means something, we add a space to the output as
## well. hp depmode also adds that space, but also prefixes the VPATH
## to the object. Take care to not repeat it in the output.
## Some versions of the HPUX 10.20 sed can't process this invocation
## correctly. Breaking it into two sed invocations is a workaround.
- sed -e 's/^\\$//' -e '/^$/d' -e "s|.*$object$||" -e '/:$/d' \
- | sed -e 's/$/ :/' >> "$depfile"
+ tr ' ' "$nl" < "$tmpdepfile" \
+ | sed -e 's/^\\$//' -e '/^$/d' -e "s|.*$object$||" -e '/:$/d' \
+ | sed -e 's/$/ :/' >> "$depfile"
rm -f "$tmpdepfile"
;;
@@ -189,8 +258,7 @@ sgi)
"$@" -MDupdate "$tmpdepfile"
fi
stat=$?
- if test $stat -eq 0; then :
- else
+ if test $stat -ne 0; then
rm -f "$tmpdepfile"
exit $stat
fi
@@ -198,43 +266,41 @@ sgi)
if test -f "$tmpdepfile"; then # yes, the sourcefile depend on other files
echo "$object : \\" > "$depfile"
-
# Clip off the initial element (the dependent). Don't try to be
# clever and replace this with sed code, as IRIX sed won't handle
# lines with more than a fixed number of characters (4096 in
# IRIX 6.2 sed, 8192 in IRIX 6.5). We also remove comment lines;
- # the IRIX cc adds comments like `#:fec' to the end of the
+ # the IRIX cc adds comments like '#:fec' to the end of the
# dependency line.
- tr ' ' '
-' < "$tmpdepfile" \
- | sed -e 's/^.*\.o://' -e 's/#.*$//' -e '/^$/ d' | \
- tr '
-' ' ' >> "$depfile"
+ tr ' ' "$nl" < "$tmpdepfile" \
+ | sed -e 's/^.*\.o://' -e 's/#.*$//' -e '/^$/ d' \
+ | tr "$nl" ' ' >> "$depfile"
echo >> "$depfile"
-
# The second pass generates a dummy entry for each header file.
- tr ' ' '
-' < "$tmpdepfile" \
- | sed -e 's/^.*\.o://' -e 's/#.*$//' -e '/^$/ d' -e 's/$/:/' \
- >> "$depfile"
+ tr ' ' "$nl" < "$tmpdepfile" \
+ | sed -e 's/^.*\.o://' -e 's/#.*$//' -e '/^$/ d' -e 's/$/:/' \
+ >> "$depfile"
else
- # The sourcefile does not contain any dependencies, so just
- # store a dummy comment line, to avoid errors with the Makefile
- # "include basename.Plo" scheme.
- echo "#dummy" > "$depfile"
+ make_dummy_depfile
fi
rm -f "$tmpdepfile"
;;
+xlc)
+ # This case exists only to let depend.m4 do its work. It works by
+ # looking at the text of this script. This case will never be run,
+ # since it is checked for above.
+ exit 1
+ ;;
+
aix)
# The C for AIX Compiler uses -M and outputs the dependencies
# in a .u file. In older versions, this file always lives in the
- # current directory. Also, the AIX compiler puts `$object:' at the
+ # current directory. Also, the AIX compiler puts '$object:' at the
# start of each line; $object doesn't have directory information.
# Version 6 uses the directory in both cases.
- dir=`echo "$object" | sed -e 's|/[^/]*$|/|'`
- test "x$dir" = "x$object" && dir=
- base=`echo "$object" | sed -e 's|^.*/||' -e 's/\.o$//' -e 's/\.lo$//'`
+ set_dir_from "$object"
+ set_base_from "$object"
if test "$libtool" = yes; then
tmpdepfile1=$dir$base.u
tmpdepfile2=$base.u
@@ -247,9 +313,7 @@ aix)
"$@" -M
fi
stat=$?
-
- if test $stat -eq 0; then :
- else
+ if test $stat -ne 0; then
rm -f "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3"
exit $stat
fi
@@ -258,44 +322,100 @@ aix)
do
test -f "$tmpdepfile" && break
done
- if test -f "$tmpdepfile"; then
- # Each line is of the form `foo.o: dependent.h'.
- # Do two passes, one to just change these to
- # `$object: dependent.h' and one to simply `dependent.h:'.
- sed -e "s,^.*\.[a-z]*:,$object:," < "$tmpdepfile" > "$depfile"
- # That's a tab and a space in the [].
- sed -e 's,^.*\.[a-z]*:[ ]*,,' -e 's,$,:,' < "$tmpdepfile" >> "$depfile"
- else
- # The sourcefile does not contain any dependencies, so just
- # store a dummy comment line, to avoid errors with the Makefile
- # "include basename.Plo" scheme.
- echo "#dummy" > "$depfile"
+ aix_post_process_depfile
+ ;;
+
+tcc)
+  # tcc (Tiny C Compiler) understands '-MD -MF file' since version 0.9.26.
+  # FIXME: That version was still under development at the moment of
+  # writing. Make sure that this statement remains true also for stable,
+  # released versions.
+ # It will wrap lines (doesn't matter whether long or short) with a
+ # trailing '\', as in:
+ #
+ # foo.o : \
+ # foo.c \
+ # foo.h \
+ #
+ # It will put a trailing '\' even on the last line, and will use leading
+ # spaces rather than leading tabs (at least since its commit 0394caf7
+ # "Emit spaces for -MD").
+ "$@" -MD -MF "$tmpdepfile"
+ stat=$?
+ if test $stat -ne 0; then
+ rm -f "$tmpdepfile"
+ exit $stat
fi
+ rm -f "$depfile"
+ # Each non-empty line is of the form 'foo.o : \' or ' dep.h \'.
+ # We have to change lines of the first kind to '$object: \'.
+ sed -e "s|.*:|$object :|" < "$tmpdepfile" > "$depfile"
+ # And for each line of the second kind, we have to emit a 'dep.h:'
+ # dummy dependency, to avoid the deleted-header problem.
+ sed -n -e 's|^ *\(.*\) *\\$|\1:|p' < "$tmpdepfile" >> "$depfile"
rm -f "$tmpdepfile"
;;
-icc)
- # Intel's C compiler understands `-MD -MF file'. However on
- # icc -MD -MF foo.d -c -o sub/foo.o sub/foo.c
- # ICC 7.0 will fill foo.d with something like
- # foo.o: sub/foo.c
- # foo.o: sub/foo.h
- # which is wrong. We want:
- # sub/foo.o: sub/foo.c
- # sub/foo.o: sub/foo.h
- # sub/foo.c:
- # sub/foo.h:
- # ICC 7.1 will output
+## The order of this option in the case statement is important, since the
+## shell code in configure will try each of these formats in the order
+## listed in this file. A plain '-MD' option would be understood by many
+## compilers, so we must ensure this comes after the gcc and icc options.
+pgcc)
+ # Portland's C compiler understands '-MD'.
+ # Will always output deps to 'file.d' where file is the root name of the
+ # source file under compilation, even if file resides in a subdirectory.
+ # The object file name does not affect the name of the '.d' file.
+ # pgcc 10.2 will output
# foo.o: sub/foo.c sub/foo.h
- # and will wrap long lines using \ :
+ # and will wrap long lines using '\' :
# foo.o: sub/foo.c ... \
# sub/foo.h ... \
# ...
+ set_dir_from "$object"
+ # Use the source, not the object, to determine the base name, since
+ # that's sadly what pgcc will do too.
+ set_base_from "$source"
+ tmpdepfile=$base.d
+
+ # For projects that build the same source file twice into different object
+ # files, the pgcc approach of using the *source* file root name can cause
+ # problems in parallel builds. Use a locking strategy to avoid stomping on
+ # the same $tmpdepfile.
+ lockdir=$base.d-lock
+ trap "
+ echo '$0: caught signal, cleaning up...' >&2
+ rmdir '$lockdir'
+ exit 1
+ " 1 2 13 15
+ numtries=100
+ i=$numtries
+ while test $i -gt 0; do
+ # mkdir is a portable test-and-set.
+ if mkdir "$lockdir" 2>/dev/null; then
+ # This process acquired the lock.
+ "$@" -MD
+ stat=$?
+ # Release the lock.
+ rmdir "$lockdir"
+ break
+ else
+ # If the lock is being held by a different process, wait
+ # until the winning process is done or we timeout.
+ while test -d "$lockdir" && test $i -gt 0; do
+ sleep 1
+ i=`expr $i - 1`
+ done
+ fi
+ i=`expr $i - 1`
+ done
+ trap - 1 2 13 15
+ if test $i -le 0; then
+ echo "$0: failed to acquire lock after $numtries attempts" >&2
+ echo "$0: check lockdir '$lockdir'" >&2
+ exit 1
+ fi
- "$@" -MD -MF "$tmpdepfile"
- stat=$?
- if test $stat -eq 0; then :
- else
+ if test $stat -ne 0; then
rm -f "$tmpdepfile"
exit $stat
fi
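The locking above leans on mkdir being an atomic create-or-fail operation, which makes it a portable test-and-set even where flock is unavailable. The idiom in isolation (a sketch):

    lockdir=foo.d-lock
    if mkdir "$lockdir" 2>/dev/null; then
        # we own the lock: do the critical work, then release it
        rmdir "$lockdir"
    else
        echo "lock held by another process" >&2
    fi
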
@@ -307,8 +427,8 @@ icc)
sed "s,^[^:]*:,$object :," < "$tmpdepfile" > "$depfile"
# Some versions of the HPUX 10.20 sed can't process this invocation
# correctly. Breaking it into two sed invocations is a workaround.
- sed 's,^[^:]*: \(.*\)$,\1,;s/^\\$//;/^$/d;/:$/d' < "$tmpdepfile" |
- sed -e 's/$/ :/' >> "$depfile"
+ sed 's,^[^:]*: \(.*\)$,\1,;s/^\\$//;/^$/d;/:$/d' < "$tmpdepfile" \
+ | sed -e 's/$/ :/' >> "$depfile"
rm -f "$tmpdepfile"
;;
@@ -319,9 +439,8 @@ hp2)
# 'foo.d', which lands next to the object file, wherever that
# happens to be.
# Much of this is similar to the tru64 case; see comments there.
- dir=`echo "$object" | sed -e 's|/[^/]*$|/|'`
- test "x$dir" = "x$object" && dir=
- base=`echo "$object" | sed -e 's|^.*/||' -e 's/\.o$//' -e 's/\.lo$//'`
+ set_dir_from "$object"
+ set_base_from "$object"
if test "$libtool" = yes; then
tmpdepfile1=$dir$base.d
tmpdepfile2=$dir.libs/$base.d
@@ -332,8 +451,7 @@ hp2)
"$@" +Maked
fi
stat=$?
- if test $stat -eq 0; then :
- else
+ if test $stat -ne 0; then
rm -f "$tmpdepfile1" "$tmpdepfile2"
exit $stat
fi
@@ -343,77 +461,61 @@ hp2)
test -f "$tmpdepfile" && break
done
if test -f "$tmpdepfile"; then
- sed -e "s,^.*\.[a-z]*:,$object:," "$tmpdepfile" > "$depfile"
- # Add `dependent.h:' lines.
+ sed -e "s,^.*\.[$lower]*:,$object:," "$tmpdepfile" > "$depfile"
+ # Add 'dependent.h:' lines.
sed -ne '2,${
- s/^ *//
- s/ \\*$//
- s/$/:/
- p
- }' "$tmpdepfile" >> "$depfile"
+ s/^ *//
+ s/ \\*$//
+ s/$/:/
+ p
+ }' "$tmpdepfile" >> "$depfile"
else
- echo "#dummy" > "$depfile"
+ make_dummy_depfile
fi
rm -f "$tmpdepfile" "$tmpdepfile2"
;;
tru64)
- # The Tru64 compiler uses -MD to generate dependencies as a side
- # effect. `cc -MD -o foo.o ...' puts the dependencies into `foo.o.d'.
- # At least on Alpha/Redhat 6.1, Compaq CCC V6.2-504 seems to put
- # dependencies in `foo.d' instead, so we check for that too.
- # Subdirectories are respected.
- dir=`echo "$object" | sed -e 's|/[^/]*$|/|'`
- test "x$dir" = "x$object" && dir=
- base=`echo "$object" | sed -e 's|^.*/||' -e 's/\.o$//' -e 's/\.lo$//'`
-
- if test "$libtool" = yes; then
- # With Tru64 cc, shared objects can also be used to make a
- # static library. This mechanism is used in libtool 1.4 series to
- # handle both shared and static libraries in a single compilation.
- # With libtool 1.4, dependencies were output in $dir.libs/$base.lo.d.
- #
- # With libtool 1.5 this exception was removed, and libtool now
- # generates 2 separate objects for the 2 libraries. These two
- # compilations output dependencies in $dir.libs/$base.o.d and
- # in $dir$base.o.d. We have to check for both files, because
- # one of the two compilations can be disabled. We should prefer
- # $dir$base.o.d over $dir.libs/$base.o.d because the latter is
- # automatically cleaned when .libs/ is deleted, while ignoring
- # the former would cause a distcleancheck panic.
- tmpdepfile1=$dir.libs/$base.lo.d # libtool 1.4
- tmpdepfile2=$dir$base.o.d # libtool 1.5
- tmpdepfile3=$dir.libs/$base.o.d # libtool 1.5
- tmpdepfile4=$dir.libs/$base.d # Compaq CCC V6.2-504
- "$@" -Wc,-MD
- else
- tmpdepfile1=$dir$base.o.d
- tmpdepfile2=$dir$base.d
- tmpdepfile3=$dir$base.d
- tmpdepfile4=$dir$base.d
- "$@" -MD
- fi
-
- stat=$?
- if test $stat -eq 0; then :
- else
- rm -f "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3" "$tmpdepfile4"
- exit $stat
- fi
-
- for tmpdepfile in "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3" "$tmpdepfile4"
- do
- test -f "$tmpdepfile" && break
- done
- if test -f "$tmpdepfile"; then
- sed -e "s,^.*\.[a-z]*:,$object:," < "$tmpdepfile" > "$depfile"
- # That's a tab and a space in the [].
- sed -e 's,^.*\.[a-z]*:[ ]*,,' -e 's,$,:,' < "$tmpdepfile" >> "$depfile"
- else
- echo "#dummy" > "$depfile"
- fi
- rm -f "$tmpdepfile"
- ;;
+ # The Tru64 compiler uses -MD to generate dependencies as a side
+ # effect. 'cc -MD -o foo.o ...' puts the dependencies into 'foo.o.d'.
+ # At least on Alpha/Redhat 6.1, Compaq CCC V6.2-504 seems to put
+ # dependencies in 'foo.d' instead, so we check for that too.
+ # Subdirectories are respected.
+ set_dir_from "$object"
+ set_base_from "$object"
+
+ if test "$libtool" = yes; then
+ # Libtool generates 2 separate objects for the 2 libraries. These
+ # two compilations output dependencies in $dir.libs/$base.o.d and
+ # in $dir$base.o.d. We have to check for both files, because
+ # one of the two compilations can be disabled. We should prefer
+ # $dir$base.o.d over $dir.libs/$base.o.d because the latter is
+ # automatically cleaned when .libs/ is deleted, while ignoring
+ # the former would cause a distcleancheck panic.
+ tmpdepfile1=$dir$base.o.d # libtool 1.5
+ tmpdepfile2=$dir.libs/$base.o.d # Likewise.
+ tmpdepfile3=$dir.libs/$base.d # Compaq CCC V6.2-504
+ "$@" -Wc,-MD
+ else
+ tmpdepfile1=$dir$base.d
+ tmpdepfile2=$dir$base.d
+ tmpdepfile3=$dir$base.d
+ "$@" -MD
+ fi
+
+ stat=$?
+ if test $stat -ne 0; then
+ rm -f "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3"
+ exit $stat
+ fi
+
+ for tmpdepfile in "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3"
+ do
+ test -f "$tmpdepfile" && break
+ done
+ # Same post-processing that is required for AIX mode.
+ aix_post_process_depfile
+ ;;
msvc7)
if test "$libtool" = yes; then
@@ -424,8 +526,7 @@ msvc7)
"$@" $showIncludes > "$tmpdepfile"
stat=$?
grep -v '^Note: including file: ' "$tmpdepfile"
- if test "$stat" = 0; then :
- else
+ if test $stat -ne 0; then
rm -f "$tmpdepfile"
exit $stat
fi
@@ -443,14 +544,15 @@ msvc7)
p
}' | $cygpath_u | sort -u | sed -n '
s/ /\\ /g
-s/\(.*\)/ \1 \\/p
+s/\(.*\)/'"$tab"'\1 \\/p
s/.\(.*\) \\/\1:/
H
$ {
- s/.*/ /
+ s/.*/'"$tab"'/
G
p
}' >> "$depfile"
+ echo >> "$depfile" # make sure the fragment doesn't end with a backslash
rm -f "$tmpdepfile"
;;
@@ -478,7 +580,7 @@ dashmstdout)
shift
fi
- # Remove `-o $object'.
+ # Remove '-o $object'.
IFS=" "
for arg
do
@@ -498,18 +600,18 @@ dashmstdout)
done
test -z "$dashmflag" && dashmflag=-M
- # Require at least two characters before searching for `:'
+ # Require at least two characters before searching for ':'
# in the target name. This is to cope with DOS-style filenames:
- # a dependency such as `c:/foo/bar' could be seen as target `c' otherwise.
+ # a dependency such as 'c:/foo/bar' could be seen as target 'c' otherwise.
"$@" $dashmflag |
- sed 's:^[ ]*[^: ][^:][^:]*\:[ ]*:'"$object"'\: :' > "$tmpdepfile"
+ sed "s|^[$tab ]*[^:$tab ][^:][^:]*:[$tab ]*|$object: |" > "$tmpdepfile"
rm -f "$depfile"
cat < "$tmpdepfile" > "$depfile"
- tr ' ' '
-' < "$tmpdepfile" | \
-## Some versions of the HPUX 10.20 sed can't process this invocation
-## correctly. Breaking it into two sed invocations is a workaround.
- sed -e 's/^\\$//' -e '/^$/d' -e '/:$/d' | sed -e 's/$/ :/' >> "$depfile"
+ # Some versions of the HPUX 10.20 sed can't process this sed invocation
+ # correctly. Breaking it into two sed invocations is a workaround.
+ tr ' ' "$nl" < "$tmpdepfile" \
+ | sed -e 's/^\\$//' -e '/^$/d' -e '/:$/d' \
+ | sed -e 's/$/ :/' >> "$depfile"
rm -f "$tmpdepfile"
;;
@@ -562,11 +664,12 @@ makedepend)
# makedepend may prepend the VPATH from the source file name to the object.
# No need to regex-escape $object, excess matching of '.' is harmless.
sed "s|^.*\($object *:\)|\1|" "$tmpdepfile" > "$depfile"
- sed '1,2d' "$tmpdepfile" | tr ' ' '
-' | \
-## Some versions of the HPUX 10.20 sed can't process this invocation
-## correctly. Breaking it into two sed invocations is a workaround.
- sed -e 's/^\\$//' -e '/^$/d' -e '/:$/d' | sed -e 's/$/ :/' >> "$depfile"
+ # Some versions of the HPUX 10.20 sed can't process the last invocation
+ # correctly. Breaking it into two sed invocations is a workaround.
+ sed '1,2d' "$tmpdepfile" \
+ | tr ' ' "$nl" \
+ | sed -e 's/^\\$//' -e '/^$/d' -e '/:$/d' \
+ | sed -e 's/$/ :/' >> "$depfile"
rm -f "$tmpdepfile" "$tmpdepfile".bak
;;
@@ -583,7 +686,7 @@ cpp)
shift
fi
- # Remove `-o $object'.
+ # Remove '-o $object'.
IFS=" "
for arg
do
@@ -602,10 +705,10 @@ cpp)
esac
done
- "$@" -E |
- sed -n -e '/^# [0-9][0-9]* "\([^"]*\)".*/ s:: \1 \\:p' \
- -e '/^#line [0-9][0-9]* "\([^"]*\)".*/ s:: \1 \\:p' |
- sed '$ s: \\$::' > "$tmpdepfile"
+ "$@" -E \
+ | sed -n -e '/^# [0-9][0-9]* "\([^"]*\)".*/ s:: \1 \\:p' \
+ -e '/^#line [0-9][0-9]* "\([^"]*\)".*/ s:: \1 \\:p' \
+ | sed '$ s: \\$::' > "$tmpdepfile"
rm -f "$depfile"
echo "$object : \\" > "$depfile"
cat < "$tmpdepfile" >> "$depfile"
@@ -637,23 +740,23 @@ msvisualcpp)
shift
;;
"-Gm"|"/Gm"|"-Gi"|"/Gi"|"-ZI"|"/ZI")
- set fnord "$@"
- shift
- shift
- ;;
+ set fnord "$@"
+ shift
+ shift
+ ;;
*)
- set fnord "$@" "$arg"
- shift
- shift
- ;;
+ set fnord "$@" "$arg"
+ shift
+ shift
+ ;;
esac
done
"$@" -E 2>/dev/null |
sed -n '/^#line [0-9][0-9]* "\([^"]*\)"/ s::\1:p' | $cygpath_u | sort -u > "$tmpdepfile"
rm -f "$depfile"
echo "$object : \\" > "$depfile"
- sed < "$tmpdepfile" -n -e 's% %\\ %g' -e '/^\(.*\)$/ s:: \1 \\:p' >> "$depfile"
- echo " " >> "$depfile"
+ sed < "$tmpdepfile" -n -e 's% %\\ %g' -e '/^\(.*\)$/ s::'"$tab"'\1 \\:p' >> "$depfile"
+ echo "$tab" >> "$depfile"
sed < "$tmpdepfile" -n -e 's% %\\ %g' -e '/^\(.*\)$/ s::\1\::p' >> "$depfile"
rm -f "$tmpdepfile"
;;
diff --git a/diagrams/netdata-overview.xml b/diagrams/netdata-overview.xml
new file mode 100644
index 00000000..d8a0ab1b
--- /dev/null
+++ b/diagrams/netdata-overview.xml
@@ -0,0 +1 @@
+<mxfile userAgent="Mozilla/5.0 (Windows NT 10.0; WOW64; rv:54.0) Gecko/20100101 Firefox/54.0" version="6.8.10" editor="www.draw.io" type="github"><diagram name="Page-1" id="6533187f-1b6b-8515-a257-c05e29b4d991">7X1Jc9tI2uZvmYMiqg5Q5L4cvZSqKuJzt6ftnp4+VSQSCQlVJMEGSMvqw/z2yQQBCkuSBKnkIsp2hE2CJJZ8n3z35QZ/mH7/tVDzh095YiY3CCTfb/DHG4QQ4MD+5448rY5ACNDqyH2RJfWx5wNfsv+a+mD9w/tllpiy88VFnk8W2bx7UOezmdGLzjFVFPlj92tpPuleda7uzeDAF60mw6P/ypLFQ30UA/D8wW8mu39YNI/cfBIr/dd9kS9n9QVvEE6rP6uPp6o5Wf398kEl+WPrEP7lBn8o8nyxejX9/sFM3Oo267b63d2GT9c3XpjZwvODf5am+Hv8p1szBCYqtoSrb9P9bJLN/lq9f1gs3FK/cz9Ed/fZ4mEZ3+p8at+kWWEecnvRu5lZJGqh7Kt4ksf2v6kqF6awLyxd0tvEvpg/LR7yWfWyMIsit49bmFv38eqKzX1TuX729TOVi6eGHvZnc/cym1aEe1/9/66cr2gP7BHVvEmz78ae9P03UywyS9H/cQ/5OS+zRZbP7OdxvljY53j+wrtJdu8+WOTz5sz2XXcB9CRfJrerZVjaJbQPsLALXK+IKkuzKO0LxBjCgFSvKBKMYvtSYKEkISwiOBYRhIZHkpo4QkgLHDMqONO389m9vfiQeDU93b2a761D9Ur9avKpXdYn+5X6U0bZ6ifN1qMQrw48toAs6y89tDBMahCoeu/cr8/9DB9H4xV5mrctQB0TYI/ZX9nzgSjNi+j3/GsPQ2wvDO0ESHX379f7+UM+yYvqVM2O3oShARaHcA0KM8AczABHXCS6DTMF0ghDhBICFJI8DgczCBsG1uBMcDTAmUBDmAl2+TCbmrK09ImzIolm+SJLLY0dOsoe4ISBcSzjOCWKUB5HBL5VBPIVAjmmCLQQKAAEEbX8RnOaxkYGRCDFsANALIb4s9/y8DkSAoADySvqS31Tk6Vpg7EFgMeHbGG+zJV27x+t7mSX4mExddiF9mVpBeRfpkXlD/wj/MURK80mk+b4LJ858NwXKsns8rW//uHO/nFft1SrlSrL/S0canBo+30rn/H7RJUPDhfVZeemyOwKmMLdWeboUwEkd+8WbnWxe1shsPqNe7dWW9yb+4kFRv3aAiXTzZeq52kUKFxf97Na2GvNqosjAMOgAXelHkVDMEDIoQcNzcE94HAk5rMojGkrUc+6E54/zNP5dAf34ftxn1eiQ1UvBWBJgpVpCzegk8hoKg03EiexCMhaeqKNED5EE+IeMOHjyjY2WdS7276+X1RPuzoWNwfeff60PlasD87nk1qE2VN+sreU6bL5mlPf+6dzCPVcJcm+NYfKhVqUiVvpIl/k2kF5/dPW1zy//Onrh8/2d5ZpIfDPj59/3vTDEIK8ucut+wZv1xvtAWt2mhXTq3ZQhWT7fl92zrQwcdpj5/Z4ooxIdY9vEx/f3sb0AwBfyC7ypSBDmQo8MnWtDR4X+QOcrkDaohb7zzJvPojKainfOSnD5t+fP2zO8sVyXDV1Em877H07pAXVjxZysbIIQeAfprXRjgfr1mWiv1vyfsvMYxfkOIWMpQRDCKmCOBJiK8b3RbJAMWbMg2RqRELGIDmEDbJ2utRwZQx44OqxQSS/YBtkixrwTRWzrHyItNIPZjvB+X4OlesxRSCs9AXNERCqrS9gTiJIUgSNICmGAX0uEPZsEUggHSBRePhmc+y1ATFV2QTFarYDg2/VHIZohcGYo4S0MYg0imIGY82g4LFhITEIeReDCLIBBqUHg1ycQnR7dMLDpTnxSfN0biX57XyyvM9mBwj0nXprc6icO9wfepd2ez3mxV/VIlko6afW9VdnPsttTfNZtsiLri405n5CqDMV5aLPNeW2cZSVIXaoGtNWStBQebEcBGntU3cSFju/epCNSii4xRgTyYAVSbS7ZwGAQ0uzCYq1Ny07ib59gk1rZUw2n2YXv28rvXVeZJWa/+Cw+2Pj3j1fPPr986ffd9jZ6EU2yEVsXojkRW/YNuG/5u5ys3zx4LS3rdhoAWNN41NbrzifVyGWKrZn7zkqzSxxyJrlyQ5bB0K0FVnhPDiScaw8dq+B1vLlJ7N7ad/b7fFPUg8Ij+6mCQGPf5aO6n+rI72P9lfR58Ld5oNZljuAALYD4bW6wEVlTkhugGlH16RUMEpBrFNtbd1E43DmhAV6B2KEejyBxGNN0EsO726xaM33bLodXHy77vlasSVX2BKYG9HGlkWVVbdJzBLB01jTgKYq6JuquHHStdDFPfEVemRT9VjgSvJvRlu1bSu+2Hb16JXiy+lCFl8xgyJu56YIjtMoxkhhJiSIcRrSFQJoH19DdxwEvhyoIwfwXgSwjUl2TkNavVBTk1TZdbfTZAfa9uNmV+Sbq4Spc9lK0vEP6xhFqSE6Saw+mZqQ/mGAekl5yJOU18TaOv7hILkqp2d4ar47QsHoG0VgRTsBMU0IbYtcQaCMlFunRFOFEhAwowH1GCIdRspgw/w6zuEgAYpBstTAr1fvjxckTwVYJd4z7tcu9SMY97uX5OX5ZAGWhPWXBJxzScglLIndy5e0JvgS1oTA/po0jrKzrAm6iDUB/TWB51wTeAlrguVg75xzTcBFrInorQmQZ1wTfBGSmMH+kngMuJMtyUVIYtQvuAByGN8/2prAl4uZfsCGur8+Jzur/gQCEumumqBDWU2px9UE8f6rNi5xd3ekzz6zL9LXpO2CZJ1wODZbd1ys8bF+fncPs7yYqsnwLvYO+1Lfs/yUF/dqZr+UOIy5W3rIS2cs2SUF+kEV7jVQM/dxkk3NrHShoZ93PPAJI5+fzDQvnqJ/mP8s7ZfsDS56oQnUguWLYp2ieV//2reVUsM2xD65jAE4DgPyZVty7uE/8iRZ8R60vyw7eJDxsEpRKJ2DY+/M+T4Gj3KD5rsrrHG7FtT3al+9+/z7Aff7kpv15ilYkLlfp46tLFb7PP/m3EZgns1NuefyhdjCv9SrVSce9UOLofavJ1p8GtE3RvIJz3Y9RPKN8fn4jPc9kSVGbYNs1mwDpqaOMrO4nHfBNNgG6wMrjACdTyb2UVYFM6ffQt79/pOLhTu6JEmUzyaO1EprU7pdvnDJHgvncl1rByOk5WBPXbg214M0Zz4NmHkwzfdP+A+lzI3k3f+TzZbfw6DsZQllng1wCXluP01VtsK8izQUuf55z/sKITJ+n20TGdvjGScwmnt1i4J5/JK+sHqY3LQBw/d5Eo6QV+rdX3eFMe+/fNzNAs+VT3rx26x8KvVicsguOynoBea7QX8srwg/I8I/Kf33Lz/wfe34lvSM+JYefPdWoJ0t2+4Jse4wUWUm9bNg+4H4aZYk7oz
rBN3p93vXWux21cwLrf53p3XLAG6pu17VbwzKW8orpBQ1zlyHjMItzftZvtAP9S3sny7uqbVO00SG0mUx7iYpCTxMCfF5Uw6wzobe3M22WIV2z+5y9ntU09Ftr5qUGxVb32YKzAHX6hl4tvieHR+HMMY9GdOIZZqYdOHhQsVyNqtKNhwZneFWGJWU1ZOUmUt3B/XDbe2O0NqGlXulw3sKY9dPxdUX3AaY55lzUtpv0/c39KPblctFXtZeijGbdJUtU66axHx1bz5G5GaY8x5CqpNu3oHEQ6633gmdBL7wXvuQotzv2W+6VFyqLP9pmcxdvpeen8Po8vXwgC+ytTa139jcsCMApCHgB0ry9cFLba7hBc0qinN7CKzDAzi22sizcDgHin81M1OoSfT7LM2jKHpenU7QaHvB/J4uuZNUxNF+pF4cL1IfJioUGOdN8vXFMu/qBv8sLwr+zaJ1wP+ykOk5wI/6aW9A+DzSV4z+55zvi8X/6hbPBP/NOfId6L8s2ngO6GM+gP7xstZeos0UZpovWhaVW1pT2Afd2gCvhYDnbBtQtNt7faiItO4gBp7zc6qEFvvBZJFN10k6z9346rh31Qbv7FXWhdEm++avs0bbG+O9hiA44/CWS3tKQATkvKes+LJPCfJgVhwnQuJ1IJ/Hwfbncjr/ffau7WaD1C7dTe1nQ2zlZ6u+/5tR7sqUesjsc8Rt6hK3ua+cx/kmeeJMqDBaa497Se7xLwCfLRbCrYrEhZMdrd2rGL2E7JNs/tsGISW08QupWFBCA6WsQdALicrRNjcIQGemGbNAR4xQRTWAUeOxOkvASLu6uPmBntHzBkF4L/FDehpeCF+9+P6ZhyOIOKKS4kdU5FBKg/NFRYaURj8ofTRKi35g86yUHlHe8IPSB1Ja9h3fZ6X0iBrSH5Q+WN+Sl0TqEXVbr4HU4pZgH7V1goSX2k3Rh5fainY++9i6sFHlIgwQ+skNnJwTByNKGn/g4Cg4YPCScHAl6UwQvD4gQNCMB1sjYdhQ7mRIGFP4/RqQQG+RHALh/O41MsK9djpiX4m9Li6U2P1+KGOJfUgz0vMHPNV8Xl58s2pXH1PXhVXxx0vIvD5ro+p3lmqjGsyj7WNTXkOPatTvsXLiHtUjOPKV+NUuVfxieD7xO2ieciW0vlTp2++odEzpu5vYmwuiXiY8BsL8f+dfdsjZHX3uAyUXLbT9TLv5p+agxKKzFoh6w2gerIQIow16b11ODPwqZQDsV1AcTwacvr3s9Kn8z2S7Ikevs5u23Yl3AiQyJt052ylPI2GI1lqqhMCA3bRZ47Jb9y5maKhONn1Cu3ULr7R7cfmfpYXVdnjtN17xtcBrNWvZUIpFe9ayYARE9umVxgkHKQ3ZrH0wDID42mQ2Jkz43sSnh1dhkmzHIJOVJXZ18GIVvFIJUNppfO3GIrIEk5QkqVYhxyIiILrowr5mvdLXFOOSRwFsGxWfl4v7wuwCGNoLYNfTfN0FpBwGWcxwB4NMsSgRKdAxZSDVAZuvr+fkrCUoGtZIIewbzSlfJwaNNYgsrUujCqskbwUiv0JOJ6siOPsyiTW3sCLY8rcVyoxV2xIIkhiKGKU84JAJInucjuBhhG2NvBc2x7oIlLnZkc6I2jUVjL9ZXscdr4NAppR1ZjulxL4iBqdYJhqHVOdEvx6UQjGUt9w3eueVanMWPUX+/WkHBLcXeF0vBKt2bQIiSpTszjphbho7J0phQVOWhINgP1ncMsJhUeZavbsGlS+bp7tYIHmr+IMr/AlIuw4TzeNIwlTrWGgmYUCTozecE1KfxYE89uwBbtaLgF/+bT77w3XpWJZ/TPL7HUjcb+rTq9D3rKSVNZuDKew4TlKpI0mFNTENJLGRASd09osZsae2CWHhkbSvFGfOsrVE34Gv7TWrrxJf0j6cfZkgps3Kc9LYEzJOVAQSRmViJEoJDIivpmHz2mptIo3taJFHkTt2ffex4GWJqdWOGZ18v6jCFcnRFYOTmqC4K0eTNNJSImgSDlHMwwGwn8Vg9bhh8jDknqmJ7JUatLP7bLaDvbH92NsVAbCKfEGsMYpR25al2hoSFCtFTZok1AT02/UHM2Hu6e7iqzs9oAvd6QC4cYzsc0+qu3zuRq7My2qW7HZEiit04Fm4VaEKyFFsMGvzu1jTSMXcyceUWziGjIThvuXgmz/p9Z3A4+LtKPXojuuYWXJIQXorjebTuuvKu0I/VJ1MjplIWh+I4vXNb90bL8om3dwYZVMrlX4nzkFuTACY8t4sWeprnYE9TJEfmSmeqlvnu4kqpi9vo/Cb6w/kmhJ+aiVGh4DoQ3XiaNo77SaMyhf1HNzcMnNTk81TYJT2ugicGKM7k99kvYVayW8fV62p/mGSZd0uHb2MEOtEs02paacgRH9mmJcQ3BeTCl8bElh8/fb16+eb/myg3awgxA7/xy9fvkb2wtE3uGNvv6iv3GVCqufdp94c1jCdoc+vI90MRliBcYf+li+ytG7/tp+sCgFQ5URkNOvew1agbo9PvEohxEjXQUvI0H8GfWGoMPH43ULoctojeLuQ3TQtyFxN86D/2OYSqk1FV32at7uT+TK8Q0Cgl3BPpMeg87aZC1obeXrDR75NN5WsegbeUaQwsFLIOepJ7TegmkWxEBhBRWTCAjrqRdPXoAYZ8kzFQ954oziuRDwD7PhesHstvqgqKxuSNIFKtV2fENGIGqqxZpwrEzCNRzYlmjWmsPDo7cyjt9NLzuI5DFNvNeTjKjct7DSSaaI7LtDURKm2rCyBMOYoYOoOBLKHu4bmndwJX6fH6+Nl+1WYXA/uUJWhDRmQqenmUmAQgTROEmS/Q2MRUIT2YYc9sGu+0zEqg1Q47VTV1zriW9XVQ9C4F07GHleUtyvCOjM/ZPnruqDpB0VfIC363kVPBpTwGNgQ7k/RSxINELzVdGK8CssaKDjsZAGQWEaYGwV1yjRWAdM5B53LEBvWVUDssa8IO5FwuJwpAqFbmp6/jp72VAPmmUR4ql4qEFxJ0wRw6xvDd3Zai14Cxnlp/Uq7VdY9Iy96U8teZhfjw8xCbz1mOK/sWgU41fiHwtxnZXWvx+ta97LWetOn6HlG09TMlrtvNXh+z/SpUcXcDUTZwkx36GJwe/7bq8zx6cdYfaaSt6kUP5HKAy/Helo85v9ST+96LBI1LBLB2zCx9DaPLfNlqBCV7AYPKBmSmnkoTY8h8uD1TmE4v8zDqNvHh9LTNWAeUvp6pzCcn9JU0LNRmgoD41jGcUoUoTyuG6q+fkpfav9H1uXf0lOicLT2jwNie+eb9YjdUEcvi8nT+8I5vJz6tkNPqvxSFRb8WtM7Cam3J37tvvJQ4e7uDt3JMRrVkURvv1UC9GSH8Gb/dDzSR0yO/PK3T5+32QItjbtannmRlWa0oWCV68e8+OsoGvzGupZm2PRdOZvOq4qW22nS1eqHSN6eFW0PZPOyzWwqh+ZuHB8hVc0HbXAn7+4C2QI9lCJfOTv1spjTGAMXNLvryqdp9htrQI9/DGIfFEKYC8M9igaEf7BrOqk63/
cAYB9x0ZMopsz+q+LqC26B5nk2W1S3R9/f0I+OSstFXtZEGQOPVUiktHs/m91/dW8+Rj5LLgQpBuNU2LDNjk/uH0fsD3dgucgLNzF7nZq8+tcjEtK8eFRFMrJJ8pXRUcKu+sbksFW/z/wO2qW33Qi7pknhpCxQVvl9cLJ0bFPtnlPzGA66AGPNh/jdnn0eTthLxrHyOH0MtIYEHyPsg/COXmtoCDzZfRtE+nHqo4YUGZGbbg2Ib2uLwMySWux+tHqg/utm41wOstJci6f/6xjGrSC8OfDv6gCq0hiqA59NkdnHcwtfsZZyoYpFc5nc3ltz7C6bTJ5vpPVurV88w+Ff9Rr7rJXGxzsAzgf+Ef7SKCyf1cLe06w6KwL1RevHc9ka5nu2qB+ufvfvzrv+Y23ElLV1Cm38WheENUXs89+bxSY61p4Hk9ybrdhsIc8HvOZYYSZ2638zndv0gbG+wmcnDFrQR73CQAZ751g9cv2zZ0wPz4R7U4+Y7FVtrNZlcCYLH/XU+lotrrbccv9C9fuNd9avuRN8zx9YftD5hX2xuuvn7b2m3OlTZcqJ0n9tqy0agnC/rPBryZaR2JludybWsaZJVYzA6mwZQFmUpEIaoCEwTAVsfga6WForoZ2iJ09TqgMshJMn8M6X5UO8nEzMYj/8vdlM3lX/UQGxSTvZWpjDSCapTmOYIp4GzOQlvXE4GA8VakR9LVsuuSlajb8kK3VeJHuBj1xnexZRIUtShQRp54hTgSIjiaE4NTHUIdtRNWMvGs4GPZzNl236KjibpU2RLBdP+2ELXSO28KplrcIG43a5lZAKRBRrmVjBmaIkYI4p5j2LrOmx0uZaTcz0lI1/QmBrYSbmvlBb68GH0Novw/lVQMsqZK6LnjUwCeSVQNS8hhbALJJKUQssAxigAaElu20cfQqZr4VZ87OLRtZjNsny/XC1XyPkV4Ery7JWXY41SGXc7lYmgRAR55Zh0ZQCTIJ2Oe6mkyDgyYr3uS5JkEq93S6kRuxealhoHRM6cXuDcDPdGe+mS3ttPW93A3mMKEQTTm8R/N/5ckDzgBGEiUkXh8UPEGje1zcGA9ADNgGxtYY6HDNDThUToiMyvPrctkUT/5qGMI1HsPSwvHlVva+VMrDDm7WbyUFjJmNoJFJxzZtH57Z8+HB3VwX5PQkA68/6mDtKk69eIgtrtNk27nx9yPeXBCNwNyJysJnXbwzezPKZafDVAt5AJuypBfQlxrTUytyukKbm2R9TNbNQnFa06dMfAC4/shPRuHGZNCmFngJpX8HEEeKUVUTRU2Lg+HpUP7qrMSiqu9g9SXjvppDe2oVh5DNAccXhT1lm0/nE3NovmDR1ov9b9UY9mtIS4H8d9dqDFe6y0ecqj9ssP/nCdKPdLx5HXTRHfvrws/32h3z+VF8YoGqw5V4DoS/oCT/kZVWG87VU+bLMytf7JD/tuPWXnV1X60TAol6n2/vikDHgmyzh3mbZatrS7T23LkfvDSCOJOuFLKmgt4wxTgQgTHDPfEWfLQrFifIZqK9Ee0+5463qe7AP4/Bjvhl3A6lRi2WVeGW1Ufdcs8T96zpjL+ztrj7K05sqMavLpYcSzMeRfVLNs/PbuF88VBedm1lUJxSgD27TGzWJFtm0fjs3RZoXVt9xn9f3/dC0SG73Mt57b11e9lioXYBlP3A/gD0VW7IpwireI2rTW1k6tULdIkUnV8af57I5L8aTWzN2vS8kNwX3QwD9WsrRqSmDfI7BqTbkptzsTO0YAYMR9T6H21+bTKBj22Xqv5Z13q740R8tbnQi4wv2Crrk0NdKfNbX/gn4uwnMRvhV34ZjZzWRRksao04MGogkYhQqII00WqtX69hhPccOYUMB42t809QshcXdiJLvy3Xs5GmaaXPr4FWu/h3SHQGO3ouT8ZQOaTnw+IrDTJoZQVp0QplBgft7OpJbWn9zRF/9/0eqiunwpj6+I1bBPxHpYc944p6KrkYb6YiTo2zrEdVbb0OcrNpeJkTz7qRaykUEgJYmYUAjKF+tOBn062N8qMd4vcgBGjAOgTcuUHD9wCMV8BCnyjX2bQWoEgQjDJiKBSEpj19vgEp2UxUkGHZ7Yh415jj8boR99MNMbl2hZ932ypE5vaUcEgghRm7+cvd0403mvjq09bRHNJ+5z7raXGcHnGceKJ8/r+0zA5ZdVH67oWuv8i0+uTwJkM3KhVrdnlsbN4MKrN7bzwsH0adyYaYrhjO+M1hnNE3kTrOcz/PC8aLhXTtvW5SYRZ2u4ndGds64chUOTvRfU+T2uKv2z+6XhRp7ujQvKodjWblP5+tiw3LtlWxWYdMSDJ438z5py/+5iSzGsji3s6bVjMPmTBVdaoq4/R/nVfmsJZ/9589ldeMwKo19dHf4J51PJs1ygoVblG9ZubR89r/Vg/08kmr+p0hdNtHGB1gtpoNn6qo0weIhX5arVZyu5zbOTYWt+n4/NPCbGeP64K6f6MbtQvDh8z9vag+2O21p4T0xFZ2L8V0ofI9X3VV/jFLvYY3T6LOtO6lz688P/ul9c9f/ePdpjaSZI0aSlX/Z/3635/q7+2Sx3nWPD1n1bNU5i+VsM+RGPeCkzFvLW4X0djzuNDZJUjnHvRvngw8Q5vvCjY7d+qMxa92JF+zetj/9+vl/HLKtZH//8z7r1Ivjjv7p7e1tTcQ1L16RaLVrHx9MYbYwyuMFJg4PzkERRt0aeLKlpydRqHDc6ad0P5r4j0nem6qJ3mqNF6m8oHY3avtB23owgkZQo9RqN4nCDAfsyC26+jxcx8E6rfyRB2D9ct+LymrfgjmrzN3nSdzFHN6vtOt6MIer6i+Lg0QkHYvVeU5wzBS1AhjCkOnuRKAe5qjHaQY8HtMGrK8NcrMy6cFte0usa4WbtAagfWkooETFriBM09ozp6mIeMKgMkwrHbLYUMhuviVEaOifh77ecGHGnA6bbo3wljk6zsc/fGWczewlay3npo5bb1wU3itlgrhxpnT6lnjK5A6YSnb6DZeV+o/kQc/72w69yW1nNQtS+yXtDuwkzkMcR8QwA2KSUEPjjdtuDdrd+66BlK8RjgdQ+DhFTN5WdiFm1n/87UO7q2M3i+sSbZHnYNjmApk9yNuw1aGeGKgR7unZRWnN7LzoTwp6m61OrEooK2aBNNIEdFRCSiPEBREgVjw1AWU07E2GgRQO4wqQegIL5JKHZG5so6ofVLEoVx1Vl4uqj2oXfDtaTRxbPcC0px5QMPQ7eM3Cptr+yuiB95tc+jpqnWVVQ4/jFMUaVGpB2jgcqIq4AURZBUFovHmnNyryiK3ejmmdHDh/6zZLbvkP//n5y2hH5dc8n2z0HJ8Lmdu9ZRehgUC8WwPZA0nfu4h5umlzuh3dPsPYdGfkT2qul/My8cmM/ZwKr4NHMVK1kEFYE4J0NzUURpTHlAojFYpBQAcV7+bWWMUDDaWfrz/RkWbYNqfdJ7/mevTRqvWeQIQJCbvJwVBEKgUwThjXsULhEGBlYxcBXAwR4BtizPf3UA6pfUA21RVRG1XUpgzFnSCIo
CmNMNQ85cAAu/ED9vURrE9tX5RtSO3GZ/YiarP9qf1aWHfVsxAlMTW8w7oxN1GMEsNVEseCh+xZ2OvlDrmngYLw+HoPmLY2JOWI4bGvlZTVsGAkNUyIaJMScRpJSTQXVMM4TQN2ReK90CT3uZx8kyUDpLiSERV418qDZdWP+S4mGtjXut3sllMYxYylKiGcWfMwoAeI96I0wkNt6VG5eIDOJ00T97dIbbu3V/6+lIGUdZo0JpZNKxULbJUvKUjAtAPRVLiu97YcxuSkZ2+z/c23YaLoAVV310NtWtlTGMJYkg61ieXkDEhmgIo5QAH7Jg46DPioDaFHKh8QbBySe1yx27WSuzKeMDI0he38DmHtpQhKbThKJUZxQFZOBtq0Z297AoEHTIQeEhu9ZWKTVTJPYqiOOwo3gWmkLPeMlbCfURlQbhPep/ZQbvsccSHkNj2g4O16qE2roD6WVBnT9ovIGMSR4KlBSlMBoQ5HbcbIbmr7tLQDslGH1H7TXjBalZZhJSlJujWNVidPY20sq09lQgJaYJz1LTDP8HJfw2MewC/ibfPzdqjNVtQ2KVUdL5hj7JRgA2KOpRXpAXXy/t6Wnp6n0OcGO2Dew3GbWdcHqglmv+dfVyd9rhNrLcPVhElo5TbF1OVOgq4wIBEwmNLUEBaTkGo97rZSgaJxvnXCJB4vOUF7A8YTJjlD6vhT+Z9JF03sKmeC0MolgHWqEtFx9ymMI4CtlAExhloFVC1Eb3wjRB5h422BLfdnP2OCbiObpFyrABIrBGiiWYefWMRHJmEJTQGOYUpCzhvqCSDhGcDOfMplgKDbyL4p10rtKlKDYxkb3akCoTqJIE+VxAnmKmQViEBdF6CUnqm7HtnBAgRqxjRK8UzB7LQL6LURGHYOcPkud2qaTdwD/mYm34wj9IbEmE4jNvenlXezbt+wTy+CznzNuvNBe76m+1V3vubzqEt60xp1uY3a7aGWzQZqz7CsSXUh7Q4E7bkhAerNmRzb40D2JvNJfpyBlf07lrVRdOAwyeE+GGFRDxto7LcNDmygUe2M1mxZetOeLNuCK2yD9ZbTPvabzdsfLjsK1NSDaXRRoOZ93zqEB4JawP72YONQfQjwRqS4nGigjGqPkrE4q5DslgTKW+rmGbcHvszywtH0/Sxf6If6FnqdehJqREJ8wBcoxszVYnh6+EieAM7DSFaMum4bhoeKdKBSjSFhrzebhVXuVkIh6CYmSQniiMUaSEONYiSgc33tNn0OintCKb5slgCtBdm1ZrNI7IpL7jiIjQttt/MbLFUjpIHRhGnNRcCMwn6YRHjr7D2+1EZGv8iX+qYzClnlGiOIc4m7o3MRipIYGAJpIhMQ0JTtj/OypuywPd/RTNk3nLtkqV2ZskSoNO1mqlGjIxhLQAwmiLCAnnMs4W5q+5IOZQg2/aZzl9gqmyXlMBYdxwUiIEoJwYQDJZUKKJQpH1DbI5Q9nosQe9vb0+7tULtKZiFWPmPKupUAMEoFYoBiqVOUhIyB9jPV0NBP5R3OGaLTKn/TPmiGV4I75Rq2WblMUxgpzlKoTZpgBAPGtAgdQW7s2d2YByA3etPkZisDK+aAq37IgVFJklimIoYBtXLWdwmyJpjUCTp56s5D1AvwEe63KyY3rckNUt1JVpOU2n8AwRQRI0J2O+O0r5Z7rDDpM8JCUPtaXSOWVG4VrMBDIlWOUWvY2NNxEiXAvkxMIpEIKJchGPhGwEhOfUDLiF2DNMd2lIHC11Fm1QW3OdeyOZy0Ks2Xnqaaw84zL82bee5zHK3uKYncqIVMm15vFL69N8qrKTrfn4P0Y9SgUdHapl6YwdBjkhT4QZPCzhLBIQh2Yji3UMKxwZrqV59Nkdk1c27+PcKSmjGREMQIVVQD14V/9aV2TKcJ311ITEfgrrNQNkTdO6TDe9os6I8TChfS4W/YD3GWijkO+vG6poytjVJPDRUJoMyIA/wQr0KZOUucZ6CXAjyUKtTjLj6gd96QlAf4GF4FKc8S5xlYlKBZzh15TSEmpDS5tq9AGxjkc4RL2hgl8hsGdikivzeWUsqevjg+i6PPShpQhBf5Yr80unPCjZ8bb416dCF469sxB+ONi75B1FcvAuLt9aSrnR1vjVS/FLzhvlgEB6apcYJ7Z6JHS1MTPpddx+Oj10B5dupgwxKQmKGf53m8SaLMtDND6fK7CPtSlX9h7q+7n3yhav1ObjXN93DwwaYAo5nUzDyueRjG1bLLv+dpC2kXazbK5Ud8Lr/BjJ71JBTvfJ7m2Oqie47XCXPHrRlbwN7sTTVsKs2LqZp1xgr5bnH3TKAQ3suvDyaqrBClJ6aM8jRSkb3TaDUtNrKrG6XVFKi2J1Ns92Q+PmQL88XuC/f+sVDz7j7s5X6maYq09kmRhMWupjNMAkLPPQTXwws6Aw08O+OA7iljnJBihBPyRNm8fy6n899n79o5vZDeWuWrTupFbJXUW33/N6PclSntcTp248/6vbhMXzJI/kaemjkfj6Qhui/0By4e0kbpwsfAJjN4m1WDD2eJKWoeVLMdd9x5HYw9OjVWeYnULIniZZnNTFlG7mPLe6xEuYMuieDOOS6OPfcVEt82SifZ/LddKuYePo4O6MjQVQWlB3Kwb2Ic5OHYA2J6WUye3hcWGaaSsNtZeQWetZ0wmkBrjHkIZKmD7uQYArW5jVWEK24TgFCipyuvc5Lag5a4J959gAo1IJU8wEV8LQEBUSUA3wkKaZyYTrsOwVSkCUsYUJLBJKAXWfYbLxFPQNwXm+zX+bxAYbYG7SxZzVBtzVPdpt1N8lmSz26nT1Gt0N1mefVJW0tbeSs2amlXCyPpNLs7HRtBiKnyW00NIyt6Iy6kTCBBGqqmrUOf04RQMrpmGPKUoMDmrB1+37fwQ3d52NOqYaPssDVqb/8sDzDGPGbX6NyQnWbXs/9g1Q9+o/9gT8PwZXe4mm09e3q2Cx3+97u1zl6nL7HIhoK7+uMR3NT9HSOcw+8h0kwhaO2hdRPSTpbhcfrJyxGpaK+0/PI0Jvh6jH3jmwLDgo8jT8PSy9LJspacLbcL2rFulA/ViaOPvdOusfNGx2JJN1Duzsg4gSImLVEsDdYRRxQTFMc60ep4ohj2BlPsIYuPM2RPXo7n53UyEta3pT2DB45Uqi1HmNI/aLdtN0p6NuKtaxJ+UO9A6vW7JFAwDC8dWYR/+T+/2gOxSu5dfjVIi0qeu7NlOpAw/9XMTKEWLqP7fX0d1IEReqPSfFV9IiWOQZUa1oy+EwmNI2yMgpDHcUI3GdbdWCkOZCb0OioS6pHvzCPf0f4lDWPEOwQjMnx+cJmtbR+65aNnMBTCMBI1v62cDrflQ5+FvFHfHGNVk0wSgzSBnXpFA2HEiIk1xZwxHXBSlQC95Jc1UHaUn9Agkd8zdFw1U6204yM90LE3CjoOVqCDiqWd7PQYgwhLCIkEnCIcsM9v1Q22Azo0FEvM4706YDzaHqCzi9bWliwg8HfXXjFz/sinfFk4SOgimy8C6VJWvJjsm1Olmv7S9cUjp7lFVtpNJlHnis9w
3a/n9BXBFVVwTZBJVKcbCxY0YirhIjYk4VpvV7PsWzRQukJ5VTDsymjiKST1BUkPKAoepXTBN930ga+aPiQk7TUephBFNIkBx4YjIwPOOVj/5HnOwRABTcZMxz0fLnAahkWZqcomkdV6s9RS0mGgz4ng9ujKK0vPORBiq84Dqd3CSUeGJoZFzHIjK0eVUuty9efUENA3cu4n9ip9g2dzdrm9sUw359kn07xlu9Rp7fskBG1K2z1GqIvjbkEA8nFT3/SnIKGu02uohYrjbDH9T3+fvdEwCeOr8skUCt5p6yERlvatAgJRpSQKOXAM9RJika+5oq9PZlNTeZGgq3n6fFk+5C6BeStbf6sKJqh4OeAAm6Q7Y1gmEWI8ZRAbSEOOh2e94C9shs51vHSeOTkHdAQbpTCiAzLtrlKwkwoMiFMVp51ugAlyVVJMxYKQlMfxsdNwkdfnGQB7suv/8Y1W9PmHDyguPr0kvc//MN/n31TR422dJXkrvI0zKtyD30lIUBKnKiJS6KaphEARFAllmiVCkpDz02WvogChYVgM+loR0KM6ewKjbpYnpsJcWuSzbNmXpmi/qNipuyE8Pj7e1jdeP/P/czupyBf2oiaJpibJqgIF92sHrPrLrgQq72BlAAwPfDbXIaEeVjAZBsAhREOskCC6vnPJ5fmi9fVfXcTok6Wt+9H/Bw==</diagram></mxfile> \ No newline at end of file
diff --git a/install-sh b/install-sh
index a9244eb0..377bb868 100755
--- a/install-sh
+++ b/install-sh
@@ -1,7 +1,7 @@
#!/bin/sh
# install - install a program, script, or datafile
-scriptversion=2011-01-19.21; # UTC
+scriptversion=2011-11-20.07; # UTC
# This originates from X11R5 (mit/util/scripts/install.sh), which was
# later released in X11R6 (xc/config/util/install.sh) with the
@@ -35,7 +35,7 @@ scriptversion=2011-01-19.21; # UTC
# FSF changes to this file are in the public domain.
#
# Calling this script install-sh is preferred over install.sh, to prevent
-# `make' implicit rules from creating a file called install from it
+# 'make' implicit rules from creating a file called install from it
# when there is no Makefile.
#
# This script is compatible with the BSD install script, but was written
@@ -156,7 +156,7 @@ while test $# -ne 0; do
-s) stripcmd=$stripprog;;
-t) dst_arg=$2
- # Protect names problematic for `test' and other utilities.
+ # Protect names problematic for 'test' and other utilities.
case $dst_arg in
-* | [=\(\)!]) dst_arg=./$dst_arg;;
esac
@@ -190,7 +190,7 @@ if test $# -ne 0 && test -z "$dir_arg$dst_arg"; then
fi
shift # arg
dst_arg=$arg
- # Protect names problematic for `test' and other utilities.
+ # Protect names problematic for 'test' and other utilities.
case $dst_arg in
-* | [=\(\)!]) dst_arg=./$dst_arg;;
esac
@@ -202,7 +202,7 @@ if test $# -eq 0; then
echo "$0: no input file specified." >&2
exit 1
fi
- # It's OK to call `install-sh -d' without argument.
+ # It's OK to call 'install-sh -d' without argument.
# This can happen when creating conditional directories.
exit 0
fi
@@ -240,7 +240,7 @@ fi
for src
do
- # Protect names problematic for `test' and other utilities.
+ # Protect names problematic for 'test' and other utilities.
case $src in
-* | [=\(\)!]) src=./$src;;
esac
@@ -354,7 +354,7 @@ do
if test -z "$dir_arg" || {
# Check for POSIX incompatibilities with -m.
# HP-UX 11.23 and IRIX 6.5 mkdir -m -p sets group- or
- # other-writeable bit of parent directory when it shouldn't.
+ # other-writable bit of parent directory when it shouldn't.
# FreeBSD 6.1 mkdir -m -p sets mode of existing directory.
ls_ld_tmpdir=`ls -ld "$tmpdir"`
case $ls_ld_tmpdir in
diff --git a/installer/functions.sh b/installer/functions.sh
index 36d10ec7..c847ba1c 100644
--- a/installer/functions.sh
+++ b/installer/functions.sh
@@ -1,5 +1,9 @@
# no shebang necessary - this is a library to be sourced
+# make sure we have a UID
+[ -z "${UID}" ] && UID="$(id -u)"
+
+
# -----------------------------------------------------------------------------
# checking the availability of commands
@@ -81,7 +85,7 @@ setup_terminal() {
return 0
}
-setup_terminal
+setup_terminal || echo >/dev/null
progress() {
echo >&2 " --- ${TPUT_DIM}${TPUT_BOLD}${*}${TPUT_RESET} --- "
@@ -120,13 +124,13 @@ systemctl_cmd="$(which_cmd systemctl)"
service() {
local cmd="${1}" action="${2}"
- if [ ! -z "${service_cmd}" ]
+ if [ ! -z "${systemctl_cmd}" ]
then
- run "${service_cmd}" "${cmd}" "${action}"
+ run "${systemctl_cmd}" "${action}" "${cmd}"
return $?
- elif [ ! -z "${systemctl_cmd}" ]
+ elif [ ! -z "${service_cmd}" ]
then
- run "${systemctl_cmd}" "${action}" "${cmd}"
+ run "${service_cmd}" "${cmd}" "${action}"
return $?
fi
return 1
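Worth noting: the hunk above fixes two inverted bugs at once — systemctl takes the action before the unit name, while the classic service wrapper takes the unit first. A minimal sketch of the corrected dispatch (host states hypothetical):
    # on a systemd host (systemctl found in PATH):
    service netdata restart    # -> run systemctl restart netdata
    # on a sysvinit host (only the service wrapper found):
    service netdata restart    # -> run service netdata restart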
@@ -142,9 +146,22 @@ run_failed() {
printf >&2 "${TPUT_BGRED}${TPUT_WHITE}${TPUT_BOLD} FAILED ${TPUT_RESET} ${*} \n\n"
}
+ESCAPED_PRINT_METHOD=
+printf "%q " test >/dev/null 2>&1
+[ $? -eq 0 ] && ESCAPED_PRINT_METHOD="printfq"
+escaped_print() {
+ if [ "${ESCAPED_PRINT_METHOD}" = "printfq" ]
+ then
+ printf "%q " "${@}"
+ else
+ printf "%s" "${*}"
+ fi
+ return 0
+}
+
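printf's %q directive is a bash extension, not POSIX, hence the one-time capability probe above. Illustrative behaviour (output shown approximately):
    escaped_print log entry "with spaces" '$HOME'
    # with %q support:        log entry with\ spaces \$HOME
    # fallback (printf "%s"): log entry with spaces $HOME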
run_logfile="/dev/null"
run() {
- local user="${USER}" dir="$(basename "${PWD}")" info info_console
+ local user="${USER--}" dir="${PWD}" info info_console
if [ "${UID}" = "0" ]
then
@@ -156,11 +173,11 @@ run() {
fi
printf >> "${run_logfile}" "${info}"
- printf >> "${run_logfile}" "%q " "${@}"
+ escaped_print >> "${run_logfile}" "${@}"
printf >> "${run_logfile}" " ... "
printf >&2 "${info_console}${TPUT_BOLD}${TPUT_YELLOW}"
- printf >&2 "%q " "${@}"
+ escaped_print >&2 "${@}"
printf >&2 "${TPUT_RESET}\n"
"${@}"
@@ -178,10 +195,53 @@ run() {
return ${ret}
}
+getent_cmd="$(which_cmd getent)"
+portable_check_user_exists() {
+ local username="${1}" found=
+
+ if [ ! -z "${getent_cmd}" ]
+ then
+ "${getent_cmd}" passwd "${username}" >/dev/null 2>&1
+ return $?
+ fi
+
+ found="$(cut -d ':' -f 1 </etc/passwd | grep "^${username}$")"
+ [ "${found}" = "${username}" ] && return 0
+ return 1
+}
+
+portable_check_group_exists() {
+ local groupname="${1}" found=
+
+ if [ ! -z "${getent_cmd}" ]
+ then
+ "${getent_cmd}" group "${groupname}" >/dev/null 2>&1
+ return $?
+ fi
+
+ found="$(cut -d ':' -f 1 </etc/group | grep "^${groupname}$")"
+ [ "${found}" = "${groupname}" ] && return 0
+ return 1
+}
+
+portable_check_user_in_group() {
+ local username="${1}" groupname="${2}" users=
+
+ if [ ! -z "${getent_cmd}" ]
+ then
+ users="$(getent group "${groupname}" | cut -d ':' -f 4)"
+ else
+ users="$(grep "^${groupname}:" </etc/group | cut -d ':' -f 4)"
+ fi
+
+ [[ ",${users}," =~ ,${username}, ]] && return 0
+ return 1
+}
+
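The comma padding in the regex is what makes the membership test exact; a sketch with illustrative /etc/group data:
    # getent group docker   -> docker:x:999:alice,netdata
    # users="alice,netdata" -> test ",alice,netdata," for ",netdata,"
    # whole names only: a user "dat" would not match, since ",dat," never occurs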
portable_add_user() {
local username="${1}"
- getent passwd "${username}" > /dev/null 2>&1
+ portable_check_user_exists "${username}"
[ $? -eq 0 ] && echo >&2 "User '${username}' already exists." && return 0
echo >&2 "Adding ${username} user account ..."
@@ -214,7 +274,7 @@ portable_add_user() {
portable_add_group() {
local groupname="${1}"
- getent group "${groupname}" > /dev/null 2>&1
+ portable_check_group_exists "${groupname}"
[ $? -eq 0 ] && echo >&2 "Group '${groupname}' already exists." && return 0
echo >&2 "Adding ${groupname} user group ..."
@@ -244,12 +304,11 @@ portable_add_group() {
portable_add_user_to_group() {
local groupname="${1}" username="${2}"
- getent group "${groupname}" > /dev/null 2>&1
+ portable_check_group_exists "${groupname}"
[ $? -ne 0 ] && echo >&2 "Group '${groupname}' does not exist." && return 1
# check whether the user is already in the group
- local users=$(getent group "${groupname}" | cut -d ':' -f 4)
- if [[ ",${users}," =~ ,${username}, ]]
+ if portable_check_user_in_group "${username}" "${groupname}"
then
# username is already there
echo >&2 "User '${username}' is already in group '${groupname}'."
@@ -342,3 +401,258 @@ issystemd() {
# else, it is not systemd
return 1
}
+
+install_non_systemd_init() {
+ [ "${UID}" != 0 ] && return 1
+
+ local key="unknown"
+ if [ -f /etc/os-release ]
+ then
+ source /etc/os-release || return 1
+ key="${ID}-${VERSION_ID}"
+
+ elif [ -f /etc/redhat-release ]
+ then
+ key=$(</etc/redhat-release)
+ fi
+
+ if [ -d /etc/init.d -a ! -f /etc/init.d/netdata ]
+ then
+ if [ "${key}" = "gentoo" ]
+ then
+ echo >&2 "Installing OpenRC init file..."
+ run cp system/netdata-openrc /etc/init.d/netdata && \
+ run chmod 755 /etc/init.d/netdata && \
+ run rc-update add netdata default && \
+ return 0
+
+ elif [ "${key}" = "debian-7" \
+ -o "${key}" = "ubuntu-12.04" \
+ -o "${key}" = "ubuntu-14.04" \
+ ]
+ then
+ echo >&2 "Installing LSB init file..."
+ run cp system/netdata-lsb /etc/init.d/netdata && \
+ run chmod 755 /etc/init.d/netdata && \
+ run update-rc.d netdata defaults && \
+ run update-rc.d netdata enable && \
+ return 0
+ elif [[ "${key}" =~ ^(amzn-201[567]|CentOS release 6|Red Hat Enterprise Linux Server release 6).* ]]
+ then
+ echo >&2 "Installing init.d file..."
+ run cp system/netdata-init-d /etc/init.d/netdata && \
+ run chmod 755 /etc/init.d/netdata && \
+ run chkconfig netdata on && \
+ return 0
+ else
+ echo >&2 "I don't know what init file to install on system '${key}'. Open a github issue to help us fix it."
+ return 1
+ fi
+ elif [ -f /etc/init.d/netdata ]
+ then
+ echo >&2 "file '/etc/init.d/netdata' already exists."
+ return 0
+ else
+ echo >&2 "I don't know what init file to install on system '${key}'. Open a github issue to help us fix it."
+ fi
+
+ return 1
+}
+
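The key it matches on is simply ID-VERSION_ID from /etc/os-release, or the raw /etc/redhat-release line, so the branches above correspond to values like these (illustrative):
    # Ubuntu 14.04 (/etc/os-release): ID=ubuntu, VERSION_ID="14.04" -> key="ubuntu-14.04" (LSB branch)
    # CentOS 6 (/etc/redhat-release): "CentOS release 6.9 (Final)"  -> matched by the init.d regex branch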
+NETDATA_START_CMD="netdata"
+NETDATA_STOP_CMD="killall netdata"
+
+install_netdata_service() {
+ if [ "${UID}" -eq 0 ]
+ then
+ if issystemd
+ then
+ # systemd is running on this system
+ NETDATA_START_CMD="systemctl start netdata"
+ NETDATA_STOP_CMD="systemctl stop netdata"
+
+ if [ ! -f /etc/systemd/system/netdata.service ]
+ then
+ echo >&2 "Installing systemd service..."
+ run cp system/netdata.service /etc/systemd/system/netdata.service && \
+ run systemctl daemon-reload && \
+ run systemctl enable netdata && \
+ return 0
+ else
+ echo >&2 "file '/etc/systemd/system/netdata.service' already exists."
+ return 0
+ fi
+ else
+ install_non_systemd_init
+ local ret=$?
+
+ if [ ${ret} -eq 0 ]
+ then
+ NETDATA_START_CMD="service netdata start"
+ NETDATA_STOP_CMD="service netdata stop"
+ fi
+
+ return ${ret}
+ fi
+ fi
+
+ return 1
+}
+
+
+# -----------------------------------------------------------------------------
+# stop netdata
+
+pidisnetdata() {
+ if [ -d /proc/self ]
+ then
+ [ -z "$1" -o ! -f "/proc/$1/stat" ] && return 1
+ [ "$(cat "/proc/$1/stat" | cut -d '(' -f 2 | cut -d ')' -f 1)" = "netdata" ] && return 0
+ return 1
+ fi
+ return 0
+}
+
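The cut pipeline extracts the comm field, which the kernel wraps in parentheses as field two of /proc/<pid>/stat; a trace with a hypothetical pid:
    # cat /proc/1234/stat -> 1234 (netdata) S 1 1234 ...
    # cut -d '(' -f 2     -> netdata) S 1 1234 ...
    # cut -d ')' -f 1     -> netdata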
+stop_netdata_on_pid() {
+ local pid="${1}" ret=0 count=0
+
+ pidisnetdata ${pid} || return 0
+
+ printf >&2 "Stopping netdata on pid ${pid} ..."
+ while [ ! -z "$pid" -a ${ret} -eq 0 ]
+ do
+ if [ ${count} -gt 45 ]
+ then
+ echo >&2 "Cannot stop the running netdata on pid ${pid}."
+ return 1
+ fi
+
+ count=$(( count + 1 ))
+
+ run kill ${pid} 2>/dev/null
+ ret=$?
+
+ test ${ret} -eq 0 && printf >&2 "." && sleep 2
+ done
+
+ echo >&2
+ if [ ${ret} -eq 0 ]
+ then
+ echo >&2 "SORRY! CANNOT STOP netdata ON PID ${pid} !"
+ return 1
+ fi
+
+ echo >&2 "netdata on pid ${pid} stopped."
+ return 0
+}
+
+stop_all_netdata() {
+ local p myns ns
+
+ myns="$(readlink /proc/self/ns/pid 2>/dev/null)"
+
+ # echo >&2 "Stopping a (possibly) running netdata (namespace '${myns}')..."
+
+ for p in \
+ $(cat /var/run/netdata.pid 2>/dev/null) \
+ $(cat /var/run/netdata/netdata.pid 2>/dev/null) \
+ $(pidof netdata 2>/dev/null)
+ do
+ ns="$(readlink /proc/${p}/ns/pid 2>/dev/null)"
+
+ if [ -z "${myns}" -o -z "${ns}" -o "${myns}" = "${ns}" ]
+ then
+ stop_netdata_on_pid ${p}
+ fi
+ done
+}
+
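The namespace comparison keeps the installer from killing a netdata that runs in a different pid namespace (e.g. inside a container); readlink values here are illustrative:
    # readlink /proc/self/ns/pid -> pid:[4026531836]   (installer's namespace)
    # readlink /proc/5678/ns/pid -> pid:[4026532211]   (containerized netdata)
    # values differ -> pid 5678 is left alone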
+# -----------------------------------------------------------------------------
+# restart netdata
+
+restart_netdata() {
+ local netdata="${1}"
+ shift
+
+ local started=0
+
+ progress "Start netdata"
+
+ if [ "${UID}" -eq 0 ]
+ then
+ service netdata stop
+ stop_all_netdata
+ service netdata restart && started=1
+
+ if [ ${started} -eq 0 ]
+ then
+ service netdata start && started=1
+ fi
+ fi
+
+ if [ ${started} -eq 0 ]
+ then
+ # still not started...
+
+ run stop_all_netdata
+ run "${netdata}" "${@}"
+ return $?
+ fi
+
+ return 0
+}
+
+# -----------------------------------------------------------------------------
+# install netdata logrotate
+
+install_netdata_logrotate() {
+ if [ ${UID} -eq 0 ]
+ then
+ if [ -d /etc/logrotate.d ]
+ then
+ if [ ! -f /etc/logrotate.d/netdata ]
+ then
+ run cp system/netdata.logrotate /etc/logrotate.d/netdata
+ fi
+
+ if [ -f /etc/logrotate.d/netdata ]
+ then
+ run chmod 644 /etc/logrotate.d/netdata
+ fi
+
+ return 0
+ fi
+ fi
+
+ return 1
+}
+
+# -----------------------------------------------------------------------------
+# add netdata user and group
+
+NETDATA_ADDED_TO_DOCKER=0
+NETDATA_ADDED_TO_NGINX=0
+NETDATA_ADDED_TO_VARNISH=0
+NETDATA_ADDED_TO_HAPROXY=0
+NETDATA_ADDED_TO_ADM=0
+NETDATA_ADDED_TO_NSD=0
+NETDATA_ADDED_TO_PROXY=0
+NETDATA_ADDED_TO_SQUID=0
+add_netdata_user_and_group() {
+ if [ ${UID} -eq 0 ]
+ then
+ portable_add_group netdata || return 1
+ portable_add_user netdata || return 1
+ portable_add_user_to_group docker netdata && NETDATA_ADDED_TO_DOCKER=1
+ portable_add_user_to_group nginx netdata && NETDATA_ADDED_TO_NGINX=1
+ portable_add_user_to_group varnish netdata && NETDATA_ADDED_TO_VARNISH=1
+ portable_add_user_to_group haproxy netdata && NETDATA_ADDED_TO_HAPROXY=1
+ portable_add_user_to_group adm netdata && NETDATA_ADDED_TO_ADM=1
+ portable_add_user_to_group nsd netdata && NETDATA_ADDED_TO_NSD=1
+ portable_add_user_to_group proxy netdata && NETDATA_ADDED_TO_PROXY=1
+ portable_add_user_to_group squid netdata && NETDATA_ADDED_TO_SQUID=1
+ return 0
+ fi
+
+ return 1
+}
diff --git a/kickstart-static64.sh b/kickstart-static64.sh
new file mode 100755
index 00000000..98c224f4
--- /dev/null
+++ b/kickstart-static64.sh
@@ -0,0 +1,232 @@
+#!/usr/bin/env sh
+
+umask 022
+
+# ---------------------------------------------------------------------------------------------------------------------
+# library functions copied from installer/functions.sh
+
+which_cmd() {
+ which "${1}" 2>/dev/null || \
+ command -v "${1}" 2>/dev/null
+}
+
+check_cmd() {
+ which_cmd "${1}" >/dev/null 2>&1 && return 0
+ return 1
+}
+
+setup_terminal() {
+ TPUT_RESET=""
+ TPUT_BLACK=""
+ TPUT_RED=""
+ TPUT_GREEN=""
+ TPUT_YELLOW=""
+ TPUT_BLUE=""
+ TPUT_PURPLE=""
+ TPUT_CYAN=""
+ TPUT_WHITE=""
+ TPUT_BGBLACK=""
+ TPUT_BGRED=""
+ TPUT_BGGREEN=""
+ TPUT_BGYELLOW=""
+ TPUT_BGBLUE=""
+ TPUT_BGPURPLE=""
+ TPUT_BGCYAN=""
+ TPUT_BGWHITE=""
+ TPUT_BOLD=""
+ TPUT_DIM=""
+ TPUT_UNDERLINED=""
+ TPUT_BLINK=""
+ TPUT_INVERTED=""
+ TPUT_STANDOUT=""
+ TPUT_BELL=""
+ TPUT_CLEAR=""
+
+ # Is stderr on the terminal? If not, then fail
+ test -t 2 || return 1
+
+ if check_cmd tput
+ then
+ if [ $(( $(tput colors 2>/dev/null) )) -ge 8 ]
+ then
+ # Enable colors
+ TPUT_RESET="$(tput sgr 0)"
+ TPUT_BLACK="$(tput setaf 0)"
+ TPUT_RED="$(tput setaf 1)"
+ TPUT_GREEN="$(tput setaf 2)"
+ TPUT_YELLOW="$(tput setaf 3)"
+ TPUT_BLUE="$(tput setaf 4)"
+ TPUT_PURPLE="$(tput setaf 5)"
+ TPUT_CYAN="$(tput setaf 6)"
+ TPUT_WHITE="$(tput setaf 7)"
+ TPUT_BGBLACK="$(tput setab 0)"
+ TPUT_BGRED="$(tput setab 1)"
+ TPUT_BGGREEN="$(tput setab 2)"
+ TPUT_BGYELLOW="$(tput setab 3)"
+ TPUT_BGBLUE="$(tput setab 4)"
+ TPUT_BGPURPLE="$(tput setab 5)"
+ TPUT_BGCYAN="$(tput setab 6)"
+ TPUT_BGWHITE="$(tput setab 7)"
+ TPUT_BOLD="$(tput bold)"
+ TPUT_DIM="$(tput dim)"
+ TPUT_UNDERLINED="$(tput smul)"
+ TPUT_BLINK="$(tput blink)"
+ TPUT_INVERTED="$(tput rev)"
+ TPUT_STANDOUT="$(tput smso)"
+ TPUT_BELL="$(tput bel)"
+ TPUT_CLEAR="$(tput clear)"
+ fi
+ fi
+
+ return 0
+}
+setup_terminal || echo >/dev/null
+
+progress() {
+ echo >&2 " --- ${TPUT_DIM}${TPUT_BOLD}${*}${TPUT_RESET} --- "
+}
+
+run_ok() {
+ printf >&2 "${TPUT_BGGREEN}${TPUT_WHITE}${TPUT_BOLD} OK ${TPUT_RESET} ${*} \n\n"
+}
+
+run_failed() {
+ printf >&2 "${TPUT_BGRED}${TPUT_WHITE}${TPUT_BOLD} FAILED ${TPUT_RESET} ${*} \n\n"
+}
+
+ESCAPED_PRINT_METHOD=
+printf "%q " test >/dev/null 2>&1
+[ $? -eq 0 ] && ESCAPED_PRINT_METHOD="printfq"
+escaped_print() {
+ if [ "${ESCAPED_PRINT_METHOD}" = "printfq" ]
+ then
+ printf "%q " "${@}"
+ else
+ printf "%s" "${*}"
+ fi
+ return 0
+}
+
+run_logfile="/dev/null"
+run() {
+ local user="${USER--}" dir="${PWD}" info info_console
+
+ if [ "${UID}" = "0" ]
+ then
+ info="[root ${dir}]# "
+ info_console="[${TPUT_DIM}${dir}${TPUT_RESET}]# "
+ else
+ info="[${user} ${dir}]$ "
+ info_console="[${TPUT_DIM}${dir}${TPUT_RESET}]$ "
+ fi
+
+ printf >> "${run_logfile}" "${info}"
+ escaped_print >> "${run_logfile}" "${@}"
+ printf >> "${run_logfile}" " ... "
+
+ printf >&2 "${info_console}${TPUT_BOLD}${TPUT_YELLOW}"
+ escaped_print >&2 "${@}"
+ printf >&2 "${TPUT_RESET}\n"
+
+ "${@}"
+
+ local ret=$?
+ if [ ${ret} -ne 0 ]
+ then
+ run_failed
+ printf >> "${run_logfile}" "FAILED with exit code ${ret}\n"
+ else
+ run_ok
+ printf >> "${run_logfile}" "OK\n"
+ fi
+
+ return ${ret}
+}
+
+
+# ---------------------------------------------------------------------------------------------------------------------
+
+fatal() {
+ printf >&2 "${TPUT_BGRED}${TPUT_WHITE}${TPUT_BOLD} ABORTED ${TPUT_RESET} ${*} \n\n"
+ exit 1
+}
+
+# ---------------------------------------------------------------------------------------------------------------------
+
+if [ "$(uname -m)" != "x86_64" ]
+ then
+ fatal "Static binary versions of netdata are available only for 64bit Intel/AMD CPUs (x86_64), but yours is: $(uname -m)."
+fi
+
+if [ "$(uname -s)" != "Linux" ]
+ then
+ fatal "Static binary versions of netdata are available only for Linux, but this system is $(uname -s)"
+fi
+
+curl="$(which_cmd curl)"
+wget="$(which_cmd wget)"
+
+# ---------------------------------------------------------------------------------------------------------------------
+
+progress "Checking the latest version of static build..."
+
+BASE='https://raw.githubusercontent.com/firehol/binary-packages/master'
+
+LATEST=
+if [ ! -z "${curl}" -a -x "${curl}" ]
+then
+ LATEST="$(run ${curl} "${BASE}/netdata-latest.gz.run")"
+elif [ ! -z "${wget}" -a -x "${wget}" ]
+then
+ LATEST="$(run ${wget} -O - "${BASE}/netdata-latest.gz.run")"
+else
+ fatal "curl or wget are needed for this script to work."
+fi
+
+if [ -z "${LATEST}" ]
+ then
+ fatal "Cannot find the latest static binary version of netdata."
+fi
+
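Judging from how ${LATEST} is used below, netdata-latest.gz.run is a one-line text file naming the newest self-extracting build; an illustrative (hypothetical) value:
    # LATEST="netdata-v1.7.0-x86_64-20170716-1106.gz.run"
    # next download: ${BASE}/${LATEST}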
+# ---------------------------------------------------------------------------------------------------------------------
+
+progress "Downloading static netdata binary: ${LATEST}"
+
+ret=1
+if [ ! -z "${curl}" -a -x "${curl}" ]
+then
+ run ${curl} "${BASE}/${LATEST}" >"/tmp/${LATEST}"
+ ret=$?
+elif [ ! -z "${wget}" -a -x "${wget}" ]
+then
+ run ${wget} -O "/tmp/${LATEST}" "${BASE}/${LATEST}"
+ ret=$?
+else
+ fatal "curl or wget are needed for this script to work."
+fi
+
+if [ ${ret} -ne 0 -o ! -s "/tmp/${LATEST}" ]
+ then
+ fatal "Failed to download the latest static binary version of netdata."
+fi
+
+# ---------------------------------------------------------------------------------------------------------------------
+
+opts=
+if [ "${1}" = "--dont-wait" -o "${1}" = "--non-interactive" ]
+then
+ opts="--accept"
+fi
+
+progress "Installing netdata"
+
+sudo=
+[ "${UID}" != "0" ] && sudo="sudo"
+run ${sudo} sh "/tmp/${LATEST}" ${opts}
+
+if [ $? -eq 0 ]
+ then
+ rm "/tmp/${LATEST}"
+else
+ echo >&2 "NOTE: did not remove: /tmp/${LATEST}"
+fi
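Putting it together, the two supported ways to run this script (a usage sketch):
    # interactive: the .run package shows its license prompt
    sh kickstart-static64.sh
    # unattended: --accept is passed through to the makeself package
    sh kickstart-static64.sh --dont-wait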
diff --git a/kickstart.sh b/kickstart.sh
new file mode 100755
index 00000000..cabe0146
--- /dev/null
+++ b/kickstart.sh
@@ -0,0 +1,374 @@
+#!/usr/bin/env sh
+#
+# Run me with:
+#
+# bash <(curl -Ss https://my-netdata.io/kickstart.sh)
+#
+# or (to install all netdata dependencies):
+#
+# bash <(curl -Ss https://my-netdata.io/kickstart.sh) all
+#
+# Other options:
+# --src-dir PATH keep netdata.git at PATH/netdata.git
+# --dont-wait do not prompt for user input
+# --non-interactive do not prompt for user input
+# --no-updates do not install script for daily updates
+#
+# This script will:
+#
+# 1. install all netdata compilation dependencies
+# using the package manager of the system
+#
+# 2. download netdata source code in /usr/src/netdata.git
+#
+# 3. install netdata
+
+umask 022
+
+[ -z "${UID}" ] && UID="$(id -u)"
+
+# ---------------------------------------------------------------------------------------------------------------------
+# library functions copied from installer/functions.sh
+
+which_cmd() {
+ which "${1}" 2>/dev/null || \
+ command -v "${1}" 2>/dev/null
+}
+
+check_cmd() {
+ which_cmd "${1}" >/dev/null 2>&1 && return 0
+ return 1
+}
+
+setup_terminal() {
+ TPUT_RESET=""
+ TPUT_BLACK=""
+ TPUT_RED=""
+ TPUT_GREEN=""
+ TPUT_YELLOW=""
+ TPUT_BLUE=""
+ TPUT_PURPLE=""
+ TPUT_CYAN=""
+ TPUT_WHITE=""
+ TPUT_BGBLACK=""
+ TPUT_BGRED=""
+ TPUT_BGGREEN=""
+ TPUT_BGYELLOW=""
+ TPUT_BGBLUE=""
+ TPUT_BGPURPLE=""
+ TPUT_BGCYAN=""
+ TPUT_BGWHITE=""
+ TPUT_BOLD=""
+ TPUT_DIM=""
+ TPUT_UNDERLINED=""
+ TPUT_BLINK=""
+ TPUT_INVERTED=""
+ TPUT_STANDOUT=""
+ TPUT_BELL=""
+ TPUT_CLEAR=""
+
+ # Is stderr on the terminal? If not, then fail
+ test -t 2 || return 1
+
+ if check_cmd tput
+ then
+ if [ $(( $(tput colors 2>/dev/null) )) -ge 8 ]
+ then
+ # Enable colors
+ TPUT_RESET="$(tput sgr 0)"
+ TPUT_BLACK="$(tput setaf 0)"
+ TPUT_RED="$(tput setaf 1)"
+ TPUT_GREEN="$(tput setaf 2)"
+ TPUT_YELLOW="$(tput setaf 3)"
+ TPUT_BLUE="$(tput setaf 4)"
+ TPUT_PURPLE="$(tput setaf 5)"
+ TPUT_CYAN="$(tput setaf 6)"
+ TPUT_WHITE="$(tput setaf 7)"
+ TPUT_BGBLACK="$(tput setab 0)"
+ TPUT_BGRED="$(tput setab 1)"
+ TPUT_BGGREEN="$(tput setab 2)"
+ TPUT_BGYELLOW="$(tput setab 3)"
+ TPUT_BGBLUE="$(tput setab 4)"
+ TPUT_BGPURPLE="$(tput setab 5)"
+ TPUT_BGCYAN="$(tput setab 6)"
+ TPUT_BGWHITE="$(tput setab 7)"
+ TPUT_BOLD="$(tput bold)"
+ TPUT_DIM="$(tput dim)"
+ TPUT_UNDERLINED="$(tput smul)"
+ TPUT_BLINK="$(tput blink)"
+ TPUT_INVERTED="$(tput rev)"
+ TPUT_STANDOUT="$(tput smso)"
+ TPUT_BELL="$(tput bel)"
+ TPUT_CLEAR="$(tput clear)"
+ fi
+ fi
+
+ return 0
+}
+setup_terminal || echo >/dev/null
+
+progress() {
+ echo >&2 " --- ${TPUT_DIM}${TPUT_BOLD}${*}${TPUT_RESET} --- "
+}
+
+run_ok() {
+ printf >&2 "${TPUT_BGGREEN}${TPUT_WHITE}${TPUT_BOLD} OK ${TPUT_RESET} ${*} \n\n"
+}
+
+run_failed() {
+ printf >&2 "${TPUT_BGRED}${TPUT_WHITE}${TPUT_BOLD} FAILED ${TPUT_RESET} ${*} \n\n"
+}
+
+ESCAPED_PRINT_METHOD=
+printf "%q " test >/dev/null 2>&1
+[ $? -eq 0 ] && ESCAPED_PRINT_METHOD="printfq"
+escaped_print() {
+ if [ "${ESCAPED_PRINT_METHOD}" = "printfq" ]
+ then
+ printf "%q " "${@}"
+ else
+ printf "%s" "${*}"
+ fi
+ return 0
+}
+
+run_logfile="/dev/null"
+run() {
+ local user="${USER--}" dir="${PWD}" info info_console
+
+ if [ "${UID}" = "0" ]
+ then
+ info="[root ${dir}]# "
+ info_console="[${TPUT_DIM}${dir}${TPUT_RESET}]# "
+ else
+ info="[${user} ${dir}]$ "
+ info_console="[${TPUT_DIM}${dir}${TPUT_RESET}]$ "
+ fi
+
+ printf >> "${run_logfile}" "${info}"
+ escaped_print >> "${run_logfile}" "${@}"
+ printf >> "${run_logfile}" " ... "
+
+ printf >&2 "${info_console}${TPUT_BOLD}${TPUT_YELLOW}"
+ escaped_print >&2 "${@}"
+ printf >&2 "${TPUT_RESET}\n"
+
+ "${@}"
+
+ local ret=$?
+ if [ ${ret} -ne 0 ]
+ then
+ run_failed
+ printf >> "${run_logfile}" "FAILED with exit code ${ret}\n"
+ else
+ run_ok
+ printf >> "${run_logfile}" "OK\n"
+ fi
+
+ return ${ret}
+}
+
+
+# ---------------------------------------------------------------------------------------------------------------------
+# collect system information
+
+fatal() {
+ printf >&2 "${TPUT_BGRED}${TPUT_WHITE}${TPUT_BOLD} ABORTED ${TPUT_RESET} ${*} \n\n"
+ exit 1
+}
+
+export PATH="${PATH}:/usr/local/bin:/usr/local/sbin"
+
+curl="$(which_cmd curl)"
+wget="$(which_cmd wget)"
+bash="$(which_cmd bash)"
+
+if [ -z "${BASH_VERSION}" ]
+then
+ # we don't run under bash
+ if [ ! -z "${bash}" -a -x "${bash}" ]
+ then
+ BASH_MAJOR_VERSION=$(${bash} -c 'echo "${BASH_VERSINFO[0]}"')
+ fi
+else
+ # we run under bash
+ BASH_MAJOR_VERSION="${BASH_VERSINFO[0]}"
+fi
+
+HAS_BASH4=1
+if [ -z "${BASH_MAJOR_VERSION}" ]
+then
+ echo >&2 "No BASH is available on this system"
+ HAS_BASH4=0
+elif [ $((BASH_MAJOR_VERSION)) -lt 4 ]
+then
+ echo >&2 "No BASH v4+ is available on this system (installed bash is v${BASH_MAJOR_VERSION}"
+ HAS_BASH4=0
+fi
+
+SYSTEM="$(uname -s)"
+OS="$(uname -o)"
+MACHINE="$(uname -m)"
+
+cat <<EOF
+System : ${SYSTEM}
+Operating System : ${OS}
+Machine : ${MACHINE}
+BASH major version: ${BASH_MAJOR_VERSION}
+EOF
+
+sudo=""
+[ "${UID}" -ne "0" ] && sudo="sudo"
+
+
+# ---------------------------------------------------------------------------------------------------------------------
+# install required system packages
+
+INTERACTIVE=1
+PACKAGES_INSTALLER_OPTIONS="netdata"
+NETDATA_INSTALLER_OPTIONS=""
+NETDATA_UPDATES="-u"
+SOURCE_DST="/usr/src"
+while [ ! -z "${1}" ]
+do
+ if [ "${1}" = "all" ]
+ then
+ PACKAGES_INSTALLER_OPTIONS="netdata-all"
+ shift 1
+ elif [ "${1}" = "--dont-wait" -o "${1}" = "--non-interactive" ]
+ then
+ INTERACTIVE=0
+ shift 1
+ elif [ "${1}" = "--src-dir" ]
+ then
+ SOURCE_DST="${2}"
+ # echo >&2 "netdata source will be installed at ${SOURCE_DST}/netdata.git"
+ shift 2
+ elif [ "${1}" = "--no-updates" ]
+ then
+ # echo >&2 "netdata will not auto-update"
+ NETDATA_UPDATES=
+ shift 1
+ else
+ break
+ fi
+done
+
+if [ "${INTERACTIVE}" = "0" ]
+then
+ PACKAGES_INSTALLER_OPTIONS="--dont-wait --non-interactive ${PACKAGES_INSTALLER_OPTIONS}"
+ NETDATA_INSTALLER_OPTIONS="--dont-wait"
+fi
+
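As a usage sketch, here is how the options parsed above combine for an unattended full install (paths illustrative):
    sh kickstart.sh all --dont-wait --src-dir /opt/src --no-updates
    # -> PACKAGES_INSTALLER_OPTIONS="--dont-wait --non-interactive netdata-all"
    # -> NETDATA_INSTALLER_OPTIONS="--dont-wait"
    # -> NETDATA_UPDATES=""  SOURCE_DST="/opt/src"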
+# echo "PACKAGES_INSTALLER_OPTIONS=${PACKAGES_INSTALLER_OPTIONS}"
+# echo "NETDATA_INSTALLER_OPTIONS=${NETDATA_INSTALLER_OPTIONS} ${*}"
+
+if [ "${OS}" = "GNU/Linux" -o "${SYSTEM}" = "Linux" ]
+then
+ if [ "${HAS_BASH4}" = "1" ]
+ then
+ tmp="$(mktemp /tmp/netdata-kickstart-XXXXXX)"
+ url="https://raw.githubusercontent.com/firehol/netdata-demo-site/master/install-required-packages.sh"
+
+ progress "Downloading script to detect required packages..."
+ if [ ! -z "${curl}" ]
+ then
+ run ${curl} "${url}" >"${tmp}" || fatal "Cannot download ${url}"
+ elif [ ! -z "${wget}" ]
+ then
+ run "${wget}" -O - "${url}" >"${tmp}" || fatal "Cannot download ${url}"
+ else
+ rm "${tmp}"
+ fatal "I need curl or wget to proceed, but neither is available on this system."
+ fi
+
+ ask=0
+ if [ -s "${tmp}" ]
+ then
+ progress "Running downloaded script to detect required packages..."
+ run ${sudo} "${bash}" "${tmp}" ${PACKAGES_INSTALLER_OPTIONS} || ask=1
+ rm "${tmp}"
+ else
+ rm "${tmp}"
+ fatal "Downloaded script is empty..."
+ fi
+
+ if [ "${ask}" = "1" ]
+ then
+ echo >&2 "It failed to install all the required packages, but I can try to install netdata."
+ read -p "Press ENTER to continue to netdata installation > "
+ progress "OK, let's give it a try..."
+ fi
+ else
+ echo >&2 "WARNING"
+ echo >&2 "Cannot detect the packages to be installed in this system, without BASH v4+."
+ echo >&2 "We can only attempt to install netdata..."
+ echo >&2
+ fi
+else
+ echo >&2 "WARNING"
+ echo >&2 "Cannot detect the packages to be installed on a ${SYSTEM} - ${OS} system."
+ echo >&2 "We can only attempt to install netdata..."
+ echo >&2
+fi
+
+
+# ---------------------------------------------------------------------------------------------------------------------
+# download netdata source
+
+# this has to be checked after we have installed the required packages
+git="$(which_cmd git)"
+
+NETDATA_SOURCE_DIR=
+if [ ! -z "${git}" -a -x "${git}" ]
+then
+ [ ! -d "${SOURCE_DST}" ] && run ${sudo} mkdir -p "${SOURCE_DST}"
+
+ if [ ! -d "${SOURCE_DST}/netdata.git" ]
+ then
+ progress "Downloading netdata source code..."
+ run ${sudo} ${git} clone https://github.com/firehol/netdata.git "${SOURCE_DST}/netdata.git" || fatal "Cannot download netdata source"
+ cd "${SOURCE_DST}/netdata.git" || fatal "Cannot cd to netdata source tree"
+ else
+ progress "Updating netdata source code..."
+ cd "${SOURCE_DST}/netdata.git" || fatal "Cannot cd to netdata source tree"
+ run ${sudo} ${git} fetch --all || fatal "Cannot fetch netdata source updates"
+ run ${sudo} ${git} reset --hard origin/master || fatal "Cannot update netdata source tree"
+ fi
+ NETDATA_SOURCE_DIR="${SOURCE_DST}/netdata.git"
+else
+ fatal "Cannot find the command 'git' to download the netdata source code."
+fi
+
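On re-runs the clone already exists, so the update path boils down to:
    # cd /usr/src/netdata.git && git fetch --all && git reset --hard origin/master
    # (reset --hard discards any local edits in the source tree)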
+
+# ---------------------------------------------------------------------------------------------------------------------
+# install netdata from source
+
+if [ ! -z "${NETDATA_SOURCE_DIR}" -a -d "${NETDATA_SOURCE_DIR}" ]
+then
+ cd "${NETDATA_SOURCE_DIR}" || fatal "Cannot cd to netdata source tree"
+
+ install=0
+ if [ -x netdata-updater.sh ]
+ then
+ # attempt to run the updater, to respect any compilation settings already in place
+ progress "Re-installing netdata..."
+ run ${sudo} ./netdata-updater.sh -f || install=1
+ else
+ install=1
+ fi
+
+ if [ "${install}" = "1" ]
+ then
+ if [ -x netdata-installer.sh ]
+ then
+ progress "Installing netdata..."
+ run ${sudo} ./netdata-installer.sh ${NETDATA_UPDATES} ${NETDATA_INSTALLER_OPTIONS} "${@}" || \
+ fatal "netdata-installer.sh exited with error"
+ else
+ fatal "Cannot install netdata from source (the source directory does not include netdata-installer.sh)."
+ fi
+ fi
+else
+ fatal "Cannot install netdata from source, on this system (cannot download the source code)."
+fi
diff --git a/m4/ax_c___atomic.m4 b/m4/ax_c___atomic.m4
index 131929ae..dd5ee3d1 100644
--- a/m4/ax_c___atomic.m4
+++ b/m4/ax_c___atomic.m4
@@ -10,13 +10,19 @@ AC_DEFUN([AC_C___ATOMIC],
main (int argc, char **argv)
{
volatile unsigned long ul1 = 1, ul2 = 0, ul3 = 2;
+ __atomic_load_n(&ul1, __ATOMIC_SEQ_CST);
__atomic_compare_exchange(&ul1, &ul2, &ul3, 1, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
__atomic_fetch_add(&ul1, 1, __ATOMIC_SEQ_CST);
__atomic_fetch_sub(&ul3, 1, __ATOMIC_SEQ_CST);
+ __atomic_or_fetch(&ul1, ul2, __ATOMIC_SEQ_CST);
+ __atomic_and_fetch(&ul1, ul2, __ATOMIC_SEQ_CST);
volatile unsigned long long ull1 = 1, ull2 = 0, ull3 = 2;
+ __atomic_load_n(&ull1, __ATOMIC_SEQ_CST);
__atomic_compare_exchange(&ull1, &ull2, &ull3, 1, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
__atomic_fetch_add(&ull1, 1, __ATOMIC_SEQ_CST);
__atomic_fetch_sub(&ull3, 1, __ATOMIC_SEQ_CST);
+ __atomic_or_fetch(&ull1, ull2, __ATOMIC_SEQ_CST);
+ __atomic_and_fetch(&ull1, ull2, __ATOMIC_SEQ_CST);
return 0;
}
]])],
diff --git a/makeself/build-x86_64-static.sh b/makeself/build-x86_64-static.sh
new file mode 100755
index 00000000..0516beae
--- /dev/null
+++ b/makeself/build-x86_64-static.sh
@@ -0,0 +1,39 @@
+#!/usr/bin/env sh
+
+set -e
+
+DOCKER_CONTAINER_NAME="netdata-package-x86_64-static"
+
+if ! sudo docker inspect "${DOCKER_CONTAINER_NAME}" >/dev/null 2>&1
+then
+ # To run interactively:
+ # sudo docker run -it netdata-package-x86_64-static /bin/sh
+ # (add -v host-dir:guest-dir:rw arguments to mount volumes)
+ #
+ # To remove images in order to re-create:
+ # sudo docker rm -v $(sudo docker ps -a -q -f status=exited)
+ # sudo docker rmi netdata-package-x86_64-static
+ #
+ # This command maps the current directory to
+ # /usr/src/netdata.git
+ # inside the container and runs the script setup-x86_64-static.sh
+ # (also inside the container)
+ #
+ sudo docker run -v $(pwd):/usr/src/netdata.git:rw alpine:3.5 \
+ /bin/sh /usr/src/netdata.git/makeself/setup-x86_64-static.sh
+
+ # save the changes made permanently
+ id=$(sudo docker ps -l -q)
+ sudo docker commit ${id} "${DOCKER_CONTAINER_NAME}"
+fi
+
+# Run the build script inside the container
+sudo docker run -a stdin -a stdout -a stderr -i -t -v \
+ $(pwd):/usr/src/netdata.git:rw \
+ "${DOCKER_CONTAINER_NAME}" \
+ /bin/sh /usr/src/netdata.git/makeself/build.sh
+
+if [ "${USER}" ]
+ then
+ sudo chown -R "${USER}" .
+fi
diff --git a/makeself/build.sh b/makeself/build.sh
new file mode 100755
index 00000000..7896425d
--- /dev/null
+++ b/makeself/build.sh
@@ -0,0 +1,38 @@
+#!/usr/bin/env sh
+
+# First run setup-x86_64-static.sh under Alpine Linux to install
+# the required packages. build-x86_64-static.sh will do this for you
+# using Docker.
+
+cd $(dirname "$0") || exit 1
+
+# if we don't run inside the netdata repo
+# download it and run from it
+if [ ! -f ../netdata-installer.sh ]
+then
+ git clone https://github.com/firehol/netdata.git netdata.git || exit 1
+ cd netdata.git/makeself || exit 1
+ ./build.sh "$@"
+ exit $?
+fi
+
+cat >&2 <<EOF
+
+This program will create a self-extracting shell package containing
+a statically linked netdata, able to run on any 64-bit Linux system,
+without any dependencies from the target system.
+
+It can be used to get netdata running in no time, or in cases where the
+target Linux system cannot compile netdata.
+
+EOF
+
+# read -p "Press ENTER to continue > "
+
+if [ ! -d tmp ]
+ then
+ mkdir tmp || exit 1
+fi
+
+./run-all-jobs.sh "$@"
+exit $?
diff --git a/makeself/functions.sh b/makeself/functions.sh
new file mode 100755
index 00000000..48835f0f
--- /dev/null
+++ b/makeself/functions.sh
@@ -0,0 +1,59 @@
+#!/usr/bin/env bash
+
+# -----------------------------------------------------------------------------
+
+# allow running the jobs by hand
+[ -z "${NETDATA_INSTALL_PATH}" ] && export NETDATA_INSTALL_PATH="${1-/opt/netdata}"
+[ -z "${NETDATA_MAKESELF_PATH}" ] && export NETDATA_MAKESELF_PATH="$(dirname "${0}")/.."
+[ "${NETDATA_MAKESELF_PATH:0:1}" != "/" ] && export NETDATA_MAKESELF_PATH="$(pwd)/${NETDATA_MAKESELF_PATH}"
+[ -z "${NETDATA_SOURCE_PATH}" ] && export NETDATA_SOURCE_PATH="${NETDATA_MAKESELF_PATH}/.."
+[ -z "${PROCESSORS}" ] && export PROCESSORS=$(cat /proc/cpuinfo 2>/dev/null | grep ^processor | wc -l)
+[ -z "${PROCESSORS}" -o $((PROCESSORS)) -lt 1 ] && export PROCESSORS=1
+export NULL=
+
+# make sure the path does not end with /
+if [ "${NETDATA_INSTALL_PATH:$(( ${#NETDATA_INSTALL_PATH} - 1)):1}" = "/" ]
+ then
+ export NETDATA_INSTALL_PATH="${NETDATA_INSTALL_PATH:0:$(( ${#NETDATA_INSTALL_PATH} - 1))}"
+fi
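The substring expansion above just drops one trailing slash, e.g.:
    # NETDATA_INSTALL_PATH="/opt/netdata/" -> "/opt/netdata"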
+
+# find the parent directory
+export NETDATA_INSTALL_PARENT="$(dirname "${NETDATA_INSTALL_PATH}")"
+
+
+# debug
+echo "ME=${0}"
+echo "NETDATA_INSTALL_PARENT=${NETDATA_INSTALL_PARENT}"
+echo "NETDATA_INSTALL_PATH=${NETDATA_INSTALL_PATH}"
+echo "NETDATA_MAKESELF_PATH=${NETDATA_MAKESELF_PATH}"
+echo "NETDATA_SOURCE_PATH=${NETDATA_SOURCE_PATH}"
+echo "PROCESSORS=${PROCESSORS}"
+
+# bash strict mode
+set -euo pipefail
+
+# -----------------------------------------------------------------------------
+
+fetch() {
+ local dir="${1}" url="${2}"
+ local tar="${dir}.tar.gz"
+
+ if [ ! -f "${NETDATA_MAKESELF_PATH}/tmp/${tar}" ]
+ then
+ run wget -O "${NETDATA_MAKESELF_PATH}/tmp/${tar}" "${url}"
+ fi
+
+ if [ ! -d "${NETDATA_MAKESELF_PATH}/tmp/${dir}" ]
+ then
+ cd "${NETDATA_MAKESELF_PATH}/tmp"
+ run tar -zxvpf "${tar}"
+ cd -
+ fi
+
+ run cd "${NETDATA_MAKESELF_PATH}/tmp/${dir}"
+}
+
+# -----------------------------------------------------------------------------
+
+# load the functions of the netdata-installer.sh
+. "${NETDATA_SOURCE_PATH}/installer/functions.sh"
diff --git a/makeself/install-or-update.sh b/makeself/install-or-update.sh
new file mode 100755
index 00000000..da63c64b
--- /dev/null
+++ b/makeself/install-or-update.sh
@@ -0,0 +1,162 @@
+#!/usr/bin/env bash
+
+. $(dirname "${0}")/functions.sh
+
+export LC_ALL=C
+umask 002
+
+# Be nice on production environments
+renice 19 $$ >/dev/null 2>/dev/null
+
+
+# -----------------------------------------------------------------------------
+progress "Checking new configuration files"
+
+declare -A configs_signatures=()
+. system/configs.signatures
+
+if [ ! -d etc/netdata ]
+ then
+ run mkdir -p etc/netdata
+fi
+
+md5sum="$(which md5sum 2>/dev/null || command -v md5sum 2>/dev/null || command -v md5 2>/dev/null)"
+for x in $(find etc.new -type f)
+do
+ # find its relative filename
+ f="${x/etc.new\/netdata\//}"
+ t="${x/etc.new\//etc\/}"
+ d=$(dirname "${t}")
+
+ #echo >&2 "x: ${x}"
+ #echo >&2 "t: ${t}"
+ #echo >&2 "d: ${d}"
+
+ if [ ! -d "${d}" ]
+ then
+ run mkdir -p "${d}"
+ fi
+
+ if [ ! -f "${t}" ]
+ then
+ run cp "${x}" "${t}"
+ continue
+ fi
+
+ if [ ! -z "${md5sum}" ]
+ then
+ # find the checksum of the existing file
+ md5="$(cat "${t}" | ${md5sum} | cut -d ' ' -f 1)"
+ #echo >&2 "md5: ${md5}"
+
+ # check if it matches
+ if [ "${configs_signatures[${md5}]}" = "${f}" ]
+ then
+ run cp "${x}" "${t}"
+ fi
+ fi
+
+ if ! [[ "${x}" =~ .*\.orig ]]
+ then
+ run mv "${x}" "${t}.orig"
+ fi
+done
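configs_signatures maps the md5 of every stock config ever shipped to its relative filename, so a user-modified file never matches and keeps its .orig copy instead. One iteration, sketched (hash hypothetical):
    # x="etc.new/netdata/health_alarm_notify.conf"
    # f="health_alarm_notify.conf"  t="etc/netdata/health_alarm_notify.conf"
    # md5="$(cat "${t}" | md5sum | cut -d ' ' -f 1)"   # e.g. 9a8b...
    # [ "${configs_signatures[${md5}]}" = "${f}" ] -> unmodified stock file, safe to overwrite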
+
+run rm -rf etc.new
+
+
+# -----------------------------------------------------------------------------
+progress "Add user netdata to required user groups"
+
+NETDATA_USER="root"
+NETDATA_GROUP="root"
+add_netdata_user_and_group
+if [ $? -eq 0 ]
+ then
+ NETDATA_USER="netdata"
+ NETDATA_GROUP="netdata"
+else
+ run_failed "Failed to add netdata user and group"
+fi
+
+
+# -----------------------------------------------------------------------------
+progress "Install logrotate configuration for netdata"
+
+install_netdata_logrotate || run_failed "Cannot install logrotate file for netdata."
+
+
+# -----------------------------------------------------------------------------
+progress "Install netdata at system init"
+
+install_netdata_service || run_failed "Cannot install netdata init service."
+
+
+# -----------------------------------------------------------------------------
+progress "creating quick links"
+
+dir_should_be_link() {
+ local p="${1}" t="${2}" d="${3}" old
+
+ old="${PWD}"
+ cd "${p}" || return 0
+
+ if [ -e "${d}" ]
+ then
+ if [ -h "${d}" ]
+ then
+ run rm "${d}"
+ else
+ run mv -f "${d}" "${d}.old.$$"
+ fi
+ fi
+
+ run ln -s "${t}" "${d}"
+ cd "${old}"
+}
+
+dir_should_be_link . bin sbin
+dir_should_be_link usr ../bin bin
+dir_should_be_link usr ../bin sbin
+dir_should_be_link usr . local
+
+dir_should_be_link . etc/netdata netdata-configs
+dir_should_be_link . usr/share/netdata/web netdata-web-files
+dir_should_be_link . usr/libexec/netdata netdata-plugins
+dir_should_be_link . var/lib/netdata netdata-dbs
+dir_should_be_link . var/cache/netdata netdata-metrics
+dir_should_be_link . var/log/netdata netdata-logs
+
+
+# -----------------------------------------------------------------------------
+progress "fix permissions"
+
+run chmod g+rx,o+rx /opt
+run chown -R ${NETDATA_USER}:${NETDATA_GROUP} /opt/netdata
+
+
+# -----------------------------------------------------------------------------
+progress "fix plugin permissions"
+
+for x in apps.plugin freeipmi.plugin
+do
+ f="usr/libexec/netdata/plugins.d/${x}"
+
+ if [ -f "${f}" ]
+ then
+ run chown root:${NETDATA_GROUP} "${f}"
+ run chmod 4750 "${f}"
+ fi
+done
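Mode 4750 is the setuid bit plus rwxr-x---: these plugins run with root privileges, but only root and members of the netdata group may execute them:
    # ls -l usr/libexec/netdata/plugins.d/apps.plugin
    # -rwsr-x--- root netdata ... apps.plugin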
+
+
+# -----------------------------------------------------------------------------
+progress "starting netdata"
+
+restart_netdata "/opt/netdata/bin/netdata"
+if [ $? -eq 0 ]
+ then
+ netdata_banner "is installed and running now!"
+else
+ netdata_banner "is installed now!"
+fi
diff --git a/makeself/jobs/10-prepare-destination.install.sh b/makeself/jobs/10-prepare-destination.install.sh
new file mode 100755
index 00000000..58c8c25f
--- /dev/null
+++ b/makeself/jobs/10-prepare-destination.install.sh
@@ -0,0 +1,16 @@
+#!/usr/bin/env bash
+
+. $(dirname "${0}")/../functions.sh "${@}" || exit 1
+
+[ -d "${NETDATA_INSTALL_PATH}.old" ] && run rm -rf "${NETDATA_INSTALL_PATH}.old"
+[ -d "${NETDATA_INSTALL_PATH}" ] && run mv -f "${NETDATA_INSTALL_PATH}" "${NETDATA_INSTALL_PATH}.old"
+
+run mkdir -p "${NETDATA_INSTALL_PATH}/bin"
+run mkdir -p "${NETDATA_INSTALL_PATH}/usr"
+run cd "${NETDATA_INSTALL_PATH}"
+run ln -s bin sbin
+run cd "${NETDATA_INSTALL_PATH}/usr"
+run ln -s ../bin bin
+run ln -s ../sbin sbin
+run ln -s . local
+
diff --git a/makeself/jobs/50-bash-4.4.install.sh b/makeself/jobs/50-bash-4.4.install.sh
new file mode 100755
index 00000000..07c84b6d
--- /dev/null
+++ b/makeself/jobs/50-bash-4.4.install.sh
@@ -0,0 +1,47 @@
+#!/usr/bin/env bash
+
+. $(dirname "${0}")/../functions.sh "${@}" || exit 1
+
+fetch "bash-4.4" "http://ftp.gnu.org/gnu/bash/bash-4.4.tar.gz"
+
+run ./configure \
+ --prefix=${NETDATA_INSTALL_PATH} \
+ --enable-static-link \
+ --disable-nls \
+ --without-bash-malloc \
+# --disable-rpath \
+# --enable-alias \
+# --enable-arith-for-command \
+# --enable-array-variables \
+# --enable-brace-expansion \
+# --enable-casemod-attributes \
+# --enable-casemod-expansions \
+# --enable-command-timing \
+# --enable-cond-command \
+# --enable-cond-regexp \
+# --enable-directory-stack \
+# --enable-dparen-arithmetic \
+# --enable-function-import \
+# --enable-glob-asciiranges-default \
+# --enable-help-builtin \
+# --enable-job-control \
+# --enable-net-redirections \
+# --enable-process-substitution \
+# --enable-progcomp \
+# --enable-prompt-string-decoding \
+# --enable-readline \
+# --enable-select \
+
+
+run make clean
+run make -j${PROCESSORS}
+
+cat >examples/loadables/Makefile <<EOF
+all:
+clean:
+install:
+EOF
+
+run make install
+
+run strip ${NETDATA_INSTALL_PATH}/bin/bash
diff --git a/makeself/jobs/50-curl-7.53.1.install.sh b/makeself/jobs/50-curl-7.53.1.install.sh
new file mode 100755
index 00000000..0e375a91
--- /dev/null
+++ b/makeself/jobs/50-curl-7.53.1.install.sh
@@ -0,0 +1,30 @@
+#!/usr/bin/env bash
+
+. $(dirname "${0}")/../functions.sh "${@}" || exit 1
+
+fetch "curl-curl-7_53_1" "https://github.com/curl/curl/archive/curl-7_53_1.tar.gz"
+
+export LDFLAGS="-static"
+export PKG_CONFIG="pkg-config --static"
+
+run ./buildconf
+
+run ./configure \
+ --prefix=${NETDATA_INSTALL_PATH} \
+ --enable-optimize \
+ --disable-shared \
+ --enable-static \
+ --enable-http \
+ --enable-proxy \
+ --enable-ipv6 \
+ --enable-cookies \
+ ${NULL}
+
+# Curl autoconf does not honour the curl_LDFLAGS environment variable
+run sed -i -e "s/curl_LDFLAGS =/curl_LDFLAGS = -all-static/" src/Makefile
+
+run make clean
+run make -j${PROCESSORS}
+run make install
+
+run strip ${NETDATA_INSTALL_PATH}/bin/curl
diff --git a/makeself/jobs/50-fping-4.0.install.sh b/makeself/jobs/50-fping-4.0.install.sh
new file mode 100755
index 00000000..dbc91c51
--- /dev/null
+++ b/makeself/jobs/50-fping-4.0.install.sh
@@ -0,0 +1,25 @@
+#!/usr/bin/env bash
+
+. $(dirname "${0}")/../functions.sh "${@}" || exit 1
+
+fetch "fping-4.0" "https://github.com/schweikert/fping/releases/download/v4.0/fping-4.0.tar.gz"
+
+export CFLAGS="-static"
+
+run ./configure \
+ --prefix=${NETDATA_INSTALL_PATH} \
+ --enable-ipv4 \
+ --enable-ipv6 \
+ ${NULL}
+
+cat >doc/Makefile <<EOF
+all:
+clean:
+install:
+EOF
+
+run make clean
+run make -j${PROCESSORS}
+run make install
+
+run strip ${NETDATA_INSTALL_PATH}/bin/fping
diff --git a/makeself/jobs/70-netdata-git.install.sh b/makeself/jobs/70-netdata-git.install.sh
new file mode 100755
index 00000000..873830f9
--- /dev/null
+++ b/makeself/jobs/70-netdata-git.install.sh
@@ -0,0 +1,16 @@
+#!/usr/bin/env bash
+
+. ${NETDATA_MAKESELF_PATH}/functions.sh "${@}" || exit 1
+
+cd "${NETDATA_SOURCE_PATH}" || exit 1
+
+export CFLAGS="-O3 -static"
+
+run ./netdata-installer.sh --install "${NETDATA_INSTALL_PARENT}" \
+ --dont-wait \
+ --dont-start-it \
+ ${NULL}
+
+run strip ${NETDATA_INSTALL_PATH}/bin/netdata
+run strip ${NETDATA_INSTALL_PATH}/usr/libexec/netdata/plugins.d/apps.plugin
+
diff --git a/makeself/jobs/99-makeself.install.sh b/makeself/jobs/99-makeself.install.sh
new file mode 100755
index 00000000..465a3195
--- /dev/null
+++ b/makeself/jobs/99-makeself.install.sh
@@ -0,0 +1,121 @@
+#!/usr/bin/env bash
+
+. $(dirname "${0}")/../functions.sh "${@}" || exit 1
+
+
+# -----------------------------------------------------------------------------
+# copy the files needed by makeself installation
+
+run mkdir -p "${NETDATA_INSTALL_PATH}/system"
+run cd "${NETDATA_SOURCE_PATH}" || exit 1
+
+cp \
+ makeself/post-installer.sh \
+ makeself/install-or-update.sh \
+ installer/functions.sh \
+ configs.signatures \
+ system/netdata-init-d \
+ system/netdata-lsb \
+ system/netdata-openrc \
+ system/netdata.logrotate \
+ system/netdata.service \
+ "${NETDATA_INSTALL_PATH}/system/"
+
+
+# -----------------------------------------------------------------------------
+# create a wrapper to start our netdata with a modified path
+
+mkdir -p "${NETDATA_INSTALL_PATH}/bin/srv"
+
+mv "${NETDATA_INSTALL_PATH}/bin/netdata" \
+ "${NETDATA_INSTALL_PATH}/bin/srv/netdata" || exit 1
+
+cat >"${NETDATA_INSTALL_PATH}/bin/netdata" <<EOF
+#!${NETDATA_INSTALL_PATH}/bin/bash
+export PATH="${NETDATA_INSTALL_PATH}/bin:\${PATH}"
+exec "${NETDATA_INSTALL_PATH}/bin/srv/netdata" "\${@}"
+EOF
+chmod 755 "${NETDATA_INSTALL_PATH}/bin/netdata"
+
+
+# -----------------------------------------------------------------------------
+# move etc to protect the destination when unpacked
+
+if [ -d "${NETDATA_INSTALL_PATH}/etc" ]
+ then
+ if [ -d "${NETDATA_INSTALL_PATH}/etc.new" ]
+ then
+ rm -rf "${NETDATA_INSTALL_PATH}/etc.new" || exit 1
+ fi
+
+ mv "${NETDATA_INSTALL_PATH}/etc" \
+ "${NETDATA_INSTALL_PATH}/etc.new" || exit 1
+fi
+
+
+# -----------------------------------------------------------------------------
+# remove the links to allow untarring the archive
+
+rm "${NETDATA_INSTALL_PATH}/sbin" \
+ "${NETDATA_INSTALL_PATH}/usr/bin" \
+ "${NETDATA_INSTALL_PATH}/usr/sbin" \
+ "${NETDATA_INSTALL_PATH}/usr/local"
+
+
+# -----------------------------------------------------------------------------
+# create the makeself archive
+
+"${NETDATA_MAKESELF_PATH}/makeself.sh" \
+ --gzip \
+ --complevel 9 \
+ --notemp \
+ --needroot \
+ --target "${NETDATA_INSTALL_PATH}" \
+ --header "${NETDATA_MAKESELF_PATH}/makeself-header.sh" \
+ --lsm "${NETDATA_MAKESELF_PATH}/makeself.lsm" \
+ --license "${NETDATA_MAKESELF_PATH}/makeself-license.txt" \
+ --help-header "${NETDATA_MAKESELF_PATH}/makeself-help-header.txt" \
+ "${NETDATA_INSTALL_PATH}" \
+ "${NETDATA_INSTALL_PATH}.gz.run" \
+ "netdata, the real-time performance and health monitoring system" \
+ ./system/post-installer.sh \
+ ${NULL}
+
+
+# -----------------------------------------------------------------------------
+# copy it to the netdata build dir
+
+NOWNER="unknown"
+ORIGIN="$(git config --get remote.origin.url || echo "unknown")"
+if [[ "${ORIGIN}" =~ ^git@github.com:.*/netdata.*$ ]]
+ then
+ NOWNER="${ORIGIN/git@github.com:/}"
+ NOWNER="${NOWNER/\/netdata*/}"
+
+elif [[ "${ORIGIN}" =~ ^https://github.com/.*/netdata.*$ ]]
+ then
+ NOWNER="${ORIGIN/https:\/\/github.com\//}"
+ NOWNER="${NOWNER/\/netdata*/}"
+fi
+
+# make sure it does not have any slashes in it
+NOWNER="${NOWNER//\//_}"
+
+if [ "${NOWNER}" = "firehol" ]
+ then
+ NOWNER=
+else
+ NOWNER="-${NOWNER}"
+fi
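The two parameter substitutions peel the GitHub owner out of the remote URL; a trace with a hypothetical fork:
    # ORIGIN="https://github.com/example-user/netdata.git"
    # strip scheme+host -> "example-user/netdata.git"
    # strip "/netdata*" -> "example-user"
    # firehol builds get no suffix; forks get "-example-user" in the filename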
+
+VERSION="$(git describe || echo "undefined")"
+[ -z "${VERSION}" ] && VERSION="undefined"
+
+FILE="netdata-${VERSION}-$(uname -m)-$(date +"%Y%m%d-%H%M%S")${NOWNER}.gz.run"
+
+cp "${NETDATA_INSTALL_PATH}.gz.run" "${FILE}"
+echo >&2 "Self-extracting installer copied to '${FILE}'"
+
+[ -f netdata-latest.gz.run ] && rm netdata-latest.gz.run
+ln -s "${FILE}" netdata-latest.gz.run
+echo >&2 "Self-extracting installer linked to 'netdata-latest.gz.run'"
diff --git a/makeself/makeself-header.sh b/makeself/makeself-header.sh
new file mode 100755
index 00000000..93d937b3
--- /dev/null
+++ b/makeself/makeself-header.sh
@@ -0,0 +1,554 @@
+cat << EOF > "$archname"
+#!/bin/sh
+# This script was generated using Makeself $MS_VERSION
+
+ORIG_UMASK=\`umask\`
+if test "$KEEP_UMASK" = n; then
+ umask 077
+fi
+
+CRCsum="$CRCsum"
+MD5="$MD5sum"
+TMPROOT=\${TMPDIR:=/tmp}
+USER_PWD="\$PWD"; export USER_PWD
+
+label="$LABEL"
+script="$SCRIPT"
+scriptargs="$SCRIPTARGS"
+licensetxt="$LICENSE"
+helpheader='$HELPHEADER'
+targetdir="$archdirname"
+filesizes="$filesizes"
+keep="$KEEP"
+nooverwrite="$NOOVERWRITE"
+quiet="n"
+accept="n"
+nodiskspace="n"
+export_conf="$EXPORT_CONF"
+
+print_cmd_arg=""
+if type printf > /dev/null; then
+ print_cmd="printf"
+elif test -x /usr/ucb/echo; then
+ print_cmd="/usr/ucb/echo"
+else
+ print_cmd="echo"
+fi
+
+if test -d /usr/xpg4/bin; then
+ PATH=/usr/xpg4/bin:\$PATH
+ export PATH
+fi
+
+unset CDPATH
+
+MS_Printf()
+{
+ \$print_cmd \$print_cmd_arg "\$1"
+}
+
+MS_PrintLicense()
+{
+ if test x"\$licensetxt" != x; then
+ echo "\$licensetxt"
+ if test x"\$accept" != xy; then
+ while true
+ do
+ MS_Printf "Please type y to accept, n otherwise: "
+ read yn
+ if test x"\$yn" = xn; then
+ keep=n
+ eval \$finish; exit 1
+ break;
+ elif test x"\$yn" = xy; then
+ break;
+ fi
+ done
+ fi
+ fi
+}
+
+MS_diskspace()
+{
+ (
+ df -kP "\$1" | tail -1 | awk '{ if (\$4 ~ /%/) {print \$3} else {print \$4} }'
+ )
+}
+
+MS_dd()
+{
+ blocks=\`expr \$3 / 1024\`
+ bytes=\`expr \$3 % 1024\`
+ dd if="\$1" ibs=\$2 skip=1 obs=1024 conv=sync 2> /dev/null | \\
+ { test \$blocks -gt 0 && dd ibs=1024 obs=1024 count=\$blocks ; \\
+ test \$bytes -gt 0 && dd ibs=1 obs=1024 count=\$bytes ; } 2> /dev/null
+}
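MS_dd skips the script header (ibs=\$2 skip=1 reads past the first \$2 bytes) and then copies the payload as whole 1 KiB blocks plus a byte remainder; for example:
    # length $3 = 3000 bytes:
    #   blocks = 3000 / 1024 = 2   (copied with obs=1024)
    #   bytes  = 3000 % 1024 = 952 (copied byte-by-byte)
    #   2*1024 + 952 = 3000, an exact copy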
+
+MS_dd_Progress()
+{
+ if test x"\$noprogress" = xy; then
+ MS_dd \$@
+ return \$?
+ fi
+ file="\$1"
+ offset=\$2
+ length=\$3
+ pos=0
+ bsize=4194304
+ while test \$bsize -gt \$length; do
+ bsize=\`expr \$bsize / 4\`
+ done
+ blocks=\`expr \$length / \$bsize\`
+ bytes=\`expr \$length % \$bsize\`
+ (
+ dd ibs=\$offset skip=1 2>/dev/null
+ pos=\`expr \$pos \+ \$bsize\`
+ MS_Printf " 0%% " 1>&2
+ if test \$blocks -gt 0; then
+ while test \$pos -le \$length; do
+ dd bs=\$bsize count=1 2>/dev/null
+ pcent=\`expr \$length / 100\`
+ pcent=\`expr \$pos / \$pcent\`
+ if test \$pcent -lt 100; then
+ MS_Printf "\b\b\b\b\b\b\b" 1>&2
+ if test \$pcent -lt 10; then
+ MS_Printf " \$pcent%% " 1>&2
+ else
+ MS_Printf " \$pcent%% " 1>&2
+ fi
+ fi
+ pos=\`expr \$pos \+ \$bsize\`
+ done
+ fi
+ if test \$bytes -gt 0; then
+ dd bs=\$bytes count=1 2>/dev/null
+ fi
+ MS_Printf "\b\b\b\b\b\b\b" 1>&2
+ MS_Printf " 100%% " 1>&2
+ ) < "\$file"
+}
+
+MS_Help()
+{
+ cat << EOH >&2
+\${helpheader}Makeself version $MS_VERSION
+ 1) Getting help or info about \$0 :
+ \$0 --help Print this message
+ \$0 --info Print embedded info : title, default target directory, embedded script ...
+ \$0 --lsm Print embedded lsm entry (or no LSM)
+ \$0 --list Print the list of files in the archive
+ \$0 --check Checks integrity of the archive
+
+ 2) Running \$0 :
+ \$0 [options] [--] [additional arguments to embedded script]
+ with following options (in that order)
+ --confirm Ask before running embedded script
+ --quiet Do not print anything except error messages
+ --accept Accept the license
+ --noexec Do not run embedded script
+ --keep Do not erase target directory after running
+ the embedded script
+ --noprogress Do not show the progress during the decompression
+ --nox11 Do not spawn an xterm
+ --nochown Do not give the extracted files to the current user
+ --nodiskspace Do not check for available disk space
+ --target dir Extract directly to a target directory
+ directory path can be either absolute or relative
+ --tar arg1 [arg2 ...] Access the contents of the archive through the tar command
+ -- Following arguments will be passed to the embedded script
+EOH
+}
+
+MS_Check()
+{
+ OLD_PATH="\$PATH"
+ PATH=\${GUESS_MD5_PATH:-"\$OLD_PATH:/bin:/usr/bin:/sbin:/usr/local/ssl/bin:/usr/local/bin:/opt/openssl/bin"}
+ MD5_ARG=""
+ MD5_PATH=\`exec <&- 2>&-; which md5sum || command -v md5sum || type md5sum\`
+ test -x "\$MD5_PATH" || MD5_PATH=\`exec <&- 2>&-; which md5 || command -v md5 || type md5\`
+ test -x "\$MD5_PATH" || MD5_PATH=\`exec <&- 2>&-; which digest || command -v digest || type digest\`
+ PATH="\$OLD_PATH"
+
+ if test x"\$quiet" = xn; then
+ MS_Printf "Verifying archive integrity..."
+ fi
+ offset=\`head -n $SKIP "\$1" | wc -c | tr -d " "\`
+ verb=\$2
+ i=1
+ for s in \$filesizes
+ do
+ crc=\`echo \$CRCsum | cut -d" " -f\$i\`
+ if test -x "\$MD5_PATH"; then
+ if test x"\`basename \$MD5_PATH\`" = xdigest; then
+ MD5_ARG="-a md5"
+ fi
+ md5=\`echo \$MD5 | cut -d" " -f\$i\`
+ if test x"\$md5" = x00000000000000000000000000000000; then
+ test x"\$verb" = xy && echo " \$1 does not contain an embedded MD5 checksum." >&2
+ else
+ md5sum=\`MS_dd_Progress "\$1" \$offset \$s | eval "\$MD5_PATH \$MD5_ARG" | cut -b-32\`;
+ if test x"\$md5sum" != x"\$md5"; then
+ echo "Error in MD5 checksums: \$md5sum is different from \$md5" >&2
+ exit 2
+ else
+ test x"\$verb" = xy && MS_Printf " MD5 checksums are OK." >&2
+ fi
+ crc="0000000000"; verb=n
+ fi
+ fi
+ if test x"\$crc" = x0000000000; then
+ test x"\$verb" = xy && echo " \$1 does not contain a CRC checksum." >&2
+ else
+ sum1=\`MS_dd_Progress "\$1" \$offset \$s | CMD_ENV=xpg4 cksum | awk '{print \$1}'\`
+ if test x"\$sum1" = x"\$crc"; then
+ test x"\$verb" = xy && MS_Printf " CRC checksums are OK." >&2
+ else
+ echo "Error in checksums: \$sum1 is different from \$crc" >&2
+ exit 2;
+ fi
+ fi
+ i=\`expr \$i + 1\`
+ offset=\`expr \$offset + \$s\`
+ done
+ if test x"\$quiet" = xn; then
+ echo " All good."
+ fi
+}
+
+UnTAR()
+{
+ if test x"\$quiet" = xn; then
+ tar \$1 "$UNTAR_EXTRA" -vf - 2>&1 || { echo Extraction failed. > /dev/tty; kill -15 \$$; }
+ else
+
+ tar \$1 "$UNTAR_EXTRA" -f - 2>&1 || { echo Extraction failed. > /dev/tty; kill -15 \$$; }
+ fi
+}
+
+finish=true
+xterm_loop=
+noprogress=$NOPROGRESS
+nox11=$NOX11
+copy=$COPY
+ownership=y
+verbose=n
+
+initargs="\$@"
+
+while true
+do
+ case "\$1" in
+ -h | --help)
+ MS_Help
+ exit 0
+ ;;
+ -q | --quiet)
+ quiet=y
+ noprogress=y
+ shift
+ ;;
+ --accept)
+ accept=y
+ shift
+ ;;
+ --info)
+ echo Identification: "\$label"
+ echo Target directory: "\$targetdir"
+ echo Uncompressed size: $USIZE KB
+ echo Compression: $COMPRESS
+ echo Date of packaging: $DATE
+ echo Built with Makeself version $MS_VERSION on $OSTYPE
+ echo Build command was: "$MS_COMMAND"
+ if test x"\$script" != x; then
+ echo Script run after extraction:
+ echo " " \$script \$scriptargs
+ fi
+ if test x"$copy" = xcopy; then
+ echo "Archive will copy itself to a temporary location"
+ fi
+ if test x"$NEED_ROOT" = xy; then
+ echo "Root permissions required for extraction"
+ fi
+ if test x"$KEEP" = xy; then
+ echo "directory \$targetdir is permanent"
+ else
+ echo "\$targetdir will be removed after extraction"
+ fi
+ exit 0
+ ;;
+ --dumpconf)
+ echo LABEL=\"\$label\"
+ echo SCRIPT=\"\$script\"
+ echo SCRIPTARGS=\"\$scriptargs\"
+ echo archdirname=\"$archdirname\"
+ echo KEEP=$KEEP
+ echo NOOVERWRITE=$NOOVERWRITE
+ echo COMPRESS=$COMPRESS
+ echo filesizes=\"\$filesizes\"
+ echo CRCsum=\"\$CRCsum\"
+ echo MD5sum=\"\$MD5\"
+ echo OLDUSIZE=$USIZE
+ echo OLDSKIP=`expr $SKIP + 1`
+ exit 0
+ ;;
+ --lsm)
+cat << EOLSM
+EOF
+eval "$LSM_CMD"
+cat << EOF >> "$archname"
+EOLSM
+ exit 0
+ ;;
+ --list)
+ echo Target directory: \$targetdir
+ offset=\`head -n $SKIP "\$0" | wc -c | tr -d " "\`
+ for s in \$filesizes
+ do
+ MS_dd "\$0" \$offset \$s | eval "$GUNZIP_CMD" | UnTAR t
+ offset=\`expr \$offset + \$s\`
+ done
+ exit 0
+ ;;
+ --tar)
+ offset=\`head -n $SKIP "\$0" | wc -c | tr -d " "\`
+ arg1="\$2"
+ if ! shift 2; then MS_Help; exit 1; fi
+ for s in \$filesizes
+ do
+ MS_dd "\$0" \$offset \$s | eval "$GUNZIP_CMD" | tar "\$arg1" - "\$@"
+ offset=\`expr \$offset + \$s\`
+ done
+ exit 0
+ ;;
+ --check)
+ MS_Check "\$0" y
+ exit 0
+ ;;
+ --confirm)
+ verbose=y
+ shift
+ ;;
+ --noexec)
+ script=""
+ shift
+ ;;
+ --keep)
+ keep=y
+ shift
+ ;;
+ --target)
+ keep=y
+ targetdir=\${2:-.}
+ if ! shift 2; then MS_Help; exit 1; fi
+ ;;
+ --noprogress)
+ noprogress=y
+ shift
+ ;;
+ --nox11)
+ nox11=y
+ shift
+ ;;
+ --nochown)
+ ownership=n
+ shift
+ ;;
+ --nodiskspace)
+ nodiskspace=y
+ shift
+ ;;
+ --xwin)
+ if test "$NOWAIT" = n; then
+ finish="echo Press Return to close this window...; read junk"
+ fi
+ xterm_loop=1
+ shift
+ ;;
+ --phase2)
+ copy=phase2
+ shift
+ ;;
+ --)
+ shift
+ break ;;
+ -*)
+	    echo Unrecognized flag: "\$1" >&2
+ MS_Help
+ exit 1
+ ;;
+ *)
+ break ;;
+ esac
+done
+
+if test x"\$quiet" = xy -a x"\$verbose" = xy; then
+ echo Cannot be verbose and quiet at the same time. >&2
+ exit 1
+fi
+
+if test x"$NEED_ROOT" = xy -a \`id -u\` -ne 0; then
+ echo "Administrative privileges required for this archive (use su or sudo)" >&2
+ exit 1
+fi
+
+if test x"\$copy" \!= xphase2; then
+ MS_PrintLicense
+fi
+
+case "\$copy" in
+copy)
+ tmpdir=\$TMPROOT/makeself.\$RANDOM.\`date +"%y%m%d%H%M%S"\`.\$\$
+ mkdir "\$tmpdir" || {
+ echo "Could not create temporary directory \$tmpdir" >&2
+ exit 1
+ }
+ SCRIPT_COPY="\$tmpdir/makeself"
+ echo "Copying to a temporary location..." >&2
+ cp "\$0" "\$SCRIPT_COPY"
+ chmod +x "\$SCRIPT_COPY"
+ cd "\$TMPROOT"
+ exec "\$SCRIPT_COPY" --phase2 -- \$initargs
+ ;;
+phase2)
+ finish="\$finish ; rm -rf \`dirname \$0\`"
+ ;;
+esac
+
+if test x"\$nox11" = xn; then
+ if tty -s; then # Do we have a terminal?
+ :
+ else
+ if test x"\$DISPLAY" != x -a x"\$xterm_loop" = x; then # No, but do we have X?
+ if xset q > /dev/null 2>&1; then # Check for valid DISPLAY variable
+ GUESS_XTERMS="xterm gnome-terminal rxvt dtterm eterm Eterm xfce4-terminal lxterminal kvt konsole aterm terminology"
+ for a in \$GUESS_XTERMS; do
+ if type \$a >/dev/null 2>&1; then
+ XTERM=\$a
+ break
+ fi
+ done
+ chmod a+x \$0 || echo Please add execution rights on \$0
+ if test \`echo "\$0" | cut -c1\` = "/"; then # Spawn a terminal!
+ exec \$XTERM -title "\$label" -e "\$0" --xwin "\$initargs"
+ else
+ exec \$XTERM -title "\$label" -e "./\$0" --xwin "\$initargs"
+ fi
+ fi
+ fi
+ fi
+fi
+
+if test x"\$targetdir" = x.; then
+ tmpdir="."
+else
+ if test x"\$keep" = xy; then
+ if test x"\$nooverwrite" = xy && test -d "\$targetdir"; then
+ echo "Target directory \$targetdir already exists, aborting." >&2
+ exit 1
+ fi
+ if test x"\$quiet" = xn; then
+ echo "Creating directory \$targetdir" >&2
+ fi
+ tmpdir="\$targetdir"
+ dashp="-p"
+ else
+ tmpdir="\$TMPROOT/selfgz\$\$\$RANDOM"
+ dashp=""
+ fi
+ mkdir \$dashp \$tmpdir || {
+ echo 'Cannot create target directory' \$tmpdir >&2
+ echo 'You should try option --target dir' >&2
+ eval \$finish
+ exit 1
+ }
+fi
+
+location="\`pwd\`"
+if test x"\$SETUP_NOCHECK" != x1; then
+ MS_Check "\$0"
+fi
+offset=\`head -n $SKIP "\$0" | wc -c | tr -d " "\`
+
+if test x"\$verbose" = xy; then
+	MS_Printf "About to extract $USIZE KB in \$tmpdir ... Proceed? [Y/n] "
+ read yn
+ if test x"\$yn" = xn; then
+ eval \$finish; exit 1
+ fi
+fi
+
+if test x"\$quiet" = xn; then
+ MS_Printf "Uncompressing \$label"
+fi
+res=3
+if test x"\$keep" = xn; then
+ trap 'echo Signal caught, cleaning up >&2; cd \$TMPROOT; /bin/rm -rf \$tmpdir; eval \$finish; exit 15' 1 2 3 15
+fi
+
+if test x"\$nodiskspace" = xn; then
+ leftspace=\`MS_diskspace \$tmpdir\`
+ if test -n "\$leftspace"; then
+ if test "\$leftspace" -lt $USIZE; then
+ echo
+ echo "Not enough space left in "\`dirname \$tmpdir\`" (\$leftspace KB) to decompress \$0 ($USIZE KB)" >&2
+ echo "Use --nodiskspace option to skip this check and proceed anyway" >&2
+ if test x"\$keep" = xn; then
+            echo "Consider setting TMPDIR to a directory with more free space." >&2
+ fi
+ eval \$finish; exit 1
+ fi
+ fi
+fi
+
+for s in \$filesizes
+do
+ if MS_dd_Progress "\$0" \$offset \$s | eval "$GUNZIP_CMD" | ( cd "\$tmpdir"; umask \$ORIG_UMASK ; UnTAR xp ) 1>/dev/null; then
+ if test x"\$ownership" = xy; then
+ (cd "\$tmpdir"; chown -R \`id -u\` .; chgrp -R \`id -g\` .)
+ fi
+ else
+ echo >&2
+ echo "Unable to decompress \$0" >&2
+ eval \$finish; exit 1
+ fi
+ offset=\`expr \$offset + \$s\`
+done
+if test x"\$quiet" = xn; then
+ echo
+fi
+
+cd "\$tmpdir"
+res=0
+if test x"\$script" != x; then
+ if test x"\$export_conf" = x"y"; then
+ MS_BUNDLE="\$0"
+ MS_LABEL="\$label"
+ MS_SCRIPT="\$script"
+ MS_SCRIPTARGS="\$scriptargs"
+ MS_ARCHDIRNAME="\$archdirname"
+ MS_KEEP="\$KEEP"
+ MS_NOOVERWRITE="\$NOOVERWRITE"
+ MS_COMPRESS="\$COMPRESS"
+ export MS_BUNDLE MS_LABEL MS_SCRIPT MS_SCRIPTARGS
+ export MS_ARCHDIRNAME MS_KEEP MS_NOOVERWRITE MS_COMPRESS
+ fi
+
+ if test x"\$verbose" = x"y"; then
+ MS_Printf "OK to execute: \$script \$scriptargs \$* ? [Y/n] "
+ read yn
+ if test x"\$yn" = x -o x"\$yn" = xy -o x"\$yn" = xY; then
+ eval "\"\$script\" \$scriptargs \"\\\$@\""; res=\$?;
+ fi
+ else
+ eval "\"\$script\" \$scriptargs \"\\\$@\""; res=\$?
+ fi
+ if test "\$res" -ne 0; then
+ test x"\$verbose" = xy && echo "The program '\$script' returned an error code (\$res)" >&2
+ fi
+fi
+if test x"\$keep" = xn; then
+ cd \$TMPROOT
+ /bin/rm -rf \$tmpdir
+fi
+eval \$finish; exit \$res
+EOF
diff --git a/makeself/makeself-help-header.txt b/makeself/makeself-help-header.txt
new file mode 100644
index 00000000..e2649005
--- /dev/null
+++ b/makeself/makeself-help-header.txt
@@ -0,0 +1,46 @@
+
+ ^
+ |.-. .-. .-. .-. . netdata
+ | '-' '-' '-' '-' real-time performance monitoring, done right!
+ +----+-----+-----+-----+-----+-----+-----+-----+-----+-----+-----+-----+--->
+
+ (C) Copyright 2017, Costa Tsaousis
+ All rights reserved
+ Released under GPL v3+
+
+ You are about to install netdata to this system.
+ netdata will be installed at:
+
+ /opt/netdata
+
+ The following changes will be made to your system:
+
+ # USERS / GROUPS
+ User 'netdata' and group 'netdata' will be added, if not present.
+
+ # LOGROTATE
+ This file will be installed if logrotate is present.
+
+ - /etc/logrotate.d/netdata
+
+ # SYSTEM INIT
+ This file will be installed if this system uses systemd:
+
+ - /etc/systemd/system/netdata.service
+
+ or, for older CentOS, Debian/Ubuntu or OpenRC Gentoo:
+
+ - /etc/init.d/netdata will be created
+
+
+ This package can also update an existing netdata installation
+ that was created with another version of it.
+
+ Your netdata configuration will be retained.
+ After installation, netdata will be (re-)started.
+
+ netdata re-distributes a lot of open source software components.
+ Check its full license at:
+ https://github.com/firehol/netdata/blob/master/LICENSE.md
+
+
diff --git a/makeself/makeself-license.txt b/makeself/makeself-license.txt
new file mode 100644
index 00000000..e2649005
--- /dev/null
+++ b/makeself/makeself-license.txt
@@ -0,0 +1,46 @@
+
+ ^
+ |.-. .-. .-. .-. . netdata
+ | '-' '-' '-' '-' real-time performance monitoring, done right!
+ +----+-----+-----+-----+-----+-----+-----+-----+-----+-----+-----+-----+--->
+
+ (C) Copyright 2017, Costa Tsaousis
+ All rights reserved
+ Released under GPL v3+
+
+ You are about to install netdata to this system.
+ netdata will be installed at:
+
+ /opt/netdata
+
+ The following changes will be made to your system:
+
+ # USERS / GROUPS
+ User 'netdata' and group 'netdata' will be added, if not present.
+
+ # LOGROTATE
+ This file will be installed if logrotate is present.
+
+ - /etc/logrotate.d/netdata
+
+ # SYSTEM INIT
+ This file will be installed if this system uses systemd:
+
+ - /etc/systemd/system/netdata.service
+
+ or, for older CentOS, Debian/Ubuntu or OpenRC Gentoo:
+
+ - /etc/init.d/netdata will be created
+
+
+ This package can also update an existing netdata installation
+ that was created with another version of it.
+
+ Your netdata configuration will be retained.
+ After installation, netdata will be (re-)started.
+
+ netdata re-distributes a lot of open source software components.
+ Check its full license at:
+ https://github.com/firehol/netdata/blob/master/LICENSE.md
+
+
diff --git a/makeself/makeself.lsm b/makeself/makeself.lsm
new file mode 100644
index 00000000..02679629
--- /dev/null
+++ b/makeself/makeself.lsm
@@ -0,0 +1,16 @@
+Begin3
+Title: netdata
+Version: 1.6.0
+Description: netdata is a system for distributed real-time performance and health monitoring.
+ It provides unparalleled insights, in real-time, of everything happening on the
+ system it runs on (including applications such as web and database servers), using
+ modern interactive web dashboards. netdata is fast and efficient, designed to
+ permanently run on all systems (physical & virtual servers, containers, IoT
+ devices), without disrupting their core function.
+Keywords: real-time performance and health monitoring
+Author: Costa Tsaousis (costa@tsaousis.gr)
+Maintained-by: Costa Tsaousis (costa@tsaousis.gr)
+Original-site: https://my-netdata.io/
+Platform: Unix
+Copying-policy: GPL
+End
diff --git a/makeself/makeself.sh b/makeself/makeself.sh
new file mode 100755
index 00000000..709473aa
--- /dev/null
+++ b/makeself/makeself.sh
@@ -0,0 +1,620 @@
+#!/bin/sh
+#
+# Makeself version 2.3.x
+# by Stephane Peter <megastep@megastep.org>
+#
+# Utility to create self-extracting tar.gz archives.
+# The resulting archive is a file holding the tar.gz archive with
+# a small shell script stub that uncompresses the archive to a temporary
+# directory and then executes a given script from within that directory.
+#
+# Makeself home page: http://makeself.io/
+#
+# Version 2.0 is a rewrite of version 1.0 to make the code easier to read and maintain.
+#
+# Version history :
+# - 1.0 : Initial public release
+# - 1.1 : The archive can be passed parameters that will be passed on to
+# the embedded script, thanks to John C. Quillan
+# - 1.2 : Package distribution, bzip2 compression, more command line options,
+# support for non-temporary archives. Ideas thanks to Francois Petitjean
+# - 1.3 : More patches from Bjarni R. Einarsson and Francois Petitjean:
+# Support for no compression (--nocomp), script is no longer mandatory,
+# automatic launch in an xterm, optional verbose output, and -target
+# archive option to indicate where to extract the files.
+# - 1.4 : Improved UNIX compatibility (Francois Petitjean)
+# Automatic integrity checking, support of LSM files (Francois Petitjean)
+# - 1.5 : Many bugfixes. Optionally disable xterm spawning.
+# - 1.5.1 : More bugfixes, added archive options -list and -check.
+# - 1.5.2 : Cosmetic changes to inform the user of what's going on with big
+# archives (Quake III demo)
+# - 1.5.3 : Check for validity of the DISPLAY variable before launching an xterm.
+# More verbosity in xterms and check for embedded command's return value.
+# Bugfix for Debian 2.0 systems that have a different "print" command.
+# - 1.5.4 : Many bugfixes. Print out a message if the extraction failed.
+# - 1.5.5 : More bugfixes. Added support for SETUP_NOCHECK environment variable to
+# bypass checksum verification of archives.
+# - 1.6.0 : Compute MD5 checksums with the md5sum command (patch from Ryan Gordon)
+# - 2.0 : Brand new rewrite, cleaner architecture, separated header and UNIX ports.
+# - 2.0.1 : Added --copy
+# - 2.1.0 : Allow multiple tarballs to be stored in one archive, and incremental updates.
+# Added --nochown for archives
+#            Stopped doing redundant checksums when not necessary
+# - 2.1.1 : Work around insane behavior from certain Linux distros with no 'uncompress' command
+# Cleaned up the code to handle error codes from compress. Simplified the extraction code.
+# - 2.1.2 : Some bug fixes. Use head -n to avoid problems.
+# - 2.1.3 : Bug fixes with command line when spawning terminals.
+#          Added --tar for archives, allowing arbitrary arguments to be passed to tar on the contents of the archive.
+# Added --noexec to prevent execution of embedded scripts.
+# Added --nomd5 and --nocrc to avoid creating checksums in archives.
+# Added command used to create the archive in --info output.
+# Run the embedded script through eval.
+# - 2.1.4 : Fixed --info output.
+# Generate random directory name when extracting files to . to avoid problems. (Jason Trent)
+# Better handling of errors with wrong permissions for the directory containing the files. (Jason Trent)
+# Avoid some race conditions (Ludwig Nussel)
+# Unset the $CDPATH variable to avoid problems if it is set. (Debian)
+# Better handling of dot files in the archive directory.
+# - 2.1.5 : Made the md5sum detection consistent with the header code.
+# Check for the presence of the archive directory
+# Added --encrypt for symmetric encryption through gpg (Eric Windisch)
+# Added support for the digest command on Solaris 10 for MD5 checksums
+# Check for available disk space before extracting to the target directory (Andreas Schweitzer)
+# Allow extraction to run asynchronously (patch by Peter Hatch)
+# Use file descriptors internally to avoid error messages (patch by Kay Tiong Khoo)
+# - 2.1.6 : Replaced the one-dot-per-file progress with a real-time progress percentage and a spinning cursor (Guy Baconniere)
+# Added --noprogress to prevent showing the progress during the decompression (Guy Baconniere)
+# Added --target dir to allow extracting directly to a target directory (Guy Baconniere)
+# - 2.2.0 : Many bugfixes, updates and contributions from users. Check out the project page on Github for the details.
+# - 2.3.0 : Option to specify packaging date to enable byte-for-byte reproducibility. (Marc Pawlowsky)
+#
+# (C) 1998-2017 by Stephane Peter <megastep@megastep.org>
+#
+# This software is released under the terms of the GNU GPL version 2 and above
+# Please read the license at http://www.gnu.org/copyleft/gpl.html
+#
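+# A minimal usage sketch (illustrative paths; see MS_Usage below for
+# the full parameter list):
+#
+#   ./makeself.sh --gzip /tmp/netdata-build netdata.gz.run \
+#       "netdata static package" ./post-installer.sh
+#
+# This would produce 'netdata.gz.run', a self-extracting archive that
+# unpacks the packaged directory and then runs './post-installer.sh'
+# from inside it.
+#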
+
+MS_VERSION=2.3.1
+MS_COMMAND="$0"
+unset CDPATH
+
+for f in "${1+"$@"}"; do
+ MS_COMMAND="$MS_COMMAND \\\\
+ \\\"$f\\\""
+done
+
+# For Solaris systems
+if test -d /usr/xpg4/bin; then
+ PATH=/usr/xpg4/bin:$PATH
+ export PATH
+fi
+
+# Procedures
+
+MS_Usage()
+{
+ echo "Usage: $0 [params] archive_dir file_name label startup_script [args]"
+ echo "params can be one or more of the following :"
+ echo " --version | -v : Print out Makeself version number and exit"
+ echo " --help | -h : Print out this help message"
+ echo " --tar-quietly : Suppress verbose output from the tar command"
+ echo " --quiet | -q : Do not print any messages other than errors."
+ echo " --gzip : Compress using gzip (default if detected)"
+ echo " --pigz : Compress with pigz"
+ echo " --bzip2 : Compress using bzip2 instead of gzip"
+ echo " --pbzip2 : Compress using pbzip2 instead of gzip"
+ echo " --xz : Compress using xz instead of gzip"
+ echo " --lzo : Compress using lzop instead of gzip"
+ echo " --lz4 : Compress using lz4 instead of gzip"
+ echo " --compress : Compress using the UNIX 'compress' command"
+ echo " --complevel lvl : Compression level for gzip pigz xz lzo lz4 bzip2 and pbzip2 (default 9)"
+ echo " --base64 : Instead of compressing, encode the data using base64"
+ echo " --gpg-encrypt : Instead of compressing, encrypt the data using GPG"
+ echo " --gpg-asymmetric-encrypt-sign"
+ echo " : Instead of compressing, asymmetrically encrypt and sign the data using GPG"
+ echo " --gpg-extra opt : Append more options to the gpg command line"
+ echo " --ssl-encrypt : Instead of compressing, encrypt the data using OpenSSL"
+ echo " --nocomp : Do not compress the data"
+ echo " --notemp : The archive will create archive_dir in the"
+ echo " current directory and uncompress in ./archive_dir"
+ echo " --needroot : Check that the root user is extracting the archive before proceeding"
+ echo " --copy : Upon extraction, the archive will first copy itself to"
+ echo " a temporary directory"
+ echo " --append : Append more files to an existing Makeself archive"
+ echo " The label and startup scripts will then be ignored"
+ echo " --target dir : Extract directly to a target directory"
+ echo " directory path can be either absolute or relative"
+ echo " --nooverwrite : Do not extract the archive if the specified target directory exists"
+ echo " --current : Files will be extracted to the current directory"
+ echo " Both --current and --target imply --notemp"
+ echo " --tar-extra opt : Append more options to the tar command line"
+ echo " --untar-extra opt : Append more options to the during the extraction of the tar archive"
+ echo " --nomd5 : Don't calculate an MD5 for archive"
+ echo " --nocrc : Don't calculate a CRC for archive"
+ echo " --header file : Specify location of the header script"
+ echo " --follow : Follow the symlinks in the archive"
+ echo " --noprogress : Do not show the progress during the decompression"
+ echo " --nox11 : Disable automatic spawn of a xterm"
+ echo " --nowait : Do not wait for user input after executing embedded"
+ echo " program from an xterm"
+ echo " --lsm file : LSM file describing the package"
+ echo " --license file : Append a license file"
+ echo " --help-header file : Add a header to the archive's --help output"
+ echo " --packaging-date date"
+ echo " : Use provided string as the packaging date"
+ echo " instead of the current date."
+ echo
+ echo " --keep-umask : Keep the umask set to shell default, rather than overriding when executing self-extracting archive."
+ echo " --export-conf : Export configuration variables to startup_script"
+ echo
+ echo "Do not forget to give a fully qualified startup script name"
+ echo "(i.e. with a ./ prefix if inside the archive)."
+ exit 1
+}
+
+# Default settings
+if type gzip > /dev/null 2>&1; then
+ COMPRESS=gzip
+else
+ COMPRESS=Unix
+fi
+COMPRESS_LEVEL=9
+KEEP=n
+CURRENT=n
+NOX11=n
+NOWAIT=n
+APPEND=n
+TAR_QUIETLY=n
+KEEP_UMASK=n
+QUIET=n
+NOPROGRESS=n
+COPY=none
+NEED_ROOT=n
+TAR_ARGS=cvf
+TAR_EXTRA=""
+GPG_EXTRA=""
+DU_ARGS=-ks
+HEADER=`dirname "$0"`/makeself-header.sh
+TARGETDIR=""
+NOOVERWRITE=n
+DATE=`LC_ALL=C date`
+EXPORT_CONF=n
+
+# LSM file stuff
+LSM_CMD="echo No LSM. >> \"\$archname\""
+
+while true
+do
+ case "$1" in
+ --version | -v)
+ echo Makeself version $MS_VERSION
+ exit 0
+ ;;
+ --pbzip2)
+ COMPRESS=pbzip2
+ shift
+ ;;
+ --bzip2)
+ COMPRESS=bzip2
+ shift
+ ;;
+ --gzip)
+ COMPRESS=gzip
+ shift
+ ;;
+ --pigz)
+ COMPRESS=pigz
+ shift
+ ;;
+ --xz)
+ COMPRESS=xz
+ shift
+ ;;
+ --lzo)
+ COMPRESS=lzo
+ shift
+ ;;
+ --lz4)
+ COMPRESS=lz4
+ shift
+ ;;
+ --compress)
+ COMPRESS=Unix
+ shift
+ ;;
+ --base64)
+ COMPRESS=base64
+ shift
+ ;;
+ --gpg-encrypt)
+ COMPRESS=gpg
+ shift
+ ;;
+ --gpg-asymmetric-encrypt-sign)
+ COMPRESS=gpg-asymmetric
+ shift
+ ;;
+ --gpg-extra)
+ GPG_EXTRA="$2"
+	if ! shift 2; then MS_Usage; exit 1; fi
+ ;;
+ --ssl-encrypt)
+ COMPRESS=openssl
+ shift
+ ;;
+ --nocomp)
+ COMPRESS=none
+ shift
+ ;;
+ --complevel)
+ COMPRESS_LEVEL="$2"
+	if ! shift 2; then MS_Usage; exit 1; fi
+ ;;
+ --notemp)
+ KEEP=y
+ shift
+ ;;
+ --copy)
+ COPY=copy
+ shift
+ ;;
+ --current)
+ CURRENT=y
+ KEEP=y
+ shift
+ ;;
+ --tar-extra)
+ TAR_EXTRA="$2"
+	if ! shift 2; then MS_Usage; exit 1; fi
+ ;;
+ --untar-extra)
+ UNTAR_EXTRA="$2"
+	if ! shift 2; then MS_Usage; exit 1; fi
+ ;;
+ --target)
+ TARGETDIR="$2"
+ KEEP=y
+	if ! shift 2; then MS_Usage; exit 1; fi
+ ;;
+ --nooverwrite)
+ NOOVERWRITE=y
+ shift
+ ;;
+ --needroot)
+ NEED_ROOT=y
+ shift
+ ;;
+ --header)
+ HEADER="$2"
+	if ! shift 2; then MS_Usage; exit 1; fi
+ ;;
+ --license)
+	LICENSE=`cat "$2"`
+	if ! shift 2; then MS_Usage; exit 1; fi
+ ;;
+ --follow)
+ TAR_ARGS=cvhf
+ DU_ARGS=-ksL
+ shift
+ ;;
+ --noprogress)
+ NOPROGRESS=y
+ shift
+ ;;
+ --nox11)
+ NOX11=y
+ shift
+ ;;
+ --nowait)
+ NOWAIT=y
+ shift
+ ;;
+ --nomd5)
+ NOMD5=y
+ shift
+ ;;
+ --nocrc)
+ NOCRC=y
+ shift
+ ;;
+ --append)
+ APPEND=y
+ shift
+ ;;
+ --lsm)
+ LSM_CMD="cat \"$2\" >> \"\$archname\""
+	if ! shift 2; then MS_Usage; exit 1; fi
+ ;;
+ --packaging-date)
+ DATE="$2"
+	if ! shift 2; then MS_Usage; exit 1; fi
+ ;;
+ --help-header)
+	HELPHEADER=`sed -e "s/'/'\\\\\''/g" "$2"`
+	if ! shift 2; then MS_Usage; exit 1; fi
+ [ -n "$HELPHEADER" ] && HELPHEADER="$HELPHEADER
+"
+ ;;
+ --tar-quietly)
+ TAR_QUIETLY=y
+ shift
+ ;;
+ --keep-umask)
+ KEEP_UMASK=y
+ shift
+ ;;
+ --export-conf)
+ EXPORT_CONF=y
+ shift
+ ;;
+ -q | --quiet)
+ QUIET=y
+ shift
+ ;;
+ -h | --help)
+ MS_Usage
+ ;;
+ -*)
+	echo Unrecognized flag: "$1" >&2
+ MS_Usage
+ ;;
+ *)
+ break
+ ;;
+ esac
+done
+
+if test $# -lt 1; then
+ MS_Usage
+else
+ if test -d "$1"; then
+ archdir="$1"
+ else
+ echo "Directory $1 does not exist." >&2
+ exit 1
+ fi
+fi
+archname="$2"
+
+if test "$QUIET" = "y" || test "$TAR_QUIETLY" = "y"; then
+ if test "$TAR_ARGS" = "cvf"; then
+ TAR_ARGS="cf"
+ elif test "$TAR_ARGS" = "cvhf";then
+ TAR_ARGS="chf"
+ fi
+fi
+
+if test "$APPEND" = y; then
+ if test $# -lt 2; then
+ MS_Usage
+ fi
+
+ # Gather the info from the original archive
+ OLDENV=`sh "$archname" --dumpconf`
+ if test $? -ne 0; then
+ echo "Unable to update archive: $archname" >&2
+ exit 1
+ else
+ eval "$OLDENV"
+ fi
+else
+ if test "$KEEP" = n -a $# = 3; then
+ echo "ERROR: Making a temporary archive with no embedded command does not make sense!" >&2
+ echo >&2
+ MS_Usage
+ fi
+ # We don't want to create an absolute directory unless a target directory is defined
+ if test "$CURRENT" = y; then
+ archdirname="."
+ elif test x$TARGETDIR != x; then
+ archdirname="$TARGETDIR"
+ else
+ archdirname=`basename "$1"`
+ fi
+
+ if test $# -lt 3; then
+ MS_Usage
+ fi
+
+ LABEL="$3"
+ SCRIPT="$4"
+ test "x$SCRIPT" = x || shift 1
+ shift 3
+ SCRIPTARGS="$*"
+fi
+
+if test "$KEEP" = n -a "$CURRENT" = y; then
+ echo "ERROR: It is A VERY DANGEROUS IDEA to try to combine --notemp and --current." >&2
+ exit 1
+fi
+
+case $COMPRESS in
+gzip)
+ GZIP_CMD="gzip -c$COMPRESS_LEVEL"
+ GUNZIP_CMD="gzip -cd"
+ ;;
+pigz)
+ GZIP_CMD="pigz -$COMPRESS_LEVEL"
+ GUNZIP_CMD="gzip -cd"
+ ;;
+pbzip2)
+ GZIP_CMD="pbzip2 -c$COMPRESS_LEVEL"
+ GUNZIP_CMD="bzip2 -d"
+ ;;
+bzip2)
+ GZIP_CMD="bzip2 -$COMPRESS_LEVEL"
+ GUNZIP_CMD="bzip2 -d"
+ ;;
+xz)
+ GZIP_CMD="xz -c$COMPRESS_LEVEL"
+ GUNZIP_CMD="xz -d"
+ ;;
+lzo)
+ GZIP_CMD="lzop -c$COMPRESS_LEVEL"
+ GUNZIP_CMD="lzop -d"
+ ;;
+lz4)
+ GZIP_CMD="lz4 -c$COMPRESS_LEVEL"
+ GUNZIP_CMD="lz4 -d"
+ ;;
+base64)
+ GZIP_CMD="base64"
+ GUNZIP_CMD="base64 -d -i"
+ ;;
+gpg)
+ GZIP_CMD="gpg $GPG_EXTRA -ac -z$COMPRESS_LEVEL"
+ GUNZIP_CMD="gpg -d"
+ ;;
+gpg-asymmetric)
+ GZIP_CMD="gpg $GPG_EXTRA -z$COMPRESS_LEVEL -es"
+ GUNZIP_CMD="gpg --yes -d"
+ ;;
+openssl)
+ GZIP_CMD="openssl aes-256-cbc -a -salt"
+ GUNZIP_CMD="openssl aes-256-cbc -d -a"
+ ;;
+Unix)
+ GZIP_CMD="compress -cf"
+ GUNZIP_CMD="exec 2>&-; uncompress -c || test \\\$? -eq 2 || gzip -cd"
+ ;;
+none)
+ GZIP_CMD="cat"
+ GUNZIP_CMD="cat"
+ ;;
+esac
+
+tmpfile="${TMPDIR:=/tmp}/mkself$$"
+
+if test -f "$HEADER"; then
+ oldarchname="$archname"
+ archname="$tmpfile"
+ # Generate a fake header to count its lines
+ SKIP=0
+ . "$HEADER"
+    SKIP=`cat "$tmpfile" | wc -l`
+ # Get rid of any spaces
+ SKIP=`expr $SKIP`
+ rm -f "$tmpfile"
+ if test "$QUIET" = "n";then
+ echo Header is $SKIP lines long >&2
+ fi
+
+ archname="$oldarchname"
+else
+ echo "Unable to open header file: $HEADER" >&2
+ exit 1
+fi
+
+if test "$QUIET" = "n";then
+ echo
+fi
+
+if test "$APPEND" = n; then
+ if test -f "$archname"; then
+ echo "WARNING: Overwriting existing file: $archname" >&2
+ fi
+fi
+
+USIZE=`du $DU_ARGS "$archdir" | awk '{print $1}'`
+
+if test "." = "$archdirname"; then
+ if test "$KEEP" = n; then
+ archdirname="makeself-$$-`date +%Y%m%d%H%M%S`"
+ fi
+fi
+
+test -d "$archdir" || { echo "Error: $archdir does not exist."; rm -f "$tmpfile"; exit 1; }
+if test "$QUIET" = "n";then
+ echo About to compress $USIZE KB of data...
+ echo Adding files to archive named \"$archname\"...
+fi
+exec 3<> "$tmpfile"
+(cd "$archdir" && ( tar "$TAR_EXTRA" -$TAR_ARGS - . | eval "$GZIP_CMD" >&3 ) ) || { echo Aborting: Archive directory not found or temporary file: "$tmpfile" could not be created.; exec 3>&-; rm -f "$tmpfile"; exit 1; }
+exec 3>&- # try to close the archive
+
+fsize=`cat "$tmpfile" | wc -c | tr -d " "`
+
+# Compute the checksums
+
+md5sum=00000000000000000000000000000000
+crcsum=0000000000
+
+if test "$NOCRC" = y; then
+ if test "$QUIET" = "n";then
+ echo "skipping crc at user request"
+ fi
+else
+ crcsum=`cat "$tmpfile" | CMD_ENV=xpg4 cksum | sed -e 's/ /Z/' -e 's/ /Z/' | cut -dZ -f1`
+ if test "$QUIET" = "n";then
+ echo "CRC: $crcsum"
+ fi
+fi
+
+if test "$NOMD5" = y; then
+ if test "$QUIET" = "n";then
+ echo "skipping md5sum at user request"
+ fi
+else
+ # Try to locate a MD5 binary
+ OLD_PATH=$PATH
+ PATH=${GUESS_MD5_PATH:-"$OLD_PATH:/bin:/usr/bin:/sbin:/usr/local/ssl/bin:/usr/local/bin:/opt/openssl/bin"}
+ MD5_ARG=""
+ MD5_PATH=`exec <&- 2>&-; which md5sum || command -v md5sum || type md5sum`
+ test -x "$MD5_PATH" || MD5_PATH=`exec <&- 2>&-; which md5 || command -v md5 || type md5`
+ test -x "$MD5_PATH" || MD5_PATH=`exec <&- 2>&-; which digest || command -v digest || type digest`
+ PATH=$OLD_PATH
+ if test -x "$MD5_PATH"; then
+ if test `basename ${MD5_PATH}`x = digestx; then
+ MD5_ARG="-a md5"
+ fi
+ md5sum=`cat "$tmpfile" | eval "$MD5_PATH $MD5_ARG" | cut -b-32`;
+ if test "$QUIET" = "n";then
+ echo "MD5: $md5sum"
+ fi
+ else
+ if test "$QUIET" = "n";then
+ echo "MD5: none, MD5 command not found"
+ fi
+ fi
+fi
+
+if test "$APPEND" = y; then
+ mv "$archname" "$archname".bak || exit
+
+ # Prepare entry for new archive
+ filesizes="$filesizes $fsize"
+ CRCsum="$CRCsum $crcsum"
+ MD5sum="$MD5sum $md5sum"
+ USIZE=`expr $USIZE + $OLDUSIZE`
+ # Generate the header
+ . "$HEADER"
+ # Append the original data
+ tail -n +$OLDSKIP "$archname".bak >> "$archname"
+ # Append the new data
+ cat "$tmpfile" >> "$archname"
+
+ chmod +x "$archname"
+ rm -f "$archname".bak
+ if test "$QUIET" = "n";then
+ echo Self-extractable archive \"$archname\" successfully updated.
+ fi
+else
+ filesizes="$fsize"
+ CRCsum="$crcsum"
+ MD5sum="$md5sum"
+
+ # Generate the header
+ . "$HEADER"
+
+ # Append the compressed tar data after the stub
+ if test "$QUIET" = "n";then
+ echo
+ fi
+ cat "$tmpfile" >> "$archname"
+ chmod +x "$archname"
+ if test "$QUIET" = "n";then
+ echo Self-extractable archive \"$archname\" successfully created.
+ fi
+fi
+rm -f "$tmpfile"
+
diff --git a/makeself/post-installer.sh b/makeself/post-installer.sh
new file mode 100755
index 00000000..10f9863b
--- /dev/null
+++ b/makeself/post-installer.sh
@@ -0,0 +1,10 @@
+#!/bin/sh
+
+# This script is started using the system's default shell
+# and executes our 'install-or-update.sh' script
+# using the netdata-supplied, statically linked bash,
+#
+# so 'install-or-update.sh' is guaranteed
+# to always run under bash v4.
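+#
+# A typical call (illustrative; makeself runs this as the embedded
+# startup script, from the top of the extracted archive):
+#
+#   sh ./post-installer.sh /opt/netdata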
+
+./bin/bash system/install-or-update.sh "${@}"
diff --git a/makeself/run-all-jobs.sh b/makeself/run-all-jobs.sh
new file mode 100755
index 00000000..b08fa918
--- /dev/null
+++ b/makeself/run-all-jobs.sh
@@ -0,0 +1,46 @@
+#!/usr/bin/env bash
+
+export LC_ALL=C
+umask 002
+
+# be nice
+renice 19 $$ >/dev/null 2>/dev/null
+
+# -----------------------------------------------------------------------------
+# prepare the environment for the jobs
+
+# installation directory
+export NETDATA_INSTALL_PATH="${1-/opt/netdata}"
+
+# our source directory
+export NETDATA_MAKESELF_PATH="$(dirname "${0}")"
+if [ "${NETDATA_MAKESELF_PATH:0:1}" != "/" ]
+ then
+ export NETDATA_MAKESELF_PATH="$(pwd)/${NETDATA_MAKESELF_PATH}"
+fi
+
+# netdata source directory
+export NETDATA_SOURCE_PATH="${NETDATA_MAKESELF_PATH}/.."
+
+# number of processors this system has
+PROCESSORS=$(grep -c ^processor /proc/cpuinfo 2>/dev/null)
+[ -z "${PROCESSORS}" -o $(( PROCESSORS )) -lt 1 ] && PROCESSORS=1
+export PROCESSORS
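+# (PROCESSORS is presumably consumed by the job scripts to parallelize
+# builds, e.g. make -j${PROCESSORS})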
+
+# make sure ${NULL} is empty
+export NULL=
+
+# -----------------------------------------------------------------------------
+
+cd "${NETDATA_MAKESELF_PATH}" || exit 1
+
+. ./functions.sh "${@}" || exit 1
+
+for x in jobs/*.install.sh
+do
+ progress "running ${x}"
+ "${x}" "${NETDATA_INSTALL_PATH}"
+done
+
+echo >&2 "All jobs for static packaging done successfully."
+exit 0 \ No newline at end of file
diff --git a/makeself/setup-x86_64-static.sh b/makeself/setup-x86_64-static.sh
new file mode 100755
index 00000000..87cd2966
--- /dev/null
+++ b/makeself/setup-x86_64-static.sh
@@ -0,0 +1,26 @@
+#!/usr/bin/env sh
+
+# This script is meant to run inside Alpine Linux.
+# It installs the packages required to build the static netdata package.
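+#
+# (Illustrative: it is typically executed inside an Alpine container,
+# e.g. something like:
+#   docker run -v "$(pwd):/makeself" alpine sh /makeself/setup-x86_64-static.sh
+# the exact invocation lives in the companion build scripts.)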
+apk update
+apk add --no-cache \
+ bash \
+ wget \
+ curl \
+ ncurses \
+ git \
+ netcat-openbsd \
+ alpine-sdk \
+ autoconf \
+ automake \
+ gcc \
+ make \
+ libtool \
+ pkgconfig \
+ util-linux-dev \
+ openssl-dev \
+ gnutls-dev \
+ zlib-dev \
+ libmnl-dev \
+ libnetfilter_acct-dev \
+ || exit 1
diff --git a/missing b/missing
index 86a8fc31..db98974f 100755
--- a/missing
+++ b/missing
@@ -1,11 +1,10 @@
#! /bin/sh
-# Common stub for a few missing GNU programs while installing.
+# Common wrapper for a few potentially missing GNU programs.
-scriptversion=2012-01-06.13; # UTC
+scriptversion=2013-10-28.13; # UTC
-# Copyright (C) 1996, 1997, 1999, 2000, 2002, 2003, 2004, 2005, 2006,
-# 2008, 2009, 2010, 2011, 2012 Free Software Foundation, Inc.
-# Originally by Fran,cois Pinard <pinard@iro.umontreal.ca>, 1996.
+# Copyright (C) 1996-2013 Free Software Foundation, Inc.
+# Originally written by Fran,cois Pinard <pinard@iro.umontreal.ca>, 1996.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -26,68 +25,40 @@ scriptversion=2012-01-06.13; # UTC
# the same distribution terms that you use for the rest of that program.
if test $# -eq 0; then
- echo 1>&2 "Try \`$0 --help' for more information"
+ echo 1>&2 "Try '$0 --help' for more information"
exit 1
fi
-run=:
-sed_output='s/.* --output[ =]\([^ ]*\).*/\1/p'
-sed_minuso='s/.* -o \([^ ]*\).*/\1/p'
-
-# In the cases where this matters, `missing' is being run in the
-# srcdir already.
-if test -f configure.ac; then
- configure_ac=configure.ac
-else
- configure_ac=configure.in
-fi
+case $1 in
-msg="missing on your system"
+ --is-lightweight)
+ # Used by our autoconf macros to check whether the available missing
+ # script is modern enough.
+ exit 0
+ ;;
-case $1 in
---run)
- # Try to run requested program, and just exit if it succeeds.
- run=
- shift
- "$@" && exit 0
- # Exit code 63 means version mismatch. This often happens
- # when the user try to use an ancient version of a tool on
- # a file that requires a minimum version. In this case we
- # we should proceed has if the program had been absent, or
- # if --run hadn't been passed.
- if test $? = 63; then
- run=:
- msg="probably too old"
- fi
- ;;
+ --run)
+ # Back-compat with the calling convention used by older automake.
+ shift
+ ;;
-h|--h|--he|--hel|--help)
echo "\
$0 [OPTION]... PROGRAM [ARGUMENT]...
-Handle \`PROGRAM [ARGUMENT]...' for when PROGRAM is missing, or return an
-error status if there is no known handling for PROGRAM.
+Run 'PROGRAM [ARGUMENT]...', returning proper advice when this fails due
+to PROGRAM being missing or too old.
Options:
-h, --help display this help and exit
-v, --version output version information and exit
- --run try to run the given command, and emulate it if it fails
Supported PROGRAM values:
- aclocal touch file \`aclocal.m4'
- autoconf touch file \`configure'
- autoheader touch file \`config.h.in'
- autom4te touch the output file, or create a stub one
- automake touch all \`Makefile.in' files
- bison create \`y.tab.[ch]', if possible, from existing .[ch]
- flex create \`lex.yy.c', if possible, from existing .c
- help2man touch the output file
- lex create \`lex.yy.c', if possible, from existing .c
- makeinfo touch the output file
- yacc create \`y.tab.[ch]', if possible, from existing .[ch]
+ aclocal autoconf autoheader autom4te automake makeinfo
+ bison yacc flex lex help2man
-Version suffixes to PROGRAM as well as the prefixes \`gnu-', \`gnu', and
-\`g' are ignored when checking the name.
+Version suffixes to PROGRAM as well as the prefixes 'gnu-', 'gnu', and
+'g' are ignored when checking the name.
Send bug reports to <bug-automake@gnu.org>."
exit $?
@@ -99,228 +70,141 @@ Send bug reports to <bug-automake@gnu.org>."
;;
-*)
- echo 1>&2 "$0: Unknown \`$1' option"
- echo 1>&2 "Try \`$0 --help' for more information"
+ echo 1>&2 "$0: unknown '$1' option"
+ echo 1>&2 "Try '$0 --help' for more information"
exit 1
;;
esac
-# normalize program name to check for.
-program=`echo "$1" | sed '
- s/^gnu-//; t
- s/^gnu//; t
- s/^g//; t'`
-
-# Now exit if we have it, but it failed. Also exit now if we
-# don't have it and --version was passed (most likely to detect
-# the program). This is about non-GNU programs, so use $1 not
-# $program.
-case $1 in
- lex*|yacc*)
- # Not GNU programs, they don't have --version.
- ;;
-
- *)
- if test -z "$run" && ($1 --version) > /dev/null 2>&1; then
- # We have it, but it failed.
- exit 1
- elif test "x$2" = "x--version" || test "x$2" = "x--help"; then
- # Could not run --version or --help. This is probably someone
- # running `$TOOL --version' or `$TOOL --help' to check whether
- # $TOOL exists and not knowing $TOOL uses missing.
- exit 1
- fi
- ;;
-esac
-
-# If it does not exist, or fails to run (possibly an outdated version),
-# try to emulate it.
-case $program in
- aclocal*)
- echo 1>&2 "\
-WARNING: \`$1' is $msg. You should only need it if
- you modified \`acinclude.m4' or \`${configure_ac}'. You might want
- to install the \`Automake' and \`Perl' packages. Grab them from
- any GNU archive site."
- touch aclocal.m4
- ;;
-
- autoconf*)
- echo 1>&2 "\
-WARNING: \`$1' is $msg. You should only need it if
- you modified \`${configure_ac}'. You might want to install the
- \`Autoconf' and \`GNU m4' packages. Grab them from any GNU
- archive site."
- touch configure
- ;;
-
- autoheader*)
- echo 1>&2 "\
-WARNING: \`$1' is $msg. You should only need it if
- you modified \`acconfig.h' or \`${configure_ac}'. You might want
- to install the \`Autoconf' and \`GNU m4' packages. Grab them
- from any GNU archive site."
- files=`sed -n 's/^[ ]*A[CM]_CONFIG_HEADER(\([^)]*\)).*/\1/p' ${configure_ac}`
- test -z "$files" && files="config.h"
- touch_files=
- for f in $files; do
- case $f in
- *:*) touch_files="$touch_files "`echo "$f" |
- sed -e 's/^[^:]*://' -e 's/:.*//'`;;
- *) touch_files="$touch_files $f.in";;
- esac
- done
- touch $touch_files
- ;;
-
- automake*)
- echo 1>&2 "\
-WARNING: \`$1' is $msg. You should only need it if
- you modified \`Makefile.am', \`acinclude.m4' or \`${configure_ac}'.
- You might want to install the \`Automake' and \`Perl' packages.
- Grab them from any GNU archive site."
- find . -type f -name Makefile.am -print |
- sed 's/\.am$/.in/' |
- while read f; do touch "$f"; done
- ;;
-
- autom4te*)
- echo 1>&2 "\
-WARNING: \`$1' is needed, but is $msg.
- You might have modified some files without having the
- proper tools for further handling them.
- You can get \`$1' as part of \`Autoconf' from any GNU
- archive site."
-
- file=`echo "$*" | sed -n "$sed_output"`
- test -z "$file" && file=`echo "$*" | sed -n "$sed_minuso"`
- if test -f "$file"; then
- touch $file
- else
- test -z "$file" || exec >$file
- echo "#! /bin/sh"
- echo "# Created by GNU Automake missing as a replacement of"
- echo "# $ $@"
- echo "exit 0"
- chmod +x $file
- exit 1
- fi
- ;;
-
- bison*|yacc*)
- echo 1>&2 "\
-WARNING: \`$1' $msg. You should only need it if
- you modified a \`.y' file. You may need the \`Bison' package
- in order for those modifications to take effect. You can get
- \`Bison' from any GNU archive site."
- rm -f y.tab.c y.tab.h
- if test $# -ne 1; then
- eval LASTARG=\${$#}
- case $LASTARG in
- *.y)
- SRCFILE=`echo "$LASTARG" | sed 's/y$/c/'`
- if test -f "$SRCFILE"; then
- cp "$SRCFILE" y.tab.c
- fi
- SRCFILE=`echo "$LASTARG" | sed 's/y$/h/'`
- if test -f "$SRCFILE"; then
- cp "$SRCFILE" y.tab.h
- fi
- ;;
- esac
- fi
- if test ! -f y.tab.h; then
- echo >y.tab.h
- fi
- if test ! -f y.tab.c; then
- echo 'main() { return 0; }' >y.tab.c
- fi
- ;;
-
- lex*|flex*)
- echo 1>&2 "\
-WARNING: \`$1' is $msg. You should only need it if
- you modified a \`.l' file. You may need the \`Flex' package
- in order for those modifications to take effect. You can get
- \`Flex' from any GNU archive site."
- rm -f lex.yy.c
- if test $# -ne 1; then
- eval LASTARG=\${$#}
- case $LASTARG in
- *.l)
- SRCFILE=`echo "$LASTARG" | sed 's/l$/c/'`
- if test -f "$SRCFILE"; then
- cp "$SRCFILE" lex.yy.c
- fi
- ;;
- esac
- fi
- if test ! -f lex.yy.c; then
- echo 'main() { return 0; }' >lex.yy.c
- fi
- ;;
-
- help2man*)
- echo 1>&2 "\
-WARNING: \`$1' is $msg. You should only need it if
- you modified a dependency of a manual page. You may need the
- \`Help2man' package in order for those modifications to take
- effect. You can get \`Help2man' from any GNU archive site."
-
- file=`echo "$*" | sed -n "$sed_output"`
- test -z "$file" && file=`echo "$*" | sed -n "$sed_minuso"`
- if test -f "$file"; then
- touch $file
- else
- test -z "$file" || exec >$file
- echo ".ab help2man is required to generate this page"
- exit $?
- fi
- ;;
-
- makeinfo*)
- echo 1>&2 "\
-WARNING: \`$1' is $msg. You should only need it if
- you modified a \`.texi' or \`.texinfo' file, or any other file
- indirectly affecting the aspect of the manual. The spurious
- call might also be the consequence of using a buggy \`make' (AIX,
- DU, IRIX). You might want to install the \`Texinfo' package or
- the \`GNU make' package. Grab either from any GNU archive site."
- # The file to touch is that specified with -o ...
- file=`echo "$*" | sed -n "$sed_output"`
- test -z "$file" && file=`echo "$*" | sed -n "$sed_minuso"`
- if test -z "$file"; then
- # ... or it is the one specified with @setfilename ...
- infile=`echo "$*" | sed 's/.* \([^ ]*\) *$/\1/'`
- file=`sed -n '
- /^@setfilename/{
- s/.* \([^ ]*\) *$/\1/
- p
- q
- }' $infile`
- # ... or it is derived from the source name (dir/f.texi becomes f.info)
- test -z "$file" && file=`echo "$infile" | sed 's,.*/,,;s,.[^.]*$,,'`.info
- fi
- # If the file does not exist, the user really needs makeinfo;
- # let's fail without touching anything.
- test -f $file || exit 1
- touch $file
- ;;
+# Run the given program, remember its exit status.
+"$@"; st=$?
+
+# If it succeeded, we are done.
+test $st -eq 0 && exit 0
+
+# Also exit now if it failed (or wasn't found), and '--version' was
+# passed; such an option is passed most likely to detect whether the
+# program is present and works.
+case $2 in --version|--help) exit $st;; esac
+
+# Exit code 63 means version mismatch. This often happens when the user
+# tries to use an ancient version of a tool on a file that requires a
+# minimum version.
+if test $st -eq 63; then
+ msg="probably too old"
+elif test $st -eq 127; then
+ # Program was missing.
+ msg="missing on your system"
+else
+ # Program was found and executed, but failed. Give up.
+ exit $st
+fi
- *)
- echo 1>&2 "\
-WARNING: \`$1' is needed, and is $msg.
- You might have modified some files without having the
- proper tools for further handling them. Check the \`README' file,
- it often tells you about the needed prerequisites for installing
- this package. You may also peek at any GNU archive site, in case
- some other package would contain this missing \`$1' program."
- exit 1
+perl_URL=http://www.perl.org/
+flex_URL=http://flex.sourceforge.net/
+gnu_software_URL=http://www.gnu.org/software
+
+program_details ()
+{
+ case $1 in
+ aclocal|automake)
+ echo "The '$1' program is part of the GNU Automake package:"
+ echo "<$gnu_software_URL/automake>"
+ echo "It also requires GNU Autoconf, GNU m4 and Perl in order to run:"
+ echo "<$gnu_software_URL/autoconf>"
+ echo "<$gnu_software_URL/m4/>"
+ echo "<$perl_URL>"
+ ;;
+ autoconf|autom4te|autoheader)
+ echo "The '$1' program is part of the GNU Autoconf package:"
+ echo "<$gnu_software_URL/autoconf/>"
+ echo "It also requires GNU m4 and Perl in order to run:"
+ echo "<$gnu_software_URL/m4/>"
+ echo "<$perl_URL>"
+ ;;
+ esac
+}
+
+give_advice ()
+{
+ # Normalize program name to check for.
+ normalized_program=`echo "$1" | sed '
+ s/^gnu-//; t
+ s/^gnu//; t
+ s/^g//; t'`
+
+ printf '%s\n' "'$1' is $msg."
+
+ configure_deps="'configure.ac' or m4 files included by 'configure.ac'"
+ case $normalized_program in
+ autoconf*)
+ echo "You should only need it if you modified 'configure.ac',"
+ echo "or m4 files included by it."
+ program_details 'autoconf'
+ ;;
+ autoheader*)
+ echo "You should only need it if you modified 'acconfig.h' or"
+ echo "$configure_deps."
+ program_details 'autoheader'
+ ;;
+ automake*)
+ echo "You should only need it if you modified 'Makefile.am' or"
+ echo "$configure_deps."
+ program_details 'automake'
+ ;;
+ aclocal*)
+ echo "You should only need it if you modified 'acinclude.m4' or"
+ echo "$configure_deps."
+ program_details 'aclocal'
+ ;;
+ autom4te*)
+ echo "You might have modified some maintainer files that require"
+ echo "the 'autom4te' program to be rebuilt."
+ program_details 'autom4te'
+ ;;
+ bison*|yacc*)
+ echo "You should only need it if you modified a '.y' file."
+ echo "You may want to install the GNU Bison package:"
+ echo "<$gnu_software_URL/bison/>"
+ ;;
+ lex*|flex*)
+ echo "You should only need it if you modified a '.l' file."
+ echo "You may want to install the Fast Lexical Analyzer package:"
+ echo "<$flex_URL>"
+ ;;
+ help2man*)
+ echo "You should only need it if you modified a dependency" \
+ "of a man page."
+ echo "You may want to install the GNU Help2man package:"
+ echo "<$gnu_software_URL/help2man/>"
;;
-esac
-
-exit 0
+ makeinfo*)
+ echo "You should only need it if you modified a '.texi' file, or"
+ echo "any other file indirectly affecting the aspect of the manual."
+ echo "You might want to install the Texinfo package:"
+ echo "<$gnu_software_URL/texinfo/>"
+ echo "The spurious makeinfo call might also be the consequence of"
+ echo "using a buggy 'make' (AIX, DU, IRIX), in which case you might"
+ echo "want to install GNU make:"
+ echo "<$gnu_software_URL/make/>"
+ ;;
+ *)
+ echo "You might have modified some files without having the proper"
+ echo "tools for further handling them. Check the 'README' file, it"
+ echo "often tells you about the needed prerequisites for installing"
+ echo "this package. You may also peek at any GNU archive site, in"
+ echo "case some other package contains this missing '$1' program."
+ ;;
+ esac
+}
+
+give_advice "$1" | sed -e '1s/^/WARNING: /' \
+ -e '2,$s/^/ /' >&2
+
+# Propagate the correct exit status (expected to be 127 for a program
+# not found, 63 for a program that failed due to version mismatch).
+exit $st
# Local variables:
# eval: (add-hook 'write-file-hooks 'time-stamp)
diff --git a/netdata-installer.sh b/netdata-installer.sh
index 6b672a24..35cb850f 100755
--- a/netdata-installer.sh
+++ b/netdata-installer.sh
@@ -7,7 +7,7 @@ installer_dir="$(dirname "${0}")"
if [ "${netdata_source_dir}" != "${installer_dir}" -a "${installer_dir}" != "." ]
then
- echo >&2 "Warninng: you are currently in '${netdata_source_dir}' but the installer is in '${installer_dir}'."
+ echo >&2 "Warning: you are currently in '${netdata_source_dir}' but the installer is in '${installer_dir}'."
fi
@@ -43,7 +43,7 @@ then
export ACLOCAL_PATH
fi
-LC_ALL=C
+export LC_ALL=C
umask 002
# Be nice on production environments
@@ -65,12 +65,16 @@ printf "\n" >>netdata-installer.log
REINSTALL_PWD="${PWD}"
REINSTALL_COMMAND="$(printf "%q " "$0" "${@}"; printf "\n")"
+# remove options that should not be inherited by netdata-updater.sh
+REINSTALL_COMMAND="${REINSTALL_COMMAND// --dont-wait/}"
+REINSTALL_COMMAND="${REINSTALL_COMMAND// --dont-start-it/}"
setcap="$(which setcap 2>/dev/null || command -v setcap 2>/dev/null)"
ME="$0"
DONOTSTART=0
DONOTWAIT=0
+AUTOUPDATE=0
NETDATA_PREFIX=
LIBS_ARE_HERE=0
NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS-}"
@@ -98,6 +102,12 @@ Valid <installer options> are:
Do not wait for the user to press ENTER.
Start immediately building it.
+ --auto-update | -u
+
+ Install netdata-updater to cron,
+ to update netdata automatically once per day
+ (can only be done for installations from git)
+
--enable-plugin-freeipmi
--disable-plugin-freeipmi
@@ -138,7 +148,7 @@ these packages installed:
For the plugins, you will at least need:
- curl nodejs
+ curl, bash v4+, python v2 or v3, node.js
USAGE
}
@@ -203,6 +213,10 @@ do
then
DONOTWAIT=1
shift 1
+ elif [ "$1" = "--auto-update" -o "$1" = "-u" ]
+ then
+ AUTOUPDATE=1
+ shift 1
elif [ "$1" = "--enable-plugin-freeipmi" ]
then
NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS} --enable-plugin-freeipmi"
@@ -541,7 +555,7 @@ config_signature_matches() {
# backup user configurations
installer_backup_suffix="${PID}.${RANDOM}"
-for x in $(find "${NETDATA_PREFIX}/etc/netdata/" -name '*.conf' -type f)
+for x in $(find -L "${NETDATA_PREFIX}/etc/netdata/" -name '*.conf' -type f)
do
if [ -f "${x}" ]
then
@@ -552,7 +566,7 @@ do
then
# we don't have md5sum - keep it
echo >&2 "File '${TPUT_CYAN}${x}${TPUT_RESET}' ${TPUT_RET}is not known to distribution${TPUT_RESET}. Keeping it."
- run cp -p "${x}" "${x}.installer_backup.${installer_backup_suffix}"
+ run cp -a "${x}" "${x}.installer_backup.${installer_backup_suffix}"
else
# find it relative filename
f="${x/*\/etc\/netdata\//}"
@@ -573,7 +587,7 @@ do
else
# edited by user - keep it
echo >&2 "File '${TPUT_CYAN}${x}${TPUT_RESET}' ${TPUT_RED} has been edited by user${TPUT_RESET}. Keeping it."
- run cp -p "${x}" "${x}.installer_backup.${installer_backup_suffix}"
+ run cp -a "${x}" "${x}.installer_backup.${installer_backup_suffix}"
fi
fi
@@ -593,11 +607,11 @@ run make install || exit 1
# -----------------------------------------------------------------------------
progress "Restore user edited netdata configuration files"
-for x in $(find "${NETDATA_PREFIX}/etc/netdata/" -name '*.conf' -type f)
+for x in $(find -L "${NETDATA_PREFIX}/etc/netdata/" -name '*.conf' -type f)
do
if [ -f "${x}.installer_backup.${installer_backup_suffix}" ]
then
- run cp -p "${x}.installer_backup.${installer_backup_suffix}" "${x}" && \
+ run cp -a "${x}.installer_backup.${installer_backup_suffix}" "${x}" && \
run rm -f "${x}.installer_backup.${installer_backup_suffix}"
fi
done
@@ -612,42 +626,13 @@ run find ./system/ -type f -a \! -name \*.in -a \! -name Makefile\* -a \! -name
# -----------------------------------------------------------------------------
progress "Add user netdata to required user groups"
-NETDATA_ADDED_TO_DOCKER=0
-NETDATA_ADDED_TO_NGINX=0
-NETDATA_ADDED_TO_VARNISH=0
-NETDATA_ADDED_TO_HAPROXY=0
-NETDATA_ADDED_TO_ADM=0
-NETDATA_ADDED_TO_NSD=0
-if [ ${UID} -eq 0 ]
- then
- portable_add_group netdata
- portable_add_user netdata
- portable_add_user_to_group docker netdata && NETDATA_ADDED_TO_DOCKER=1
- portable_add_user_to_group nginx netdata && NETDATA_ADDED_TO_NGINX=1
- portable_add_user_to_group varnish netdata && NETDATA_ADDED_TO_VARNISH=1
- portable_add_user_to_group haproxy netdata && NETDATA_ADDED_TO_HAPROXY=1
- portable_add_user_to_group adm netdata && NETDATA_ADDED_TO_ADM=1
- portable_add_user_to_group nsd netdata && NETDATA_ADDED_TO_NSD=1
- run_ok
-else
- run_failed "The installer does not run as root."
-fi
+add_netdata_user_and_group || run_failed "The installer does not run as root."
+
# -----------------------------------------------------------------------------
progress "Install logrotate configuration for netdata"
-if [ ${UID} -eq 0 ]
- then
- if [ -d /etc/logrotate.d -a ! -f /etc/logrotate.d/netdata ]
- then
- run cp system/netdata.logrotate /etc/logrotate.d/netdata
- fi
-
- if [ -f /etc/logrotate.d/netdata ]
- then
- run chmod 644 /etc/logrotate.d/netdata
- fi
-fi
+install_netdata_logrotate
# -----------------------------------------------------------------------------
@@ -659,51 +644,41 @@ progress "Read installation options from netdata.conf"
# function to extract values from the config file
config_option() {
- local key="${1}" value="${2}" line=
+ local section="${1}" key="${2}" value="${3}"
if [ -s "${NETDATA_PREFIX}/etc/netdata/netdata.conf" ]
then
- line="$( grep "^[[:space:]]*${key}[[:space:]]*=[[:space:]]*" "${NETDATA_PREFIX}/etc/netdata/netdata.conf" | head -n 1 )"
- [ ! -z "${line}" ] && value="$( echo "${line}" | cut -d '=' -f 2 | sed -e "s/^[[:space:]]\+//g" -e "s/[[:space:]]\+$//g" )"
+ "${NETDATA_PREFIX}/usr/sbin/netdata" \
+ -c "${NETDATA_PREFIX}/etc/netdata/netdata.conf" \
+ -W get "${section}" "${key}" "${value}" || \
+ echo "${value}"
+ else
+ echo "${value}"
fi
-
- echo "${value}"
}
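+# e.g. (sketch): config_option "global" "run as user" "netdata"
+# prints the value configured in netdata.conf, or the supplied default
+# ("netdata") when the option or the config file is absent.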
# the user netdata will run as
if [ "${UID}" = "0" ]
then
- NETDATA_USER="$( config_option "run as user" "netdata" )"
+ NETDATA_USER="$( config_option "global" "run as user" "netdata" )"
else
NETDATA_USER="${USER}"
fi
# the owners of the web files
-NETDATA_WEB_USER="$( config_option "web files owner" "${NETDATA_USER}" )"
-NETDATA_WEB_GROUP="$( config_option "web files group" "${NETDATA_WEB_USER}" )"
-
-# debug flags
-NETDATA_DEBUG="$( config_option "debug flags" 0 )"
+NETDATA_WEB_USER="$( config_option "web" "web files owner" "${NETDATA_USER}" )"
+NETDATA_WEB_GROUP="$( config_option "web" "web files group" "${NETDATA_WEB_USER}" )"
# port
defport=19999
-NETDATA_PORT="$( config_option "default port" ${defport} )"
-NETDATA_PORT2="$( config_option "port" ${defport} )"
-
-if [ "${NETDATA_PORT}" != "${NETDATA_PORT2}" ]
-then
- if [ "${NETDATA_PORT2}" != "${defport}" ]
- then
- NETDATA_PORT="${NETDATA_PORT2}"
- fi
-fi
+NETDATA_PORT="$( config_option "web" "default port" ${defport} )"
# directories
-NETDATA_LIB_DIR="$( config_option "lib directory" "${NETDATA_PREFIX}/var/lib/netdata" )"
-NETDATA_CACHE_DIR="$( config_option "cache directory" "${NETDATA_PREFIX}/var/cache/netdata" )"
-NETDATA_WEB_DIR="$( config_option "web files directory" "${NETDATA_PREFIX}/usr/share/netdata/web" )"
-NETDATA_LOG_DIR="$( config_option "log directory" "${NETDATA_PREFIX}/var/log/netdata" )"
-NETDATA_CONF_DIR="$( config_option "config directory" "${NETDATA_PREFIX}/etc/netdata" )"
+NETDATA_LIB_DIR="$( config_option "global" "lib directory" "${NETDATA_PREFIX}/var/lib/netdata" )"
+NETDATA_CACHE_DIR="$( config_option "global" "cache directory" "${NETDATA_PREFIX}/var/cache/netdata" )"
+NETDATA_WEB_DIR="$( config_option "global" "web files directory" "${NETDATA_PREFIX}/usr/share/netdata/web" )"
+NETDATA_LOG_DIR="$( config_option "global" "log directory" "${NETDATA_PREFIX}/var/log/netdata" )"
+NETDATA_CONF_DIR="$( config_option "global" "config directory" "${NETDATA_PREFIX}/etc/netdata" )"
NETDATA_RUN_DIR="${NETDATA_PREFIX}/var/run"
@@ -824,139 +799,14 @@ fi
# -----------------------------------------------------------------------------
progress "Install netdata at system init"
-installed_init_d=0
-install_non_systemd_init() {
- [ "${UID}" != 0 ] && return 1
-
- local key="unknown"
- if [ -f /etc/os-release ]
- then
- source /etc/os-release || return 1
- key="${ID}-${VERSION_ID}"
-
- elif [ -f /etc/centos-release ]
- then
- key=$(</etc/centos-release)
- fi
-
- if [ -d /etc/init.d -a ! -f /etc/init.d/netdata ]
- then
- if [ "${key}" = "gentoo" ]
- then
- run cp system/netdata-openrc /etc/init.d/netdata && \
- run chmod 755 /etc/init.d/netdata && \
- run rc-update add netdata default && \
- installed_init_d=1
-
- elif [ "${key}" = "ubuntu-12.04" -o "${key}" = "ubuntu-14.04" -o "${key}" = "debian-7" ]
- then
- run cp system/netdata-lsb /etc/init.d/netdata && \
- run chmod 755 /etc/init.d/netdata && \
- run update-rc.d netdata defaults && \
- run update-rc.d netdata enable && \
- installed_init_d=1
-
- elif [ "${key}" = "CentOS release 6.8 (Final)" -o "${key}" = "amzn-2016.09" ]
- then
- run cp system/netdata-init-d /etc/init.d/netdata && \
- run chmod 755 /etc/init.d/netdata && \
- run chkconfig netdata on && \
- installed_init_d=1
- fi
- fi
-
- return 0
-}
-
-if [ "${UID}" -eq 0 ]
- then
-
- if issystemd
- then
- # systemd is running on this system
-
- if [ ! -f /etc/systemd/system/netdata.service ]
- then
- echo >&2 "Installing systemd service..."
- run cp system/netdata.service /etc/systemd/system/netdata.service && \
- run systemctl daemon-reload && \
- run systemctl enable netdata
- fi
- else
- install_non_systemd_init
- fi
-fi
+NETDATA_START_CMD="${NETDATA_PREFIX}/usr/sbin/netdata"
+install_netdata_service || run_failed "Cannot install netdata init service."
# -----------------------------------------------------------------------------
# check if we can re-start netdata
started=0
-
-isnetdata() {
- if [ -d /proc/self ]
- then
- [ -z "$1" -o ! -f "/proc/$1/stat" ] && return 1
- [ "$(cat "/proc/$1/stat" | cut -d '(' -f 2 | cut -d ')' -f 1)" = "netdata" ] && return 0
- return 1
- fi
- return 0
-}
-
-stop_netdata_on_pid() {
- local pid="${1}" ret=0 count=0
-
- isnetdata ${pid} || return 0
-
- printf >&2 "Stopping netdata on pid ${pid} ..."
- while [ ! -z "$pid" -a ${ret} -eq 0 ]
- do
- if [ ${count} -gt 45 ]
- then
- echo >&2 "Cannot stop the running netdata on pid ${pid}."
- return 1
- fi
-
- count=$(( count + 1 ))
-
- run kill ${pid} 2>/dev/null
- ret=$?
-
- test ${ret} -eq 0 && printf >&2 "." && sleep 2
- done
-
- echo >&2
- if [ ${ret} -eq 0 ]
- then
- echo >&2 "SORRY! CANNOT STOP netdata ON PID ${pid} !"
- return 1
- fi
-
- echo >&2 "netdata on pid ${pid} stopped."
- return 0
-}
-
-stop_all_netdata() {
- local p myns ns
-
- myns="$(readlink /proc/self/ns/pid 2>/dev/null)"
-
- # echo >&2 "Stopping a (possibly) running netdata (namespace '${myns}')..."
-
- for p in $(cat "${NETDATA_RUN_DIR}/netdata.pid" 2>/dev/null) \
- $(cat /var/run/netdata.pid 2>/dev/null) \
- $(cat /var/run/netdata/netdata.pid 2>/dev/null) \
- $(pidof netdata 2>/dev/null)
- do
- ns="$(readlink /proc/${p}/ns/pid 2>/dev/null)"
-
- if [ -z "${myns}" -o -z "${ns}" -o "${myns}" = "${ns}" ]
- then
- stop_netdata_on_pid ${p}
- fi
- done
-}
-
if [ ${DONOTSTART} -eq 1 ]
then
if [ ! -s "${NETDATA_PREFIX}/etc/netdata/netdata.conf" ]
@@ -971,40 +821,19 @@ if [ ${DONOTSTART} -eq 1 ]
fi
else
-
- progress "Start netdata"
-
- if [ "${UID}" -eq 0 ]
+ restart_netdata ${NETDATA_PREFIX}/usr/sbin/netdata "${@}"
+ if [ $? -ne 0 ]
then
- service netdata stop
- stop_all_netdata
- service netdata restart && started=1
- if [ ${started} -eq 0 ]
- then
- service netdata start && started=1
- fi
- fi
-
- if [ ${started} -eq 0 ]
- then
- # still not started...
-
- stop_all_netdata
-
- echo >&2 "Starting netdata..."
- run ${NETDATA_PREFIX}/usr/sbin/netdata -P ${NETDATA_RUN_DIR}/netdata.pid "${@}"
- if [ $? -ne 0 ]
- then
- echo >&2
- echo >&2 "SORRY! FAILED TO START NETDATA!"
- exit 1
- else
- echo >&2 "OK. NetData Started!"
- fi
-
echo >&2
+ echo >&2 "SORRY! FAILED TO START NETDATA!"
+ echo >&2
+ exit 1
fi
+ started=1
+ echo >&2 "OK. NetData Started!"
+ echo >&2
+
# -----------------------------------------------------------------------------
# save a config file, if it is not already there
@@ -1289,6 +1118,23 @@ if [ $? -eq 0 -a "${NETDATA_ADDED_TO_NSD}" = "1" ]
echo " gpasswd -d netdata nsd"
fi
+getent group proxy > /dev/null
+if [ $? -eq 0 -a "${NETDATA_ADDED_TO_PROXY}" = "1" ]
+ then
+ echo
+ echo "You may also want to remove the netdata user from the proxy group"
+ echo "by running:"
+ echo " gpasswd -d netdata proxy"
+fi
+
+getent group squid > /dev/null
+if [ $? -eq 0 -a "${NETDATA_ADDED_TO_SQUID}" = "1" ]
+ then
+ echo
+ echo "You may also want to remove the netdata user from the squid group"
+ echo "by running:"
+ echo " gpasswd -d netdata squid"
+fi
UNINSTALL
chmod 750 netdata-uninstaller.sh
@@ -1303,13 +1149,13 @@ so you can access it with:
${TPUT_CYAN}${TPUT_BOLD}http://this.machine.ip:${NETDATA_PORT}/${TPUT_RESET}
-To stop netdata, just kill it, with:
+To stop netdata run:
- ${TPUT_YELLOW}${TPUT_BOLD}killall netdata${TPUT_RESET}
+ ${TPUT_YELLOW}${TPUT_BOLD}${NETDATA_STOP_CMD}${TPUT_RESET}
-To start it, just run it:
+To start netdata run:
- ${TPUT_YELLOW}${TPUT_BOLD}${NETDATA_PREFIX}/usr/sbin/netdata${TPUT_RESET}
+ ${TPUT_YELLOW}${TPUT_BOLD}${NETDATA_START_CMD}${TPUT_RESET}
END
@@ -1327,6 +1173,8 @@ export PATH="\${PATH}:${PATH}"
export CFLAGS="${CFLAGS}"
export NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS}"
+# make sure we have a UID
+[ -z "\${UID}" ] && UID="\$(id -u)"
INSTALL_UID="${UID}"
if [ "\${INSTALL_UID}" != "\${UID}" ]
then
@@ -1384,10 +1232,7 @@ failed() {
}
get_latest_commit_id() {
- git log -1 2>&3 |\\
- grep ^commit 2>&3 |\\
- head -n 1 2>&3 |\\
- cut -d ' ' -f 2 2>&3
+ git rev-parse HEAD 2>&3
}
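The rewrite is behaviorally equivalent but far less fragile: instead of scraping the human-oriented `git log` output, it asks the plumbing command for the commit id directly.

```sh
# old: parse porcelain output (breaks if the log format ever changes)
git log -1 | grep '^commit' | head -n 1 | cut -d ' ' -f 2
# new: plumbing command that prints exactly the current commit id
git rev-parse HEAD
```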
update() {
@@ -1417,7 +1262,7 @@ update() {
emptyline
info "Re-installing netdata..."
- ${REINSTALL_COMMAND// --dont-wait/} --dont-wait >&3 2>&3 || failed "FAILED TO COMPILE/INSTALL NETDATA"
+ ${REINSTALL_COMMAND} --dont-wait >&3 2>&3 || failed "FAILED TO COMPILE/INSTALL NETDATA"
[ ! -z "\${tmp}" ] && rm "\${tmp}" && tmp=
return 0
@@ -1433,24 +1278,47 @@ REINSTALL
echo >&2 "Update script generated : ${TPUT_GREEN}${TPUT_BOLD}./netdata-updater.sh${TPUT_RESET}"
echo >&2
echo >&2 "${TPUT_DIM}${TPUT_BOLD}netdata-updater.sh${TPUT_RESET}${TPUT_DIM} can work from cron. It will trigger an email from cron"
- echo >&2 "only if it fails (it does not print anything if it can update netdata).${TPUT_RESET}"
+ echo >&2 "only if it fails (it does not print anything when it can update netdata).${TPUT_RESET}"
if [ "${UID}" -eq "0" ]
then
- if [ -d "/etc/cron.daily" -a ! -f "/etc/cron.daily/netdata-updater.sh" ]
+ crondir=
+ [ -d "/etc/periodic/daily" ] && crondir="/etc/periodic/daily"
+ [ -d "/etc/cron.daily" ] && crondir="/etc/cron.daily"
+
+ if [ ! -z "${crondir}" ]
+ then
+ if [ -f "${crondir}/netdata-updater.sh" -a ! -f "${crondir}/netdata-updater" ]
then
- echo >&2 "${TPUT_DIM}Run this to automatically check and install netdata updates once per day:${TPUT_RESET}"
- echo >&2
- echo >&2 "${TPUT_YELLOW}${TPUT_BOLD}ln -s ${PWD}/netdata-updater.sh /etc/cron.daily/netdata-updater.sh${TPUT_RESET}"
- elif [ -d "/etc/periodic/daily" -a ! -f "/etc/periodic/daily/netdata-updater" ]
+ # remove .sh from the filename under cron
+ progress "Fixing netdata-updater filename at cron"
+ mv -f "${crondir}/netdata-updater.sh" "${crondir}/netdata-updater"
+ fi
+
+ if [ ! -f "${crondir}/netdata-updater" ]
then
- echo >&2 "${TPUT_DIM}Run this to automatically check and install netdata updates once per day:${TPUT_RESET}"
- echo >&2
- echo >&2 "${TPUT_YELLOW}${TPUT_BOLD}ln -s ${PWD}/netdata-updater.sh /etc/periodic/daily/netdata-updater${TPUT_RESET}"
+ if [ "${AUTOUPDATE}" = "1" ]
+ then
+ progress "Installing netdata-updater at cron"
+ run ln -s "${PWD}/netdata-updater.sh" "${crondir}/netdata-updater"
+ else
+ echo >&2 "${TPUT_DIM}Run this to automatically check and install netdata updates once per day:${TPUT_RESET}"
+ echo >&2
+ echo >&2 "${TPUT_YELLOW}${TPUT_BOLD}sudo ln -s ${PWD}/netdata-updater.sh ${crondir}/netdata-updater${TPUT_RESET}"
+ fi
+ else
+ progress "Refreshing netdata-updater at cron"
+ run rm "${crondir}/netdata-updater"
+ run ln -s "${PWD}/netdata-updater.sh" "${crondir}/netdata-updater"
+ fi
+ else
+ [ "${AUTOUPDATE}" = "1" ] && echo >&2 "Cannot figure out the cron directory to install netdata-updater."
fi
+ else
+ [ "${AUTOUPDATE}" = "1" ] && echo >&2 "You need to run the installer as root for auto-updating via cron."
fi
-elif [ -f "netdata-updater.sh" ]
- then
- rm "netdata-updater.sh"
+else
+ [ -f "netdata-updater.sh" ] && rm "netdata-updater.sh"
+ [ "${AUTOUPDATE}" = "1" ] && echo >&2 "Your installation method does not support daily auto-updating via cron."
fi
# -----------------------------------------------------------------------------
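Condensed, the new auto-update wiring above does the following when running as root (the remark about `run-parts` is an inference: Debian's `run-parts` skips file names containing dots, which is presumably why the `.sh` suffix is stripped under cron):

```sh
# pick a daily cron directory; /etc/cron.daily wins when both exist
crondir=
[ -d "/etc/periodic/daily" ] && crondir="/etc/periodic/daily"   # Alpine/BSD style
[ -d "/etc/cron.daily" ] && crondir="/etc/cron.daily"           # Debian/RedHat style

# cron runners such as run-parts ignore names with dots,
# so the link is named netdata-updater, not netdata-updater.sh
[ -n "${crondir}" ] && ln -s "${PWD}/netdata-updater.sh" "${crondir}/netdata-updater"
```

With `AUTOUPDATE=1` the installer creates the link itself; otherwise it only prints the `ln -s` command for the user to run.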
diff --git a/netdata.spec b/netdata.spec
index 6a3d63f8..3753e27a 100644
--- a/netdata.spec
+++ b/netdata.spec
@@ -27,7 +27,8 @@ BuildRequires: systemd-rpm-macros \
%global netdata_init_preun %service_del_preun netdata.service
%global netdata_init_postun %service_del_postun netdata.service
%else
-%global netdata_initd_buildrequires %{nil}
+%global netdata_initd_buildrequires \
+BuildRequires: systemd
%global netdata_initd_requires \
Requires(preun): systemd-units \
Requires(postun): systemd-units \
@@ -76,11 +77,11 @@ Recommends: python2-psycopg2 \
Summary: Real-time performance monitoring, done right
Name: netdata
-Version: 1.6.0
+Version: 1.7.0
Release: 1%{?dist}
License: GPLv3+
Group: Applications/System
-Source0: https://firehol.org/download/netdata/releases/v1.6.0/%{name}-1.6.0.tar.xz
+Source0: https://github.com/firehol/%{name}/releases/download/v1.7.0/%{name}-1.7.0.tar.xz
URL: http://my-netdata.io
BuildRequires: pkgconfig
BuildRequires: xz
@@ -116,7 +117,7 @@ so that you can get insights of what is happening now and what just
happened, on your systems and applications.
%prep
-%setup -q -n netdata-1.6.0
+%setup -q -n netdata-1.7.0
%build
%configure \
@@ -176,6 +177,7 @@ rm -rf "${RPM_BUILD_ROOT}"
%config(noreplace) %{_sysconfdir}/%{name}/health.d/*.conf
#%%config(noreplace) %{_sysconfdir}/%{name}/node.d/*.conf
%config(noreplace) %{_sysconfdir}/%{name}/python.d/*.conf
+%config(noreplace) %{_sysconfdir}/%{name}/statsd.d/*.conf
%config(noreplace) %{_sysconfdir}/logrotate.d/%{name}
# To be eventually moved to %%_defaultdocdir
@@ -205,6 +207,21 @@ rm -rf "${RPM_BUILD_ROOT}"
%{_datadir}/%{name}/web
%changelog
+* Sun Jul 16 2017 Costa Tsaousis <costa@tsaousis.gr> - 1.7.0-1
+- netdata is now a fully featured statsd server
+- new installation options
+- metrics streaming and replication improvements
+- backends improvements - prometheus support rewritten
+- netdata now monitors ZFS (on Linux and FreeBSD)
+- netdata now monitors ElasticSearch
+- netdata now monitors RabbitMQ
+- netdata now monitors Go applications (via expvar)
+- netdata now monitors ipfw (on FreeBSD 11)
+- netdata now monitors samba
+- netdata now monitors squid logs
+- netdata dashboard loading times have been improved significantly
+- netdata alarms now support custom hooks
+- dozens more improvements and bug fixes
* Mon Mar 20 2017 Costa Tsaousis <costa@tsaousis.gr> - 1.6.0-1
- central netdata
- monitoring ephemeral nodes
diff --git a/netdata.spec.in b/netdata.spec.in
index 685d6e0e..6ba79123 100644
--- a/netdata.spec.in
+++ b/netdata.spec.in
@@ -27,7 +27,8 @@ BuildRequires: systemd-rpm-macros \
%global netdata_init_preun %service_del_preun netdata.service
%global netdata_init_postun %service_del_postun netdata.service
%else
-%global netdata_initd_buildrequires %{nil}
+%global netdata_initd_buildrequires \
+BuildRequires: systemd
%global netdata_initd_requires \
Requires(preun): systemd-units \
Requires(postun): systemd-units \
@@ -80,7 +81,7 @@ Version: @PACKAGE_RPM_VERSION@
Release: 1%{?dist}
License: GPLv3+
Group: Applications/System
-Source0: https://firehol.org/download/netdata/releases/v@PACKAGE_VERSION@/%{name}-@PACKAGE_VERSION@.tar.xz
+Source0: https://github.com/firehol/%{name}/releases/download/v@PACKAGE_VERSION@/%{name}-@PACKAGE_VERSION@.tar.xz
URL: http://my-netdata.io
BuildRequires: pkgconfig
BuildRequires: xz
@@ -176,6 +177,7 @@ rm -rf "${RPM_BUILD_ROOT}"
%config(noreplace) %{_sysconfdir}/%{name}/health.d/*.conf
#%%config(noreplace) %{_sysconfdir}/%{name}/node.d/*.conf
%config(noreplace) %{_sysconfdir}/%{name}/python.d/*.conf
+%config(noreplace) %{_sysconfdir}/%{name}/statsd.d/*.conf
%config(noreplace) %{_sysconfdir}/logrotate.d/%{name}
# To be eventually moved to %%_defaultdocdir
@@ -205,6 +207,21 @@ rm -rf "${RPM_BUILD_ROOT}"
%{_datadir}/%{name}/web
%changelog
+* Sun Jul 16 2017 Costa Tsaousis <costa@tsaousis.gr> - 1.7.0-1
+- netdata is now a fully featured statsd server
+- new installation options
+- metrics streaming and replication improvements
+- backends improvements - prometheus support rewritten
+- netdata now monitors ZFS (on Linux and FreeBSD)
+- netdata now monitors ElasticSearch
+- netdata now monitors RabbitMQ
+- netdata now monitors Go applications (via expvar)
+- netdata now monitors ipfw (on FreeBSD 11)
+- netdata now monitors samba
+- netdata now monitors squid logs
+- netdata dashboard loading times have been improved significantly
+- netdata alarms now support custom hooks
+- dozens more improvements and bug fixes
* Mon Mar 20 2017 Costa Tsaousis <costa@tsaousis.gr> - 1.6.0-1
- central netdata
- monitoring ephemeral nodes
diff --git a/node.d/Makefile.am b/node.d/Makefile.am
index c1caa4f0..28008aeb 100644
--- a/node.d/Makefile.am
+++ b/node.d/Makefile.am
@@ -3,6 +3,7 @@ MAINTAINERCLEANFILES= $(srcdir)/Makefile.in
dist_node_DATA = \
README.md \
named.node.js \
+ fronius.node.js \
sma_webbox.node.js \
snmp.node.js \
$(NULL)
diff --git a/node.d/Makefile.in b/node.d/Makefile.in
index 2af9a4a6..35024cb1 100644
--- a/node.d/Makefile.in
+++ b/node.d/Makefile.in
@@ -1,9 +1,8 @@
-# Makefile.in generated by automake 1.11.3 from Makefile.am.
+# Makefile.in generated by automake 1.14.1 from Makefile.am.
# @configure_input@
-# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
-# 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 Free Software
-# Foundation, Inc.
+# Copyright (C) 1994-2013 Free Software Foundation, Inc.
+
# This Makefile.in is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
@@ -16,6 +15,51 @@
@SET_MAKE@
VPATH = @srcdir@
+am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
pkgdatadir = $(datadir)/@PACKAGE@
pkgincludedir = $(includedir)/@PACKAGE@
pkglibdir = $(libdir)/@PACKAGE@
@@ -35,9 +79,9 @@ POST_UNINSTALL = :
build_triplet = @build@
host_triplet = @host@
subdir = node.d
-DIST_COMMON = $(dist_node_DATA) $(dist_nodemodules_DATA) \
- $(dist_nodemodulesber_DATA) $(srcdir)/Makefile.am \
- $(srcdir)/Makefile.in
+DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \
+ $(dist_node_DATA) $(dist_nodemodules_DATA) \
+ $(dist_nodemodulesber_DATA)
ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
am__aclocal_m4_deps = $(top_srcdir)/m4/ax_c___atomic.m4 \
$(top_srcdir)/m4/ax_c__generic.m4 $(top_srcdir)/m4/ax_c_lto.m4 \
@@ -53,8 +97,25 @@ mkinstalldirs = $(install_sh) -d
CONFIG_HEADER = $(top_builddir)/config.h
CONFIG_CLEAN_FILES =
CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
SOURCES =
DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
am__vpath_adj = case $$p in \
$(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
@@ -86,9 +147,11 @@ am__installdirs = "$(DESTDIR)$(nodedir)" "$(DESTDIR)$(nodemodulesdir)" \
"$(DESTDIR)$(nodemodulesberdir)"
DATA = $(dist_node_DATA) $(dist_nodemodules_DATA) \
$(dist_nodemodulesber_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
AUTOCONF = @AUTOCONF@
AUTOHEADER = @AUTOHEADER@
AUTOMAKE = @AUTOMAKE@
@@ -234,6 +297,7 @@ MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
dist_node_DATA = \
README.md \
named.node.js \
+ fronius.node.js \
sma_webbox.node.js \
snmp.node.js \
$(NULL)
@@ -291,8 +355,11 @@ $(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
$(am__aclocal_m4_deps):
install-dist_nodeDATA: $(dist_node_DATA)
@$(NORMAL_INSTALL)
- test -z "$(nodedir)" || $(MKDIR_P) "$(DESTDIR)$(nodedir)"
@list='$(dist_node_DATA)'; test -n "$(nodedir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(nodedir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(nodedir)" || exit 1; \
+ fi; \
for p in $$list; do \
if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
echo "$$d$$p"; \
@@ -309,8 +376,11 @@ uninstall-dist_nodeDATA:
dir='$(DESTDIR)$(nodedir)'; $(am__uninstall_files_from_dir)
install-dist_nodemodulesDATA: $(dist_nodemodules_DATA)
@$(NORMAL_INSTALL)
- test -z "$(nodemodulesdir)" || $(MKDIR_P) "$(DESTDIR)$(nodemodulesdir)"
@list='$(dist_nodemodules_DATA)'; test -n "$(nodemodulesdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(nodemodulesdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(nodemodulesdir)" || exit 1; \
+ fi; \
for p in $$list; do \
if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
echo "$$d$$p"; \
@@ -327,8 +397,11 @@ uninstall-dist_nodemodulesDATA:
dir='$(DESTDIR)$(nodemodulesdir)'; $(am__uninstall_files_from_dir)
install-dist_nodemodulesberDATA: $(dist_nodemodulesber_DATA)
@$(NORMAL_INSTALL)
- test -z "$(nodemodulesberdir)" || $(MKDIR_P) "$(DESTDIR)$(nodemodulesberdir)"
@list='$(dist_nodemodulesber_DATA)'; test -n "$(nodemodulesberdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(nodemodulesberdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(nodemodulesberdir)" || exit 1; \
+ fi; \
for p in $$list; do \
if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
echo "$$d$$p"; \
@@ -343,11 +416,11 @@ uninstall-dist_nodemodulesberDATA:
@list='$(dist_nodemodulesber_DATA)'; test -n "$(nodemodulesberdir)" || list=; \
files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
dir='$(DESTDIR)$(nodemodulesberdir)'; $(am__uninstall_files_from_dir)
-tags: TAGS
-TAGS:
+tags TAGS:
+
+ctags CTAGS:
-ctags: CTAGS
-CTAGS:
+cscope cscopelist:
distdir: $(DISTFILES)
@@ -488,17 +561,18 @@ uninstall-am: uninstall-dist_nodeDATA uninstall-dist_nodemodulesDATA \
.MAKE: install-am install-strip
-.PHONY: all all-am check check-am clean clean-generic distclean \
- distclean-generic distdir dvi dvi-am html html-am info info-am \
- install install-am install-data install-data-am \
- install-dist_nodeDATA install-dist_nodemodulesDATA \
- install-dist_nodemodulesberDATA install-dvi install-dvi-am \
- install-exec install-exec-am install-html install-html-am \
- install-info install-info-am install-man install-pdf \
- install-pdf-am install-ps install-ps-am install-strip \
- installcheck installcheck-am installdirs maintainer-clean \
- maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
- pdf-am ps ps-am uninstall uninstall-am uninstall-dist_nodeDATA \
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dist_nodeDATA \
+ install-dist_nodemodulesDATA install-dist_nodemodulesberDATA \
+ install-dvi install-dvi-am install-exec install-exec-am \
+ install-html install-html-am install-info install-info-am \
+ install-man install-pdf install-pdf-am install-ps \
+ install-ps-am install-strip installcheck installcheck-am \
+ installdirs maintainer-clean maintainer-clean-generic \
+ mostlyclean mostlyclean-generic pdf pdf-am ps ps-am tags-am \
+ uninstall uninstall-am uninstall-dist_nodeDATA \
uninstall-dist_nodemodulesDATA \
uninstall-dist_nodemodulesberDATA
diff --git a/node.d/README.md b/node.d/README.md
index e69de29b..3c297790 100644
--- a/node.d/README.md
+++ b/node.d/README.md
@@ -0,0 +1,63 @@
+# Disclaimer
+
+Module configurations are written in JSON and **node.js is required**.
+
+This page is still to be edited.
+
+---
+
+The following node.d modules are supported:
+
+# fronius
+
+This module collects metrics from a configured Fronius Symo solar power installation.
+See `netdata/conf.d/node.d/fronius.conf.md` for more details.
+
+**Requirements**
+ * Configuration file `fronius.conf` in the node.d netdata config dir (default: `/etc/netdata/node.d/fronius.conf`)
+ * Fronius Symo with network access (http)
+
+It produces the following charts per server:
+
+1. **Power**
+ * Current power input from the grid (positive values), output to the grid (negative values), in W
+ * Current power input from the solar panels, in W
+ * Current power stored in the accumulator (if present), in W (in theory, untested)
+
+2. **Consumption**
+ * Local consumption in W
+
+3. **Autonomy**
+  * Relative autonomy in %. 100 % autonomy means that the solar panels are delivering more power than is needed by local consumption.
+  * Relative self consumption in %. The lower, the better.
+
+4. **Energy**
+ * The energy produced during the current day, in kWh
+ * The energy produced during the current year, in kWh
+
+5. **Inverter**
+ * The current power output from the connected inverters, in W, one dimension per inverter. At least one is always present.
+
+
+### configuration
+
+Sample:
+
+```json
+{
+ "enable_autodetect": false,
+ "update_every": 5,
+ "servers": [
+ {
+ "name": "Symo",
+ "hostname": "symo.ip.or.dns",
+ "update_every": 5,
+ "api_path": "/solar_api/v1/GetPowerFlowRealtimeData.fcgi"
+ }
+ ]
+}
+```
+
+If no configuration is given, the module will be disabled. Each `update_every` is optional; the default is `5`.
+
+---
diff --git a/node.d/fronius.node.js b/node.d/fronius.node.js
new file mode 100644
index 00000000..f771f6c3
--- /dev/null
+++ b/node.d/fronius.node.js
@@ -0,0 +1,317 @@
+'use strict';
+
+// This program will connect to one or more Fronius Symo inverters
+// to get the solar power generated (current, today).
+
+// example configuration in netdata/conf.d/node.d/fronius.conf.md
+
+var url = require('url');
+var http = require('http');
+var netdata = require('netdata');
+
+netdata.debug('loaded ' + __filename + ' plugin');
+
+var fronius = {
+ name: "Fronius",
+ enable_autodetect: false,
+ update_every: 5,
+ base_priority: 60000,
+ charts: {},
+
+ powerGridId: "p_grid",
+ powerPvId: "p_pv",
+    powerAccuId: "p_akku", // not my typo! Using the ID from the API
+ consumptionLoadId: "p_load",
+ autonomyId: "rel_autonomy",
+ consumptionSelfId: "rel_selfconsumption",
+ energyTodayId: "e_day",
+ energyYearId: "e_year",
+
+ createBasicDimension: function (id, name, divisor) {
+ return {
+ id: id, // the unique id of the dimension
+ name: name, // the name of the dimension
+            algorithm: netdata.chartAlgorithms.absolute, // the id of the netdata algorithm
+ multiplier: 1, // the multiplier
+ divisor: divisor, // the divisor
+ hidden: false // is hidden (boolean)
+ };
+ },
+
+ // Gets the site power chart. Will be created if not existing.
+ getSitePowerChart: function (service, id) {
+ var chart = fronius.charts[id];
+ if (fronius.isDefined(chart)) return chart;
+
+ var dim = {};
+ dim[fronius.powerGridId] = this.createBasicDimension(fronius.powerGridId, "Grid", 1);
+ dim[fronius.powerPvId] = this.createBasicDimension(fronius.powerPvId, "Photovoltaics", 1);
+ dim[fronius.powerAccuId] = this.createBasicDimension(fronius.powerAccuId, "Accumulator", 1);
+
+ chart = {
+ id: id, // the unique id of the chart
+ name: '', // the unique name of the chart
+ title: service.name + ' Current Site Power', // the title of the chart
+ units: 'W', // the units of the chart dimensions
+ family: 'power', // the family of the chart
+ context: 'fronius.power', // the context of the chart
+ type: netdata.chartTypes.area, // the type of the chart
+ priority: fronius.base_priority + 1, // the priority relative to others in the same family
+ update_every: service.update_every, // the expected update frequency of the chart
+ dimensions: dim
+ };
+ chart = service.chart(id, chart);
+ fronius.charts[id] = chart;
+
+ return chart;
+ },
+
+ // Gets the site consumption chart. Will be created if not existing.
+ getSiteConsumptionChart: function (service, id) {
+ var chart = fronius.charts[id];
+ if (fronius.isDefined(chart)) return chart;
+ var dim = {};
+ dim[fronius.consumptionLoadId] = this.createBasicDimension(fronius.consumptionLoadId, "Load", 1);
+
+ chart = {
+ id: id, // the unique id of the chart
+ name: '', // the unique name of the chart
+ title: service.name + ' Current Load', // the title of the chart
+ units: 'W', // the units of the chart dimensions
+ family: 'consumption', // the family of the chart
+ context: 'fronius.consumption', // the context of the chart
+ type: netdata.chartTypes.area, // the type of the chart
+ priority: fronius.base_priority + 2, // the priority relative to others in the same family
+ update_every: service.update_every, // the expected update frequency of the chart
+ dimensions: dim
+ };
+ chart = service.chart(id, chart);
+ fronius.charts[id] = chart;
+
+ return chart;
+ },
+
+    // Gets the site autonomy chart. Will be created if not existing.
+ getSiteAutonomyChart: function (service, id) {
+ var chart = fronius.charts[id];
+ if (fronius.isDefined(chart)) return chart;
+ var dim = {};
+ dim[fronius.autonomyId] = this.createBasicDimension(fronius.autonomyId, "Autonomy", 1);
+ dim[fronius.consumptionSelfId] = this.createBasicDimension(fronius.consumptionSelfId, "Self Consumption", 1);
+
+ chart = {
+ id: id, // the unique id of the chart
+ name: '', // the unique name of the chart
+ title: service.name + ' Current Autonomy', // the title of the chart
+ units: '%', // the units of the chart dimensions
+ family: 'autonomy', // the family of the chart
+ context: 'fronius.autonomy', // the context of the chart
+ type: netdata.chartTypes.area, // the type of the chart
+ priority: fronius.base_priority + 3, // the priority relative to others in the same family
+ update_every: service.update_every, // the expected update frequency of the chart
+ dimensions: dim
+ };
+ chart = service.chart(id, chart);
+ fronius.charts[id] = chart;
+
+ return chart;
+ },
+
+ // Gets the site energy chart for today. Will be created if not existing.
+ getSiteEnergyTodayChart: function (service, chartId) {
+ var chart = fronius.charts[chartId];
+ if (fronius.isDefined(chart)) return chart;
+ var dim = {};
+ dim[fronius.energyTodayId] = this.createBasicDimension(fronius.energyTodayId, "Today", 1000);
+ chart = {
+ id: chartId, // the unique id of the chart
+ name: '', // the unique name of the chart
+ title: service.name + ' Energy production for today', // the title of the chart
+ units: 'kWh', // the units of the chart dimensions
+ family: 'energy', // the family of the chart
+ context: 'fronius.energy.today', // the context of the chart
+ type: netdata.chartTypes.area, // the type of the chart
+ priority: fronius.base_priority + 4, // the priority relative to others in the same family
+ update_every: service.update_every, // the expected update frequency of the chart
+ dimensions: dim
+ };
+ chart = service.chart(chartId, chart);
+ fronius.charts[chartId] = chart;
+
+ return chart;
+ },
+
+    // Gets the site energy chart for the current year. Will be created if not existing.
+ getSiteEnergyYearChart: function (service, chartId) {
+ var chart = fronius.charts[chartId];
+ if (fronius.isDefined(chart)) return chart;
+ var dim = {};
+ dim[fronius.energyYearId] = this.createBasicDimension(fronius.energyYearId, "Year", 1000);
+ chart = {
+ id: chartId, // the unique id of the chart
+ name: '', // the unique name of the chart
+ title: service.name + ' Energy production for this year', // the title of the chart
+ units: 'kWh', // the units of the chart dimensions
+ family: 'energy', // the family of the chart
+ context: 'fronius.energy.year', // the context of the chart
+ type: netdata.chartTypes.area, // the type of the chart
+ priority: fronius.base_priority + 5, // the priority relative to others in the same family
+ update_every: service.update_every, // the expected update frequency of the chart
+ dimensions: dim
+ };
+ chart = service.chart(chartId, chart);
+ fronius.charts[chartId] = chart;
+
+ return chart;
+ },
+
+ // Gets the inverter power chart. Will be created if not existing.
+ // Needs the array of inverters in order to create a chart with all inverters as dimensions
+ getInverterPowerChart: function (service, chartId, inverters) {
+
+ var chart = fronius.charts[chartId];
+ if (fronius.isDefined(chart)) return chart;
+
+ var dim = {};
+
+ var inverterCount = Object.keys(inverters).length;
+        var i = 1;
+        for (i; i <= inverterCount; i++) {
+            // look up each inverter by its numerical name ("1", "2", ...)
+            var inverter = inverters[i.toString()];
+            if (fronius.isUndefined(inverter)) {
+ netdata.error("Expected an Inverter with a numerical name! " +
+ "Have a look at your JSON output to verify.");
+ continue;
+ }
+ dim[i.toString()] = this.createBasicDimension("inverter_" + i, "Inverter " + i, 1);
+ }
+
+ chart = {
+ id: chartId, // the unique id of the chart
+ name: '', // the unique name of the chart
+ title: service.name + ' Current Inverter Output', // the title of the chart
+ units: 'W', // the units of the chart dimensions
+ family: 'inverters', // the family of the chart
+ context: 'fronius.inverter.output', // the context of the chart
+ type: netdata.chartTypes.stacked, // the type of the chart
+ priority: fronius.base_priority + 6, // the priority relative to others in the same family
+ update_every: service.update_every, // the expected update frequency of the chart
+ dimensions: dim
+ };
+ chart = service.chart(chartId, chart);
+ fronius.charts[chartId] = chart;
+
+ return chart;
+ },
+
+ processResponse: function (service, content) {
+ if (content === null) return;
+ var json = JSON.parse(content);
+ if (!fronius.isResponseValid(json)) return;
+
+ // add the service
+ service.commit();
+
+ var site = json.Body.Data.Site;
+
+ // Site Current Power Chart
+ service.begin(fronius.getSitePowerChart(service, 'fronius_' + service.name + '.power'));
+ service.set(fronius.powerGridId, Math.round(site.P_Grid));
+ service.set(fronius.powerPvId, Math.round(site.P_PV));
+ service.set(fronius.powerAccuId, Math.round(site.P_Akku));
+ service.end();
+
+ // Site Consumption Chart
+ service.begin(fronius.getSiteConsumptionChart(service, 'fronius_' + service.name + '.consumption'));
+ service.set(fronius.consumptionLoadId, Math.round(Math.abs(site.P_Load)));
+ service.end();
+
+ // Site Autonomy Chart
+ service.begin(fronius.getSiteAutonomyChart(service, 'fronius_' + service.name + '.autonomy'));
+ service.set(fronius.autonomyId, Math.round(site.rel_Autonomy));
+ var selfConsumption = site.rel_SelfConsumption;
+ service.set(fronius.consumptionSelfId, Math.round(selfConsumption === null ? 100 : selfConsumption));
+ service.end();
+
+ // Site Energy Today Chart
+ service.begin(fronius.getSiteEnergyTodayChart(service, 'fronius_' + service.name + '.energy.today'));
+ service.set(fronius.energyTodayId, Math.round(site.E_Day));
+ service.end();
+
+ // Site Energy Year Chart
+ service.begin(fronius.getSiteEnergyYearChart(service, 'fronius_' + service.name + '.energy.year'));
+ service.set(fronius.energyYearId, Math.round(site.E_Year));
+ service.end();
+
+ // Inverters
+ var inverters = json.Body.Data.Inverters;
+ var inverterCount = Object.keys(inverters).length + 1;
+ while (inverterCount--) {
+ var inverter = inverters[inverterCount];
+ if (fronius.isUndefined(inverter)) continue;
+ service.begin(fronius.getInverterPowerChart(service, 'fronius_' + service.name + '.inverters.output', inverters));
+ service.set(inverterCount.toString(), Math.round(inverter.P));
+ service.end();
+ }
+ },
+
+ // some basic validation
+ isResponseValid: function (json) {
+ if (fronius.isUndefined(json.Body)) return false;
+ if (fronius.isUndefined(json.Body.Data)) return false;
+ if (fronius.isUndefined(json.Body.Data.Site)) return false;
+ return fronius.isDefined(json.Body.Data.Inverters);
+ },
+
+ // module.serviceExecute()
+ // this function is called only from this module
+ // its purpose is to prepare the request and call
+ // netdata.serviceExecute()
+ serviceExecute: function (name, uri, update_every) {
+ netdata.debug(this.name + ': ' + name + ': url: ' + uri + ', update_every: ' + update_every);
+
+ var service = netdata.service({
+ name: name,
+ request: netdata.requestFromURL('http://' + uri),
+ update_every: update_every,
+ module: this
+ });
+ service.execute(this.processResponse);
+ },
+
+
+ configure: function (config) {
+ if (fronius.isUndefined(config.servers)) return 0;
+ var added = 0;
+ var len = config.servers.length;
+ while (len--) {
+ var server = config.servers[len];
+ if (fronius.isUndefined(server.update_every)) server.update_every = this.update_every;
+
+ var url = server.hostname + server.api_path;
+ this.serviceExecute(server.name, url, server.update_every);
+ added++;
+ }
+ return added;
+ },
+
+ // module.update()
+ // this is called repeatedly to collect data, by calling
+ // netdata.serviceExecute()
+ update: function (service, callback) {
+ service.execute(function (serv, data) {
+ service.module.processResponse(serv, data);
+ callback();
+ });
+ },
+
+ isUndefined: function (value) {
+ return typeof value === 'undefined';
+ },
+
+ isDefined: function (value) {
+ return typeof value !== 'undefined';
+ }
+};
+
+module.exports = fronius;
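For reference, `processResponse()` above consumes a payload of the following shape. This is a hand-written illustration assembled from the field names the code reads (all values are invented), not captured output from a real Fronius device:

```json
{
    "Body": {
        "Data": {
            "Site": {
                "P_Grid": -1520.5,
                "P_PV": 3502.1,
                "P_Akku": null,
                "P_Load": -1981.6,
                "rel_Autonomy": 100.0,
                "rel_SelfConsumption": 56.6,
                "E_Day": 12345.6,
                "E_Year": 1234567.8
            },
            "Inverters": {
                "1": { "P": 3502 }
            }
        }
    }
}
```

Inverters are keyed by their numerical name, one entry per inverter, which is why `getInverterPowerChart()` insists on numeric keys.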
diff --git a/node.d/node_modules/net-snmp.js b/node.d/node_modules/net-snmp.js
index 6fbd4e72..de592610 100644
--- a/node.d/node_modules/net-snmp.js
+++ b/node.d/node_modules/net-snmp.js
@@ -12,7 +12,7 @@ var util = require ("util");
function _expandConstantObject (object) {
var keys = [];
- for (key in object)
+ for (var key in object)
keys.push (key);
for (var i = 0; i < keys.length; i++)
object[object[keys[i]]] = parseInt (keys[i]);
@@ -133,12 +133,9 @@ util.inherits (RequestTimedOutError, Error);
**/
function isVarbindError (varbind) {
- if (varbind.type == ObjectType.NoSuchObject
- || varbind.type == ObjectType.NoSuchInstance
- || varbind.type == ObjectType.EndOfMibView)
- return true;
- else
- return false;
+ return !!(varbind.type == ObjectType.NoSuchObject
+ || varbind.type == ObjectType.NoSuchInstance
+ || varbind.type == ObjectType.EndOfMibView);
}
function varbindError (varbind) {
@@ -216,6 +213,8 @@ function readInt (buffer) {
function readUint (buffer, isSigned) {
buffer.readByte ();
var length = buffer.readByte ();
+ var value = 0;
+ var signedBitSet = false;
if (length > 5) {
throw new RangeError ("Integer too long '" + length + "'");
@@ -225,8 +224,6 @@ function readUint (buffer, isSigned) {
length = 4;
}
- value = 0, signedBitSet = false;
-
for (var i = 0; i < length; i++) {
value *= 256;
value += buffer.readByte ();
@@ -246,10 +243,6 @@ function readUint (buffer, isSigned) {
function readUint64 (buffer) {
var value = buffer.readString (ObjectType.Counter64, true);
- if (value.length > 8)
- throw new RequestInvalidError ("64 bit unsigned integer too long '"
- + value.length + "'")
-
return value;
}
@@ -327,9 +320,6 @@ function writeUint (buffer, type, value) {
}
function writeUint64 (buffer, value) {
- if (value.length > 8)
- throw new RequestInvalidError ("64 bit unsigned integer too long '"
- + value.length + "'")
buffer.writeBuffer (value, ObjectType.Counter64);
}
@@ -381,7 +371,7 @@ function writeVarbinds (buffer, varbinds) {
}
buffer.endSequence ();
- };
+ }
buffer.endSequence ();
}
@@ -549,7 +539,7 @@ var ResponseMessage = function (buffer) {
throw new ResponseInvalidError ("Unknown PDU type '" + type
+ "' in response");
}
-}
+};
/*****************************************************************************
** Session class definition
@@ -599,7 +589,7 @@ var Session = function (target, community, options) {
this.dgram.on ("error", me.onError.bind (me));
if (this.sourceAddress || this.sourcePort)
- req.dgram.bind (this.sourcePort, this.sourceAddress);
+ this.dgram.bind (this.sourcePort, this.sourceAddress);
};
util.inherits (Session, events.EventEmitter);
@@ -607,15 +597,16 @@ util.inherits (Session, events.EventEmitter);
Session.prototype.close = function () {
this.dgram.close ();
return this;
-}
+};
Session.prototype.cancelRequests = function (error) {
+ var id;
for (id in this.reqs) {
var req = this.reqs[id];
this.unregisterRequest (req.id);
req.responseCb (error);
}
-}
+};
function _generateId () {
return Math.floor (Math.random () + Math.random () * 10000000)
@@ -645,7 +636,7 @@ Session.prototype.get = function (oids, responseCb) {
req.responseCb (null, varbinds);
}
- };
+ }
var pduVarbinds = [];
@@ -747,7 +738,7 @@ Session.prototype.getBulk = function () {
}
req.responseCb (null, varbinds);
- };
+ }
var pduVarbinds = [];
@@ -796,7 +787,7 @@ Session.prototype.getNext = function (oids, responseCb) {
req.responseCb (null, varbinds);
}
- };
+ }
var pduVarbinds = [];
@@ -813,7 +804,7 @@ Session.prototype.getNext = function (oids, responseCb) {
};
Session.prototype.inform = function () {
- var typeOrOid = arguments[0];;
+ var typeOrOid = arguments[0];
var varbinds, options = {}, responseCb;
/**
@@ -865,7 +856,7 @@ Session.prototype.inform = function () {
req.responseCb (null, varbinds);
}
- };
+ }
if (typeof typeOrOid != "string")
typeOrOid = "1.3.6.1.6.3.1.1.5." + (typeOrOid + 1);
@@ -1029,7 +1020,7 @@ Session.prototype.set = function (varbinds, responseCb) {
req.responseCb (null, varbinds);
}
- };
+ }
var pduVarbinds = [];
@@ -1049,7 +1040,7 @@ Session.prototype.set = function (varbinds, responseCb) {
Session.prototype.simpleGet = function (pduClass, feedCb, varbinds,
responseCb, options) {
- var req = {}
+ var req = {};
try {
var id = _generateId ();
@@ -1116,7 +1107,7 @@ Session.prototype.subtree = function () {
this.walk (oid, maxRepetitions, subtreeCb.bind (me, req), doneCb);
return this;
-}
+};
function tableColumnsResponseCb (req, error) {
if (error) {
@@ -1143,7 +1134,7 @@ function tableColumnsFeedCb (req, varbinds) {
return true;
}
- var oid = varbinds[i].oid.replace (req.rowOid, "")
+ var oid = varbinds[i].oid.replace (req.rowOid, "");
if (oid && oid != varbinds[i].oid) {
var match = oid.match (/^(\d+)\.(.+)$/);
if (match && match[1] > 0) {
@@ -1187,7 +1178,7 @@ Session.prototype.tableColumns = function () {
}
return this;
-}
+};
function tableResponseCb (req, error) {
if (error)
@@ -1205,7 +1196,7 @@ function tableFeedCb (req, varbinds) {
return true;
}
- var oid = varbinds[i].oid.replace (req.rowOid, "")
+ var oid = varbinds[i].oid.replace (req.rowOid, "");
if (oid && oid != varbinds[i].oid) {
var match = oid.match (/^(\d+)\.(.+)$/);
if (match && match[1] > 0) {
@@ -1243,7 +1234,7 @@ Session.prototype.table = function () {
tableResponseCb.bind (me, req));
return this;
-}
+};
Session.prototype.trap = function () {
var req = {};
@@ -1430,7 +1421,7 @@ Session.prototype.walk = function () {
this.getNext ([oid], walkCb.bind (me, req));
return this;
-}
+};
/*****************************************************************************
** Exports
@@ -1438,8 +1429,8 @@ Session.prototype.walk = function () {
exports.Session = Session;
-exports.createSession = function (target, community, version, options) {
- return new Session (target, community, version, options);
+exports.createSession = function (target, community, options) {
+ return new Session (target, community, options);
};
exports.isVarbindError = isVarbindError;
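Most of the net-snmp changes are lint-level fixes (missing `var` declarations, stray semicolons after function expressions), but two are behavioral: `this.dgram.bind` replaces a reference to an undefined `req` variable that would have thrown whenever `sourceAddress` or `sourcePort` was set, and `createSession` loses its positional `version` argument. Callers have to adapt; a sketch of the change, assuming the version is now passed inside `options` as part of the module's option set:

```js
var snmp = require("net-snmp");

// old signature: version as a positional argument
// var session = snmp.createSession("192.0.2.1", "public", snmp.Version2c, {});

// new signature: version travels inside the options object
var session = snmp.createSession("192.0.2.1", "public", { version: snmp.Version2c });
```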
diff --git a/plugins.d/Makefile.in b/plugins.d/Makefile.in
index 2a8806cb..256605f5 100644
--- a/plugins.d/Makefile.in
+++ b/plugins.d/Makefile.in
@@ -1,9 +1,8 @@
-# Makefile.in generated by automake 1.11.3 from Makefile.am.
+# Makefile.in generated by automake 1.14.1 from Makefile.am.
# @configure_input@
-# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
-# 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 Free Software
-# Foundation, Inc.
+# Copyright (C) 1994-2013 Free Software Foundation, Inc.
+
# This Makefile.in is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
@@ -17,6 +16,51 @@
VPATH = @srcdir@
+am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
pkgdatadir = $(datadir)/@PACKAGE@
pkgincludedir = $(includedir)/@PACKAGE@
pkglibdir = $(libdir)/@PACKAGE@
@@ -36,8 +80,8 @@ POST_UNINSTALL = :
build_triplet = @build@
host_triplet = @host@
subdir = plugins.d
-DIST_COMMON = $(dist_plugins_DATA) $(dist_plugins_SCRIPTS) \
- $(srcdir)/Makefile.am $(srcdir)/Makefile.in
+DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \
+ $(dist_plugins_SCRIPTS) $(dist_plugins_DATA)
ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
am__aclocal_m4_deps = $(top_srcdir)/m4/ax_c___atomic.m4 \
$(top_srcdir)/m4/ax_c__generic.m4 $(top_srcdir)/m4/ax_c_lto.m4 \
@@ -82,12 +126,31 @@ am__uninstall_files_from_dir = { \
}
am__installdirs = "$(DESTDIR)$(pluginsdir)" "$(DESTDIR)$(pluginsdir)"
SCRIPTS = $(dist_plugins_SCRIPTS)
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
SOURCES =
DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
DATA = $(dist_plugins_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
AUTOCONF = @AUTOCONF@
AUTOHEADER = @AUTOHEADER@
AUTOMAKE = @AUTOMAKE@
@@ -287,8 +350,11 @@ $(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
$(am__aclocal_m4_deps):
install-dist_pluginsSCRIPTS: $(dist_plugins_SCRIPTS)
@$(NORMAL_INSTALL)
- test -z "$(pluginsdir)" || $(MKDIR_P) "$(DESTDIR)$(pluginsdir)"
@list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(pluginsdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(pluginsdir)" || exit 1; \
+ fi; \
for p in $$list; do \
if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \
@@ -319,8 +385,11 @@ uninstall-dist_pluginsSCRIPTS:
dir='$(DESTDIR)$(pluginsdir)'; $(am__uninstall_files_from_dir)
install-dist_pluginsDATA: $(dist_plugins_DATA)
@$(NORMAL_INSTALL)
- test -z "$(pluginsdir)" || $(MKDIR_P) "$(DESTDIR)$(pluginsdir)"
@list='$(dist_plugins_DATA)'; test -n "$(pluginsdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(pluginsdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(pluginsdir)" || exit 1; \
+ fi; \
for p in $$list; do \
if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
echo "$$d$$p"; \
@@ -335,11 +404,11 @@ uninstall-dist_pluginsDATA:
@list='$(dist_plugins_DATA)'; test -n "$(pluginsdir)" || list=; \
files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
dir='$(DESTDIR)$(pluginsdir)'; $(am__uninstall_files_from_dir)
-tags: TAGS
-TAGS:
+tags TAGS:
+
+ctags CTAGS:
-ctags: CTAGS
-CTAGS:
+cscope cscopelist:
distdir: $(DISTFILES)
@@ -478,18 +547,18 @@ uninstall-am: uninstall-dist_pluginsDATA uninstall-dist_pluginsSCRIPTS
.MAKE: install-am install-strip
-.PHONY: all all-am check check-am clean clean-generic distclean \
- distclean-generic distdir dvi dvi-am html html-am info info-am \
- install install-am install-data install-data-am \
- install-dist_pluginsDATA install-dist_pluginsSCRIPTS \
- install-dvi install-dvi-am install-exec install-exec-am \
- install-html install-html-am install-info install-info-am \
- install-man install-pdf install-pdf-am install-ps \
- install-ps-am install-strip installcheck installcheck-am \
- installdirs maintainer-clean maintainer-clean-generic \
- mostlyclean mostlyclean-generic pdf pdf-am ps ps-am uninstall \
- uninstall-am uninstall-dist_pluginsDATA \
- uninstall-dist_pluginsSCRIPTS
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dist_pluginsDATA \
+ install-dist_pluginsSCRIPTS install-dvi install-dvi-am \
+ install-exec install-exec-am install-html install-html-am \
+ install-info install-info-am install-man install-pdf \
+ install-pdf-am install-ps install-ps-am install-strip \
+ installcheck installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags-am uninstall uninstall-am \
+ uninstall-dist_pluginsDATA uninstall-dist_pluginsSCRIPTS
# Tell versions [3.59,3.63) of GNU make to not export all variables.
diff --git a/plugins.d/alarm-notify.sh b/plugins.d/alarm-notify.sh
index d0188fe3..4f619091 100755
--- a/plugins.d/alarm-notify.sh
+++ b/plugins.d/alarm-notify.sh
@@ -2,7 +2,7 @@
# netdata
# real-time performance and health monitoring, done right!
-# (C) 2016 Costa Tsaousis <costa@tsaousis.gr>
+# (C) 2017 Costa Tsaousis <costa@tsaousis.gr>
# GPL v3+
#
# Script to send alarm notifications for netdata
@@ -18,13 +18,14 @@
# - slack.com notifications by @ktsaou
# - discordapp.com notifications by @lowfive
# - pushover.net notifications by @ktsaou
-# - pushbullet.com push notifications by Tiago Peralta @tperalta82 PR #1070
-# - telegram.org notifications by @hashworks PR #1002
-# - twilio.com notifications by Levi Blaney @shadycuz PR #1211
+# - pushbullet.com push notifications by Tiago Peralta @tperalta82 #1070
+# - telegram.org notifications by @hashworks #1002
+# - twilio.com notifications by Levi Blaney @shadycuz #1211
# - kafka notifications by @ktsaou #1342
-# - pagerduty.com notifications by Jim Cooley @jimcooley PR #1373
+# - pagerduty.com notifications by Jim Cooley @jimcooley #1373
# - messagebird.com notifications by @tech_no_logical #1453
# - hipchat notifications by @ktsaou #1561
+# - custom notifications by @ktsaou
# -----------------------------------------------------------------------------
# testing notifications
@@ -103,6 +104,15 @@ debug() {
[ ${debug} -eq 1 ] && log DEBUG "${@}"
}
+
+# -----------------------------------------------------------------------------
+# this is to be overwritten by the config file
+
+custom_sender() {
+ info "not sending custom notification for ${status} of '${host}.${chart}.${name}'"
+}
+
+
# -----------------------------------------------------------------------------
# check for BASH v4+ (required for associative arrays)
@@ -112,11 +122,9 @@ debug() {
# -----------------------------------------------------------------------------
# defaults to allow running this script by hand
-NETDATA_CONFIG_DIR="${NETDATA_CONFIG_DIR-/etc/netdata}"
-NETDATA_CACHE_DIR="${NETDATA_CACHE_DIR-/var/cache/netdata}"
+[ -z "${NETDATA_CONFIG_DIR}" ] && NETDATA_CONFIG_DIR="$(dirname "${0}")/../../../../etc/netdata"
+[ -z "${NETDATA_CACHE_DIR}" ] && NETDATA_CACHE_DIR="$(dirname "${0}")/../../../../var/cache/netdata"
[ -z "${NETDATA_REGISTRY_URL}" ] && NETDATA_REGISTRY_URL="https://registry.my-netdata.io"
-[ -z "${NETDATA_HOSTNAME}" ] && NETDATA_HOSTNAME="$(hostname)"
-[ -z "${NETDATA_REGISTRY_HOSTNAME}" ] && NETDATA_REGISTRY_HOSTNAME="${NETDATA_HOSTNAME}"
# -----------------------------------------------------------------------------
# parse command line parameters
@@ -130,8 +138,8 @@ when="${6}" # the timestamp this event occurred
name="${7}" # the name of the alarm, as given in netdata health.d entries
chart="${8}" # the name of the chart (type.id)
family="${9}" # the family of the chart
-status="${10}" # the current status : REMOVED, UNITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL
-old_status="${11}" # the previous status: REMOVED, UNITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL
+status="${10}" # the current status : REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL
+old_status="${11}" # the previous status: REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL
value="${12}" # the current value of the alarm
old_value="${13}" # the previous value of the alarm
src="${14}" # the line number and file the alarm has been configured
@@ -145,9 +153,8 @@ old_value_string="${20}" # friendly old value (with units)
# -----------------------------------------------------------------------------
# find a suitable hostname to use, if netdata did not supply a hostname
-[ -z "${host}" ] && host="${NETDATA_HOSTNAME}"
-[ -z "${host}" ] && host="${NETDATA_REGISTRY_HOSTNAME}"
-[ -z "${host}" ] && host="$(hostname 2>/dev/null)"
+this_host=$(hostname -s 2>/dev/null)
+[ -z "${host}" ] && host="${this_host}"
# -----------------------------------------------------------------------------
# screen statuses we don't need to send a notification
@@ -192,6 +199,7 @@ SEND_EMAIL="YES"
SEND_PUSHBULLET="YES"
SEND_KAFKA="YES"
SEND_PD="YES"
+SEND_CUSTOM="YES"
# slack configs
SLACK_WEBHOOK_URL=
@@ -245,6 +253,10 @@ KAFKA_SENDER_IP=
PD_SERVICE_KEY=
declare -A role_recipients_pd=()
+# custom configs
+DEFAULT_RECIPIENT_CUSTOM=
+declare -A role_recipients_custom=()
+
# email configs
DEFAULT_RECIPIENT_EMAIL="root"
declare -A role_recipients_email=()
@@ -308,6 +320,7 @@ declare -A arr_hipchat=()
declare -A arr_telegram=()
declare -A arr_pd=()
declare -A arr_email=()
+declare -A arr_custom=()
# netdata may call us with multiple roles, and roles may have multiple but
# overlapping recipients - so, here we find the unique recipients.
@@ -396,6 +409,15 @@ do
do
[ "${r}" != "disabled" ] && filter_recipient_by_criticality pd "${r}" && arr_pd[${r/|*/}]="1"
done
+
+ # custom
+ a="${role_recipients_custom[${x}]}"
+ [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_CUSTOM}"
+ for r in ${a//,/ }
+ do
+ [ "${r}" != "disabled" ] && filter_recipient_by_criticality custom "${r}" && arr_custom[${r/|*/}]="1"
+ done
+
done
# build the list of slack recipients (channels)
@@ -434,6 +456,10 @@ to_telegram="${!arr_telegram[*]}"
to_pd="${!arr_pd[*]}"
[ -z "${to_pd}" ] && SEND_PD="NO"
+# build the list of custom recipients
+to_custom="${!arr_custom[*]}"
+[ -z "${to_custom}" ] && SEND_CUSTOM="NO"
+
# build the list of email recipients (email addresses)
to_email=
for x in "${!arr_email[@]}"
@@ -492,7 +518,7 @@ fi
if [ \( \
"${SEND_PUSHOVER}" = "YES" \
-o "${SEND_SLACK}" = "YES" \
- -o "${SEND_DISCORD}" = "YES" \
+ -o "${SEND_DISCORD}" = "YES" \
-o "${SEND_HIPCHAT}" = "YES" \
-o "${SEND_TWILIO}" = "YES" \
-o "${SEND_MESSAGEBIRD}" = "YES" \
@@ -530,13 +556,14 @@ if [ "${SEND_EMAIL}" != "YES" \
-a "${SEND_PUSHOVER}" != "YES" \
-a "${SEND_TELEGRAM}" != "YES" \
-a "${SEND_SLACK}" != "YES" \
- -a "${SEND_DISCORD}" != "YES" \
+ -a "${SEND_DISCORD}" != "YES" \
-a "${SEND_TWILIO}" != "YES" \
-a "${SEND_HIPCHAT}" != "YES" \
-a "${SEND_MESSAGEBIRD}" != "YES" \
-a "${SEND_PUSHBULLET}" != "YES" \
-a "${SEND_KAFKA}" != "YES" \
-a "${SEND_PD}" != "YES" \
+ -a "${SEND_CUSTOM}" != "YES" \
]
then
fatal "All notification methods are disabled. Not sending notification for host '${host}', chart '${chart}' to '${roles}' for '${name}' = '${value}' for status '${status}'."
@@ -939,9 +966,16 @@ send_messagebird() {
# telegram sender
send_telegram() {
- local bottoken="${1}" chatids="${2}" message="${3}" httpcode sent=0 chatid disableNotification=""
+ local bottoken="${1}" chatids="${2}" message="${3}" httpcode sent=0 chatid emoji disableNotification=""
if [ "${status}" = "CLEAR" ]; then disableNotification="--data-urlencode disable_notification=true"; fi
+
+ case "${status}" in
+ WARNING) emoji="⚠️" ;;
+ CRITICAL) emoji="🔴" ;;
+ CLEAR) emoji="✅" ;;
+ *) emoji="⚪️" ;;
+ esac
if [ "${SEND_TELEGRAM}" = "YES" -a ! -z "${bottoken}" -a ! -z "${chatids}" -a ! -z "${message}" ];
then
@@ -951,7 +985,7 @@ send_telegram() {
httpcode=$(${curl} --write-out %{http_code} --silent --output /dev/null ${disableNotification} \
--data-urlencode "parse_mode=HTML" \
--data-urlencode "disable_web_page_preview=true" \
- --data-urlencode "text=${message}" \
+ --data-urlencode "text=${emoji} ${message}" \
"https://api.telegram.org/bot${bottoken}/sendMessage?chat_id=${chatid}")
if [ "${httpcode}" == "200" ]
@@ -1040,7 +1074,7 @@ EOF
# discord sender
send_discord() {
- local webhook="${1}/slack" channels="${2}" httpcode sent=0 channel color payload
+ local webhook="${1}/slack" channels="${2}" httpcode sent=0 channel color payload username
[ "${SEND_DISCORD}" != "YES" ] && return 1
@@ -1053,10 +1087,13 @@ send_discord() {
for channel in ${channels}
do
+ username="netdata on ${host}"
+ [ ${#username} -gt 32 ] && username="${username:0:29}..."
+
payload="$(cat <<EOF
{
"channel": "#${channel}",
- "username": "netdata on ${host}",
+ "username": "${username}",
"text": "${host} ${status_message}, \`${chart}\` (_${family}_), *${alarm}*",
"icon_url": "${images_base_url}/images/seo-performance-128.png",
"attachments": [
@@ -1101,7 +1138,7 @@ EOF
# prepare the content of the notification
# the url to send the user on click
-urlencode "${NETDATA_REGISTRY_HOSTNAME}" >/dev/null; url_host="${REPLY}"
+urlencode "${host}" >/dev/null; url_host="${REPLY}"
urlencode "${chart}" >/dev/null; url_chart="${REPLY}"
urlencode "${family}" >/dev/null; url_family="${REPLY}"
urlencode "${name}" >/dev/null; url_name="${REPLY}"
@@ -1282,6 +1319,24 @@ SENT_PD=$?
# -----------------------------------------------------------------------------
+# send the custom message
+
+send_custom() {
+ # is it enabled?
+ [ "${SEND_CUSTOM}" != "YES" ] && return 1
+
+ # do we have any sender?
+ [ -z "${1}" ] && return 1
+
+ # call the custom_sender function
+ custom_sender "${@}"
+}
+
+send_custom "${to_custom}"
+SENT_CUSTOM=$?
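To actually use the hook, a user redefines `custom_sender()` in `health_alarm_notify.conf` and assigns at least one recipient (otherwise `SEND_CUSTOM` is flipped to NO above). A minimal sketch — the webhook URL is a placeholder, and the variables referenced are the ones this script sets before calling the function:

```sh
# in health_alarm_notify.conf -- override the stub defined by alarm-notify.sh
SEND_CUSTOM="YES"
DEFAULT_RECIPIENT_CUSTOM="sysadmin"

custom_sender() {
    # ${host}, ${chart}, ${name}, ${status} etc. are set by alarm-notify.sh
    curl --silent --max-time 5 \
        --data-urlencode "text=${host} ${chart}.${name} is ${status}" \
        "https://example.com/alarm-hook" >/dev/null \
        && info "sent custom notification for ${status} of '${host}.${chart}.${name}'"
}
```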
+
+
+# -----------------------------------------------------------------------------
# send hipchat message
send_hipchat "${HIPCHAT_AUTH_TOKEN}" "${to_hipchat}" " \
@@ -1295,17 +1350,40 @@ ${host} ${status_message}<br/> \
SENT_HIPCHAT=$?
+
# -----------------------------------------------------------------------------
# send the email
send_email <<EOF
To: ${to_email}
Subject: ${host} ${status_message} - ${name//_/ } - ${chart}
+MIME-Version: 1.0
+Content-Type: multipart/alternative; boundary="multipart-boundary"
+
+This is a MIME-encoded multipart message
+
+--multipart-boundary
+Content-Type: text/plain
+
+${host} ${status_message}
+
+${alarm} ${info}
+${raised_for}
+
+Chart : ${chart}
+Family : ${family}
+Severity: ${severity}
+URL : ${goto_url}
+Source : ${src}
+Date : ${date}
+Notification generated on ${this_host}
+
+--multipart-boundary
Content-Type: text/html
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
-<html xmlns="http://www.w3.org/1999/xhtml" style="font-family: 'Helvetica Neue', 'Helvetica', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 14px; margin: 0; padding: 0;">
-<body style="font-family: 'Helvetica Neue', 'Helvetica', Helvetica, Arial, sans-serif; font-size: 14px; width: 100% !important; min-height: 100%; line-height: 1.6; background: #f6f6f6; margin:0; padding: 0;">
+<html xmlns="http://www.w3.org/1999/xhtml" style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 14px; margin: 0; padding: 0;">
+<body style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 14px; width: 100% !important; min-height: 100%; line-height: 1.6; background: #f6f6f6; margin:0; padding: 0;">
<table>
<tbody>
<tr>
@@ -1316,12 +1394,12 @@ Content-Type: text/html
<tbody>
<tr>
<td bgcolor="#eee" style="padding: 5px 20px 5px 20px; background-color: #eee;">
- <div style="font-family: 'Helvetica Neue', 'Helvetica', Helvetica, Arial, sans-serif; font-size: 20px; color: #777; font-weight: bold;">netdata notification</div>
+ <div style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 20px; color: #777; font-weight: bold;">netdata notification</div>
</td>
</tr>
<tr>
<td bgcolor="${color}" style="font-size: 16px; vertical-align: top; font-weight: 400; text-align: center; margin: 0; padding: 10px; color: #ffffff; background: ${color} !important; border: 1px solid ${color}; border-top-color: ${color};" align="center" valign="top">
- <h1 style="font-family: 'Helvetica Neue', 'Helvetica', Helvetica, Arial, sans-serif; font-weight: 400; margin: 0;">${host} ${status_message}</h1>
+ <h1 style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-weight: 400; margin: 0;">${host} ${status_message}</h1>
</td>
</tr>
<tr>
@@ -1330,46 +1408,46 @@ Content-Type: text/html
<table width="100%" cellpadding="0" cellspacing="0" style="max-width:700px">
<tbody>
<tr>
- <td style="font-family: 'Helvetica Neue', 'Helvetica', Helvetica, Arial, sans-serif; font-size: 18px; vertical-align: top; margin: 0; padding:0 0 20px;" align="left" valign="top">
+ <td style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 18px; vertical-align: top; margin: 0; padding:0 0 20px;" align="left" valign="top">
<span>${chart}</span>
<span style="display: block; color: #666666; font-size: 12px; font-weight: 300; line-height: 1; text-transform: uppercase;">Chart</span>
</td>
</tr>
<tr style="margin: 0; padding: 0;">
- <td style="font-family: 'Helvetica Neue', 'Helvetica', Helvetica, Arial, sans-serif; font-size: 18px; vertical-align: top; margin: 0; padding: 0 0 20px;" align="left" valign="top">
+ <td style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 18px; vertical-align: top; margin: 0; padding: 0 0 20px;" align="left" valign="top">
<span><b>${alarm}</b>${info_html}</span>
<span style="display: block; color: #666666; font-size: 12px; font-weight: 300; line-height: 1; text-transform: uppercase;">Alarm</span>
</td>
</tr>
<tr>
- <td style="font-family: 'Helvetica Neue', 'Helvetica', Helvetica, Arial, sans-serif; font-size: 18px; vertical-align: top; margin: 0; padding: 0 0 20px;" align="left" valign="top">
+ <td style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 18px; vertical-align: top; margin: 0; padding: 0 0 20px;" align="left" valign="top">
<span>${family}</span>
<span style="display: block; color: #666666; font-size: 12px; font-weight: 300; line-height: 1; text-transform: uppercase;">Family</span>
</td>
</tr>
<tr style="margin: 0; padding: 0;">
- <td style="font-family: 'Helvetica Neue', 'Helvetica', Helvetica, Arial, sans-serif; font-size: 18px; vertical-align: top; margin: 0; padding: 0 0 20px;" align="left" valign="top">
+ <td style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 18px; vertical-align: top; margin: 0; padding: 0 0 20px;" align="left" valign="top">
<span>${severity}</span>
<span style="display: block; color: #666666; font-size: 12px; font-weight: 300; line-height: 1; text-transform: uppercase;">Severity</span>
</td>
</tr>
<tr style="margin: 0; padding: 0;">
- <td style="font-family: 'Helvetica Neue', 'Helvetica', Helvetica, Arial, sans-serif; font-size: 18px; vertical-align: top; margin: 0; padding: 0 0 20px;" align="left" valign="top"><span>${date}</span>
+ <td style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 18px; vertical-align: top; margin: 0; padding: 0 0 20px;" align="left" valign="top"><span>${date}</span>
<span>${raised_for_html}</span> <span style="display: block; color: #666666; font-size: 12px; font-weight: 300; line-height: 1; text-transform: uppercase;">Time</span>
</td>
</tr>
<tr style="margin: 0; padding: 0;">
- <td style="font-family: 'Helvetica Neue', 'Helvetica', Helvetica, Arial, sans-serif; font-size: 18px; vertical-align: top; margin: 0; padding: 0 0 20px;">
+ <td style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 18px; vertical-align: top; margin: 0; padding: 0 0 20px;">
<a href="${goto_url}" style="font-size: 14px; color: #ffffff; text-decoration: none; line-height: 1.5; font-weight: bold; text-align: center; display: inline-block; text-transform: capitalize; background: #35568d; border-width: 1px; border-style: solid; border-color: #2b4c86; margin: 0; padding: 10px 15px;" target="_blank">View Netdata</a>
</td>
</tr>
<tr style="text-align: center; margin: 0; padding: 0;">
- <td style="font-family: 'Helvetica Neue', 'Helvetica', Helvetica, Arial, sans-serif; font-size: 11px; vertical-align: top; margin: 0; padding: 10px 0 0 0; color: #666666;" align="center" valign="bottom">The source of this alarm is line <code>${src}</code>
+ <td style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 11px; vertical-align: top; margin: 0; padding: 10px 0 0 0; color: #666666;" align="center" valign="bottom">The source of this alarm is line <code>${src}</code><br/>(alarms are configurable, edit this file to adapt the alarm to your needs)
</td>
</tr>
<tr style="text-align: center; margin: 0; padding: 0;">
- <td style="font-family: 'Helvetica Neue', 'Helvetica', Helvetica, Arial, sans-serif; font-size: 12px; vertical-align: top; margin:0; padding: 20px 0 0 0; color: #666666; border-top: 1px solid #f0f0f0;" align="center" valign="bottom">Sent by
- <a href="https://mynetdata.io/" target="_blank">netdata</a>, the real-time performance monitoring.
+ <td style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 12px; vertical-align: top; margin:0; padding: 20px 0 0 0; color: #666666; border-top: 1px solid #f0f0f0;" align="center" valign="bottom">Sent by
+ <a href="https://mynetdata.io/" target="_blank">netdata</a>, the real-time performance and health monitoring, on <code>${this_host}</code>.
</td>
</tr>
</tbody>
@@ -1404,6 +1482,7 @@ if [ ${SENT_EMAIL} -eq 0 \
-o ${SENT_PUSHBULLET} -eq 0 \
-o ${SENT_KAFKA} -eq 0 \
-o ${SENT_PD} -eq 0 \
+ -o ${SENT_CUSTOM} -eq 0 \
]
then
# we did send something
diff --git a/plugins.d/alarm-test.sh b/plugins.d/alarm-test.sh
index 1963111a..9df5361a 100755
--- a/plugins.d/alarm-test.sh
+++ b/plugins.d/alarm-test.sh
@@ -2,7 +2,7 @@
# netdata
# real-time performance and health monitoring, done right!
-# (C) 2016 Costa Tsaousis <costa@tsaousis.gr>
+# (C) 2017 Costa Tsaousis <costa@tsaousis.gr>
# GPL v3+
#
# Script to test alarm notifications for netdata
diff --git a/plugins.d/cgroup-name.sh b/plugins.d/cgroup-name.sh
index a1e3abe0..dc0bf755 100755
--- a/plugins.d/cgroup-name.sh
+++ b/plugins.d/cgroup-name.sh
@@ -51,7 +51,7 @@ debug() {
# -----------------------------------------------------------------------------
-NETDATA_CONFIG_DIR="${NETDATA_CONFIG_DIR-/etc/netdata}"
+[ -z "${NETDATA_CONFIG_DIR}" ] && NETDATA_CONFIG_DIR="$(dirname "${0}")/../../../../etc/netdata"
CONFIG="${NETDATA_CONFIG_DIR}/cgroups-names.conf"
CGROUP="${1}"
NAME=
@@ -74,22 +74,22 @@ if [ -f "${CONFIG}" ]
# info "configuration file '${CONFIG}' is not available."
fi
-function get_name_classic {
- local DOCKERID="$1"
- info "Running command: docker ps --filter=id=\"${DOCKERID}\" --format=\"{{.Names}}\""
- NAME="$( docker ps --filter=id="${DOCKERID}" --format="{{.Names}}" )"
+function docker_get_name_classic {
+ local id="${1}"
+ info "Running command: docker ps --filter=id=\"${id}\" --format=\"{{.Names}}\""
+ NAME="$( docker ps --filter=id="${id}" --format="{{.Names}}" )"
return 0
}
-function get_name_api {
- local DOCKERID="$1"
+function docker_get_name_api {
+ local id="${1}"
if [ ! -S "/var/run/docker.sock" ]
then
warning "Can't find /var/run/docker.sock"
return 1
fi
- info "Running API command: /containers/${DOCKERID}/json"
- JSON=$(echo -e "GET /containers/${DOCKERID}/json HTTP/1.0\r\n" | nc -U /var/run/docker.sock | egrep '^{.*')
+ info "Running API command: /containers/${id}/json"
+ JSON=$(echo -e "GET /containers/${id}/json HTTP/1.0\r\n" | nc -U /var/run/docker.sock | grep '^{.*')
NAME=$(echo $JSON | jq -r .Name,.Config.Hostname | grep -v null | head -n1 | sed 's|^/||')
return 0
}
@@ -107,9 +107,9 @@ if [ -z "${NAME}" ]
then
if hash docker 2>/dev/null
then
- get_name_classic $DOCKERID
+ docker_get_name_classic ${DOCKERID}
else
- get_name_api $DOCKERID || get_name_classic $DOCKERID
+ docker_get_name_api ${DOCKERID} || docker_get_name_classic ${DOCKERID}
fi
if [ -z "${NAME}" ]
then
@@ -123,7 +123,31 @@ if [ -z "${NAME}" ]
then
# libvirtd / qemu virtual machines
- NAME="$(echo ${CGROUP} | sed 's/machine.slice_machine.*-qemu//; s/\/x2d//; s/\/x2d/\-/g; s/\.scope//g')"
+ # NAME="$(echo ${CGROUP} | sed 's/machine.slice_machine.*-qemu//; s/\/x2d//; s/\/x2d/\-/g; s/\.scope//g')"
+ NAME="qemu_$(echo ${CGROUP} | sed 's/machine.slice_machine.*-qemu//; s/\/x2d[[:digit:]]*//; s/\/x2d//g; s/\.scope//g')"
+
+ elif [[ "${CGROUP}" =~ qemu.slice_([0-9]+).scope && -d /etc/pve ]]
+ then
+ # Proxmox VMs
+
+ FILENAME="/etc/pve/qemu-server/${BASH_REMATCH[1]}.conf"
+ if [[ -f $FILENAME && -r $FILENAME ]]
+ then
+ NAME="qemu_$(grep -e '^name: ' "/etc/pve/qemu-server/${BASH_REMATCH[1]}.conf" | head -1 | sed -rn 's|\s*name\s*:\s*(.*)?$|\1|p')"
+ else
+ error "proxmox config file missing ${FILENAME} or netdata does not have read access. Please ensure netdata is a member of www-data group."
+ fi
+ elif [[ "${CGROUP}" =~ lxc_([0-9]+) && -d /etc/pve ]]
+ then
+ # Proxmox Containers (LXC)
+
+ FILENAME="/etc/pve/lxc/${BASH_REMATCH[1]}.conf"
+ if [[ -f ${FILENAME} && -r ${FILENAME} ]]
+ then
+ NAME=$(grep -e '^hostname: ' /etc/pve/lxc/${BASH_REMATCH[1]}.conf | head -1 | sed -rn 's|\s*hostname\s*:\s*(.*)?$|\1|p')
+ else
+ error "proxmox config file missing ${FILENAME} or netdata does not have read access. Please ensure netdata is a member of www-data group."
+ fi
fi
[ -z "${NAME}" ] && NAME="${CGROUP}"
diff --git a/plugins.d/charts.d.plugin b/plugins.d/charts.d.plugin
index 00206f95..eda5c0de 100755
--- a/plugins.d/charts.d.plugin
+++ b/plugins.d/charts.d.plugin
@@ -2,7 +2,7 @@
# netdata
# real-time performance and health monitoring, done right!
-# (C) 2016 Costa Tsaousis <costa@tsaousis.gr>
+# (C) 2017 Costa Tsaousis <costa@tsaousis.gr>
# GPL v3+
#
# charts.d.plugin allows easy development of BASH plugins
@@ -115,10 +115,11 @@ info "started from '$PROGRAM_FILE' with options: $*"
# internal defaults
# netdata exposes a few environment variables for us
-pluginsd="${NETDATA_PLUGINS_DIR}"
-[ -z "$pluginsd" ] && pluginsd="$( dirname $PROGRAM_FILE )"
+[ -z "${NETDATA_PLUGINS_DIR}" ] && NETDATA_PLUGINS_DIR="$(dirname "${0}")"
+[ -z "${NETDATA_CONFIG_DIR}" ] && NETDATA_CONFIG_DIR="$(dirname "${0}")/../../../../etc/netdata"
-confd="${NETDATA_CONFIG_DIR-/etc/netdata}"
+pluginsd="${NETDATA_PLUGINS_DIR}"
+confd="${NETDATA_CONFIG_DIR}"
chartsd="$pluginsd/../charts.d"
myconfig="$confd/$PROGRAM_NAME.conf"
diff --git a/plugins.d/fping.plugin b/plugins.d/fping.plugin
index 232c0063..b6d981a8 100755
--- a/plugins.d/fping.plugin
+++ b/plugins.d/fping.plugin
@@ -2,7 +2,7 @@
# netdata
# real-time performance and health monitoring, done right!
-# (C) 2016 Costa Tsaousis <costa@tsaousis.gr>
+# (C) 2017 Costa Tsaousis <costa@tsaousis.gr>
# GPL v3+
#
# This plugin requires a latest version of fping.
@@ -37,25 +37,15 @@ if [ "${1}" = "install" ]
run cd /usr/src
- if [ -d fping-3.15 ]
+ if [ -d fping-4.0 ]
then
- run rm -rf fping-3.15 || exit 1
+ run rm -rf fping-4.0 || exit 1
fi
- download 'https://github.com/schweikert/fping/archive/3.15.tar.gz' | run tar -zxvpf -
+ download 'https://github.com/schweikert/fping/releases/download/v4.0/fping-4.0.tar.gz' | run tar -zxvpf -
[ $? -ne 0 ] && exit 1
- run cd fping-3.15 || exit 1
-
- #if [ -d fping-ktsaou.git ]
- # then
- # run cd fping-ktsaou.git
- # run git pull
- #else
- # run git clone https://github.com/ktsaou/fping.git fping-ktsaou.git
- # run cd fping-ktsaou.git
- #fi
-
- run ./autogen.sh
+ run cd fping-4.0 || exit 1
+
run ./configure --prefix=/usr/local
run make clean
run make
@@ -139,7 +129,7 @@ update_every="${1-1}"
# the netdata configuration directory
# passed by netdata as an environment variable
-NETDATA_CONFIG_DIR="${NETDATA_CONFIG_DIR-/etc/netdata}"
+[ -z "${NETDATA_CONFIG_DIR}" ] && NETDATA_CONFIG_DIR="$(dirname "${0}")/../../../../etc/netdata"
# -----------------------------------------------------------------------------
# configuration options
@@ -173,7 +163,7 @@ source "${NETDATA_CONFIG_DIR}/${plugin}.conf"
if [ -z "${hosts}" ]
then
- fatal "no hosts configued in '${NETDATA_CONFIG_DIR}/${plugin}.conf' - nothing to do."
+ fatal "no hosts configured in '${NETDATA_CONFIG_DIR}/${plugin}.conf' - nothing to do."
fi
if [ -z "${fping}" -o ! -x "${fping}" ]
diff --git a/plugins.d/node.d.plugin b/plugins.d/node.d.plugin
index 8b7047fc..b1620391 100755
--- a/plugins.d/node.d.plugin
+++ b/plugins.d/node.d.plugin
@@ -4,13 +4,13 @@
// shebang hack from:
// http://unix.stackexchange.com/questions/65235/universal-node-js-shebang
-// Initially this is run as a shell script (#!/bin/sh).
+// Initially this is run as a shell script.
// Then, the second line, finds nodejs or node or js in the system path
// and executes it with the shell parameters.
// netdata
// real-time performance and health monitoring, done right!
-// (C) 2016 Costa Tsaousis <costa@tsaousis.gr>
+// (C) 2017 Costa Tsaousis <costa@tsaousis.gr>
// GPL v3+
// --------------------------------------------------------------------------------------------------------------------
@@ -21,7 +21,7 @@
// get NETDATA environment variables
var NETDATA_PLUGINS_DIR = process.env.NETDATA_PLUGINS_DIR || __dirname;
-var NETDATA_CONFIG_DIR = process.env.NETDATA_CONFIG_DIR || '/etc/netdata';
+var NETDATA_CONFIG_DIR = process.env.NETDATA_CONFIG_DIR || __dirname + '/../../../../etc/netdata';
var NETDATA_UPDATE_EVERY = process.env.NETDATA_UPDATE_EVERY || 1;
var NODE_D_DIR = NETDATA_PLUGINS_DIR + '/../node.d';
diff --git a/plugins.d/python.d.plugin b/plugins.d/python.d.plugin
index efa62cbc..03c156f4 100755
--- a/plugins.d/python.d.plugin
+++ b/plugins.d/python.d.plugin
@@ -20,8 +20,11 @@ BASE_CONFIG = {'update_every': os.getenv('NETDATA_UPDATE_EVERY', 1),
'retries': 10}
MODULES_DIR = os.path.abspath(os.getenv('NETDATA_PLUGINS_DIR',
- os.path.dirname(__file__)) + "/../python.d") + "/"
-CONFIG_DIR = os.getenv('NETDATA_CONFIG_DIR', "/etc/netdata/")
+ os.path.dirname(__file__)) + "/../python.d") + "/"
+
+CONFIG_DIR = os.getenv('NETDATA_CONFIG_DIR',
+ os.path.dirname(__file__) + "/../../../../etc/netdata")
+
# directories should end with '/'
if CONFIG_DIR[-1] != "/":
CONFIG_DIR += "/"
@@ -307,7 +310,7 @@ class PythonCharts(object):
("/" + str(name) if name is not None else "") +
": cannot start job: '" +
str(e))
- return None
+ continue
else:
# set chart_name (needed to plot run time graphs)
job.chart_name = module.__name__
@@ -460,11 +463,11 @@ def read_config(path):
config = ordered_load(stream, yaml.SafeLoader)
else:
config = yaml.load(stream)
- except (OSError, IOError):
- msg.error(str(path), "is not a valid configuration file")
+ except (OSError, IOError) as error:
+ msg.error(str(path), 'reading error:', str(error))
return None
- except yaml.YAMLError as e:
- msg.error(str(path), "is malformed:", e)
+ except yaml.YAMLError as error:
+ msg.error(str(path), "is malformed:", str(error))
return None
return config
@@ -516,7 +519,7 @@ def run():
global DEBUG_FLAG, TRACE_FLAG, BASE_CONFIG
# read configuration file
- disabled = ['nginx_log', 'gunicorn_log']
+ disabled = ['nginx_log', 'gunicorn_log', 'apache_cache']
enabled = list()
default_run = True
configfile = CONFIG_DIR + "python.d.conf"
diff --git a/plugins.d/tc-qos-helper.sh b/plugins.d/tc-qos-helper.sh
index 074fece9..9153f22e 100755
--- a/plugins.d/tc-qos-helper.sh
+++ b/plugins.d/tc-qos-helper.sh
@@ -2,7 +2,7 @@
# netdata
# real-time performance and health monitoring, done right!
-# (C) 2016 Costa Tsaousis <costa@tsaousis.gr>
+# (C) 2017 Costa Tsaousis <costa@tsaousis.gr>
# GPL v3+
#
# This script is a helper to allow netdata collect tc data.
@@ -101,10 +101,11 @@ debug() {
# -----------------------------------------------------------------------------
-plugins_dir="${NETDATA_PLUGINS_DIR}"
-[ -z "$plugins_dir" ] && plugins_dir="$( dirname $PROGRAM_FILE )"
+[ -z "${NETDATA_PLUGINS_DIR}" ] && NETDATA_PLUGINS_DIR="$(dirname "${0}")"
+[ -z "${NETDATA_CONFIG_DIR}" ] && NETDATA_CONFIG_DIR="$(dirname "${0}")/../../../../etc/netdata"
-config_dir=${NETDATA_CONFIG_DIR-/etc/netdata}
+plugins_dir="${NETDATA_PLUGINS_DIR}"
+config_dir="${NETDATA_CONFIG_DIR}"
tc="$(which tc 2>/dev/null || command -v tc 2>/dev/null)"
diff --git a/python.d/Makefile.am b/python.d/Makefile.am
index bfe28ff2..43f25cff 100644
--- a/python.d/Makefile.am
+++ b/python.d/Makefile.am
@@ -18,12 +18,14 @@ dist_python_DATA = \
bind_rndc.chart.py \
cpufreq.chart.py \
cpuidle.chart.py \
+ dns_query_time.chart.py \
dovecot.chart.py \
elasticsearch.chart.py \
example.chart.py \
exim.chart.py \
fail2ban.chart.py \
freeradius.chart.py \
+ go_expvar.chart.py \
haproxy.chart.py \
hddtemp.chart.py \
ipfs.chart.py \
@@ -38,8 +40,10 @@ dist_python_DATA = \
phpfpm.chart.py \
postfix.chart.py \
postgres.chart.py \
+ rabbitmq.chart.py \
redis.chart.py \
retroshare.chart.py \
+ samba.chart.py \
sensors.chart.py \
squid.chart.py \
smartd_log.chart.py \
diff --git a/python.d/Makefile.in b/python.d/Makefile.in
index 9b784668..33efd42d 100644
--- a/python.d/Makefile.in
+++ b/python.d/Makefile.in
@@ -1,9 +1,8 @@
-# Makefile.in generated by automake 1.11.3 from Makefile.am.
+# Makefile.in generated by automake 1.14.1 from Makefile.am.
# @configure_input@
-# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
-# 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 Free Software
-# Foundation, Inc.
+# Copyright (C) 1994-2013 Free Software Foundation, Inc.
+
# This Makefile.in is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
@@ -17,6 +16,51 @@
VPATH = @srcdir@
+am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
pkgdatadir = $(datadir)/@PACKAGE@
pkgincludedir = $(includedir)/@PACKAGE@
pkglibdir = $(libdir)/@PACKAGE@
@@ -35,10 +79,10 @@ PRE_UNINSTALL = :
POST_UNINSTALL = :
build_triplet = @build@
host_triplet = @host@
-DIST_COMMON = $(dist_python_DATA) $(dist_python_SCRIPTS) \
- $(dist_pythonmodules_DATA) $(dist_pythonyaml2_DATA) \
- $(dist_pythonyaml3_DATA) $(srcdir)/Makefile.am \
- $(srcdir)/Makefile.in $(top_srcdir)/build/subst.inc
+DIST_COMMON = $(top_srcdir)/build/subst.inc $(srcdir)/Makefile.in \
+ $(srcdir)/Makefile.am $(dist_python_SCRIPTS) \
+ $(dist_python_DATA) $(dist_pythonmodules_DATA) \
+ $(dist_pythonyaml2_DATA) $(dist_pythonyaml3_DATA)
subdir = python.d
ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
am__aclocal_m4_deps = $(top_srcdir)/m4/ax_c___atomic.m4 \
@@ -86,13 +130,32 @@ am__installdirs = "$(DESTDIR)$(pythondir)" "$(DESTDIR)$(pythondir)" \
"$(DESTDIR)$(pythonmodulesdir)" "$(DESTDIR)$(pythonyaml2dir)" \
"$(DESTDIR)$(pythonyaml3dir)"
SCRIPTS = $(dist_python_SCRIPTS)
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
SOURCES =
DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
DATA = $(dist_python_DATA) $(dist_pythonmodules_DATA) \
$(dist_pythonyaml2_DATA) $(dist_pythonyaml3_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
AUTOCONF = @AUTOCONF@
AUTOHEADER = @AUTOHEADER@
AUTOMAKE = @AUTOMAKE@
@@ -251,12 +314,14 @@ dist_python_DATA = \
bind_rndc.chart.py \
cpufreq.chart.py \
cpuidle.chart.py \
+ dns_query_time.chart.py \
dovecot.chart.py \
elasticsearch.chart.py \
example.chart.py \
exim.chart.py \
fail2ban.chart.py \
freeradius.chart.py \
+ go_expvar.chart.py \
haproxy.chart.py \
hddtemp.chart.py \
ipfs.chart.py \
@@ -271,8 +336,10 @@ dist_python_DATA = \
phpfpm.chart.py \
postfix.chart.py \
postgres.chart.py \
+ rabbitmq.chart.py \
redis.chart.py \
retroshare.chart.py \
+ samba.chart.py \
sensors.chart.py \
squid.chart.py \
smartd_log.chart.py \
@@ -368,8 +435,11 @@ $(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
$(am__aclocal_m4_deps):
install-dist_pythonSCRIPTS: $(dist_python_SCRIPTS)
@$(NORMAL_INSTALL)
- test -z "$(pythondir)" || $(MKDIR_P) "$(DESTDIR)$(pythondir)"
@list='$(dist_python_SCRIPTS)'; test -n "$(pythondir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(pythondir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(pythondir)" || exit 1; \
+ fi; \
for p in $$list; do \
if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \
@@ -400,8 +470,11 @@ uninstall-dist_pythonSCRIPTS:
dir='$(DESTDIR)$(pythondir)'; $(am__uninstall_files_from_dir)
install-dist_pythonDATA: $(dist_python_DATA)
@$(NORMAL_INSTALL)
- test -z "$(pythondir)" || $(MKDIR_P) "$(DESTDIR)$(pythondir)"
@list='$(dist_python_DATA)'; test -n "$(pythondir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(pythondir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(pythondir)" || exit 1; \
+ fi; \
for p in $$list; do \
if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
echo "$$d$$p"; \
@@ -418,8 +491,11 @@ uninstall-dist_pythonDATA:
dir='$(DESTDIR)$(pythondir)'; $(am__uninstall_files_from_dir)
install-dist_pythonmodulesDATA: $(dist_pythonmodules_DATA)
@$(NORMAL_INSTALL)
- test -z "$(pythonmodulesdir)" || $(MKDIR_P) "$(DESTDIR)$(pythonmodulesdir)"
@list='$(dist_pythonmodules_DATA)'; test -n "$(pythonmodulesdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(pythonmodulesdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(pythonmodulesdir)" || exit 1; \
+ fi; \
for p in $$list; do \
if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
echo "$$d$$p"; \
@@ -436,8 +512,11 @@ uninstall-dist_pythonmodulesDATA:
dir='$(DESTDIR)$(pythonmodulesdir)'; $(am__uninstall_files_from_dir)
install-dist_pythonyaml2DATA: $(dist_pythonyaml2_DATA)
@$(NORMAL_INSTALL)
- test -z "$(pythonyaml2dir)" || $(MKDIR_P) "$(DESTDIR)$(pythonyaml2dir)"
@list='$(dist_pythonyaml2_DATA)'; test -n "$(pythonyaml2dir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(pythonyaml2dir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(pythonyaml2dir)" || exit 1; \
+ fi; \
for p in $$list; do \
if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
echo "$$d$$p"; \
@@ -454,8 +533,11 @@ uninstall-dist_pythonyaml2DATA:
dir='$(DESTDIR)$(pythonyaml2dir)'; $(am__uninstall_files_from_dir)
install-dist_pythonyaml3DATA: $(dist_pythonyaml3_DATA)
@$(NORMAL_INSTALL)
- test -z "$(pythonyaml3dir)" || $(MKDIR_P) "$(DESTDIR)$(pythonyaml3dir)"
@list='$(dist_pythonyaml3_DATA)'; test -n "$(pythonyaml3dir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(pythonyaml3dir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(pythonyaml3dir)" || exit 1; \
+ fi; \
for p in $$list; do \
if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
echo "$$d$$p"; \
@@ -470,11 +552,11 @@ uninstall-dist_pythonyaml3DATA:
@list='$(dist_pythonyaml3_DATA)'; test -n "$(pythonyaml3dir)" || list=; \
files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
dir='$(DESTDIR)$(pythonyaml3dir)'; $(am__uninstall_files_from_dir)
-tags: TAGS
-TAGS:
+tags TAGS:
+
+ctags CTAGS:
-ctags: CTAGS
-CTAGS:
+cscope cscopelist:
distdir: $(DISTFILES)
@@ -618,20 +700,20 @@ uninstall-am: uninstall-dist_pythonDATA uninstall-dist_pythonSCRIPTS \
.MAKE: install-am install-strip
-.PHONY: all all-am check check-am clean clean-generic distclean \
- distclean-generic distdir dvi dvi-am html html-am info info-am \
- install install-am install-data install-data-am \
- install-dist_pythonDATA install-dist_pythonSCRIPTS \
- install-dist_pythonmodulesDATA install-dist_pythonyaml2DATA \
- install-dist_pythonyaml3DATA install-dvi install-dvi-am \
- install-exec install-exec-am install-html install-html-am \
- install-info install-info-am install-man install-pdf \
- install-pdf-am install-ps install-ps-am install-strip \
- installcheck installcheck-am installdirs maintainer-clean \
- maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
- pdf-am ps ps-am uninstall uninstall-am \
- uninstall-dist_pythonDATA uninstall-dist_pythonSCRIPTS \
- uninstall-dist_pythonmodulesDATA \
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dist_pythonDATA \
+ install-dist_pythonSCRIPTS install-dist_pythonmodulesDATA \
+ install-dist_pythonyaml2DATA install-dist_pythonyaml3DATA \
+ install-dvi install-dvi-am install-exec install-exec-am \
+ install-html install-html-am install-info install-info-am \
+ install-man install-pdf install-pdf-am install-ps \
+ install-ps-am install-strip installcheck installcheck-am \
+ installdirs maintainer-clean maintainer-clean-generic \
+ mostlyclean mostlyclean-generic pdf pdf-am ps ps-am tags-am \
+ uninstall uninstall-am uninstall-dist_pythonDATA \
+ uninstall-dist_pythonSCRIPTS uninstall-dist_pythonmodulesDATA \
uninstall-dist_pythonyaml2DATA uninstall-dist_pythonyaml3DATA
.in:
diff --git a/python.d/README.md b/python.d/README.md
index 7df6e3e8..c4504a7c 100644
--- a/python.d/README.md
+++ b/python.d/README.md
@@ -228,6 +228,16 @@ It produces one stacked chart per CPU, showing the percentage of time spent in
each state.
---
+# dns_query_time
+
+This module provides DNS query time statistics.
+
+**Requirement:**
+* `python-dnspython` package
+
+It produces one aggregate chart, or one chart per DNS server, showing the query time.
+
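+A minimal job sketch for `conf.d/python.d/dns_query_time.conf`, using the options
+visible in `dns_query_time.chart.py` below (the job name and values are
+illustrative assumptions, not defaults):
+
+```yaml
+my_dns_check:
+  update_every     : 5
+  dns_servers      : '8.8.8.8 8.8.4.4'     # space-separated list
+  domains          : 'example.com example.org'
+  response_timeout : 4
+  aggregate        : yes
+```
+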
+---
# dovecot
@@ -473,6 +483,44 @@ and restart/reload your FREERADIUS server.
---
+# go_expvar
+
+The `go_expvar` module can monitor any Go application that exposes its metrics with the use of the `expvar` package from the Go standard library.
+
+`go_expvar` produces charts for Go runtime memory statistics and optionally any number of custom charts. Please see the [wiki page](https://github.com/firehol/netdata/wiki/Monitoring-Go-Applications) for more info.
+
+For the memory statistics, it produces the following charts:
+
+1. **Heap allocations** in kB
+ * alloc: size of objects allocated on the heap
+ * inuse: size of allocated heap spans
+
+2. **Stack allocations** in kB
+ * inuse: size of allocated stack spans
+
+3. **MSpan allocations** in kB
+ * inuse: size of allocated mspan structures
+
+4. **MCache allocations** in kB
+ * inuse: size of allocated mcache structures
+
+5. **Virtual memory** in kB
+ * sys: size of reserved virtual address space
+
+6. **Live objects**
+ * live: number of live objects in memory
+
+7. **GC pauses average** in ns
+ * avg: average duration of all GC stop-the-world pauses
+
+### configuration
+
+Please see the [wiki page](https://github.com/firehol/netdata/wiki/Monitoring-Go-Applications#using-netdata-go_expvar-module) for detailed info about module configuration.
+
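+As a quick orientation, a job usually just points the module at the application's
+`expvar` endpoint. A sketch follows; the job name, URL and flag are illustrative
+assumptions — the wiki page above is the authoritative reference:
+
+```yaml
+app1:
+  name : 'app1'
+  url  : 'http://127.0.0.1:8080/debug/vars'
+  collect_memstats: true
+```
+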
+---
+
# haproxy
Module monitors frontend and backend metrics such as bytes in, bytes out, sessions current, sessions in queue current.
@@ -1198,6 +1246,60 @@ When no configuration file is found, module tries to connect to TCP/IP socket: `
---
+# rabbitmq
+
+Module monitors rabbitmq performance and health metrics.
+
+Following charts are drawn:
+
+1. **Queued Messages**
+ * ready
+ * unacknowledged
+
+2. **Message Rates**
+ * ack
+ * redelivered
+ * deliver
+ * publish
+
+3. **Global Counts**
+ * channels
+ * consumers
+ * connections
+ * queues
+ * exchanges
+
+4. **File Descriptors**
+ * used descriptors
+
+5. **Socket Descriptors**
+ * used descriptors
+
+6. **Erlang processes**
+ * used processes
+
+7. **Memory**
+ * free memory in megabytes
+
+8. **Disk Space**
+ * free disk space in gigabytes
+
+### configuration
+
+```yaml
+socket:
+ name : 'local'
+ host : '127.0.0.1'
+ port : 15672
+ user : 'guest'
+ pass : 'guest'
+
+```
+
+When no configuration file is found, module tries to connect to: `localhost:15672`.
+
+---
+
# redis
Get INFO data from redis instance.
@@ -1241,6 +1343,68 @@ When no configuration file is found, module tries to connect to TCP/IP socket: `
---
+# samba
+
+Performance metrics of Samba file sharing.
+
+It produces the following charts:
+
+1. **Syscall R/Ws** in kilobytes/s
+ * sendfile
+ * recvfile
+
+2. **Smb2 R/Ws** in kilobytes/s
+ * readout
+ * writein
+ * readin
+ * writeout
+
+3. **Smb2 Create/Close** in operations/s
+ * create
+ * close
+
+4. **Smb2 Info** in operations/s
+ * getinfo
+ * setinfo
+
+5. **Smb2 Find** in operations/s
+ * find
+
+6. **Smb2 Notify** in operations/s
+ * notify
+
+7. **Smb2 Lesser Ops** as counters
+ * tcon
+ * negprot
+ * tdis
+ * cancel
+ * logoff
+ * flush
+ * lock
+ * keepalive
+ * break
+ * sessetup
+
+### configuration
+
+Requires that `smbd` has been compiled with profiling enabled. It also requires
+that `smbd` was started either with the `-P 1` option or with
+`smbd profiling level` set inside `smb.conf`.
+
+This plugin uses `smbstatus -P`, which can only be executed by root. It uses
+`sudo` and assumes that it is configured so that the `netdata` user can
+execute `smbstatus` as root without a password.
+
+For example:
+
+ netdata ALL=(ALL) NOPASSWD: /usr/bin/smbstatus -P
+
+```yaml
+update_every : 5 # update frequency
+```
+
+---
+
# sensors
System sensors information.
@@ -1251,6 +1415,12 @@ Charts are created dynamically.
For detailed configuration information please read [`sensors.conf`](https://github.com/firehol/netdata/blob/master/conf.d/python.d/sensors.conf) file.
+### possible issues
+
+There have been reports from users that on certain servers, ACPI ring buffer errors are printed by the kernel (`dmesg`) when ACPI sensors are being accessed.
+We are tracking such cases in issue [#827](https://github.com/firehol/netdata/issues/827).
+Please join this discussion for help.
+
---
# squid
diff --git a/python.d/apache.chart.py b/python.d/apache.chart.py
index 2e4d16dd..71fe0300 100644
--- a/python.d/apache.chart.py
+++ b/python.d/apache.chart.py
@@ -22,7 +22,8 @@ ORDER = ['requests', 'connections', 'conns_async', 'net', 'workers', 'reqpersec'
CHARTS = {
'bytesperreq': {
- 'options': [None, 'apache Lifetime Avg. Response Size', 'bytes/request', 'statistics', 'apache.bytesperreq', 'area'],
+ 'options': [None, 'apache Lifetime Avg. Response Size', 'bytes/request',
+ 'statistics', 'apache.bytesperreq', 'area'],
'lines': [
["size_req"]
]},
@@ -30,15 +31,19 @@ CHARTS = {
'options': [None, 'apache Workers', 'workers', 'workers', 'apache.workers', 'stacked'],
'lines': [
["idle"],
- ["busy"]
+ ["idle_servers", 'idle'],
+ ["busy"],
+ ["busy_servers", 'busy']
]},
'reqpersec': {
- 'options': [None, 'apache Lifetime Avg. Requests/s', 'requests/s', 'statistics', 'apache.reqpersec', 'area'],
+ 'options': [None, 'apache Lifetime Avg. Requests/s', 'requests/s', 'statistics',
+ 'apache.reqpersec', 'area'],
'lines': [
["requests_sec"]
]},
'bytespersec': {
- 'options': [None, 'apache Lifetime Avg. Bandwidth/s', 'kilobytes/s', 'statistics', 'apache.bytesperreq', 'area'],
+ 'options': [None, 'apache Lifetime Avg. Bandwidth/s', 'kilobytes/s', 'statistics',
+ 'apache.bytesperreq', 'area'],
'lines': [
["size_sec", None, 'absolute', 1, 1000]
]},
@@ -66,43 +71,54 @@ CHARTS = {
]}
}
+ASSIGNMENT = {"BytesPerReq": 'size_req',
+ "IdleWorkers": 'idle',
+ "IdleServers": 'idle_servers',
+ "BusyWorkers": 'busy',
+ "BusyServers": 'busy_servers',
+ "ReqPerSec": 'requests_sec',
+ "BytesPerSec": 'size_sec',
+ "Total Accesses": 'requests',
+ "Total kBytes": 'sent',
+ "ConnsTotal": 'connections',
+ "ConnsAsyncKeepAlive": 'keepalive',
+ "ConnsAsyncClosing": 'closing',
+ "ConnsAsyncWriting": 'writing'}
+
class Service(UrlService):
def __init__(self, configuration=None, name=None):
UrlService.__init__(self, configuration=configuration, name=name)
- if len(self.url) == 0:
- self.url = "http://localhost/server-status?auto"
self.order = ORDER
self.definitions = CHARTS
- self.assignment = {"BytesPerReq": 'size_req',
- "IdleWorkers": 'idle',
- "BusyWorkers": 'busy',
- "ReqPerSec": 'requests_sec',
- "BytesPerSec": 'size_sec',
- "Total Accesses": 'requests',
- "Total kBytes": 'sent',
- "ConnsTotal": 'connections',
- "ConnsAsyncKeepAlive": 'keepalive',
- "ConnsAsyncClosing": 'closing',
- "ConnsAsyncWriting": 'writing'}
+ self.url = self.configuration.get('url', 'http://localhost/server-status?auto')
+
+ def check(self):
+ if UrlService.check(self):
+ if 'idle_servers' in self._data_from_check:
+ self.__module__ = 'lighttpd'
+ for chart in self.definitions:
+ opts = self.definitions[chart]['options']
+ opts[1] = opts[1].replace('apache', 'lighttpd')
+ opts[4] = opts[4].replace('apache', 'lighttpd')
+ return True
+ return False
def _get_data(self):
"""
Format data received from http request
:return: dict
"""
- try:
- raw = self._get_raw_data().split("\n")
- except AttributeError:
+ raw_data = self._get_raw_data()
+ if not raw_data:
return None
- data = {}
- for row in raw:
+ data = dict()
+
+ for row in raw_data.split('\n'):
tmp = row.split(":")
- if str(tmp[0]) in self.assignment:
+ if tmp[0] in ASSIGNMENT:
try:
- data[self.assignment[tmp[0]]] = int(float(tmp[1]))
+ data[ASSIGNMENT[tmp[0]]] = int(float(tmp[1]))
except (IndexError, ValueError):
- pass
- if len(data) == 0:
- return None
- return data
+ continue
+ return data or None
diff --git a/python.d/bind_rndc.chart.py b/python.d/bind_rndc.chart.py
index a4d75370..5a974928 100644
--- a/python.d/bind_rndc.chart.py
+++ b/python.d/bind_rndc.chart.py
@@ -2,100 +2,141 @@
# Description: bind rndc netdata python.d module
# Author: l2isbad
-from base import SimpleService
-from re import compile, findall
-from os.path import getsize, split
-from os import access as is_accessible, R_OK
+from os.path import getsize
+from os import access, R_OK
from subprocess import Popen
+from collections import defaultdict
+from base import SimpleService
priority = 60000
retries = 60
update_every = 30
-NMS = ['requests', 'responses', 'success', 'auth_answer', 'nonauth_answer', 'nxrrset', 'failure',
- 'nxdomain', 'recursion', 'duplicate', 'rejections']
-QUERIES = ['RESERVED0', 'A', 'NS', 'CNAME', 'SOA', 'PTR', 'MX', 'TXT', 'X25', 'AAAA', 'SRV', 'NAPTR',
- 'A6', 'DS', 'RRSIG', 'DNSKEY', 'SPF', 'ANY', 'DLV']
+ORDER = ['name_server_statistics', 'incoming_queries', 'outgoing_queries', 'named_stats_size']
+
+CHARTS = {
+ 'name_server_statistics': {
+ 'options': [None, 'Name Server Statistics', 'stats', 'name server statistics',
+ 'bind_rndc.name_server_statistics', 'line'],
+ 'lines': [
+ ['nms_requests', 'requests', 'incremental'],
+ ['nms_rejected_queries', 'rejected_queries', 'incremental'],
+ ['nms_success', 'success', 'incremental'],
+ ['nms_failure', 'failure', 'incremental'],
+ ['nms_responses', 'responses', 'incremental'],
+ ['nms_duplicate', 'duplicate', 'incremental'],
+ ['nms_recursion', 'recursion', 'incremental'],
+ ['nms_nxrrset', 'nxrrset', 'incremental'],
+ ['nms_nxdomain', 'nxdomain', 'incremental'],
+ ['nms_non_auth_answer', 'non_auth_answer', 'incremental'],
+ ['nms_auth_answer', 'auth_answer', 'incremental'],
+ ['nms_dropped_queries', 'dropped_queries', 'incremental'],
+ ]},
+ 'incoming_queries': {
+ 'options': [None, 'Incoming Queries', 'queries', 'incoming queries',
+ 'bind_rndc.incoming_queries', 'line'],
+ 'lines': [
+ ]},
+ 'outgoing_queries': {
+ 'options': [None, 'Outgoing Queries', 'queries', 'outgoing queries',
+ 'bind_rndc.outgoing_queries', 'line'],
+ 'lines': [
+ ]},
+ 'named_stats_size': {
+ 'options': [None, 'Named Stats File Size', 'MB', 'file size',
+ 'bind_rndc.stats_size', 'line'],
+ 'lines': [
+ ['stats_size', None, 'absolute', 1, 1 << 20]
+ ]}
+}
+
+NMS = {
+ 'nms_requests':
+ ['IPv4 requests received',
+ 'IPv6 requests received',
+ 'TCP requests received',
+ 'requests with EDNS(0) receive'],
+ 'nms_responses':
+ ['responses sent',
+ 'truncated responses sent',
+ 'responses with EDNS(0) sent',
+ 'requests with unsupported EDNS version received'],
+ 'nms_failure':
+ ['other query failures',
+ 'queries resulted in SERVFAIL'],
+ 'nms_auth_answer':
+ ['queries resulted in authoritative answer'],
+ 'nms_non_auth_answer':
+ ['queries resulted in non authoritative answer'],
+ 'nms_nxrrset':
+ ['queries resulted in nxrrset'],
+ 'nms_success':
+ ['queries resulted in successful answer'],
+ 'nms_nxdomain':
+ ['queries resulted in NXDOMAIN'],
+ 'nms_recursion':
+ ['queries caused recursion'],
+ 'nms_duplicate':
+ ['duplicate queries received'],
+ 'nms_rejected_queries':
+ ['auth queries rejected',
+ 'recursive queries rejected'],
+ 'nms_dropped_queries':
+ ['queries dropped']
+}
+
+STATS = ['Name Server Statistics', 'Incoming Queries', 'Outgoing Queries']
class Service(SimpleService):
def __init__(self, configuration=None, name=None):
SimpleService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
self.named_stats_path = self.configuration.get('named_stats_path', '/var/log/bind/named.stats')
- self.regex_values = compile(r'([0-9]+) ([^\n]+)')
- # self.options = ['Incoming Requests', 'Incoming Queries', 'Outgoing Queries',
- # 'Name Server Statistics', 'Zone Maintenance Statistics', 'Resolver Statistics',
- # 'Cache DB RRsets', 'Socket I/O Statistics']
- self.options = ['Name Server Statistics', 'Incoming Queries', 'Outgoing Queries']
- self.regex_options = [r'(%s(?= \+\+)) \+\+([^\+]+)' % option for option in self.options]
self.rndc = self.find_binary('rndc')
+ self.data = dict(nms_requests=0, nms_responses=0, nms_failure=0, nms_auth=0,
+ nms_non_auth=0, nms_nxrrset=0, nms_success=0, nms_nxdomain=0,
+ nms_recursion=0, nms_duplicate=0, nms_rejected_queries=0,
+ nms_dropped_queries=0)
def check(self):
- # We cant start without 'rndc' command
if not self.rndc:
self.error('Can\'t locate \'rndc\' binary or binary is not executable by netdata')
return False
- # We cant start if stats file is not exist or not readable by netdata user
- if not is_accessible(self.named_stats_path, R_OK):
+ if not access(self.named_stats_path, R_OK):
self.error('Cannot access file %s' % self.named_stats_path)
return False
- size_before = getsize(self.named_stats_path)
run_rndc = Popen([self.rndc, 'stats'], shell=False)
run_rndc.wait()
- size_after = getsize(self.named_stats_path)
- # We cant start if netdata user has no permissions to run 'rndc stats'
if not run_rndc.returncode:
- # 'rndc' was found, stats file is exist and readable and we can run 'rndc stats'. Lets go!
- self.create_charts()
-
- # BIND APPEND dump on every run 'rndc stats'
- # that is why stats file size can be VERY large if update_interval too small
- dump_size_24hr = round(86400 / self.update_every * (int(size_after) - int(size_before)) / 1048576, 3)
-
- # If update_every too small we should WARN user
- if self.update_every < 30:
- self.info('Update_every %s is NOT recommended for use. Increase the value to > 30' % self.update_every)
-
- self.info('With current update_interval it will be + %s MB every 24hr. '
- 'Don\'t forget to create logrotate conf file for %s' % (dump_size_24hr, self.named_stats_path))
-
- self.info('Plugin was started successfully.')
-
return True
- else:
- self.error('Not enough permissions to run "%s stats"' % self.rndc)
- return False
+ self.error('Not enough permissions to run "%s stats"' % self.rndc)
+ return False
def _get_raw_data(self):
"""
Run 'rndc stats' and read last dump from named.stats
- :return: tuple(
- file.read() obj,
- named.stats file size
- )
+ :return: dict
"""
+ result = dict()
try:
current_size = getsize(self.named_stats_path)
- except OSError:
- return None, None
-
- run_rndc = Popen([self.rndc, 'stats'], shell=False)
- run_rndc.wait()
-
- if run_rndc.returncode:
- return None, None
-
- try:
- with open(self.named_stats_path) as bind_rndc:
- bind_rndc.seek(current_size)
- result = bind_rndc.read()
- except OSError:
- return None, None
- else:
- return result, current_size
+ run_rndc = Popen([self.rndc, 'stats'], shell=False)
+ run_rndc.wait()
+
+ if run_rndc.returncode:
+ return None
+ with open(self.named_stats_path) as named_stats:
+ named_stats.seek(current_size)
+ result['stats'] = named_stats.readlines()
+ result['size'] = current_size
+ return result
+ except (OSError, IOError):
+ return None
def _get_data(self):
"""
@@ -103,72 +144,98 @@ class Service(SimpleService):
:return: dict
"""
- raw_data, size = self._get_raw_data()
+ raw_data = self._get_raw_data()
if raw_data is None:
return None
-
- rndc_stats = dict()
-
- # Result: dict.
- # topic = Cache DB RRsets; body = A 178303 NS 86790 ... ; desc = A; value = 178303
- # {'Cache DB RRsets': [('A', 178303), ('NS', 286790), ...],
- # {Incoming Queries': [('RESERVED0', 8), ('A', 4557317680), ...],
- # ......
- for regex in self.regex_options:
- rndc_stats.update(dict([(topic, [(desc, int(value)) for value, desc in self.regex_values.findall(body)])
- for topic, body in findall(regex, raw_data)]))
-
- nms = dict(rndc_stats.get('Name Server Statistics', []))
-
- inc_queries = dict([('i' + k, 0) for k in QUERIES])
- inc_queries.update(dict([('i' + k, v) for k, v in rndc_stats.get('Incoming Queries', [])]))
- out_queries = dict([('o' + k, 0) for k in QUERIES])
- out_queries.update(dict([('o' + k, v) for k, v in rndc_stats.get('Outgoing Queries', [])]))
-
- to_netdata = dict()
- to_netdata['requests'] = sum([v for k, v in nms.items() if 'request' in k and 'received' in k])
- to_netdata['responses'] = sum([v for k, v in nms.items() if 'responses' in k and 'sent' in k])
- to_netdata['success'] = nms.get('queries resulted in successful answer', 0)
- to_netdata['auth_answer'] = nms.get('queries resulted in authoritative answer', 0)
- to_netdata['nonauth_answer'] = nms.get('queries resulted in non authoritative answer', 0)
- to_netdata['nxrrset'] = nms.get('queries resulted in nxrrset', 0)
- to_netdata['failure'] = sum([nms.get('queries resulted in SERVFAIL', 0), nms.get('other query failures', 0)])
- to_netdata['nxdomain'] = nms.get('queries resulted in NXDOMAIN', 0)
- to_netdata['recursion'] = nms.get('queries caused recursion', 0)
- to_netdata['duplicate'] = nms.get('duplicate queries received', 0)
- to_netdata['rejections'] = nms.get('recursive queries rejected', 0)
- to_netdata['stats_size'] = size
-
- to_netdata.update(inc_queries)
- to_netdata.update(out_queries)
- return to_netdata
-
- def create_charts(self):
- self.order = ['stats_size', 'bind_stats', 'incoming_q', 'outgoing_q']
- self.definitions = {
- 'bind_stats': {
- 'options': [None, 'Name Server Statistics', 'stats', 'name server statistics', 'bind_rndc.stats', 'line'],
- 'lines': [
- ]},
- 'incoming_q': {
- 'options': [None, 'Incoming queries', 'queries','incoming queries', 'bind_rndc.incq', 'line'],
- 'lines': [
- ]},
- 'outgoing_q': {
- 'options': [None, 'Outgoing queries', 'queries','outgoing queries', 'bind_rndc.outq', 'line'],
- 'lines': [
- ]},
- 'stats_size': {
- 'options': [None, '%s file size' % split(self.named_stats_path)[1].capitalize(), 'megabytes',
- '%s size' % split(self.named_stats_path)[1], 'bind_rndc.size', 'line'],
- 'lines': [
- ["stats_size", None, "absolute", 1, 1048576]
- ]}
- }
- for elem in QUERIES:
- self.definitions['incoming_q']['lines'].append(['i' + elem, elem, 'incremental'])
- self.definitions['outgoing_q']['lines'].append(['o' + elem, elem, 'incremental'])
-
- for elem in NMS:
- self.definitions['bind_stats']['lines'].append([elem, None, 'incremental'])
+ parsed = dict()
+ for stat in STATS:
+ parsed[stat] = parse_stats(field=stat,
+ named_stats=raw_data['stats'])
+
+ self.data.update(nms_mapper(data=parsed['Name Server Statistics']))
+
+ for elem in zip(['Incoming Queries', 'Outgoing Queries'], ['incoming_queries', 'outgoing_queries']):
+ parsed_key, chart_name = elem[0], elem[1]
+ for dimension_id, value in queries_mapper(data=parsed[parsed_key],
+ add=chart_name[:9]).items():
+ if dimension_id not in self.data:
+ dimension = dimension_id.replace(chart_name[:9], '')
+ self._add_new_dimension(dimension_id=dimension_id,
+ dimension=dimension,
+ chart_name=chart_name,
+ priority=self.priority + self.order.index(chart_name))
+ self.data[dimension_id] = value
+
+ self.data['stats_size'] = raw_data['size']
+ return self.data
+
+
+def parse_stats(field, named_stats):
+ """
+ :param field: str:
+ :param named_stats: list:
+ :return: dict
+
+ Example:
+ field: 'Incoming Queries'
+ named_stats (list of lines):
+ ++ Incoming Requests ++
+ 1405660 QUERY
+ 3 NOTIFY
+ ++ Incoming Queries ++
+ 1214961 A
+ 75 NS
+ 2 CNAME
+ 2897 SOA
+ 35544 PTR
+ 14 MX
+ 5822 TXT
+ 145974 AAAA
+ 371 SRV
+ ++ Outgoing Queries ++
+ ...
+
+ result:
+ {'A': 1214961, 'NS': 75, 'CNAME': 2, 'SOA': 2897, ...}
+ """
+ data = dict()
+ ns = iter(named_stats)
+ for line in ns:
+ if field not in line:
+ continue
+ while True:
+ try:
+ line = next(ns)
+ except StopIteration:
+ break
+ if '++' not in line:
+ if '[' in line:
+ continue
+ v, k = line.strip().split(' ', 1)
+ data[k] = int(v)
+ continue
+ break
+ break
+ return data
+
+
+def nms_mapper(data):
+ """
+ :param data: dict
+ :return: dict(defaultdict)
+ """
+ result = defaultdict(int)
+ for k, v in NMS.items():
+ for elem in v:
+ result[k] += data.get(elem, 0)
+ return result
+
+
+def queries_mapper(data, add):
+ """
+ :param data: dict
+ :param add: str
+ :return: dict
+ """
+ return dict([(add + k, v) for k, v in data.items()])
diff --git a/python.d/cpufreq.chart.py b/python.d/cpufreq.chart.py
index e28bdea8..d5544b7b 100644
--- a/python.d/cpufreq.chart.py
+++ b/python.d/cpufreq.chart.py
@@ -100,7 +100,7 @@ class Service(SimpleService):
self.error("couldn't find a method to read cpufreq statistics")
return False
- for name in self.assignment.keys():
+ for name in sorted(self.assignment, key=lambda v: int(v[3:])):
self.definitions[ORDER[0]]['lines'].append([name, name, 'absolute', 1, 1000])
return True
diff --git a/python.d/dns_query_time.chart.py b/python.d/dns_query_time.chart.py
new file mode 100644
index 00000000..9053d9a1
--- /dev/null
+++ b/python.d/dns_query_time.chart.py
@@ -0,0 +1,135 @@
+# -*- coding: utf-8 -*-
+# Description: dns_query_time netdata python.d module
+# Author: l2isbad
+
+try:
+ from time import monotonic as time
+except ImportError:
+ from time import time
+try:
+ import dns.message, dns.query, dns.name, dns.rdatatype
+ DNS_PYTHON = True
+except ImportError:
+ DNS_PYTHON = False
+try:
+ from queue import Queue
+except ImportError:
+ from Queue import Queue
+from random import choice
+from threading import Thread
+from socket import gethostbyname, gaierror
+from base import SimpleService
+
+
+# default module values (can be overridden per job in `config`)
+update_every = 5
+priority = 60000
+retries = 60
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.order = list()
+ self.definitions = dict()
+ self.timeout = self.configuration.get('response_timeout', 4)
+ self.aggregate = self.configuration.get('aggregate', True)
+ self.domains = self.configuration.get('domains')
+ self.server_list = self.configuration.get('dns_servers')
+
+ def check(self):
+ if not DNS_PYTHON:
+ self.error('\'python-dnspython\' package is needed to use dns_query_time.chart.py')
+ return False
+
+ self.timeout = self.timeout if isinstance(self.timeout, int) else 4
+ self.update_every = self.timeout + 1 if self.update_every <= self.timeout else self.update_every
+
+ if not all([self.domains, self.server_list,
+ isinstance(self.server_list, str), isinstance(self.domains, str)]):
+ self.error('dns_servers and domains can\'t be empty')
+ return False
+ else:
+ self.domains, self.server_list = self.domains.split(), self.server_list.split()
+
+ for ns in self.server_list:
+ if not check_ns(ns):
+ self.info('Bad NS: %s' % ns)
+ self.server_list.remove(ns)
+ if not self.server_list:
+ return False
+
+ data = self._get_data(timeout=1)
+
+ down_servers = [s for s in data if data[s] == -100]
+ for down in down_servers:
+ down = down[3:].replace('_', '.')
+ self.info('Removed due to non response %s' % down)
+ self.server_list.remove(down)
+ if not self.server_list:
+ return False
+
+ self._data_from_check = data
+ self.order, self.definitions = create_charts(aggregate=self.aggregate, server_list=self.server_list)
+ self.info(str({'domains': len(self.domains), 'servers': self.server_list}))
+ return True
+
+ def _get_data(self, timeout=None):
+ return dns_request(self.server_list, timeout or self.timeout, self.domains)
+
+
+def dns_request(server_list, timeout, domains):
+ threads = list()
+ que = Queue()
+ result = dict()
+
+ def dns_req(ns, t, q):
+ domain = dns.name.from_text(choice(domains))
+ request = dns.message.make_query(domain, dns.rdatatype.A)
+
+ try:
+ dns_start = time()
+ dns.query.udp(request, ns, timeout=t)
+ dns_end = time()
+ query_time = round((dns_end - dns_start) * 1000)
+ q.put({'_'.join(['ns', ns.replace('.', '_')]): query_time})
+ except dns.exception.Timeout:
+ q.put({'_'.join(['ns', ns.replace('.', '_')]): -100})
+
+ for server in server_list:
+ th = Thread(target=dns_req, args=(server, timeout, que))
+ th.start()
+ threads.append(th)
+
+ for th in threads:
+ th.join()
+ result.update(que.get())
+
+ return result
+
+
+def check_ns(ns):
+ try:
+ return gethostbyname(ns)
+ except gaierror:
+ return False
+
+
+def create_charts(aggregate, server_list):
+ if aggregate:
+ order = ['dns_group']
+ definitions = {'dns_group': {'options': [None, 'DNS Response Time', 'ms', 'name servers',
+ 'dns_query_time.response_time', 'line'], 'lines': []}}
+ for ns in server_list:
+ definitions['dns_group']['lines'].append(['_'.join(['ns', ns.replace('.', '_')]), ns, 'absolute'])
+
+ return order, definitions
+ else:
+ order = [''.join(['dns_', ns.replace('.', '_')]) for ns in server_list]
+ definitions = dict()
+ for ns in server_list:
+ definitions[''.join(['dns_', ns.replace('.', '_')])] = {'options': [None, 'DNS Response Time', 'ms', ns,
+ 'dns_query_time.response_time', 'area'],
+ 'lines': [['_'.join(['ns', ns.replace('.', '_')]),
+ ns, 'absolute']]}
+ return order, definitions
diff --git a/python.d/elasticsearch.chart.py b/python.d/elasticsearch.chart.py
index 430227f6..9ec08719 100644
--- a/python.d/elasticsearch.chart.py
+++ b/python.d/elasticsearch.chart.py
@@ -85,6 +85,21 @@ HEALTH_STATS = [
('active_shards_percent_as_number', 'health_active_shards_percent_as_number', None)
]
+LATENCY = {
+ 'query_latency':
+ {'total': 'query_total',
+ 'spent_time': 'query_time_in_millis'},
+ 'fetch_latency':
+ {'total': 'fetch_total',
+ 'spent_time': 'fetch_time_in_millis'},
+ 'indexing_latency':
+ {'total': 'indexing_index_total',
+ 'spent_time': 'indexing_index_time_in_millis'},
+ 'flushing_latency':
+ {'total': 'flush_total',
+ 'spent_time': 'flush_total_time_in_millis'}
+}
+
# charts order (can be overridden if you want less charts, or different order)
ORDER = ['search_perf_total', 'search_perf_current', 'search_perf_time', 'search_latency', 'index_perf_total',
'index_perf_current', 'index_perf_time', 'index_latency', 'jvm_mem_heap', 'jvm_gc_count',
@@ -95,34 +110,34 @@ ORDER = ['search_perf_total', 'search_perf_current', 'search_perf_time', 'search
CHARTS = {
'search_perf_total': {
- 'options': [None, 'Total number of queries, fetches', 'number of', 'search performance',
+ 'options': [None, 'Queries And Fetches', 'number of', 'search performance',
'es.search_query_total', 'stacked'],
'lines': [
['query_total', 'queries', 'incremental'],
['fetch_total', 'fetches', 'incremental']
]},
'search_perf_current': {
- 'options': [None, 'Number of queries, fetches in progress', 'number of', 'search performance',
+ 'options': [None, 'Queries and Fetches In Progress', 'number of', 'search performance',
'es.search_query_current', 'stacked'],
'lines': [
['query_current', 'queries', 'absolute'],
['fetch_current', 'fetches', 'absolute']
]},
'search_perf_time': {
- 'options': [None, 'Time spent on queries, fetches', 'seconds', 'search performance',
+ 'options': [None, 'Time Spent On Queries And Fetches', 'seconds', 'search performance',
'es.search_time', 'stacked'],
'lines': [
['query_time_in_millis', 'query', 'incremental', 1, 1000],
['fetch_time_in_millis', 'fetch', 'incremental', 1, 1000]
]},
'search_latency': {
- 'options': [None, 'Query and fetch latency', 'ms', 'search performance', 'es.search_latency', 'stacked'],
+ 'options': [None, 'Query And Fetch Latency', 'ms', 'search performance', 'es.search_latency', 'stacked'],
'lines': [
['query_latency', 'query', 'absolute', 1, 1000],
['fetch_latency', 'fetch', 'absolute', 1, 1000]
]},
'index_perf_total': {
- 'options': [None, 'Total number of documents indexed, index refreshes, index flushes to disk', 'number of',
+ 'options': [None, 'Indexed Documents, Index Refreshes, Index Flushes To Disk', 'number of',
'indexing performance', 'es.index_performance_total', 'stacked'],
'lines': [
['indexing_index_total', 'indexed', 'incremental'],
@@ -130,13 +145,13 @@ CHARTS = {
['flush_total', 'flushes', 'incremental']
]},
'index_perf_current': {
- 'options': [None, 'Number of documents currently being indexed', 'currently indexed',
+ 'options': [None, 'Number Of Documents Currently Being Indexed', 'currently indexed',
'indexing performance', 'es.index_performance_current', 'stacked'],
'lines': [
['indexing_index_current', 'documents', 'absolute']
]},
'index_perf_time': {
- 'options': [None, 'Time spent on indexing, refreshing, flushing', 'seconds', 'indexing performance',
+ 'options': [None, 'Time Spent On Indexing, Refreshing, Flushing', 'seconds', 'indexing performance',
'es.search_time', 'stacked'],
'lines': [
['indexing_index_time_in_millis', 'indexing', 'incremental', 1, 1000],
@@ -144,33 +159,33 @@ CHARTS = {
['flush_total_time_in_millis', 'flushing', 'incremental', 1, 1000]
]},
'index_latency': {
- 'options': [None, 'Indexing and flushing latency', 'ms', 'indexing performance',
+ 'options': [None, 'Indexing And Flushing Latency', 'ms', 'indexing performance',
'es.index_latency', 'stacked'],
'lines': [
['indexing_latency', 'indexing', 'absolute', 1, 1000],
['flushing_latency', 'flushing', 'absolute', 1, 1000]
]},
'jvm_mem_heap': {
- 'options': [None, 'JVM heap currently in use/committed', 'percent/MB', 'memory usage and gc',
+ 'options': [None, 'JVM Heap Currently in Use/Committed', 'percent/MB', 'memory usage and gc',
'es.jvm_heap', 'area'],
'lines': [
['jvm_heap_percent', 'inuse', 'absolute'],
['jvm_heap_commit', 'commit', 'absolute', -1, 1048576]
]},
'jvm_gc_count': {
- 'options': [None, 'Count of garbage collections', 'counts', 'memory usage and gc', 'es.gc_count', 'stacked'],
+ 'options': [None, 'Garbage Collections', 'counts', 'memory usage and gc', 'es.gc_count', 'stacked'],
'lines': [
['young_collection_count', 'young', 'incremental'],
['old_collection_count', 'old', 'incremental']
]},
'jvm_gc_time': {
- 'options': [None, 'Time spent on garbage collections', 'ms', 'memory usage and gc', 'es.gc_time', 'stacked'],
+ 'options': [None, 'Time Spent On Garbage Collections', 'ms', 'memory usage and gc', 'es.gc_time', 'stacked'],
'lines': [
['young_collection_time_in_millis', 'young', 'incremental'],
['old_collection_time_in_millis', 'old', 'incremental']
]},
'thread_pool_qr_q': {
- 'options': [None, 'Number of queued threads in thread pool', 'queued threads', 'queues and rejections',
+ 'options': [None, 'Number Of Queued Threads In Thread Pool', 'queued threads', 'queues and rejections',
'es.thread_pool_queued', 'stacked'],
'lines': [
['bulk_queue', 'bulk', 'absolute'],
@@ -179,7 +194,7 @@ CHARTS = {
['merge_queue', 'merge', 'absolute']
]},
'thread_pool_qr_r': {
- 'options': [None, 'Number of rejected threads in thread pool', 'rejected threads', 'queues and rejections',
+ 'options': [None, 'Rejected Threads In Thread Pool', 'rejected threads', 'queues and rejections',
'es.thread_pool_rejected', 'stacked'],
'lines': [
['bulk_rejected', 'bulk', 'absolute'],
@@ -188,19 +203,19 @@ CHARTS = {
['merge_rejected', 'merge', 'absolute']
]},
'fdata_cache': {
- 'options': [None, 'Fielddata cache size', 'MB', 'fielddata cache', 'es.fdata_cache', 'line'],
+ 'options': [None, 'Fielddata Cache', 'MB', 'fielddata cache', 'es.fdata_cache', 'line'],
'lines': [
['index_fdata_memory', 'cache', 'absolute', 1, 1048576]
]},
'fdata_ev_tr': {
- 'options': [None, 'Fielddata evictions and circuit breaker tripped count', 'number of events',
+ 'options': [None, 'Fielddata Evictions And Circuit Breaker Tripped Count', 'number of events',
'fielddata cache', 'es.evictions_tripped', 'line'],
'lines': [
['evictions', None, 'incremental'],
['tripped', None, 'incremental']
]},
'cluster_health_nodes': {
- 'options': [None, 'Nodes and tasks statistics', 'units', 'cluster health API',
+ 'options': [None, 'Nodes And Tasks Statistics', 'units', 'cluster health API',
'es.cluster_health_nodes', 'stacked'],
'lines': [
['health_number_of_nodes', 'nodes', 'absolute'],
@@ -209,7 +224,7 @@ CHARTS = {
['health_number_of_in_flight_fetch', 'in_flight_fetch', 'absolute']
]},
'cluster_health_status': {
- 'options': [None, 'Cluster status', 'status', 'cluster health API',
+ 'options': [None, 'Cluster Status', 'status', 'cluster health API',
'es.cluster_health_status', 'area'],
'lines': [
['status_green', 'green', 'absolute'],
@@ -220,7 +235,7 @@ CHARTS = {
['status_yellow', 'yellow', 'absolute']
]},
'cluster_health_shards': {
- 'options': [None, 'Shards statistics', 'shards', 'cluster health API',
+ 'options': [None, 'Shards Statistics', 'shards', 'cluster health API',
'es.cluster_health_shards', 'stacked'],
'lines': [
['health_active_shards', 'active_shards', 'absolute'],
@@ -231,7 +246,7 @@ CHARTS = {
['health_active_shards_percent_as_number', 'active_percent', 'absolute']
]},
'cluster_stats_nodes': {
- 'options': [None, 'Nodes statistics', 'nodes', 'cluster stats API',
+ 'options': [None, 'Nodes Statistics', 'nodes', 'cluster stats API',
'es.cluster_nodes', 'stacked'],
'lines': [
['count_data_only', 'data_only', 'absolute'],
@@ -241,46 +256,46 @@ CHARTS = {
['count_client', 'client', 'absolute']
]},
'cluster_stats_query_cache': {
- 'options': [None, 'Query cache statistics', 'queries', 'cluster stats API',
+ 'options': [None, 'Query Cache Statistics', 'queries', 'cluster stats API',
'es.cluster_query_cache', 'stacked'],
'lines': [
['query_cache_hit_count', 'hit', 'incremental'],
['query_cache_miss_count', 'miss', 'incremental']
]},
'cluster_stats_docs': {
- 'options': [None, 'Docs statistics', 'count', 'cluster stats API',
+ 'options': [None, 'Docs Statistics', 'count', 'cluster stats API',
'es.cluster_docs', 'line'],
'lines': [
['docs_count', 'docs', 'absolute']
]},
'cluster_stats_store': {
- 'options': [None, 'Store statistics', 'MB', 'cluster stats API',
+ 'options': [None, 'Store Statistics', 'MB', 'cluster stats API',
'es.cluster_store', 'line'],
'lines': [
['store_size_in_bytes', 'size', 'absolute', 1, 1048567]
]},
'cluster_stats_indices_shards': {
- 'options': [None, 'Indices and shards statistics', 'count', 'cluster stats API',
+ 'options': [None, 'Indices And Shards Statistics', 'count', 'cluster stats API',
'es.cluster_indices_shards', 'stacked'],
'lines': [
['indices_count', 'indices', 'absolute'],
['shards_total', 'shards', 'absolute']
]},
'host_metrics_transport': {
- 'options': [None, 'Cluster communication transport metrics', 'kbit/s', 'host metrics',
+ 'options': [None, 'Cluster Communication Transport Metrics', 'kilobit/s', 'host metrics',
'es.host_transport', 'area'],
'lines': [
['transport_rx_size_in_bytes', 'in', 'incremental', 8, 1000],
['transport_tx_size_in_bytes', 'out', 'incremental', -8, 1000]
]},
'host_metrics_file_descriptors': {
- 'options': [None, 'Available file descriptors in percent', 'percent', 'host metrics',
+ 'options': [None, 'Available File Descriptors In Percent', 'percent', 'host metrics',
'es.host_descriptors', 'area'],
'lines': [
['file_descriptors_used', 'used', 'absolute', 1, 10]
]},
'host_metrics_http': {
- 'options': [None, 'Opened HTTP connections', 'connections', 'host metrics',
+ 'options': [None, 'Opened HTTP Connections', 'connections', 'host metrics',
'es.host_http_connections', 'line'],
'lines': [
['http_current_open', 'opened', 'absolute', 1, 1]
@@ -300,12 +315,13 @@ class Service(UrlService):
self.methods = list()
def check(self):
- # We can't start if <host> AND <port> not specified
- if not all([self.host, self.port, isinstance(self.host, str), isinstance(self.port, (str, int))]):
+ if not all([self.host,
+ self.port,
+ isinstance(self.host, str),
+ isinstance(self.port, (str, int))]):
self.error('Host is not defined in the module configuration file')
return False
- # It as a bad idea to use hostname.
# Hostname -> ip address
try:
self.host = gethostbyname(self.host)
@@ -313,45 +329,33 @@ class Service(UrlService):
self.error(str(error))
return False
- scheme = 'http' if self.scheme else 'https'
+ scheme = 'http' if self.scheme == 'http' else 'https'
# Add handlers (auth, self signed cert accept)
self.url = '%s://%s:%s' % (scheme, self.host, self.port)
- self._UrlService__add_openers()
+ self.opener = self._build_opener()
# Create URL for every Elasticsearch API
url_node_stats = '%s://%s:%s/_nodes/_local/stats' % (scheme, self.host, self.port)
url_cluster_health = '%s://%s:%s/_cluster/health' % (scheme, self.host, self.port)
url_cluster_stats = '%s://%s:%s/_cluster/stats' % (scheme, self.host, self.port)
- # Create list of enabled API calls
user_choice = [bool(self.configuration.get('node_stats', True)),
bool(self.configuration.get('cluster_health', True)),
bool(self.configuration.get('cluster_stats', True))]
- avail_methods = [METHODS(get_data_function=self._get_node_stats_, url=url_node_stats),
- METHODS(get_data_function=self._get_cluster_health_, url=url_cluster_health),
- METHODS(get_data_function=self._get_cluster_stats_, url=url_cluster_stats)]
+ avail_methods = [METHODS(get_data_function=self._get_node_stats_,
+ url=url_node_stats),
+ METHODS(get_data_function=self._get_cluster_health_,
+ url=url_cluster_health),
+ METHODS(get_data_function=self._get_cluster_stats_,
+ url=url_cluster_stats)]
# Remove disabled API calls from 'avail methods'
self.methods = [avail_methods[e[0]] for e in enumerate(avail_methods) if user_choice[e[0]]]
-
- # Run _get_data for ALL active API calls.
- api_check_result = dict()
- data_from_check = dict()
- for method in self.methods:
- try:
- api_check_result[method.url] = method.get_data_function(None, method.url)
- data_from_check.update(api_check_result[method.url] or dict())
- except KeyError as error:
- self.error('Failed to parse %s. Error: %s' % (method.url, str(error)))
- return False
-
- # We can start ONLY if all active API calls returned NOT None
- if not all(api_check_result.values()):
- self.error('Plugin could not get data from all APIs')
+ data = self._get_data()
+ if not data:
return False
- else:
- self._data_from_check = data_from_check
- return True
+ self._data_from_check = data
+ return True
def _get_data(self):
threads = list()
@@ -359,7 +363,8 @@ class Service(UrlService):
result = dict()
for method in self.methods:
- th = Thread(target=method.get_data_function, args=(queue, method.url))
+ th = Thread(target=method.get_data_function,
+ args=(queue, method.url))
th.start()
threads.append(th)
@@ -378,18 +383,18 @@ class Service(UrlService):
raw_data = self._get_raw_data(url)
if not raw_data:
- return queue.put(dict()) if queue else None
- else:
- data = loads(raw_data)
+ return queue.put(dict())
- to_netdata = fetch_data_(raw_data=data, metrics_list=HEALTH_STATS)
+ data = loads(raw_data)
+ to_netdata = fetch_data_(raw_data=data,
+ metrics_list=HEALTH_STATS)
- to_netdata.update({'status_green': 0, 'status_red': 0, 'status_yellow': 0,
- 'status_foo1': 0, 'status_foo2': 0, 'status_foo3': 0})
- current_status = 'status_' + data['status']
- to_netdata[current_status] = 1
+ to_netdata.update({'status_green': 0, 'status_red': 0, 'status_yellow': 0,
+ 'status_foo1': 0, 'status_foo2': 0, 'status_foo3': 0})
+ current_status = 'status_' + data['status']
+ to_netdata[current_status] = 1
- return queue.put(to_netdata) if queue else to_netdata
+ return queue.put(to_netdata)
def _get_cluster_stats_(self, queue, url):
"""
@@ -400,13 +405,13 @@ class Service(UrlService):
raw_data = self._get_raw_data(url)
if not raw_data:
- return queue.put(dict()) if queue else None
- else:
- data = loads(raw_data)
+ return queue.put(dict())
- to_netdata = fetch_data_(raw_data=data, metrics_list=CLUSTER_STATS)
+ data = loads(raw_data)
+ to_netdata = fetch_data_(raw_data=data,
+ metrics_list=CLUSTER_STATS)
- return queue.put(to_netdata) if queue else to_netdata
+ return queue.put(to_netdata)
def _get_node_stats_(self, queue, url):
"""
@@ -417,47 +422,46 @@ class Service(UrlService):
raw_data = self._get_raw_data(url)
if not raw_data:
- return queue.put(dict()) if queue else None
- else:
- data = loads(raw_data)
-
- node = list(data['nodes'].keys())[0]
- to_netdata = fetch_data_(raw_data=data['nodes'][node], metrics_list=NODE_STATS)
+ return queue.put(dict())
- # Search performance latency
- to_netdata['query_latency'] = self.find_avg_(to_netdata['query_total'],
- to_netdata['query_time_in_millis'], 'query_latency')
- to_netdata['fetch_latency'] = self.find_avg_(to_netdata['fetch_total'],
- to_netdata['fetch_time_in_millis'], 'fetch_latency')
+ data = loads(raw_data)
- # Indexing performance latency
- to_netdata['indexing_latency'] = self.find_avg_(to_netdata['indexing_index_total'],
- to_netdata['indexing_index_time_in_millis'], 'index_latency')
- to_netdata['flushing_latency'] = self.find_avg_(to_netdata['flush_total'],
- to_netdata['flush_total_time_in_millis'], 'flush_latency')
+ node = list(data['nodes'].keys())[0]
+ to_netdata = fetch_data_(raw_data=data['nodes'][node],
+ metrics_list=NODE_STATS)
+ # Search, index, flush, fetch performance latency
+ for key in LATENCY:
+ try:
+ to_netdata[key] = self.find_avg_(total=to_netdata[LATENCY[key]['total']],
+ spent_time=to_netdata[LATENCY[key]['spent_time']],
+ key=key)
+ except KeyError:
+ continue
+ if 'open_file_descriptors' in to_netdata and 'max_file_descriptors' in to_netdata:
to_netdata['file_descriptors_used'] = round(float(to_netdata['open_file_descriptors'])
/ to_netdata['max_file_descriptors'] * 1000)
- return queue.put(to_netdata) if queue else to_netdata
+ return queue.put(to_netdata)
- def find_avg_(self, value1, value2, key):
+ def find_avg_(self, total, spent_time, key):
if key not in self.latency:
- self.latency.update({key: [value1, value2]})
+ self.latency[key] = dict(total=total,
+ spent_time=spent_time)
return 0
- else:
- if not self.latency[key][0] == value1:
- latency = round(float(value2 - self.latency[key][1]) / float(value1 - self.latency[key][0]) * 1000)
- self.latency.update({key: [value1, value2]})
- return latency
- else:
- self.latency.update({key: [value1, value2]})
- return 0
+ if self.latency[key]['total'] != total:
+ latency = float(spent_time - self.latency[key]['spent_time'])\
+ / float(total - self.latency[key]['total']) * 1000
+ self.latency[key]['total'] = total
+ self.latency[key]['spent_time'] = spent_time
+ return latency
+ self.latency[key]['spent_time'] = spent_time
+ return 0
def fetch_data_(raw_data, metrics_list):
to_netdata = dict()
- for metric, new_name, function in metrics_list:
+ for metric, new_name, func in metrics_list:
value = raw_data
for key in metric.split('.'):
try:
@@ -465,7 +469,7 @@ def fetch_data_(raw_data, metrics_list):
except KeyError:
break
if not isinstance(value, dict) and key:
- to_netdata[new_name or key] = value if not function else function(value)
+ to_netdata[new_name or key] = value if not func else func(value)
return to_netdata
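
For reference, a minimal standalone sketch of the reworked find_avg_ above: it caches the previous (total, spent_time) pair per key and returns the average latency between two polls, scaled by 1000 for netdata's integer dimensions. The sample counters below are made up.

latency = dict()

def find_avg(total, spent_time, key):
    # the first poll only primes the cache
    if key not in latency:
        latency[key] = dict(total=total, spent_time=spent_time)
        return 0
    # average latency = time spent since the last poll / operations since the last poll
    if latency[key]['total'] != total:
        result = float(spent_time - latency[key]['spent_time']) \
            / float(total - latency[key]['total']) * 1000
        latency[key] = dict(total=total, spent_time=spent_time)
        return result
    latency[key]['spent_time'] = spent_time
    return 0

find_avg(100, 50, 'query_latency')   # -> 0 (cache primed)
find_avg(110, 75, 'query_latency')   # -> 2500.0 (25 ms over 10 queries, i.e. 2.5 ms each, x 1000)
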
diff --git a/python.d/fail2ban.chart.py b/python.d/fail2ban.chart.py
index c7d24e8c..35761e89 100644
--- a/python.d/fail2ban.chart.py
+++ b/python.d/fail2ban.chart.py
@@ -2,131 +2,210 @@
# Description: fail2ban log netdata python.d module
# Author: l2isbad
-from base import LogService
-from re import compile
-
-try:
- from itertools import filterfalse
-except ImportError:
- from itertools import ifilterfalse as filterfalse
+from re import compile as r_compile
from os import access as is_accessible, R_OK
-from os.path import isdir
+from os.path import isdir, getsize
from glob import glob
+import bisect
+from base import LogService
priority = 60000
retries = 60
-REGEX = compile(r'\[([A-Za-z-_]+)][^\[\]]*?(?<!# )enabled = true')
-ORDER = ['jails_group']
+REGEX_JAILS = r_compile(r'\[([A-Za-z-_0-9]+)][^\[\]]*?(?<!# )enabled = (?:(true|false))')
+REGEX_DATA = r_compile(r'\[(?P<jail>[A-Za-z-_0-9]+)\] (?P<action>(?:(U|B)))[a-z]+ (?P<ipaddr>\d{1,3}(?:\.\d{1,3}){3})')
+ORDER = ['jails_bans', 'jails_in_jail']
class Service(LogService):
+ """
+ fail2ban log class
+ Reads logs line by line
+ Jail auto detection included
+    It produces the following charts:
+ * Bans per second for every jail
+ * Banned IPs for every jail (since the last restart of netdata)
+ """
def __init__(self, configuration=None, name=None):
LogService.__init__(self, configuration=configuration, name=name)
self.order = ORDER
+ self.definitions = dict()
self.log_path = self.configuration.get('log_path', '/var/log/fail2ban.log')
self.conf_path = self.configuration.get('conf_path', '/etc/fail2ban/jail.local')
- self.conf_dir = self.configuration.get('conf_dir', '')
- try:
- self.exclude = self.configuration['exclude'].split()
- except (KeyError, AttributeError):
- self.exclude = []
+ self.conf_dir = self.configuration.get('conf_dir', '/etc/fail2ban/jail.d/')
+ self.exclude = self.configuration.get('exclude')
def _get_data(self):
"""
Parse new log lines
:return: dict
"""
- try:
- raw = self._get_raw_data()
- if raw is None:
- return None
- elif not raw:
- return self.data
- except (ValueError, AttributeError):
+ raw = self._get_raw_data()
+ if raw is None:
return None
+ elif not raw:
+ return self.to_netdata
         # Fail2ban logs look like
# 2016-12-25 12:36:04,711 fail2ban.actions[2455]: WARNING [ssh] Ban 178.156.32.231
- data = dict(
- zip(
- self.jails_list,
- [len(list(filterfalse(lambda line: (jail + '] Ban') not in line, raw))) for jail in self.jails_list]
- ))
-
- for jail in data:
- self.data[jail] += data[jail]
-
- return self.data
+ for row in raw:
+ match = REGEX_DATA.search(row)
+ if match:
+ match_dict = match.groupdict()
+ jail, action, ipaddr = match_dict['jail'], match_dict['action'], match_dict['ipaddr']
+ if jail in self.jails_list:
+ if action == 'B':
+ self.to_netdata[jail] += 1
+ if address_not_in_jail(self.banned_ips[jail], ipaddr, self.to_netdata[jail + '_in_jail']):
+ self.to_netdata[jail + '_in_jail'] += 1
+ else:
+ if ipaddr in self.banned_ips[jail]:
+ self.banned_ips[jail].remove(ipaddr)
+ self.to_netdata[jail + '_in_jail'] -= 1
+
+ return self.to_netdata
def check(self):
+ """
+ :return: bool
- # Check "log_path" is accessible.
- # If NOT STOP plugin
- if not is_accessible(self.log_path, R_OK):
- self.error('Cannot access file %s' % self.log_path)
- return False
- jails_list = list()
-
- if self.conf_dir:
- dir_jails, error = parse_conf_dir(self.conf_dir)
- jails_list.extend(dir_jails)
- if not dir_jails:
- self.error(error)
-
- if self.conf_path:
- path_jails, error = parse_conf_path(self.conf_path)
- jails_list.extend(path_jails)
- if not path_jails:
- self.error(error)
+        Check that the "log_path" file is readable and not empty
+ """
- # If for some reason parse failed we still can START with default jails_list.
- self.jails_list = list(set(jails_list) - set(self.exclude)) or ['ssh']
- self.data = dict([(jail, 0) for jail in self.jails_list])
- self.create_dimensions()
- self.info('Plugin successfully started. Jails: %s' % self.jails_list)
+ if not (is_accessible(self.log_path, R_OK) and getsize(self.log_path) != 0):
+ self.error('%s is not readable or empty' % self.log_path)
+ return False
+ self.jails_list, self.to_netdata, self.banned_ips = self.jails_auto_detection_()
+ self.definitions = create_definitions_(self.jails_list)
+ self.info('Jails: %s' % self.jails_list)
return True
- def create_dimensions(self):
- self.definitions = {
- 'jails_group': {'options': [None, "Jails ban statistics", "bans/s", 'jails', 'jail.ban', 'line'],
- 'lines': []}}
- for jail in self.jails_list:
- self.definitions['jails_group']['lines'].append([jail, jail, 'incremental'])
-
+ def jails_auto_detection_(self):
+ """
+ return: <tuple>
-def parse_conf_dir(conf_dir):
- if not isdir(conf_dir):
- return list(), '%s is not a directory' % conf_dir
+ * jails_list - list of enabled jails (['ssh', 'apache', ...])
+ * to_netdata - dict ({'ssh': 0, 'ssh_in_jail': 0, ...})
+        * banned_ips - stores the banned IPs for every jail ({'ssh': ['1.2.3.4', '5.6.7.8', ...], ...})
+ """
+ raw_jails_list = list()
+ jails_list = list()
- jail_local = list(filter(lambda local: is_accessible(local, R_OK), glob(conf_dir + '/*.local')))
- jail_conf = list(filter(lambda conf: is_accessible(conf, R_OK), glob(conf_dir + '/*.conf')))
+ for raw_jail in parse_configuration_files_(self.conf_path, self.conf_dir, self.error):
+ raw_jails_list.extend(raw_jail)
- if not (jail_local or jail_conf):
- return list(), '%s is empty or not readable' % conf_dir
+ for jail, status in raw_jails_list:
+ if status == 'true' and jail not in jails_list:
+ jails_list.append(jail)
+ elif status == 'false' and jail in jails_list:
+ jails_list.remove(jail)
- # According "man jail.conf" files could be *.local AND *.conf
- # *.conf files parsed first. Changes in *.local overrides configuration in *.conf
- if jail_conf:
- jail_local.extend([conf for conf in jail_conf if conf[:-5] not in [local[:-6] for local in jail_local]])
+        # If parsing failed for some reason, we can still start with the default jails_list.
+ jails_list = list(set(jails_list) - set(self.exclude.split()
+ if isinstance(self.exclude, str) else list())) or ['ssh']
+
+ to_netdata = dict([(jail, 0) for jail in jails_list])
+ to_netdata.update(dict([(jail + '_in_jail', 0) for jail in jails_list]))
+ banned_ips = dict([(jail, list()) for jail in jails_list])
+
+ return jails_list, to_netdata, banned_ips
+
+
+def create_definitions_(jails_list):
+ """
+    Create chart definitions
+ """
+
+ definitions = {
+ 'jails_bans': {'options': [None, 'Jails Ban Statistics', 'bans/s', 'bans', 'jail.bans', 'line'],
+ 'lines': []},
+ 'jails_in_jail': {'options': [None, 'Banned IPs (since the last restart of netdata)', 'IPs',
+ 'in jail', 'jail.in_jail', 'line'],
+ 'lines': []}}
+ for jail in jails_list:
+ definitions['jails_bans']['lines'].append([jail, jail, 'incremental'])
+ definitions['jails_in_jail']['lines'].append([jail + '_in_jail', jail, 'absolute'])
+
+ return definitions
+
+
+def parse_configuration_files_(jails_conf_path, jails_conf_dir, print_error):
+ """
+ :param jails_conf_path: <str>
+ :param jails_conf_dir: <str>
+ :param print_error: <function>
+ :return: <tuple>
+
+    Uses the "find_jails_in_files" function to find all jails in the "jails_conf_dir" directory
+    and in the "jails_conf_path"
+
+    All files must end with ".local" or ".conf"
+    The return order is important.
+    According to man jail.conf it should be
+ * jail.conf
+ * jail.d/*.conf (in alphabetical order)
+ * jail.local
+ * jail.d/*.local (in alphabetical order)
+ """
+ path_conf, path_local, dir_conf, dir_local = list(), list(), list(), list()
+
+ # Parse files in the directory
+ if not (isinstance(jails_conf_dir, str) and isdir(jails_conf_dir)):
+ print_error('%s is not a directory' % jails_conf_dir)
+ else:
+ dir_conf = list(filter(lambda conf: is_accessible(conf, R_OK), glob(jails_conf_dir + '/*.conf')))
+ dir_local = list(filter(lambda local: is_accessible(local, R_OK), glob(jails_conf_dir + '/*.local')))
+ if not (dir_conf or dir_local):
+ print_error('%s is empty or not readable' % jails_conf_dir)
+ else:
+ dir_conf, dir_local = (find_jails_in_files(dir_conf, print_error),
+ find_jails_in_files(dir_local, print_error))
+
+ # Parse .conf and .local files
+ if isinstance(jails_conf_path, str) and jails_conf_path.endswith(('.local', '.conf')):
+ path_conf, path_local = (find_jails_in_files([jails_conf_path.split('.')[0] + '.conf'], print_error),
+ find_jails_in_files([jails_conf_path.split('.')[0] + '.local'], print_error))
+
+ return path_conf, dir_conf, path_local, dir_local
+
+
+def find_jails_in_files(list_of_files, print_error):
+ """
+ :param list_of_files: <list>
+ :param print_error: <function>
+ :return: <list>
+
+    Open each file and parse it to find all (enabled and disabled) jails
+ The output is a list of tuples:
+ [('ssh', 'true'), ('apache', 'false'), ...]
+ """
jails_list = list()
- for conf in jail_local:
- with open(conf, 'rt') as f:
- raw_data = f.read()
-
- data = ' '.join(raw_data.split())
- jails_list.extend(REGEX.findall(data))
- jails_list = list(set(jails_list))
-
- return jails_list, 'can\'t locate any jails in %s. Default jail is [\'ssh\']' % conf_dir
-
-
-def parse_conf_path(conf_path):
- if not is_accessible(conf_path, R_OK):
- return list(), '%s is not readable' % conf_path
-
- with open(conf_path, 'rt') as jails_conf:
- raw_data = jails_conf.read()
-
- data = raw_data.split()
- jails_list = REGEX.findall(' '.join(data))
- return jails_list, 'can\'t locate any jails in %s. Default jail is [\'ssh\']' % conf_path
+ for conf in list_of_files:
+ if is_accessible(conf, R_OK):
+ with open(conf, 'rt') as conf:
+ raw_data = conf.read()
+ data = ' '.join(raw_data.split())
+ jails_list.extend(REGEX_JAILS.findall(data))
+ else:
+            print_error('%s is not readable or does not exist' % conf)
+ return jails_list
+
+
+def address_not_in_jail(pool, address, pool_size):
+ """
+ :param pool: <list>
+ :param address: <str>
+ :param pool_size: <int>
+ :return: bool
+
+ Checks if the address is in the pool.
+    If not, the address is added
+ """
+ index = bisect.bisect_left(pool, address)
+ if index < pool_size:
+ if pool[index] == address:
+ return False
+ bisect.insort_left(pool, address)
+ return True
+ else:
+ bisect.insort_left(pool, address)
+ return True
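
To illustrate the new log parsing, here is REGEX_DATA from the module above run against a sample line of the format quoted in the code comments; the bisect-based pool in address_not_in_jail keeps banned IPs sorted, so membership checks stay logarithmic.

from re import compile as r_compile

REGEX_DATA = r_compile(r'\[(?P<jail>[A-Za-z-_0-9]+)\] (?P<action>(?:(U|B)))[a-z]+ '
                       r'(?P<ipaddr>\d{1,3}(?:\.\d{1,3}){3})')

line = '2016-12-25 12:36:04,711 fail2ban.actions[2455]: WARNING [ssh] Ban 178.156.32.231'
match = REGEX_DATA.search(line)
print(match.group('jail'), match.group('action'), match.group('ipaddr'))
# ssh B 178.156.32.231
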
diff --git a/python.d/go_expvar.chart.py b/python.d/go_expvar.chart.py
new file mode 100644
index 00000000..e1a334cc
--- /dev/null
+++ b/python.d/go_expvar.chart.py
@@ -0,0 +1,228 @@
+# -*- coding: utf-8 -*-
+# Description: go_expvar netdata python.d module
+# Author: Jan Kral (kralewitz)
+
+from __future__ import division
+from base import UrlService
+import json
+
+# default module values (can be overridden per job in `config`)
+# update_every = 2
+priority = 60000
+retries = 60
+
+
+MEMSTATS_CHARTS = {
+ 'memstats_heap': {
+ 'options': ['heap', 'memory: size of heap memory structures', 'kB', 'memstats', 'expvar.memstats.heap', 'line'],
+ 'lines': [
+ ['memstats_heap_alloc', 'alloc', 'absolute', 1, 1024],
+ ['memstats_heap_inuse', 'inuse', 'absolute', 1, 1024]
+ ]},
+ 'memstats_stack': {
+ 'options': ['stack', 'memory: size of stack memory structures', 'kB', 'memstats', 'expvar.memstats.stack', 'line'],
+ 'lines': [
+ ['memstats_stack_inuse', 'inuse', 'absolute', 1, 1024]
+ ]},
+ 'memstats_mspan': {
+ 'options': ['mspan', 'memory: size of mspan memory structures', 'kB', 'memstats', 'expvar.memstats.mspan', 'line'],
+ 'lines': [
+ ['memstats_mspan_inuse', 'inuse', 'absolute', 1, 1024]
+ ]},
+ 'memstats_mcache': {
+ 'options': ['mcache', 'memory: size of mcache memory structures', 'kB', 'memstats', 'expvar.memstats.mcache', 'line'],
+ 'lines': [
+ ['memstats_mcache_inuse', 'inuse', 'absolute', 1, 1024]
+ ]},
+ 'memstats_live_objects': {
+ 'options': ['live_objects', 'memory: number of live objects', 'objects', 'memstats', 'expvar.memstats.live_objects', 'line'],
+ 'lines': [
+ ['memstats_live_objects', 'live']
+ ]},
+ 'memstats_sys': {
+ 'options': ['sys', 'memory: size of reserved virtual address space', 'kB', 'memstats', 'expvar.memstats.sys', 'line'],
+ 'lines': [
+ ['memstats_sys', 'sys', 'absolute', 1, 1024]
+ ]},
+ 'memstats_gc_pauses': {
+ 'options': ['gc_pauses', 'memory: average duration of GC pauses', 'ns', 'memstats', 'expvar.memstats.gc_pauses', 'line'],
+ 'lines': [
+ ['memstats_gc_pauses', 'avg']
+ ]},
+}
+
+MEMSTATS_ORDER = ['memstats_heap', 'memstats_stack', 'memstats_mspan', 'memstats_mcache', 'memstats_sys', 'memstats_live_objects', 'memstats_gc_pauses']
+
+
+def flatten(d, top='', sep='.'):
+ items = []
+ for key, val in d.items():
+ nkey = top + sep + key if top else key
+ if isinstance(val, dict):
+ items.extend(flatten(val, nkey, sep=sep).items())
+ else:
+ items.append((nkey, val))
+ return dict(items)
+
+
+class Service(UrlService):
+ def __init__(self, configuration=None, name=None):
+ UrlService.__init__(self, configuration=configuration, name=name)
+
+ # if memstats collection is enabled, add the charts and their order
+ if self.configuration.get('collect_memstats'):
+ self.definitions = MEMSTATS_CHARTS
+ self.order = MEMSTATS_ORDER
+ else:
+ self.definitions = dict()
+ self.order = list()
+
+ # if extra charts are defined, parse their config
+ extra_charts = self.configuration.get('extra_charts')
+ if extra_charts:
+ self._parse_extra_charts_config(extra_charts)
+
+ def check(self):
+ """
+ Check if the module can collect data:
+ 1) At least one JOB configuration has to be specified
+ 2) The JOB configuration needs to define the URL and either collect_memstats must be enabled or at least one
+ extra_chart must be defined.
+
+ The configuration and URL check is provided by the UrlService class.
+ """
+
+ if not (self.configuration.get('extra_charts') or self.configuration.get('collect_memstats')):
+ self.error('Memstats collection is disabled and no extra_charts are defined, disabling module.')
+ return False
+
+ return UrlService.check(self)
+
+ def _parse_extra_charts_config(self, extra_charts_config):
+
+ # a place to store the expvar keys and their types
+ self.expvars = dict()
+
+ for chart in extra_charts_config:
+
+ chart_dict = dict()
+ chart_id = chart.get('id')
+ chart_lines = chart.get('lines')
+ chart_opts = chart.get('options', dict())
+
+ if not all([chart_id, chart_lines]):
+ self.info('Chart {0} has no ID or no lines defined, skipping'.format(chart))
+ continue
+
+ chart_dict['options'] = [
+ chart_opts.get('name', ''),
+ chart_opts.get('title', ''),
+ chart_opts.get('units', ''),
+ chart_opts.get('family', ''),
+ chart_opts.get('context', ''),
+ chart_opts.get('chart_type', 'line')
+ ]
+ chart_dict['lines'] = list()
+
+ # add the lines to the chart
+ for line in chart_lines:
+
+ ev_key = line.get('expvar_key')
+ ev_type = line.get('expvar_type')
+ line_id = line.get('id')
+
+ if not all([ev_key, ev_type, line_id]):
+ self.info('Line missing expvar_key, expvar_type, or line_id, skipping: {0}'.format(line))
+ continue
+
+ if ev_type not in ['int', 'float']:
+ self.info('Unsupported expvar_type "{0}". Must be "int" or "float"'.format(ev_type))
+ continue
+
+ if ev_key in self.expvars:
+ self.info('Duplicate expvar key {0}: skipping line.'.format(ev_key))
+ continue
+
+ self.expvars[ev_key] = (ev_type, line_id)
+
+ chart_dict['lines'].append(
+ [
+ line.get('id', ''),
+ line.get('name', ''),
+ line.get('algorithm', ''),
+ line.get('multiplier', 1),
+ line.get('divisor', 100 if ev_type == 'float' else 1),
+ line.get('hidden', False)
+ ]
+ )
+
+ self.order.append(chart_id)
+ self.definitions[chart_id] = chart_dict
+
+ def _get_data(self):
+ """
+ Format data received from http request
+ :return: dict
+ """
+
+ raw_data = self._get_raw_data()
+ if not raw_data:
+ return None
+
+ data = json.loads(raw_data)
+
+ expvars = dict()
+ if self.configuration.get('collect_memstats'):
+ expvars.update(self._parse_memstats(data))
+
+ if self.configuration.get('extra_charts'):
+            # the memstats part of the data has already been parsed, so we remove it before flattening and checking
+            # the rest of the data, thus avoiding needless iteration over the deeply nested memstats dict.
+ del (data['memstats'])
+ flattened = flatten(data)
+ for k, v in flattened.items():
+ ev = self.expvars.get(k)
+ if not ev:
+ # expvar is not defined in config, skip it
+ continue
+ try:
+ key_type, line_id = ev
+ if key_type == 'int':
+ expvars[line_id] = int(v)
+ elif key_type == 'float':
+                        # if the value type is float, multiply it by 100 to match the default line divisor of 100
+ expvars[line_id] = float(v) * 100
+ except ValueError:
+ self.info('Failed to parse value for key {0} as {1}, ignoring key.'.format(k, key_type))
+ del self.expvars[k]
+
+ return expvars
+
+ @staticmethod
+ def _parse_memstats(data):
+
+ memstats = data['memstats']
+
+ # calculate the number of live objects in memory
+ live_objs = int(memstats['Mallocs']) - int(memstats['Frees'])
+
+ # calculate GC pause times average
+ # the Go runtime keeps the last 256 GC pause durations in a circular buffer,
+ # so we need to filter out the 0 values before the buffer is filled
+ gc_pauses = memstats['PauseNs']
+ try:
+ gc_pause_avg = sum(gc_pauses) / len([x for x in gc_pauses if x > 0])
+        # no GC cycles have occurred yet
+ except ZeroDivisionError:
+ gc_pause_avg = 0
+
+ return {
+ 'memstats_heap_alloc': memstats['HeapAlloc'],
+ 'memstats_heap_inuse': memstats['HeapInuse'],
+ 'memstats_stack_inuse': memstats['StackInuse'],
+ 'memstats_mspan_inuse': memstats['MSpanInuse'],
+ 'memstats_mcache_inuse': memstats['MCacheInuse'],
+ 'memstats_sys': memstats['Sys'],
+ 'memstats_live_objects': live_objs,
+ 'memstats_gc_pauses': gc_pause_avg,
+ }
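
A quick illustration of how flatten() above turns the nested expvar JSON into the dotted keys that extra_charts lines reference via expvar_key; the sample dict is invented.

def flatten(d, top='', sep='.'):
    items = []
    for key, val in d.items():
        nkey = top + sep + key if top else key
        if isinstance(val, dict):
            items.extend(flatten(val, nkey, sep=sep).items())
        else:
            items.append((nkey, val))
    return dict(items)

print(flatten({'counters': {'requests': 42, 'errors': {'timeout': 3}}}))
# {'counters.requests': 42, 'counters.errors.timeout': 3}
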
diff --git a/python.d/haproxy.chart.py b/python.d/haproxy.chart.py
index 2fb97d75..67a6f782 100644
--- a/python.d/haproxy.chart.py
+++ b/python.d/haproxy.chart.py
@@ -3,6 +3,13 @@
# Author: l2isbad
from base import UrlService, SocketService
+from collections import defaultdict
+from re import compile as re_compile
+
+try:
+ from urlparse import urlparse
+except ImportError:
+ from urllib.parse import urlparse
# default module values (can be overridden per job in `config`)
# update_every = 2
@@ -10,135 +17,140 @@ priority = 60000
retries = 60
# charts order (can be overridden if you want less charts, or different order)
-ORDER = ['fbin', 'fbout', 'fscur', 'fqcur', 'bbin', 'bbout', 'bscur', 'bqcur', 'health_sdown', 'health_bdown']
+ORDER = ['fbin', 'fbout', 'fscur', 'fqcur', 'bbin', 'bbout', 'bscur', 'bqcur',
+ 'health_sdown', 'health_bdown', 'health_idle']
CHARTS = {
'fbin': {
- 'options': [None, "Kilobytes in", "kilobytes in/s", 'Frontend', 'haproxy_f.bin', 'line'],
+ 'options': [None, "Kilobytes In", "KB/s", 'frontend', 'haproxy_f.bin', 'line'],
'lines': [
]},
'fbout': {
- 'options': [None, "Kilobytes out", "kilobytes out/s", 'Frontend', 'haproxy_f.bout', 'line'],
+ 'options': [None, "Kilobytes Out", "KB/s", 'frontend', 'haproxy_f.bout', 'line'],
'lines': [
]},
'fscur': {
- 'options': [None, "Sessions active", "sessions", 'Frontend', 'haproxy_f.scur', 'line'],
+ 'options': [None, "Sessions Active", "sessions", 'frontend', 'haproxy_f.scur', 'line'],
'lines': [
]},
'fqcur': {
- 'options': [None, "Session in queue", "sessions", 'Frontend', 'haproxy_f.qcur', 'line'],
+ 'options': [None, "Session In Queue", "sessions", 'frontend', 'haproxy_f.qcur', 'line'],
'lines': [
]},
'bbin': {
- 'options': [None, "Kilobytes in", "kilobytes in/s", 'Backend', 'haproxy_b.bin', 'line'],
+ 'options': [None, "Kilobytes In", "KB/s", 'backend', 'haproxy_b.bin', 'line'],
'lines': [
]},
'bbout': {
- 'options': [None, "Kilobytes out", "kilobytes out/s", 'Backend', 'haproxy_b.bout', 'line'],
+ 'options': [None, "Kilobytes Out", "KB/s", 'backend', 'haproxy_b.bout', 'line'],
'lines': [
]},
'bscur': {
- 'options': [None, "Sessions active", "sessions", 'Backend', 'haproxy_b.scur', 'line'],
+ 'options': [None, "Sessions Active", "sessions", 'backend', 'haproxy_b.scur', 'line'],
'lines': [
]},
'bqcur': {
- 'options': [None, "Sessions in queue", "sessions", 'Backend', 'haproxy_b.qcur', 'line'],
+ 'options': [None, "Sessions In Queue", "sessions", 'backend', 'haproxy_b.qcur', 'line'],
'lines': [
]},
'health_sdown': {
- 'options': [None, "Number of servers in backend in DOWN state", "failed servers", 'Health', 'haproxy_hs.down', 'line'],
+ 'options': [None, "Backend Servers In DOWN State", "failed servers", 'health',
+ 'haproxy_hs.down', 'line'],
'lines': [
]},
'health_bdown': {
- 'options': [None, "Is backend alive? 1 = DOWN", "failed backend", 'Health', 'haproxy_hb.down', 'line'],
+ 'options': [None, "Is Backend Alive? 1 = DOWN", "failed backend", 'health', 'haproxy_hb.down', 'line'],
+ 'lines': [
+ ]},
+ 'health_idle': {
+ 'options': [None, "The Ratio Of Polling Time Vs Total Time", "percent", 'health', 'haproxy.idle', 'line'],
'lines': [
+ ['idle', None, 'absolute']
]}
}
+METRICS = {'bin': {'algorithm': 'incremental', 'divisor': 1024},
+ 'bout': {'algorithm': 'incremental', 'divisor': 1024},
+ 'scur': {'algorithm': 'absolute', 'divisor': 1},
+ 'qcur': {'algorithm': 'absolute', 'divisor': 1}}
+
+REGEX = dict(url=re_compile(r'idle = (?P<idle>[0-9]+)'), socket=re_compile(r'Idle_pct: (?P<idle>[0-9]+)'))
+
class Service(UrlService, SocketService):
def __init__(self, configuration=None, name=None):
- SocketService.__init__(self, configuration=configuration, name=name)
- self.user = self.configuration.get('user')
- self.password = self.configuration.get('pass')
- self.request = 'show stat\n'
- self.poll_method = (UrlService, SocketService)
+ if 'socket' in configuration:
+ SocketService.__init__(self, configuration=configuration, name=name)
+ self.poll = SocketService
+ self.options_ = dict(regex=REGEX['socket'], stat='show stat\n', info='show info\n')
+ else:
+ UrlService.__init__(self, configuration=configuration, name=name)
+ self.poll = UrlService
+ self.options_ = dict(regex=REGEX['url'], stat=self.url, info=url_remove_params(self.url))
self.order = ORDER
- self.order_front = [_ for _ in ORDER if _.startswith('f')]
- self.order_back = [_ for _ in ORDER if _.startswith('b')]
self.definitions = CHARTS
- self.charts = True
def check(self):
- if self.configuration.get('url'):
- self.poll_method = self.poll_method[0]
- url = self.configuration.get('url')
- if not url.endswith(';csv;norefresh'):
- self.error('Bad url(%s). Must be http://<ip.address>:<port>/<url>;csv;norefresh' % url)
- return False
- elif self.configuration.get('socket'):
- self.poll_method = self.poll_method[1]
- else:
- self.error('No configuration is specified')
- return False
-
- if self.poll_method.check(self):
- self.info('Plugin was started succesfully. We are using %s.' % self.poll_method.__name__)
+ if self.poll.check(self):
+ self.create_charts()
+ self.info('We are using %s.' % self.poll.__name__)
return True
+ return False
- def create_charts(self, front_ends, back_ends):
- for _ in range(len(front_ends)):
- self.definitions['fbin']['lines'].append(['_'.join(['fbin', front_ends[_]['# pxname']]), front_ends[_]['# pxname'], 'incremental', 1, 1024])
- self.definitions['fbout']['lines'].append(['_'.join(['fbout', front_ends[_]['# pxname']]), front_ends[_]['# pxname'], 'incremental', 1, 1024])
- self.definitions['fscur']['lines'].append(['_'.join(['fscur', front_ends[_]['# pxname']]), front_ends[_]['# pxname'], 'absolute'])
- self.definitions['fqcur']['lines'].append(['_'.join(['fqcur', front_ends[_]['# pxname']]), front_ends[_]['# pxname'], 'absolute'])
-
- for _ in range(len(back_ends)):
- self.definitions['bbin']['lines'].append(['_'.join(['bbin', back_ends[_]['# pxname']]), back_ends[_]['# pxname'], 'incremental', 1, 1024])
- self.definitions['bbout']['lines'].append(['_'.join(['bbout', back_ends[_]['# pxname']]), back_ends[_]['# pxname'], 'incremental', 1, 1024])
- self.definitions['bscur']['lines'].append(['_'.join(['bscur', back_ends[_]['# pxname']]), back_ends[_]['# pxname'], 'absolute'])
- self.definitions['bqcur']['lines'].append(['_'.join(['bqcur', back_ends[_]['# pxname']]), back_ends[_]['# pxname'], 'absolute'])
- self.definitions['health_sdown']['lines'].append(['_'.join(['hsdown', back_ends[_]['# pxname']]), back_ends[_]['# pxname'], 'absolute'])
- self.definitions['health_bdown']['lines'].append(['_'.join(['hbdown', back_ends[_]['# pxname']]), back_ends[_]['# pxname'], 'absolute'])
-
def _get_data(self):
- """
- Format data received from http request
- :return: dict
- """
- try:
- raw_data = self.poll_method._get_raw_data(self).splitlines()
- except Exception as e:
- self.error(str(e))
- return None
-
- all_instances = [dict(zip(raw_data[0].split(','), raw_data[_].split(','))) for _ in range(1, len(raw_data))]
-
- back_ends = list(filter(is_backend, all_instances))
- front_ends = list(filter(is_frontend, all_instances))
- servers = list(filter(is_server, all_instances))
-
- if self.charts:
- self.create_charts(front_ends, back_ends)
- self.charts = False
-
to_netdata = dict()
+ self.request, self.url = self.options_['stat'], self.options_['stat']
+ stat_data = self._get_stat_data()
+ self.request, self.url = self.options_['info'], self.options_['info']
+ info_data = self._get_info_data(regex=self.options_['regex'])
- for frontend in front_ends:
- for _ in self.order_front:
- to_netdata.update({'_'.join([_, frontend['# pxname']]): int(frontend[_[1:]]) if frontend.get(_[1:]) else 0})
+ to_netdata.update(stat_data)
+ to_netdata.update(info_data)
+ return to_netdata or None
- for backend in back_ends:
- for _ in self.order_back:
- to_netdata.update({'_'.join([_, backend['# pxname']]): int(backend[_[1:]]) if backend.get(_[1:]) else 0})
-
- for _ in range(len(back_ends)):
- to_netdata.update({'_'.join(['hsdown', back_ends[_]['# pxname']]):
- len([server for server in servers if is_server_down(server, back_ends, _)])})
- to_netdata.update({'_'.join(['hbdown', back_ends[_]['# pxname']]): 1 if is_backend_down(back_ends, _) else 0})
+ def _get_stat_data(self):
+ """
+ :return: dict
+ """
+ raw_data = self.poll._get_raw_data(self)
+
+ if not raw_data:
+ return dict()
+
+ raw_data = raw_data.splitlines()
+ self.data = parse_data_([dict(zip(raw_data[0].split(','), raw_data[_].split(',')))
+ for _ in range(1, len(raw_data))])
+ if not self.data:
+ return dict()
+
+ stat_data = dict()
+
+ for frontend in self.data['frontend']:
+ for metric in METRICS:
+ idx = frontend['# pxname'].replace('.', '_')
+ stat_data['_'.join(['frontend', metric, idx])] = frontend.get(metric) or 0
+
+ for backend in self.data['backend']:
+ name, idx = backend['# pxname'], backend['# pxname'].replace('.', '_')
+ stat_data['hsdown_' + idx] = len([server for server in self.data['servers']
+ if server_down(server, name)])
+ stat_data['hbdown_' + idx] = 1 if backend.get('status') == 'DOWN' else 0
+ for metric in METRICS:
+ stat_data['_'.join(['backend', metric, idx])] = backend.get(metric) or 0
+ return stat_data
+
+ def _get_info_data(self, regex):
+ """
+ :return: dict
+ """
+ raw_data = self.poll._get_raw_data(self)
+ if not raw_data:
+ return dict()
- return to_netdata
+ match = regex.search(raw_data)
+ return match.groupdict() if match else dict()
- def _check_raw_data(self, data):
+ @staticmethod
+ def _check_raw_data(data):
"""
Check if all data has been gathered from socket
:param data: str
@@ -146,32 +158,54 @@ class Service(UrlService, SocketService):
"""
return not bool(data)
-def is_backend(backend):
- try:
- return backend['svname'] == 'BACKEND' and backend['# pxname'] != 'stats'
- except Exception:
- return False
-
-def is_frontend(frontend):
- try:
- return frontend['svname'] == 'FRONTEND' and frontend['# pxname'] != 'stats'
- except Exception:
- return False
-
-def is_server(server):
- try:
- return not server['svname'].startswith(('FRONTEND', 'BACKEND'))
- except Exception:
- return False
-
-def is_server_down(server, back_ends, _):
- try:
- return server['# pxname'] == back_ends[_]['# pxname'] and server['status'] == 'DOWN'
- except Exception:
- return False
-
-def is_backend_down(back_ends, _):
- try:
- return back_ends[_]['status'] == 'DOWN'
- except Exception:
- return False
+ def create_charts(self):
+ for front in self.data['frontend']:
+ name, idx = front['# pxname'], front['# pxname'].replace('.', '_')
+ for metric in METRICS:
+ self.definitions['f' + metric]['lines'].append(['_'.join(['frontend', metric, idx]),
+ name, METRICS[metric]['algorithm'], 1,
+ METRICS[metric]['divisor']])
+ for back in self.data['backend']:
+ name, idx = back['# pxname'], back['# pxname'].replace('.', '_')
+ for metric in METRICS:
+ self.definitions['b' + metric]['lines'].append(['_'.join(['backend', metric, idx]),
+ name, METRICS[metric]['algorithm'], 1,
+ METRICS[metric]['divisor']])
+ self.definitions['health_sdown']['lines'].append(['hsdown_' + idx, name, 'absolute'])
+ self.definitions['health_bdown']['lines'].append(['hbdown_' + idx, name, 'absolute'])
+
+
+def parse_data_(data):
+ def is_backend(backend):
+ return backend.get('svname') == 'BACKEND' and backend.get('# pxname') != 'stats'
+
+ def is_frontend(frontend):
+ return frontend.get('svname') == 'FRONTEND' and frontend.get('# pxname') != 'stats'
+
+ def is_server(server):
+ return not server.get('svname', '').startswith(('FRONTEND', 'BACKEND'))
+
+ if not data:
+ return None
+
+ result = defaultdict(list)
+ for elem in data:
+ if is_backend(elem):
+ result['backend'].append(elem)
+ continue
+ elif is_frontend(elem):
+ result['frontend'].append(elem)
+ continue
+ elif is_server(elem):
+ result['servers'].append(elem)
+
+ return result or None
+
+
+def server_down(server, backend_name):
+ return server.get('# pxname') == backend_name and server.get('status') == 'DOWN'
+
+
+def url_remove_params(url):
+ parsed = urlparse(url or str())
+ return '%s://%s%s' % (parsed.scheme, parsed.netloc, parsed.path)
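
url_remove_params above works because urlparse treats haproxy's ';csv;norefresh' suffix as path parameters, so the derived "info" URL is simply the stats URL without them. A short sketch with an assumed URL:

try:
    from urlparse import urlparse          # python 2
except ImportError:
    from urllib.parse import urlparse      # python 3

def url_remove_params(url):
    parsed = urlparse(url or str())
    return '%s://%s%s' % (parsed.scheme, parsed.netloc, parsed.path)

print(url_remove_params('http://127.0.0.1:7000/haproxy_stats;csv;norefresh'))
# http://127.0.0.1:7000/haproxy_stats
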
diff --git a/python.d/isc_dhcpd.chart.py b/python.d/isc_dhcpd.chart.py
index bb9ba5cb..a437f803 100644
--- a/python.d/isc_dhcpd.chart.py
+++ b/python.d/isc_dhcpd.chart.py
@@ -2,30 +2,56 @@
# Description: isc dhcpd lease netdata python.d module
# Author: l2isbad
-from base import SimpleService
from time import mktime, strptime, gmtime, time
-from os import stat
+from os import stat, access, R_OK
+from os.path import isfile
try:
- from ipaddress import IPv4Address as ipaddress
- from ipaddress import ip_network
+ from ipaddress import ip_network, ip_address
have_ipaddress = True
except ImportError:
have_ipaddress = False
try:
- from itertools import filterfalse as filterfalse
+ from itertools import filterfalse
except ImportError:
from itertools import ifilterfalse as filterfalse
-
+from base import SimpleService
priority = 60000
retries = 60
-update_every = 60
+update_every = 5
+
+ORDER = ['pools_utilization', 'pools_active_leases', 'leases_total', 'parse_time', 'leases_size']
+
+CHARTS = {
+ 'pools_utilization': {
+ 'options': [None, 'Pools Utilization', 'used in percent', 'utilization',
+ 'isc_dhcpd.utilization', 'line'],
+ 'lines': []},
+ 'pools_active_leases': {
+ 'options': [None, 'Active Leases', 'leases per pool', 'active leases',
+ 'isc_dhcpd.active_leases', 'line'],
+ 'lines': []},
+ 'leases_total': {
+ 'options': [None, 'Total All Pools', 'number', 'active leases',
+ 'isc_dhcpd.leases_total', 'line'],
+ 'lines': [['leases_total', 'leases', 'absolute']]},
+ 'parse_time': {
+ 'options': [None, 'Parse Time', 'ms', 'parse stats',
+ 'isc_dhcpd.parse_time', 'line'],
+ 'lines': [['parse_time', 'time', 'absolute']]},
+ 'leases_size': {
+ 'options': [None, 'Dhcpd Leases File Size', 'kilobytes',
+ 'parse stats', 'isc_dhcpd.leases_size', 'line'],
+ 'lines': [['leases_size', 'size', 'absolute', 1, 1024]]}}
+
class Service(SimpleService):
def __init__(self, configuration=None, name=None):
SimpleService.__init__(self, configuration=configuration, name=name)
self.leases_path = self.configuration.get('leases_path', '/var/lib/dhcp/dhcpd.leases')
- self.pools = self.configuration.get('pools')
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.pools = dict()
# Will work only with 'default' db-time-format (weekday year/month/day hour:minute:second)
# TODO: update algorithm to parse correctly 'local' db-time-format
@@ -33,50 +59,30 @@ class Service(SimpleService):
# Also only ipv4 supported
def check(self):
- if not self._get_raw_data():
+ if not have_ipaddress:
+ self.error('\'python-ipaddress\' module is needed')
+ return False
+ if not (isfile(self.leases_path) and access(self.leases_path, R_OK)):
self.error('Make sure leases_path is correct and leases log file is readable by netdata')
return False
- elif not have_ipaddress:
- self.error('No ipaddress module. Please install (py2-ipaddress in case of python2)')
+ if not self.configuration.get('pools'):
+ self.error('Pools are not defined')
return False
- else:
+ if not isinstance(self.configuration['pools'], dict):
+ self.error('Invalid \'pools\' format')
+ return False
+
+ for pool in self.configuration['pools']:
try:
- self.pools = self.pools.split()
- if not [ip_network(return_utf(pool)) for pool in self.pools]:
- self.error('Pools list is empty')
- return False
- except (ValueError, IndexError, AttributeError, SyntaxError) as e:
- self.error('Pools configurations is incorrect', str(e))
- return False
-
- # Creating static charts
- self.order = ['parse_time', 'leases_size', 'utilization', 'total']
- self.definitions = {'utilization':
- {'options':
- [None, 'Pools utilization', 'used %', 'utilization', 'isc_dhcpd.util', 'line'],
- 'lines': []},
- 'total':
- {'options':
- [None, 'Total all pools', 'leases', 'utilization', 'isc_dhcpd.total', 'line'],
- 'lines': [['total', 'leases', 'absolute']]},
- 'parse_time':
- {'options':
- [None, 'Parse time', 'ms', 'parse stats', 'isc_dhcpd.parse', 'line'],
- 'lines': [['ptime', 'time', 'absolute']]},
- 'leases_size':
- {'options':
- [None, 'dhcpd.leases file size', 'kilobytes', 'parse stats', 'isc_dhcpd.lsize', 'line'],
- 'lines': [['lsize', 'size', 'absolute']]}}
- # Creating dynamic charts
- for pool in self.pools:
- self.definitions['utilization']['lines'].append([''.join(['ut_', pool]), pool, 'absolute'])
- self.order.append(''.join(['leases_', pool]))
- self.definitions[''.join(['leases_', pool])] = \
- {'options': [None, 'Active leases', 'leases', 'pools', 'isc_dhcpd.lease', 'area'],
- 'lines': [[''.join(['le_', pool]), pool, 'absolute']]}
-
- self.info('Plugin was started succesfully')
- return True
+ net = ip_network(u'%s' % self.configuration['pools'][pool])
+ self.pools[pool] = dict(net=net, num_hosts=net.num_addresses - 2)
+ except ValueError as error:
+ self.error('%s removed, error: %s' % (self.configuration['pools'][pool], error))
+
+ if not self.pools:
+ return False
+ self.create_charts()
+ return True
def _get_raw_data(self):
"""
@@ -87,65 +93,60 @@ class Service(SimpleService):
)
"""
try:
- with open(self.leases_path, 'rt') as dhcp_leases:
+ with open(self.leases_path) as leases:
time_start = time()
- part1 = filterfalse(find_lease, dhcp_leases)
- part2 = filterfalse(find_ends, dhcp_leases)
- raw_result = dict(zip(part1, part2))
+ part1 = filterfalse(find_lease, leases)
+ part2 = filterfalse(find_ends, leases)
+ result = dict(zip(part1, part2))
time_end = time()
file_parse_time = round((time_end - time_start) * 1000)
- except Exception as e:
- self.error("Failed to parse leases file:", str(e))
+ return result, file_parse_time
+ except (OSError, IOError) as error:
+ self.error("Failed to parse leases file:", str(error))
return None
- else:
- result = (raw_result, file_parse_time)
- return result
def _get_data(self):
"""
:return: dict
"""
- raw_leases = self._get_raw_data()
- if not raw_leases:
+ raw_data = self._get_raw_data()
+ if not raw_data:
return None
- # Result: {ipaddress: end lease time, ...}
- all_leases = dict([(k[6:len(k)-3], v[7:len(v)-2]) for k, v in raw_leases[0].items()])
-
- # Result: [active binding, active binding....]. (Expire time (ends date;) - current time > 0)
- active_leases = [k for k, v in all_leases.items() if is_binding_active(all_leases[k])]
-
- # Result: {pool: number of active bindings in pool, ...}
- pools_count = dict([(pool, len([lease for lease in active_leases if is_address_in(lease, pool)]))
- for pool in self.pools])
-
- # Result: {pool: number of host ip addresses in pool, ...}
- pools_max = dict([(pool, (2 ** (32 - int(pool.split('/')[1])) - 2))
- for pool in self.pools])
-
- # Result: {pool: % utilization, ....} (percent)
- pools_util = dict([(pool, int(round(float(pools_count[pool]) / pools_max[pool] * 100, 0)))
- for pool in self.pools])
-
- # Bulding dicts to send to netdata
- final_count = dict([(''.join(['le_', k]), v) for k, v in pools_count.items()])
- final_util = dict([(''.join(['ut_', k]), v) for k, v in pools_util.items()])
-
- to_netdata = {'total': len(active_leases)}
- to_netdata.update({'lsize': int(stat(self.leases_path)[6] / 1024)})
- to_netdata.update({'ptime': int(raw_leases[1])})
- to_netdata.update(final_util)
- to_netdata.update(final_count)
+ raw_leases, parse_time = raw_data[0], raw_data[1]
+ # Result: {ipaddress: end lease time, ...}
+ active_leases, to_netdata = list(), dict()
+ current_time = mktime(gmtime())
+
+ for ip, lease_end_time in raw_leases.items():
+            # a binding is active when its expire time (the "ends" date) is still in the future
+ if binding_active(lease_end_time=lease_end_time[7:-2],
+ current_time=current_time):
+ active_leases.append(ip_address(u'%s' % ip[6:-3]))
+
+ for pool in self.pools:
+ dim_id = pool.replace('.', '_')
+ pool_leases_count = len([ip for ip in active_leases if ip in self.pools[pool]['net']])
+ to_netdata[dim_id + '_active_leases'] = pool_leases_count
+ to_netdata[dim_id + '_utilization'] = float(pool_leases_count) / self.pools[pool]['num_hosts'] * 10000
+
+ to_netdata['leases_total'] = len(active_leases)
+ to_netdata['leases_size'] = stat(self.leases_path)[6]
+ to_netdata['parse_time'] = parse_time
return to_netdata
+ def create_charts(self):
+ for pool in self.pools:
+ dim, dim_id = pool, pool.replace('.', '_')
+ self.definitions['pools_utilization']['lines'].append([dim_id + '_utilization',
+ dim, 'absolute', 1, 100])
+ self.definitions['pools_active_leases']['lines'].append([dim_id + '_active_leases',
+ dim, 'absolute'])
-def is_binding_active(binding):
- return mktime(strptime(binding, '%w %Y/%m/%d %H:%M:%S')) - mktime(gmtime()) > 0
-
-def is_address_in(address, pool):
- return ipaddress(return_utf(address)) in ip_network(return_utf(pool))
+def binding_active(lease_end_time, current_time):
+ return mktime(strptime(lease_end_time, '%w %Y/%m/%d %H:%M:%S')) - current_time > 0
def find_lease(value):
@@ -155,10 +156,3 @@ def find_lease(value):
def find_ends(value):
return value[2:6] != 'ends'
-
-def return_utf(s):
- # python2 returns "<type 'str'>" for simple strings
- # python3 returns "<class 'str'>" for unicode strings
- if str(type(s)) == "<type 'str'>":
- return unicode(s, 'utf-8')
- return s
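
The pool arithmetic now comes from the ipaddress module: ip_network gives the pool size and the membership test used in _get_data(). A minimal sketch with a hypothetical pool:

from ipaddress import ip_network, ip_address

net = ip_network(u'192.168.0.0/24')
num_hosts = net.num_addresses - 2            # exclude the network and broadcast addresses
print(num_hosts)                             # 254
print(ip_address(u'192.168.0.10') in net)    # True -> counted as an active lease for this pool
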
diff --git a/python.d/mdstat.chart.py b/python.d/mdstat.chart.py
index 0f7d2b44..ca9aba56 100644
--- a/python.d/mdstat.chart.py
+++ b/python.d/mdstat.chart.py
@@ -2,98 +2,125 @@
# Description: mdstat netdata python.d module
# Author: l2isbad
+from re import compile as re_compile
+from collections import defaultdict
from base import SimpleService
-from re import compile
+
priority = 60000
retries = 60
update_every = 1
+OPERATIONS = ('check', 'resync', 'reshape', 'recovery', 'finish', 'speed')
+
class Service(SimpleService):
def __init__(self, configuration=None, name=None):
SimpleService.__init__(self, configuration=configuration, name=name)
- self.order = ['agr_health']
- self.definitions = {'agr_health':
- {'options':
- [None, 'Faulty devices in MD', 'failed disks', 'health', 'md.health', 'line'],
- 'lines': []}}
- self.proc_mdstat = '/proc/mdstat'
- self.regex_disks = compile(r'((?<=\ )[a-zA-Z_0-9]+(?= : active)).*?((?<= \[)[0-9]+)/([0-9]+(?=\] ))')
- self.regex_status = compile(r'([a-zA-Z_0-9]+)( : active)[^:]*?([a-z]+) = ([0-9.]+(?=%)).*?((?<=finish=)[0-9.]+)min speed=([0-9]+)')
+ self.regex = dict(disks=re_compile(r' (?P<array>[a-zA-Z_0-9]+) : active .+\['
+ r'(?P<total_disks>[0-9]+)/'
+ r'(?P<inuse_disks>[0-9])\]'),
+ status=re_compile(r' (?P<array>[a-zA-Z_0-9]+) : active .+ '
+ r'(?P<operation>[a-z]+) = '
+ r'(?P<operation_status>[0-9.]+).+finish='
+ r'(?P<finish>([0-9.]+))min speed='
+ r'(?P<speed>[0-9]+)'))
def check(self):
- raw_data = self._get_raw_data()
- if not raw_data:
- self.error('Cant read mdstat data from %s' % (self.proc_mdstat))
- return False
-
- md_list = [md[0] for md in self.regex_disks.findall(raw_data)]
-
- if not md_list:
- self.error('No active arrays in %s' % (self.proc_mdstat))
- return False
- else:
- for md in md_list:
- self.order.append(md)
- self.order.append(''.join([md, '_status']))
- self.order.append(''.join([md, '_rate']))
- self.definitions['agr_health']['lines'].append([''.join([md, '_health']), md, 'absolute'])
- self.definitions[md] = {'options':
- [None, '%s disks stats' % md, 'disks', md, 'md.disks', 'stacked'],
- 'lines': [[''.join([md, '_total']), 'total', 'absolute'],
- [''.join([md, '_inuse']), 'inuse', 'absolute']]}
- self.definitions[''.join([md, '_status'])] = {'options':
- [None, '%s current status' % md, 'percent', md, 'md.status', 'line'],
- 'lines': [[''.join([md, '_resync']), 'resync', 'absolute', 1, 100],
- [''.join([md, '_recovery']), 'recovery', 'absolute', 1, 100],
- [''.join([md, '_reshape']), 'reshape', 'absolute', 1, 100],
- [''.join([md, '_check']), 'check', 'absolute', 1, 100]]}
- self.definitions[''.join([md, '_rate'])] = {'options':
- [None, '%s operation status' % md, 'rate', md, 'md.rate', 'line'],
- 'lines': [[''.join([md, '_finishin']), 'finish min', 'absolute', 1, 100],
- [''.join([md, '_rate']), 'megabyte/s', 'absolute', -1, 100]]}
- self.info('Plugin was started successfully. MDs to monitor %s' % (md_list))
-
- return True
-
- def _get_raw_data(self):
+ arrays = find_arrays(self._get_raw_data(), self.regex)
+ if not arrays:
+            self.error('Failed to read data from /proc/mdstat or there are no active arrays')
+ return None
+
+ self.order, self.definitions = create_charts(arrays.keys())
+ return True
+
+ @staticmethod
+ def _get_raw_data():
"""
Read data from /proc/mdstat
:return: str
"""
try:
- with open(self.proc_mdstat, 'rt') as proc_mdstat:
- raw_result = proc_mdstat.read()
- except Exception:
+ with open('/proc/mdstat', 'rt') as proc_mdstat:
+ return proc_mdstat.readlines() or None
+ except (OSError, IOError):
return None
- else:
- raw_result = ' '.join(raw_result.split())
- return raw_result
def _get_data(self):
"""
Parse data from _get_raw_data()
:return: dict
"""
- raw_mdstat = self._get_raw_data()
- mdstat_disks = self.regex_disks.findall(raw_mdstat)
- mdstat_status = self.regex_status.findall(raw_mdstat)
- to_netdata = {}
-
- for md in mdstat_disks:
- to_netdata[''.join([md[0], '_total'])] = int(md[1])
- to_netdata[''.join([md[0], '_inuse'])] = int(md[2])
- to_netdata[''.join([md[0], '_health'])] = int(md[1]) - int(md[2])
- to_netdata[''.join([md[0], '_check'])] = 0
- to_netdata[''.join([md[0], '_resync'])] = 0
- to_netdata[''.join([md[0], '_reshape'])] = 0
- to_netdata[''.join([md[0], '_recovery'])] = 0
- to_netdata[''.join([md[0], '_finishin'])] = 0
- to_netdata[''.join([md[0], '_rate'])] = 0
-
- for md in mdstat_status:
- to_netdata[''.join([md[0], '_' + md[2]])] = round(float(md[3]) * 100)
- to_netdata[''.join([md[0], '_finishin'])] = round(float(md[4]) * 100)
- to_netdata[''.join([md[0], '_rate'])] = round(float(md[5]) / 1000 * 100)
+ raw_data = self._get_raw_data()
+ arrays = find_arrays(raw_data, self.regex)
+ if not arrays:
+ return None
+
+ to_netdata = dict()
+ for array, values in arrays.items():
+ for key, value in values.items():
+ to_netdata['_'.join([array, key])] = value
return to_netdata
+
+
+def find_arrays(raw_data, regex):
+ if raw_data is None:
+ return None
+ data = defaultdict(str)
+ counter = 1
+
+ for row in (elem.strip() for elem in raw_data):
+ if not row:
+ counter += 1
+ continue
+ data[counter] = ' '.join([data[counter], row])
+
+ arrays = dict()
+ for value in data.values():
+ match = regex['disks'].search(value)
+ if not match:
+ continue
+
+ match = match.groupdict()
+ array = match.pop('array')
+ arrays[array] = match
+ arrays[array]['health'] = int(match['total_disks']) - int(match['inuse_disks'])
+ for operation in OPERATIONS:
+ arrays[array][operation] = 0
+
+ match = regex['status'].search(value)
+ if match:
+ match = match.groupdict()
+ if match['operation'] in OPERATIONS:
+ arrays[array][match['operation']] = float(match['operation_status']) * 100
+ arrays[array]['finish'] = float(match['finish']) * 100
+ arrays[array]['speed'] = float(match['speed']) / 1000 * 100
+
+ return arrays or None
+
+
+def create_charts(arrays):
+ order = ['mdstat_health']
+ definitions = dict(mdstat_health={'options': [None, 'Faulty devices in MD', 'failed disks',
+ 'health', 'md.health', 'line'],
+ 'lines': []})
+ for md in arrays:
+ order.append(md)
+ order.append(md + '_status')
+ order.append(md + '_rate')
+ definitions['mdstat_health']['lines'].append([md + '_health', md, 'absolute'])
+ definitions[md] = {'options': [None, '%s disks stats' % md, 'disks', md, 'md.disks', 'stacked'],
+ 'lines': [[md + '_total_disks', 'total', 'absolute'],
+ [md + '_inuse_disks', 'inuse', 'absolute']]}
+ definitions[md + '_status'] = {'options': [None, '%s current status' % md,
+ 'percent', md, 'md.status', 'line'],
+ 'lines': [[md + '_resync', 'resync', 'absolute', 1, 100],
+ [md + '_recovery', 'recovery', 'absolute', 1, 100],
+ [md + '_reshape', 'reshape', 'absolute', 1, 100],
+ [md + '_check', 'check', 'absolute', 1, 100]]}
+ definitions[md + '_rate'] = {'options': [None, '%s operation status' % md,
+ 'rate', md, 'md.rate', 'line'],
+ 'lines': [[md + '_finish', 'finish min', 'absolute', 1, 100],
+ [md + '_speed', 'MB/s', 'absolute', -1, 100]]}
+ return order, definitions
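
To show what find_arrays() extracts, here is the "disks" regex above applied to an assumed /proc/mdstat fragment (find_arrays space-joins the rows of each array, hence the leading space):

from re import compile as re_compile

disks = re_compile(r' (?P<array>[a-zA-Z_0-9]+) : active .+\['
                   r'(?P<total_disks>[0-9]+)/'
                   r'(?P<inuse_disks>[0-9])\]')

row = ' md0 : active raid1 sdb1[1] sda1[0] 104320 blocks [2/2] [UU]'
print(disks.search(row).groupdict())
# {'array': 'md0', 'total_disks': '2', 'inuse_disks': '2'}
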
diff --git a/python.d/mongodb.chart.py b/python.d/mongodb.chart.py
index c01bd293..bb4c44b0 100644
--- a/python.d/mongodb.chart.py
+++ b/python.d/mongodb.chart.py
@@ -19,7 +19,7 @@ except ImportError:
priority = 60000
retries = 60
-REPLSET_STATES = [
+REPL_SET_STATES = [
('1', 'primary'),
('8', 'down'),
('2', 'secondary'),
@@ -358,8 +358,8 @@ CHARTS = {
'options': [None, 'Lock on the oplog. Number of times the lock was acquired in the specified mode',
'locks', 'locks metrics', 'mongodb.locks_oplog', 'stacked'],
'lines': [
- ['Metadata_r', 'intent_shared', 'incremental'],
- ['Metadata_w', 'intent_exclusive', 'incremental']
+ ['oplog_r', 'intent_shared', 'incremental'],
+ ['oplog_w', 'intent_exclusive', 'incremental']
]}
}
@@ -391,13 +391,16 @@ class Service(SimpleService):
self.build_metrics_to_collect_(server_status)
try:
- self._get_data()
+ data = self._get_data()
except (LookupError, SyntaxError, AttributeError):
self.error('Type: %s, error: %s' % (str(exc_info()[0]), str(exc_info()[1])))
return False
- else:
+ if isinstance(data, dict) and data:
+ self._data_from_check = data
self.create_charts_(server_status)
return True
+ self.error('_get_data() returned no data or type is not <dict>')
+ return False
def build_metrics_to_collect_(self, server_status):
@@ -473,7 +476,7 @@ class Service(SimpleService):
lines.append([dim_id, description, 'absolute', 1, 1])
return lines
- all_hosts = server_status['repl']['hosts']
+ all_hosts = server_status['repl']['hosts'] + server_status['repl'].get('arbiters', list())
this_host = server_status['repl']['me']
other_hosts = [host for host in all_hosts if host != this_host]
@@ -503,19 +506,19 @@ class Service(SimpleService):
self.definitions[chart_name] = {
'options': [None, 'Replica set member (%s) current state' % host, 'state',
'replication and oplog', 'mongodb.replication_state', 'line'],
- 'lines': create_state_lines(REPLSET_STATES)}
+ 'lines': create_state_lines(REPL_SET_STATES)}
def _get_raw_data(self):
raw_data = dict()
- raw_data.update(self.get_serverstatus_() or dict())
- raw_data.update(self.get_dbstats_() or dict())
- raw_data.update(self.get_replsetgetstatus_() or dict())
- raw_data.update(self.get_getreplicationinfo_() or dict())
+ raw_data.update(self.get_server_status() or dict())
+ raw_data.update(self.get_db_stats() or dict())
+ raw_data.update(self.get_repl_set_get_status() or dict())
+ raw_data.update(self.get_get_replication_info() or dict())
return raw_data or None
- def get_serverstatus_(self):
+ def get_server_status(self):
raw_data = dict()
try:
raw_data['serverStatus'] = self.connection.admin.command('serverStatus')
@@ -524,7 +527,7 @@ class Service(SimpleService):
else:
return raw_data
- def get_dbstats_(self):
+ def get_db_stats(self):
if not self.databases:
return None
@@ -533,24 +536,22 @@ class Service(SimpleService):
try:
for dbase in self.databases:
raw_data['dbStats'][dbase] = self.connection[dbase].command('dbStats')
+ return raw_data
except PyMongoError:
return None
- else:
- return raw_data
- def get_replsetgetstatus_(self):
+ def get_repl_set_get_status(self):
if not self.do_replica:
return None
raw_data = dict()
try:
raw_data['replSetGetStatus'] = self.connection.admin.command('replSetGetStatus')
+ return raw_data
except PyMongoError:
return None
- else:
- return raw_data
- def get_getreplicationinfo_(self):
+ def get_get_replication_info(self):
if not (self.do_replica and 'local' in self.databases):
return None
@@ -561,10 +562,9 @@ class Service(SimpleService):
"$natural", ASCENDING).limit(1)[0]
raw_data['getReplicationInfo']['DESCENDING'] = self.connection.local.oplog.rs.find().sort(
"$natural", DESCENDING).limit(1)[0]
+ return raw_data
except PyMongoError:
return None
- else:
- return raw_data
def _get_data(self):
"""
@@ -583,7 +583,7 @@ class Service(SimpleService):
utc_now = datetime.utcnow()
# serverStatus
- for metric, new_name, function in self.metrics_to_collect:
+ for metric, new_name, func in self.metrics_to_collect:
value = serverStatus
for key in metric.split('.'):
try:
@@ -592,7 +592,7 @@ class Service(SimpleService):
break
if not isinstance(value, dict) and key:
- to_netdata[new_name or key] = value if not function else function(value)
+ to_netdata[new_name or key] = value if not func else func(value)
to_netdata['nonmapped'] = to_netdata['virtual'] - serverStatus['mem'].get('mappedWithJournal',
to_netdata['mapped'])
@@ -620,13 +620,13 @@ class Service(SimpleService):
if not member.get('self'):
other_hosts.append(member)
# Replica set time diff between current time and time when last entry from the oplog was applied
- if member['optimeDate'] != unix_epoch:
+ if member.get('optimeDate', unix_epoch) != unix_epoch:
member_optimedate = member['name'] + '_optimedate'
to_netdata.update({member_optimedate: int(delta_calculation(delta=utc_now - member['optimeDate'],
multiplier=1000))})
# Replica set members state
member_state = member['name'] + '_state'
- for elem in REPLSET_STATES:
+ for elem in REPL_SET_STATES:
state = elem[0]
to_netdata.update({'_'.join([member_state, state]): 0})
to_netdata.update({'_'.join([member_state, str(member['state'])]): member['state']})
@@ -668,5 +668,4 @@ class Service(SimpleService):
def delta_calculation(delta, multiplier=1):
if hasattr(delta, 'total_seconds'):
return delta.total_seconds() * multiplier
- else:
- return (delta.microseconds + (delta.seconds + delta.days * 24 * 3600) * 10 ** 6) / 10.0 ** 6 * multiplier
+ return (delta.microseconds + (delta.seconds + delta.days * 24 * 3600) * 10 ** 6) / 10.0 ** 6 * multiplier
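
The renamed getters above all follow the same contract; a runnable sketch of the aggregation pattern in _get_raw_data(), with stub getters standing in for the MongoDB commands:

# Each getter returns a dict on success or None on failure;
# "getter() or dict()" lets a failed getter contribute nothing.
def get_server_status():
    return {'serverStatus': {'ok': 1}}   # stub for a successful admin command

def get_db_stats():
    return None                          # stub for a failed/skipped command

def gather(*getters):
    raw_data = dict()
    for getter in getters:
        raw_data.update(getter() or dict())
    return raw_data or None              # None only when every getter failed

print(gather(get_server_status, get_db_stats))  # {'serverStatus': {'ok': 1}}
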
diff --git a/python.d/mysql.chart.py b/python.d/mysql.chart.py
index abf6bf71..cdabe971 100644
--- a/python.d/mysql.chart.py
+++ b/python.d/mysql.chart.py
@@ -117,7 +117,7 @@ GLOBAL_STATS = [
'Connection_errors_tcpwrap']
def slave_seconds(value):
- return value if value else -1
+ return value if value != '' else -1
def slave_running(value):
return 1 if value == 'Yes' else -1
@@ -278,10 +278,10 @@ CHARTS = {
'innodb_rows': {
'options': [None, 'mysql InnoDB Row Operations', 'operations/s', 'innodb', 'mysql.innodb_rows', 'area'],
'lines': [
- ['Innodb_rows_inserted', 'read', 'incremental'],
- ['Innodb_rows_read', 'deleted', 'incremental', -1, 1],
- ['Innodb_rows_updated', 'inserted', 'incremental', 1, 1],
- ['Innodb_rows_deleted', 'updated', 'incremental', -1, 1],
+ ['Innodb_rows_inserted', 'inserted', 'incremental'],
+ ['Innodb_rows_read', 'read', 'incremental', 1, 1],
+ ['Innodb_rows_updated', 'updated', 'incremental', 1, 1],
+ ['Innodb_rows_deleted', 'deleted', 'incremental', -1, 1],
]},
'innodb_buffer_pool_pages': {
'options': [None, 'mysql InnoDB Buffer Pool Pages', 'pages', 'innodb', 'mysql.innodb_buffer_pool_pages', 'line'],
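
The fix above re-pairs each dimension id with its display name. For reference, a netdata dimension line is [dimension_id, name, algorithm, multiplier, divisor], and a negative multiplier (as on Innodb_rows_deleted) draws the series below the zero line:

# One corrected line from the chart above, unpacked field by field.
line = ['Innodb_rows_deleted', 'deleted', 'incremental', -1, 1]
dimension_id, name, algorithm, multiplier, divisor = line
assert name in dimension_id.lower()  # 'deleted' in 'innodb_rows_deleted'
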
diff --git a/python.d/ovpn_status_log.chart.py b/python.d/ovpn_status_log.chart.py
index c5fca002..3a7e8200 100644
--- a/python.d/ovpn_status_log.chart.py
+++ b/python.d/ovpn_status_log.chart.py
@@ -2,8 +2,10 @@
# Description: openvpn status log netdata python.d module
# Author: l2isbad
+from re import compile as r_compile
+from collections import defaultdict
from base import SimpleService
-from re import compile, findall, search, subn
+
priority = 60000
retries = 60
update_every = 10
@@ -11,67 +13,101 @@ update_every = 10
ORDER = ['users', 'traffic']
CHARTS = {
'users': {
- 'options': [None, 'OpenVPN active users', 'active users', 'Users', 'openvpn_status.users', 'line'],
+ 'options': [None, 'OpenVPN Active Users', 'active users', 'users', 'openvpn_status.users', 'line'],
'lines': [
- ["users", None, "absolute"],
+ ['users', None, 'absolute'],
]},
'traffic': {
- 'options': [None, 'OpenVPN traffic', 'kilobit/s', 'Traffic', 'openvpn_status.traffic', 'area'],
+ 'options': [None, 'OpenVPN Traffic', 'KB/s', 'traffic', 'openvpn_status.traffic', 'area'],
'lines': [
- ["in", None, "incremental", 8, 1000], ["out", None, "incremental", 8, -1000]
+ ['bytes_in', 'in', 'incremental', 1, 1 << 10], ['bytes_out', 'out', 'incremental', 1, -1 << 10]
]},
}
+
class Service(SimpleService):
def __init__(self, configuration=None, name=None):
SimpleService.__init__(self, configuration=configuration, name=name)
self.order = ORDER
self.definitions = CHARTS
self.log_path = self.configuration.get('log_path')
- self.regex_data_inter = compile(r'(?<=Since ).*?(?=.ROUTING)')
- self.regex_data_final = compile(r'\d{1,3}(?:\.\d{1,3}){3}[:0-9,. ]*')
- self.regex_users = compile(r'\d{1,3}(?:\.\d{1,3}){3}:\d+')
- self.regex_traffic = compile(r'(?<=(?:,| ))\d+(?=(?:,| ))')
+ self.regex = dict(tls=r_compile(r'\d{1,3}(?:\.\d{1,3}){3}(?::\d+)? (?P<bytes_in>\d+) (?P<bytes_out>\d+)'),
+ static_key=r_compile(r'TCP/[A-Z]+ (?P<direction>(?:read|write)) bytes,(?P<bytes>\d+)'))
+ self.to_netdata = dict(bytes_in=0, bytes_out=0)
def check(self):
- if not self._get_raw_data():
- self.error('Make sure that the openvpn status log file exists and netdata has permission to read it')
+ if not (self.log_path and isinstance(self.log_path, str)):
+ self.error('\'log_path\' is not defined')
return False
- else:
- self.info('Plugin was started succesfully')
+
+ data = False
+ for method in (self._get_data_tls, self._get_data_static_key):
+ data = method()
+ if data:
+ self._get_data = method
+ self._data_from_check = data
+ break
+
+ if data:
return True
+ self.error('Make sure that the openvpn status log file exists and netdata has permission to read it')
+ return False
def _get_raw_data(self):
"""
Open log file
:return: str
"""
+
try:
- with open(self.log_path, 'rt') as log:
- result = log.read()
- except Exception:
+ with open(self.log_path) as log:
+ raw_data = log.readlines() or None
+ except OSError:
return None
else:
- return result
+ return raw_data
- def _get_data(self):
+ def _get_data_static_key(self):
"""
Parse openvpn-status log file.
- Current regex version is ok for status-version 1, 2 and 3. Both users and bytes in/out are collecting.
"""
raw_data = self._get_raw_data()
- try:
- data_inter = self.regex_data_inter.search(' '.join(raw_data.splitlines())).group()
- except AttributeError:
- data_inter = ''
+ if not raw_data:
+ return None
+
+ data = defaultdict(lambda: 0)
+
+ for row in raw_data:
+ match = self.regex['static_key'].search(row)
+ if match:
+ match = match.groupdict()
+ if match['direction'] == 'read':
+ data['bytes_in'] += int(match['bytes'])
+ else:
+ data['bytes_out'] += int(match['bytes'])
+
+ return data or None
+
+ def _get_data_tls(self):
+ """
+ Parse openvpn-status log file.
+ """
+
+ raw_data = self._get_raw_data()
+ if not raw_data:
+ return None
- data_final = ' '.join(self.regex_data_final.findall(data_inter))
- users = self.regex_users.subn('', data_final)[1]
- traffic = self.regex_traffic.findall(data_final)
+ data = defaultdict(lambda: 0)
+ for row in raw_data:
+ row = ' '.join(row.split(',')) if ',' in row else ' '.join(row.split())
+ match = self.regex['tls'].search(row)
+ if match:
+ match = match.groupdict()
+ data['users'] += 1
+ data['bytes_in'] += int(match['bytes_in'])
+ data['bytes_out'] += int(match['bytes_out'])
- bytes_in = sum([int(traffic[i]) for i in range(len(traffic)) if (i + 1) % 2 is 1])
- bytes_out = sum([int(traffic[i]) for i in range(len(traffic)) if (i + 1) % 2 is 0])
+ return data or None
- return {'users': users, 'in': bytes_in, 'out': bytes_out}
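
A runnable sketch of the named-group parsing used by _get_data_tls() above; the status line is hypothetical but follows the openvpn-status v1 client list layout:

from collections import defaultdict
from re import compile as r_compile

tls = r_compile(r'\d{1,3}(?:\.\d{1,3}){3}(?::\d+)? (?P<bytes_in>\d+) (?P<bytes_out>\d+)')
rows = ['client1,10.0.0.2:49876,1024,2048,Mon Jan 1 00:00:00 2018\n']  # hypothetical line

data = defaultdict(lambda: 0)
for row in rows:
    row = ' '.join(row.split(',')) if ',' in row else ' '.join(row.split())
    match = tls.search(row)
    if match:
        match = match.groupdict()
        data['users'] += 1
        data['bytes_in'] += int(match['bytes_in'])
        data['bytes_out'] += int(match['bytes_out'])

print(dict(data))  # {'users': 1, 'bytes_in': 1024, 'bytes_out': 2048}
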
diff --git a/python.d/postgres.chart.py b/python.d/postgres.chart.py
index d359bb4f..ef710cb8 100644
--- a/python.d/postgres.chart.py
+++ b/python.d/postgres.chart.py
@@ -99,8 +99,8 @@ SELECT
sum(conflicts) AS conflicts,
pg_database_size(datname) AS size
FROM pg_stat_database
-WHERE NOT datname ~* '^template\d+'
-GROUP BY database_name;
+WHERE datname IN %(databases)s
+GROUP BY datname;
""",
BGWRITER="""
SELECT
@@ -146,7 +146,6 @@ SELECT current_setting('is_superuser') = 'on' AS is_superuser;
QUERY_STATS = {
QUERIES['DATABASE']: METRICS['DATABASE'],
QUERIES['BACKENDS']: METRICS['BACKENDS'],
- QUERIES['ARCHIVE']: METRICS['ARCHIVE'],
QUERIES['LOCKS']: METRICS['LOCKS']
}
@@ -242,6 +241,7 @@ class Service(SimpleService):
self.definitions = deepcopy(CHARTS)
self.table_stats = configuration.pop('table_stats', False)
self.index_stats = configuration.pop('index_stats', False)
+ self.database_poll = configuration.pop('database_poll', None)
self.configuration = configuration
self.connection = False
self.is_superuser = False
@@ -281,6 +281,9 @@ class Service(SimpleService):
is_superuser = check_if_superuser_(cursor, QUERIES['IF_SUPERUSER'])
cursor.close()
+ if (self.database_poll and isinstance(self.database_poll, str)):
+ self.databases = [dbase for dbase in self.databases if dbase in self.database_poll.split()] or self.databases
+
self.locks_zeroed = populate_lock_types(self.databases)
self.add_additional_queries_(is_superuser)
self.create_dynamic_charts_()
@@ -296,6 +299,7 @@ class Service(SimpleService):
QUERY_STATS[QUERIES['TABLE_STATS']] = METRICS['TABLE_STATS']
if is_superuser:
QUERY_STATS[QUERIES['BGWRITER']] = METRICS['BGWRITER']
+ QUERY_STATS[QUERIES['ARCHIVE']] = METRICS['ARCHIVE']
def create_dynamic_charts_(self):
@@ -328,7 +332,7 @@ class Service(SimpleService):
return None
def query_stats_(self, cursor, query, metrics):
- cursor.execute(query)
+ cursor.execute(query, dict(databases=tuple(self.databases)))
for row in cursor:
for metric in metrics:
dimension_id = '_'.join([row['database_name'], metric]) if 'database_name' in row else metric
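
The query change above switches from a regex filter to an explicit database list bound as a parameter. It relies on the driver (psycopg2, assumed here) adapting a Python tuple to a parenthesized SQL value list, so the names are quoted server-side instead of interpolated by hand:

# Sketch only: no live connection. 'db1'/'db2' are hypothetical names.
query = """
SELECT datname, numbackends
FROM pg_stat_database
WHERE datname IN %(databases)s;
"""
params = dict(databases=('db1', 'db2'))
# cursor.execute(query, params) is sent to the server as:
#   ... WHERE datname IN ('db1', 'db2');
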
diff --git a/python.d/python_modules/base.py b/python.d/python_modules/base.py
index 859300ec..a643cc6a 100644
--- a/python.d/python_modules/base.py
+++ b/python.d/python_modules/base.py
@@ -20,23 +20,20 @@
import time
import os
import socket
-import select
import threading
-import msg
import ssl
from subprocess import Popen, PIPE
from sys import exc_info
-
+from glob import glob
+import re
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
-
try:
import urllib.request as urllib2
except ImportError:
import urllib2
-
try:
import MySQLdb
PYMYSQL = True
@@ -46,6 +43,7 @@ except ImportError:
PYMYSQL = True
except ImportError:
PYMYSQL = False
+import msg
try:
PATH = os.getenv('PATH').split(':')
@@ -175,14 +173,15 @@ class SimpleService(threading.Thread):
# it is important to do this in a loop
# sleep() is interruptible
while now < next:
- self.debug("sleeping for", str(next - now), "secs to reach frequency of", str(step), "secs, now:", str(now), " next:", str(next), " penalty:", str(penalty))
+ self.debug("sleeping for", str(next - now), "secs to reach frequency of",
+ str(step), "secs, now:", str(now), " next:", str(next), " penalty:", str(penalty))
time.sleep(next - now)
now = float(time.time())
# do the job
try:
status = self._run_once()
- except Exception as e:
+ except Exception:
status = False
if status:
@@ -202,10 +201,12 @@ class SimpleService(threading.Thread):
penalty = 600
self.retries_left = self.retries
- self.alert("failed to collect data for " + str(self.retries) + " times - increasing penalty to " + str(penalty) + " sec and trying again")
+ self.alert("failed to collect data for " + str(self.retries) +
+ " times - increasing penalty to " + str(penalty) + " sec and trying again")
else:
- self.error("failed to collect data - " + str(self.retries_left) + " retries left - penalty: " + str(penalty) + " sec")
+ self.error("failed to collect data - " + str(self.retries_left)
+ + " retries left - penalty: " + str(penalty) + " sec")
# --- CHART ---
@@ -460,11 +461,42 @@ class SimpleService(threading.Thread):
return next(('/'.join([p, binary]) for p in PATH
if os.path.isfile('/'.join([p, binary]))
and os.access('/'.join([p, binary]), os.X_OK)))
- else:
- return None
+ return None
except StopIteration:
return None
+ def _add_new_dimension(self, dimension_id, chart_name, dimension=None, algorithm='incremental',
+ multiplier=1, divisor=1, priority=65000):
+ """
+ :param dimension_id:
+ :param chart_name:
+ :param dimension:
+ :param algorithm:
+ :param multiplier:
+ :param divisor:
+ :param priority:
+ :return:
+ """
+ if not all([dimension_id not in self._dimensions,
+ chart_name in self.order,
+ chart_name in self.definitions]):
+ return
+ self._dimensions.append(dimension_id)
+ dimension_list = list(map(str, [dimension_id,
+ dimension if dimension else dimension_id,
+ algorithm,
+ multiplier,
+ divisor]))
+ self.definitions[chart_name]['lines'].append(dimension_list)
+ add_to_name = self.override_name or self.name
+ job_name = ('_'.join([self.__module__, re.sub(r'\s+', '_', add_to_name)])
+ if add_to_name != 'None' else self.__module__)
+ chart = 'CHART {0}.{1} '.format(job_name, chart_name)
+ options = '"" "{0}" {1} "{2}" {3} {4} '.format(*self.definitions[chart_name]['options'][1:6])
+ other = '{0} {1}\n'.format(priority, self.update_every)
+ new_dimension = "DIMENSION {0}\n".format(' '.join(dimension_list))
+ print(chart + options + other + new_dimension)
+
class UrlService(SimpleService):
def __init__(self, configuration=None, name=None):
@@ -473,47 +505,73 @@ class UrlService(SimpleService):
self.user = self.configuration.get('user')
self.password = self.configuration.get('pass')
self.ss_cert = self.configuration.get('ss_cert')
+ self.proxy = self.configuration.get('proxy')
- def __add_openers(self):
- def self_signed_cert(ss_cert):
- if ss_cert:
- try:
- ctx = ssl.create_default_context()
- ctx.check_hostname = False
- ctx.verify_mode = ssl.CERT_NONE
- return urllib2.build_opener(urllib2.HTTPSHandler(context=ctx))
- except AttributeError:
- return None
- else:
- return None
+ def __add_openers(self, user=None, password=None, ss_cert=None, proxy=None, url=None):
+ user = user or self.user
+ password = password or self.password
+ ss_cert = ss_cert or self.ss_cert
+ proxy = proxy or self.proxy
- self.opener = self_signed_cert(self.ss_cert) or urllib2.build_opener()
+ handlers = list()
- # HTTP Basic Auth
- if self.user and self.password:
- url_parse = urlparse(self.url)
+ # HTTP Basic Auth handler
+ if all([user, password, isinstance(user, str), isinstance(password, str)]):
+ url = url or self.url
+ url_parse = urlparse(url)
top_level_url = '://'.join([url_parse.scheme, url_parse.netloc])
passman = urllib2.HTTPPasswordMgrWithDefaultRealm()
- passman.add_password(None, top_level_url, self.user, self.password)
- self.opener.add_handler(urllib2.HTTPBasicAuthHandler(passman))
+ passman.add_password(None, top_level_url, user, password)
+ handlers.append(urllib2.HTTPBasicAuthHandler(passman))
self.debug("Enabling HTTP basic auth")
- def _get_raw_data(self, custom_url=None):
+ # HTTPS handler
+ # Self-signed certificate ignore
+ if ss_cert:
+ try:
+ ctx = ssl.create_default_context()
+ ctx.check_hostname = False
+ ctx.verify_mode = ssl.CERT_NONE
+ except AttributeError:
+ self.error('HTTPS self-signed certificate ignore not enabled')
+ else:
+ handlers.append(urllib2.HTTPSHandler(context=ctx))
+ self.debug("Enabling HTTP self-signed certificate ignore")
+
+ # PROXY handler
+ if proxy and isinstance(proxy, str) and not ss_cert:
+ handlers.append(urllib2.ProxyHandler(dict(http=proxy)))
+ self.debug("Enabling HTTP proxy handler (%s)" % proxy)
+
+ opener = urllib2.build_opener(*handlers)
+ return opener
+
+ def _build_opener(self, **kwargs):
+ try:
+ return self.__add_openers(**kwargs)
+ except TypeError as error:
+ self.error('build_opener() error:', str(error))
+ return None
+
+ def _get_raw_data(self, url=None, opener=None):
"""
Get raw data from http request
:return: str
"""
- raw_data = None
- f = None
+ data = None
try:
- f = self.opener.open(custom_url or self.url, timeout=self.update_every * 2)
- raw_data = f.read().decode('utf-8', 'ignore')
+ opener = opener or self.opener
+ data = opener.open(url or self.url, timeout=self.update_every * 2)
+ raw_data = data.read().decode('utf-8', 'ignore')
+ except urllib2.URLError as error:
+ self.error('Url: %s. Error: %s' % (url or self.url, str(error)))
+ return None
except Exception as error:
- self.error('Url: %s. Error: %s' %(custom_url or self.url, str(error)))
+ self.error(str(error))
return None
finally:
- if f is not None: f.close()
-
+ if data is not None:
+ data.close()
return raw_data or None
def check(self):
@@ -525,7 +583,7 @@ class UrlService(SimpleService):
self.error('URL is not defined or type is not <str>')
return False
- self.__add_openers()
+ self.opener = self.__add_openers()
try:
data = self._get_data()
@@ -781,57 +839,69 @@ class SocketService(SimpleService):
class LogService(SimpleService):
def __init__(self, configuration=None, name=None):
- self.log_path = ""
- self._last_position = 0
- # self._log_reader = None
SimpleService.__init__(self, configuration=configuration, name=name)
+ self.log_path = self.configuration.get('path')
+ self.__glob_path = self.log_path
+ self._last_position = 0
self.retries = 100000 # basically always retry
+ self.__re_find = dict(current=0, run=0, maximum=60)
def _get_raw_data(self):
"""
Get log lines since last poll
:return: list
"""
- lines = []
+ lines = list()
try:
- if os.path.getsize(self.log_path) < self._last_position:
+ if self.__re_find['current'] == self.__re_find['run']:
+ self._find_recent_log_file()
+ size = os.path.getsize(self.log_path)
+ if size == self._last_position:
+ self.__re_find['current'] += 1
+ return list() # return empty list if nothing has changed
+ elif size < self._last_position:
self._last_position = 0 # read from beginning if file has shrunk
- elif os.path.getsize(self.log_path) == self._last_position:
- self.debug("Log file hasn't changed. No new data.")
- return [] # return empty list if nothing has changed
- with open(self.log_path, "r") as fp:
+
+ with open(self.log_path) as fp:
fp.seek(self._last_position)
- for i, line in enumerate(fp):
+ for line in fp:
lines.append(line)
self._last_position = fp.tell()
- except Exception as e:
- self.error(str(e))
+ self.__re_find['current'] = 0
+ except (OSError, IOError) as error:
+ self.__re_find['current'] += 1
+ self.error(str(error))
- if len(lines) != 0:
- return lines
- else:
- self.error("No data collected.")
- return None
+ return lines or None
+
+ def _find_recent_log_file(self):
+ """
+ :return:
+ """
+ self.__re_find['run'] = self.__re_find['maximum']
+ self.__re_find['current'] = 0
+ self.__glob_path = self.__glob_path or self.log_path # workaround for modules w/o config files
+ path_list = glob(self.__glob_path)
+ if path_list:
+ self.log_path = max(path_list)
+ return True
+ return False
def check(self):
"""
Parse basic configuration and check if log file exists
:return: boolean
"""
- if self.name is not None or self.name != str(None):
- self.name = ""
- else:
- self.name = str(self.name)
- try:
- self.log_path = str(self.configuration['path'])
- except (KeyError, TypeError):
- self.info("No path to log specified. Using: '" + self.log_path + "'")
+ if not self.log_path:
+ self.error("No path to log specified")
+ return False
- if os.access(self.log_path, os.R_OK):
+ if all([self._find_recent_log_file(),
+ os.access(self.log_path, os.R_OK),
+ os.path.isfile(self.log_path)]):
return True
- else:
- self.error("Cannot access file: '" + self.log_path + "'")
- return False
+ self.error("Cannot access %s" % self.log_path)
+ return False
def create(self):
# set cursor at last byte of log file
@@ -847,7 +917,7 @@ class ExecutableService(SimpleService):
SimpleService.__init__(self, configuration=configuration, name=name)
self.command = None
- def _get_raw_data(self):
+ def _get_raw_data(self, stderr=False):
"""
Get raw data from executed command
:return: <list>
@@ -855,10 +925,11 @@ class ExecutableService(SimpleService):
try:
p = Popen(self.command, stdout=PIPE, stderr=PIPE)
except Exception as error:
- self.error("Executing command", self.command, "resulted in error:", str(error))
+ self.error("Executing command", " ".join(self.command), "resulted in error:", str(error))
return None
data = list()
- for line in p.stdout.readlines():
+ std = p.stderr if stderr else p.stdout
+ for line in std.readlines():
data.append(line.decode())
return data or None
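
A condensed, runnable sketch of the handler-list opener construction introduced in __add_openers() above (Python 3 names; the module itself aliases urllib.request as urllib2, and the URL and credentials below are placeholders):

import ssl
import urllib.request as urllib2
from urllib.parse import urlparse

def build_opener(url, user=None, password=None, ss_cert=False, proxy=None):
    handlers = list()
    if user and password:                       # HTTP basic auth handler
        parsed = urlparse(url)
        top_level_url = '://'.join([parsed.scheme, parsed.netloc])
        passman = urllib2.HTTPPasswordMgrWithDefaultRealm()
        passman.add_password(None, top_level_url, user, password)
        handlers.append(urllib2.HTTPBasicAuthHandler(passman))
    if ss_cert:                                 # accept self-signed certificates
        ctx = ssl.create_default_context()
        ctx.check_hostname = False
        ctx.verify_mode = ssl.CERT_NONE
        handlers.append(urllib2.HTTPSHandler(context=ctx))
    if proxy and not ss_cert:                   # plain HTTP proxy
        handlers.append(urllib2.ProxyHandler(dict(http=proxy)))
    return urllib2.build_opener(*handlers)

opener = build_opener('http://127.0.0.1:19999/', user='u', password='p')
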
diff --git a/python.d/rabbitmq.chart.py b/python.d/rabbitmq.chart.py
new file mode 100644
index 00000000..15a6a80f
--- /dev/null
+++ b/python.d/rabbitmq.chart.py
@@ -0,0 +1,187 @@
+# -*- coding: utf-8 -*-
+# Description: rabbitmq netdata python.d module
+# Author: l2isbad
+
+from base import UrlService
+from socket import gethostbyname, gaierror
+try:
+ from queue import Queue
+except ImportError:
+ from Queue import Queue
+from threading import Thread
+from collections import namedtuple
+from json import loads
+
+# default module values (can be overridden per job in `config`)
+update_every = 1
+priority = 60000
+retries = 60
+
+METHODS = namedtuple('METHODS', ['get_data_function', 'url', 'stats'])
+
+NODE_STATS = [('fd_used', None),
+ ('mem_used', None),
+ ('sockets_used', None),
+ ('proc_used', None),
+ ('disk_free', None)
+ ]
+OVERVIEW_STATS = [('object_totals.channels', None),
+ ('object_totals.consumers', None),
+ ('object_totals.connections', None),
+ ('object_totals.queues', None),
+ ('object_totals.exchanges', None),
+ ('queue_totals.messages_ready', None),
+ ('queue_totals.messages_unacknowledged', None),
+ ('message_stats.ack', None),
+ ('message_stats.redeliver', None),
+ ('message_stats.deliver', None),
+ ('message_stats.publish', None)
+ ]
+ORDER = ['queued_messages', 'message_rates', 'global_counts',
+ 'file_descriptors', 'socket_descriptors', 'erlang_processes', 'memory', 'disk_space']
+
+CHARTS = {
+ 'file_descriptors': {
+ 'options': [None, 'File Descriptors', 'descriptors', 'overview',
+ 'rabbitmq.file_descriptors', 'line'],
+ 'lines': [
+ ['fd_used', 'used', 'absolute']
+ ]},
+ 'memory': {
+ 'options': [None, 'Memory', 'MB', 'overview',
+ 'rabbitmq.memory', 'line'],
+ 'lines': [
+ ['mem_used', 'used', 'absolute', 1, 1024 << 10]
+ ]},
+ 'disk_space': {
+ 'options': [None, 'Disk Space', 'GB', 'overview',
+ 'rabbitmq.disk_space', 'line'],
+ 'lines': [
+ ['disk_free', 'free', 'absolute', 1, 1024 ** 3]
+ ]},
+ 'socket_descriptors': {
+ 'options': [None, 'Socket Descriptors', 'descriptors', 'overview',
+ 'rabbitmq.sockets', 'line'],
+ 'lines': [
+ ['sockets_used', 'used', 'absolute']
+ ]},
+ 'erlang_processes': {
+ 'options': [None, 'Erlang Processes', 'processes', 'overview',
+ 'rabbitmq.processes', 'line'],
+ 'lines': [
+ ['proc_used', 'used', 'absolute']
+ ]},
+ 'global_counts': {
+ 'options': [None, 'Global Counts', 'counts', 'overview',
+ 'rabbitmq.global_counts', 'line'],
+ 'lines': [
+ ['channels', None, 'absolute'],
+ ['consumers', None, 'absolute'],
+ ['connections', None, 'absolute'],
+ ['queues', None, 'absolute'],
+ ['exchanges', None, 'absolute']
+ ]},
+ 'queued_messages': {
+ 'options': [None, 'Queued Messages', 'messages', 'overview',
+ 'rabbitmq.queued_messages', 'stacked'],
+ 'lines': [
+ ['messages_ready', 'ready', 'absolute'],
+ ['messages_unacknowledged', 'unacknowledged', 'absolute']
+ ]},
+ 'message_rates': {
+ 'options': [None, 'Message Rates', 'messages/s', 'overview',
+ 'rabbitmq.message_rates', 'stacked'],
+ 'lines': [
+ ['ack', None, 'incremental'],
+ ['redeliver', None, 'incremental'],
+ ['deliver', None, 'incremental'],
+ ['publish', None, 'incremental']
+ ]}
+}
+
+
+class Service(UrlService):
+ def __init__(self, configuration=None, name=None):
+ UrlService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.host = self.configuration.get('host', '127.0.0.1')
+ self.port = self.configuration.get('port', 15672)
+ self.scheme = self.configuration.get('scheme', 'http')
+
+ def check(self):
+ # We can't start if <host> or <port> is not specified
+ if not (self.host and self.port):
+ self.error('Host or port is not defined in the module configuration file')
+ return False
+
+ # Hostname -> ip address
+ try:
+ self.host = gethostbyname(self.host)
+ except gaierror as error:
+ self.error(str(error))
+ return False
+
+ # Add handlers (auth, self signed cert accept)
+ url = '%s://%s:%s/api' % (self.scheme, self.host, self.port)
+ self.opener = self._build_opener(url=url)
+ if not self.opener:
+ return False
+ # Add methods
+ api_node = url + '/nodes'
+ api_overview = url + '/overview'
+ self.methods = [METHODS(get_data_function=self._get_overview_stats, url=api_node, stats=NODE_STATS),
+ METHODS(get_data_function=self._get_overview_stats, url=api_overview, stats=OVERVIEW_STATS)]
+
+ result = self._get_data()
+ if not result:
+ self.error('_get_data() returned no data')
+ return False
+ self._data_from_check = result
+ return True
+
+ def _get_data(self):
+ threads = list()
+ queue = Queue()
+ result = dict()
+
+ for method in self.methods:
+ th = Thread(target=method.get_data_function, args=(queue, method.url, method.stats))
+ th.start()
+ threads.append(th)
+
+ for thread in threads:
+ thread.join()
+ result.update(queue.get())
+
+ return result or None
+
+ def _get_overview_stats(self, queue, url, stats):
+ """
+ Format data received from http request
+ :return: dict
+ """
+
+ raw_data = self._get_raw_data(url)
+
+ if not raw_data:
+ return queue.put(dict())
+ data = loads(raw_data)
+ data = data[0] if isinstance(data, list) else data
+
+ to_netdata = fetch_data_(raw_data=data, metrics_list=stats)
+ return queue.put(to_netdata)
+
+
+def fetch_data_(raw_data, metrics_list):
+ to_netdata = dict()
+ for metric, new_name in metrics_list:
+ value = raw_data
+ for key in metric.split('.'):
+ try:
+ value = value[key]
+ except (KeyError, TypeError):
+ break
+ if not isinstance(value, dict):
+ to_netdata[new_name or key] = value
+ return to_netdata
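
A runnable sketch of the thread/queue fan-out in Service._get_data() above, with stub workers standing in for the two HTTP endpoints:

from threading import Thread
try:
    from queue import Queue
except ImportError:
    from Queue import Queue

def worker(queue, url, stats):
    queue.put({url: len(stats)})   # stub result instead of a real request

jobs = [('/api/nodes', ['fd_used']), ('/api/overview', ['ack', 'publish'])]
queue, threads = Queue(), list()
for url, stats in jobs:
    th = Thread(target=worker, args=(queue, url, stats))
    th.start()
    threads.append(th)

result = dict()
for th in threads:
    th.join()
    result.update(queue.get())
print(result)  # {'/api/nodes': 1, '/api/overview': 2}
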
diff --git a/python.d/redis.chart.py b/python.d/redis.chart.py
index 61f4f6d6..4bc1d41f 100644
--- a/python.d/redis.chart.py
+++ b/python.d/redis.chart.py
@@ -100,13 +100,12 @@ class Service(SocketService):
:return: dict
"""
if self.passwd:
- info_request = self.request
self.request = "AUTH " + self.passwd + "\r\n"
raw = self._get_raw_data().strip()
if raw != "+OK":
self.error("invalid password")
return None
- self.request = info_request
+ self.request = "INFO\r\n"
response = self._get_raw_data()
if response is None:
# error has already been logged
@@ -155,7 +154,7 @@ class Service(SocketService):
:return: boolean
"""
length = len(data)
- supposed = data.split('\n')[0][1:]
+ supposed = data.split('\n')[0][1:-1]
offset = len(supposed) + 4 # 1 dollar sign, 1 new line character + 1 ending sequence '\r\n'
if not supposed.isdigit():
return True
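
The one-character fix above matters because splitting a RESP reply on '\n' leaves a trailing '\r' on the bulk-string header; a runnable check:

data = '$11\r\nhello redis\r\n'
supposed = data.split('\n')[0][1:-1]         # strips the leading '$' and trailing '\r' -> '11'
assert supposed.isdigit()
assert not data.split('\n')[0][1:].isdigit() # the old slice kept the '\r'
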
diff --git a/python.d/samba.chart.py b/python.d/samba.chart.py
new file mode 100644
index 00000000..767c9746
--- /dev/null
+++ b/python.d/samba.chart.py
@@ -0,0 +1,124 @@
+# -*- coding: utf-8 -*-
+# Description: samba netdata python.d module
+# Author: Christopher Cox <chris_cox@endlessnow.com>
+#
+# The netdata user needs to be able to sudo the smbstatus program
+# without password:
+# netdata ALL=(ALL) NOPASSWD: /usr/bin/smbstatus -P
+#
+# This makes calls to smbstatus -P
+#
+# This looks at a few counters from the syscall section and several from smb2.
+#
+# The Lesser Ops chart simply displays current counter values; in testing they
+# rarely changed. If you notice a counter there changing frequently, move it
+# into its own chart and make it incremental (find and notify are good
+# examples).
+
+from base import ExecutableService
+import re
+
+# default module values (can be overridden per job in `config`)
+update_every = 5
+priority = 60000
+retries = 60
+
+ORDER = ['syscall_rw', 'smb2_rw', 'smb2_create_close', 'smb2_info', 'smb2_find',
+         'smb2_notify', 'smb2_sm_count']
+
+CHARTS = {
+ 'syscall_rw': {
+ 'lines': [
+ ['syscall_sendfile_bytes', 'sendfile', 'incremental', 1, 1024],
+ ['syscall_recvfile_bytes', 'recvfile', 'incremental', -1, 1024]
+ ],
+ 'options': [None, 'R/Ws', 'kilobytes/s', 'syscall', 'syscall.rw', 'area']
+ },
+ 'smb2_rw': {
+ 'lines': [
+ ['smb2_read_outbytes', 'readout', 'incremental', 1, 1024],
+ ['smb2_write_inbytes', 'writein', 'incremental', -1, 1024],
+ ['smb2_read_inbytes', 'readin', 'incremental', 1, 1024],
+ ['smb2_write_outbytes', 'writeout', 'incremental', -1, 1024]
+ ],
+ 'options': [None, 'R/Ws', 'kilobytes/s', 'smb2', 'smb2.rw', 'area']
+ },
+ 'smb2_create_close': {
+ 'lines': [
+ ['smb2_create_count', 'create', 'incremental', 1, 1],
+ ['smb2_close_count', 'close', 'incremental', -1, 1]
+ ],
+ 'options': [None, 'Create/Close', 'operations/s', 'smb2', 'smb2.create_close', 'line']
+ },
+ 'smb2_info': {
+ 'lines': [
+ ['smb2_getinfo_count', 'getinfo', 'incremental', 1, 1],
+ ['smb2_setinfo_count', 'setinfo', 'incremental', -1, 1]
+ ],
+ 'options': [None, 'Info', 'operations/s', 'smb2', 'smb2.get_set_info', 'line']
+ },
+ 'smb2_find': {
+ 'lines': [
+ ['smb2_find_count', 'find', 'incremental', 1, 1]
+ ],
+ 'options': [None, 'Find', 'operations/s', 'smb2', 'smb2.find', 'line']
+ },
+ 'smb2_notify': {
+ 'lines': [
+ ['smb2_notify_count', 'notify', 'incremental', 1, 1]
+ ],
+ 'options': [None, 'Notify', 'operations/s', 'smb2', 'smb2.notify', 'line']
+ },
+ 'smb2_sm_count': {
+ 'lines': [
+ ['smb2_tcon_count', 'tcon', 'absolute', 1, 1],
+ ['smb2_negprot_count', 'negprot', 'absolute', 1, 1],
+ ['smb2_tdis_count', 'tdis', 'absolute', 1, 1],
+ ['smb2_cancel_count', 'cancel', 'absolute', 1, 1],
+ ['smb2_logoff_count', 'logoff', 'absolute', 1, 1],
+ ['smb2_flush_count', 'flush', 'absolute', 1, 1],
+ ['smb2_lock_count', 'lock', 'absolute', 1, 1],
+ ['smb2_keepalive_count', 'keepalive', 'absolute', 1, 1],
+ ['smb2_break_count', 'break', 'absolute', 1, 1],
+ ['smb2_sessetup_count', 'sessetup', 'absolute', 1, 1]
+ ],
+ 'options': [None, 'Lesser Ops', 'count', 'smb2', 'smb2.sm_counters', 'stacked']
+ }
+ }
+
+
+class Service(ExecutableService):
+ def __init__(self, configuration=None, name=None):
+ ExecutableService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.rgx_smb2 = re.compile(r'(smb2_[^:]+|syscall_.*file_bytes):\s+(\d+)')
+
+ def check(self):
+ sudo_binary, smbstatus_binary = self.find_binary('sudo'), self.find_binary('smbstatus')
+
+ if not (sudo_binary and smbstatus_binary):
+ self.error('Can\'t locate \'sudo\' or \'smbstatus\' binary')
+ return False
+
+ self.command = [sudo_binary, '-v']
+ err = self._get_raw_data(stderr=True)
+ if err:
+ self.error(''.join(err))
+ return False
+
+ self.command = ' '.join([sudo_binary, '-n', smbstatus_binary, '-P'])
+
+ return ExecutableService.check(self)
+
+ def _get_data(self):
+ """
+ Format data received from shell command
+ :return: dict
+ """
+ raw_data = self._get_raw_data()
+ if not raw_data:
+ return None
+
+ parsed = self.rgx_smb2.findall(' '.join(raw_data))
+
+ return dict(parsed) or None
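
A runnable sketch of the counter extraction in _get_data() above, fed hypothetical 'smbstatus -P' output lines:

import re

rgx_smb2 = re.compile(r'(smb2_[^:]+|syscall_.*file_bytes):\s+(\d+)')
raw_data = ['syscall_sendfile_bytes:  1048576\n',   # hypothetical counters
            'smb2_read_count:         42\n']
parsed = rgx_smb2.findall(' '.join(raw_data))
print(dict(parsed))  # {'syscall_sendfile_bytes': '1048576', 'smb2_read_count': '42'}
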
diff --git a/python.d/smartd_log.chart.py b/python.d/smartd_log.chart.py
index e8037237..4039c153 100644
--- a/python.d/smartd_log.chart.py
+++ b/python.d/smartd_log.chart.py
@@ -2,7 +2,7 @@
# Description: smart netdata python.d module
# Author: l2isbad, vorph1
-from re import compile
+from re import compile as r_compile
from os import listdir, access, R_OK
from os.path import isfile, join, getsize, basename, isdir
try:
@@ -101,7 +101,7 @@ NAMED_DISKS = namedtuple('disks', ['name', 'size', 'number'])
class Service(SimpleService):
def __init__(self, configuration=None, name=None):
SimpleService.__init__(self, configuration=configuration, name=name)
- self.regex = compile(r'(\d+);(\d+);(\d+)')
+ self.regex = r_compile(r'(\d+);(\d+);(\d+)')
self.log_path = self.configuration.get('log_path', '/var/log/smartd')
self.raw_values = self.configuration.get('raw_values')
self.attr = self.configuration.get('smart_attributes', [])
@@ -197,18 +197,19 @@ class Service(SimpleService):
result.append(['_'.join([name, attrid]), name[:name.index('.')], 'absolute'])
return result
- # Add additional smart attributes to the ORDER. If something goes wrong we don't care.
+ # Use configured attributes, if present. If something goes wrong we don't care.
+ order = ORDER
try:
- ORDER.extend(list(set(self.attr.split()) & SMART_ATTR.keys() - set(ORDER)))
+ order = [attr for attr in self.attr.split() if attr in SMART_ATTR.keys()] or ORDER
except Exception:
pass
- self.order = [''.join(['attrid', i]) for i in ORDER]
+ self.order = [''.join(['attrid', i]) for i in order]
self.definitions = dict()
units = 'raw' if self.raw_values else 'normalized'
for k, v in dict([(k, v) for k, v in SMART_ATTR.items() if k in ORDER]).items():
self.definitions.update({''.join(['attrid', k]): {
- 'options': [None, v, units, v, 'smartd.attrid' + k, 'line'],
+ 'options': [None, v, units, v.lower(), 'smartd.attrid' + k, 'line'],
'lines': create_lines(k)}})
def find_disks_in_log_path(log_path):
diff --git a/python.d/web_log.chart.py b/python.d/web_log.chart.py
index cbc8cd23..564c9f1d 100644
--- a/python.d/web_log.chart.py
+++ b/python.d/web_log.chart.py
@@ -1,37 +1,51 @@
# -*- coding: utf-8 -*-
# Description: web log netdata python.d module
# Author: l2isbad
-
-from base import LogService
import re
import bisect
from os import access, R_OK
from os.path import getsize
-from collections import namedtuple
+from collections import namedtuple, defaultdict
from copy import deepcopy
+try:
+ from itertools import filterfalse
+except ImportError:
+ from itertools import ifilterfalse as filterfalse
+from base import LogService
+import msg
+
priority = 60000
retries = 60
-ORDER = ['response_statuses', 'response_codes', 'bandwidth', 'response_time', 'requests_per_url', 'http_method',
- 'http_version', 'requests_per_ipproto', 'clients', 'clients_all']
-CHARTS = {
+ORDER_APACHE_CACHE = ['apache_cache']
+
+ORDER_WEB = ['response_statuses', 'response_codes', 'bandwidth', 'response_time', 'response_time_upstream',
+ 'requests_per_url', 'requests_per_user_defined', 'http_method', 'http_version',
+ 'requests_per_ipproto', 'clients', 'clients_all']
+
+ORDER_SQUID = ['squid_response_statuses', 'squid_response_codes', 'squid_detailed_response_codes',
+ 'squid_method', 'squid_mime_type', 'squid_hier_code', 'squid_transport_methods',
+ 'squid_transport_errors', 'squid_code', 'squid_handling_opts', 'squid_object_types',
+ 'squid_cache_events', 'squid_bytes', 'squid_duration', 'squid_clients', 'squid_clients_all']
+
+CHARTS_WEB = {
'response_codes': {
'options': [None, 'Response Codes', 'requests/s', 'responses', 'web_log.response_codes', 'stacked'],
'lines': [
- ['2xx', '2xx', 'incremental'],
- ['5xx', '5xx', 'incremental'],
- ['3xx', '3xx', 'incremental'],
- ['4xx', '4xx', 'incremental'],
- ['1xx', '1xx', 'incremental'],
+ ['2xx', None, 'incremental'],
+ ['5xx', None, 'incremental'],
+ ['3xx', None, 'incremental'],
+ ['4xx', None, 'incremental'],
+ ['1xx', None, 'incremental'],
['0xx', 'other', 'incremental'],
- ['unmatched', 'unmatched', 'incremental']
+ ['unmatched', None, 'incremental']
]},
'bandwidth': {
- 'options': [None, 'Bandwidth', 'KB/s', 'bandwidth', 'web_log.bandwidth', 'area'],
+ 'options': [None, 'Bandwidth', 'kilobits/s', 'bandwidth', 'web_log.bandwidth', 'area'],
'lines': [
- ['resp_length', 'received', 'incremental', 1, 1024],
- ['bytes_sent', 'sent', 'incremental', -1, 1024]
+ ['resp_length', 'received', 'incremental', 8, 1000],
+ ['bytes_sent', 'sent', 'incremental', -8, 1000]
]},
'response_time': {
'options': [None, 'Processing Time', 'milliseconds', 'timings', 'web_log.response_time', 'area'],
@@ -40,6 +54,14 @@ CHARTS = {
['resp_time_max', 'max', 'incremental', 1, 1000],
['resp_time_avg', 'avg', 'incremental', 1, 1000]
]},
+ 'response_time_upstream': {
+ 'options': [None, 'Processing Time Upstream', 'milliseconds', 'timings',
+ 'web_log.response_time_upstream', 'area'],
+ 'lines': [
+ ['resp_time_upstream_min', 'min', 'incremental', 1, 1000],
+ ['resp_time_upstream_max', 'max', 'incremental', 1, 1000],
+ ['resp_time_upstream_avg', 'avg', 'incremental', 1, 1000]
+ ]},
'clients': {
'options': [None, 'Current Poll Unique Client IPs', 'unique ips', 'clients', 'web_log.clients', 'stacked'],
'lines': [
@@ -77,36 +99,160 @@ CHARTS = {
['redirects', 'redirect', 'incremental', 1, 1],
['bad_requests', 'bad', 'incremental', 1, 1],
['other_requests', 'other', 'incremental', 1, 1]
+ ]},
+ 'requests_per_url': {
+ 'options': [None, 'Requests Per Url', 'requests/s', 'urls', 'web_log.requests_per_url',
+ 'stacked'],
+ 'lines': [
+ ['url_pattern_other', 'other', 'incremental', 1, 1]
+ ]},
+ 'requests_per_user_defined': {
+ 'options': [None, 'Requests Per User Defined Pattern', 'requests/s', 'user defined',
+ 'web_log.requests_per_user_defined', 'stacked'],
+ 'lines': [
+ ['user_pattern_other', 'other', 'incremental', 1, 1]
]}
}
-NAMED_URL_PATTERN = namedtuple('URL_PATTERN', ['description', 'pattern'])
+CHARTS_APACHE_CACHE = {
+ 'apache_cache': {
+ 'options': [None, 'Apache Cached Responses', 'percent cached', 'cached', 'web_log.apache_cache_cache',
+ 'stacked'],
+ 'lines': [
+ ["hit", 'cache', "percentage-of-absolute-row"],
+ ["miss", None, "percentage-of-absolute-row"],
+ ["other", None, "percentage-of-absolute-row"]
+ ]}
+}
+
+CHARTS_SQUID = {
+ 'squid_duration': {
+ 'options': [None, 'Elapsed Time The Transaction Busied The Cache',
+ 'milliseconds', 'squid_timings', 'web_log.squid_duration', 'area'],
+ 'lines': [
+ ['duration_min', 'min', 'incremental', 1, 1000],
+ ['duration_max', 'max', 'incremental', 1, 1000],
+ ['duration_avg', 'avg', 'incremental', 1, 1000]
+ ]},
+ 'squid_bytes': {
+ 'options': [None, 'Amount Of Data Delivered To The Clients',
+ 'kilobits/s', 'squid_bandwidth', 'web_log.squid_bytes', 'area'],
+ 'lines': [
+ ['bytes', 'sent', 'incremental', 8, 1000]
+ ]},
+ 'squid_response_statuses': {
+ 'options': [None, 'Response Statuses', 'responses/s', 'squid_responses', 'web_log.squid_response_statuses',
+ 'stacked'],
+ 'lines': [
+ ['successful_requests', 'success', 'incremental', 1, 1],
+ ['server_errors', 'error', 'incremental', 1, 1],
+ ['redirects', 'redirect', 'incremental', 1, 1],
+ ['bad_requests', 'bad', 'incremental', 1, 1],
+ ['other_requests', 'other', 'incremental', 1, 1]
+ ]},
+ 'squid_response_codes': {
+ 'options': [None, 'Response Codes', 'responses/s', 'squid_responses',
+ 'web_log.squid_response_codes', 'stacked'],
+ 'lines': [
+ ['2xx', None, 'incremental'],
+ ['5xx', None, 'incremental'],
+ ['3xx', None, 'incremental'],
+ ['4xx', None, 'incremental'],
+ ['1xx', None, 'incremental'],
+ ['0xx', None, 'incremental'],
+ ['other', None, 'incremental'],
+ ['unmatched', None, 'incremental']
+ ]},
+ 'squid_code': {
+ 'options': [None, 'Responses Per Cache Result Of The Request',
+ 'requests/s', 'squid_squid_cache', 'web_log.squid_code', 'stacked'],
+ 'lines': [
+ ]},
+ 'squid_detailed_response_codes': {
+ 'options': [None, 'Detailed Response Codes',
+ 'responses/s', 'squid_responses', 'web_log.squid_detailed_response_codes', 'stacked'],
+ 'lines': [
+ ]},
+ 'squid_hier_code': {
+ 'options': [None, 'Responses Per Hierarchy Code',
+ 'requests/s', 'squid_hierarchy', 'web_log.squid_hier_code', 'stacked'],
+ 'lines': [
+ ]},
+ 'squid_method': {
+ 'options': [None, 'Requests Per Method',
+ 'requests/s', 'squid_requests', 'web_log.squid_method', 'stacked'],
+ 'lines': [
+ ]},
+ 'squid_mime_type': {
+ 'options': [None, 'Requests Per MIME Type',
+ 'requests/s', 'squid_requests', 'web_log.squid_mime_type', 'stacked'],
+ 'lines': [
+ ]},
+ 'squid_clients': {
+ 'options': [None, 'Current Poll Unique Client IPs', 'unique ips', 'squid_clients',
+ 'web_log.squid_clients', 'stacked'],
+ 'lines': [
+ ['unique_ipv4', 'ipv4', 'incremental'],
+ ['unique_ipv6', 'ipv6', 'incremental']
+ ]},
+ 'squid_clients_all': {
+ 'options': [None, 'All Time Unique Client IPs', 'unique ips', 'squid_clients',
+ 'web_log.squid_clients_all', 'stacked'],
+ 'lines': [
+ ['unique_tot_ipv4', 'ipv4', 'absolute'],
+ ['unique_tot_ipv6', 'ipv6', 'absolute']
+ ]},
+ 'squid_transport_methods': {
+ 'options': [None, 'Transport Methods', 'requests/s', 'squid_squid_transport',
+ 'web_log.squid_transport_methods', 'stacked'],
+ 'lines': [
+ ]},
+ 'squid_transport_errors': {
+ 'options': [None, 'Transport Errors', 'requests/s', 'squid_squid_transport',
+ 'web_log.squid_transport_errors', 'stacked'],
+ 'lines': [
+ ]},
+ 'squid_handling_opts': {
+ 'options': [None, 'Handling Opts', 'requests/s', 'squid_squid_cache',
+ 'web_log.squid_handling_opts', 'stacked'],
+ 'lines': [
+ ]},
+ 'squid_object_types': {
+ 'options': [None, 'Object Types', 'objects/s', 'squid_squid_cache',
+ 'web_log.squid_object_types', 'stacked'],
+ 'lines': [
+ ]},
+ 'squid_cache_events': {
+ 'options': [None, 'Cache Events', 'events/s', 'squid_squid_cache',
+ 'web_log.squid_cache_events', 'stacked'],
+ 'lines': [
+ ]}
+}
+
+NAMED_PATTERN = namedtuple('PATTERN', ['description', 'func'])
DET_RESP_AGGR = ['', '_1xx', '_2xx', '_3xx', '_4xx', '_5xx', '_Other']
+SQUID_CODES = dict(TCP='squid_transport_methods', UDP='squid_transport_methods', NONE='squid_transport_methods',
+ CLIENT='squid_handling_opts', IMS='squid_handling_opts', ASYNC='squid_handling_opts',
+ SWAPFAIL='squid_handling_opts', REFRESH='squid_handling_opts', SHARED='squid_handling_opts',
+ REPLY='squid_handling_opts', NEGATIVE='squid_object_types', STALE='squid_object_types',
+ OFFLINE='squid_object_types', INVALID='squid_object_types', FAIL='squid_object_types',
+ MODIFIED='squid_object_types', UNMODIFIED='squid_object_types', REDIRECT='squid_object_types',
+ HIT='squid_cache_events', MEM='squid_cache_events', MISS='squid_cache_events',
+ DENIED='squid_cache_events', NOFETCH='squid_cache_events', TUNNEL='squid_cache_events',
+ ABORTED='squid_transport_errors', TIMEOUT='squid_transport_errors')
+
class Service(LogService):
def __init__(self, configuration=None, name=None):
"""
:param configuration:
:param name:
- # self._get_data = None # will be assigned in 'check' method.
- # self.order = None # will be assigned in 'create_*_method' method.
- # self.definitions = None # will be assigned in 'create_*_method' method.
"""
LogService.__init__(self, configuration=configuration, name=name)
- # Variables from module configuration file
- self.log_type = self.configuration.get('type', 'web_access')
+ self.log_type = self.configuration.get('type', 'web')
self.log_path = self.configuration.get('path')
- self.url_pattern = self.configuration.get('categories') # dict
- self.custom_log_format = self.configuration.get('custom_log_format') # dict
- # Instance variables
- self.regex = None # will be assigned in 'find_regex' or 'find_regex_custom' method
- self.data = {'bytes_sent': 0, 'resp_length': 0, 'resp_time_min': 0, 'resp_time_max': 0,
- 'resp_time_avg': 0, 'unique_cur_ipv4': 0, 'unique_cur_ipv6': 0, '2xx': 0,
- '5xx': 0, '3xx': 0, '4xx': 0, '1xx': 0, '0xx': 0, 'unmatched': 0, 'req_ipv4': 0,
- 'req_ipv6': 0, 'unique_tot_ipv4': 0, 'unique_tot_ipv6': 0, 'successful_requests': 0,
- 'redirects': 0, 'bad_requests': 0, 'server_errors': 0, 'other_requests': 0, 'GET': 0}
def check(self):
"""
@@ -117,11 +263,18 @@ class Service(LogService):
3. "log_path' must not be empty. We need at least 1 line to find appropriate pattern to parse
4. other checks depends on log "type"
"""
+
+ log_types = dict(web=Web, apache_cache=ApacheCache, squid=Squid)
+
+ if self.log_type not in log_types:
+ self.error('bad log type (%s). Supported types: %s' % (self.log_type, log_types.keys()))
+ return False
+
if not self.log_path:
self.error('log path is not specified')
return False
- if not access(self.log_path, R_OK):
+ if not (self._find_recent_log_file() and access(self.log_path, R_OK)):
self.error('%s is not readable or does not exist' % self.log_path)
return False
@@ -129,357 +282,237 @@ class Service(LogService):
self.error('%s is empty' % self.log_path)
return False
+ self.configuration['update_every'] = self.update_every
+ self.configuration['name'] = self.name
+ self.configuration['override_name'] = self.override_name
+ self.configuration['_dimensions'] = self._dimensions
+ self.configuration['path'] = self.log_path
+
+ cls = log_types[self.log_type]
+ self.Job = cls(configuration=self.configuration)
+ if self.Job.check():
+ self.order = self.Job.order
+ self.definitions = self.Job.definitions
+ self.info('Current log file: %s' % self.log_path)
+ return True
+ return False
+
+ def _get_data(self):
+ return self.Job.get_data(self._get_raw_data())
+
+
+class Mixin:
+ def filter_data(self, raw_data):
+ """
+ :param raw_data: list
+ :return:
+ """
+ if not self.pre_filter:
+ return raw_data
+ filtered = raw_data
+ for elem in self.pre_filter:
+ if elem.description == 'filter_include':
+ filtered = filter(elem.func, filtered)
+ elif elem.description == 'filter_exclude':
+ filtered = filterfalse(elem.func, filtered)
+ return filtered
+
+ def add_new_dimension(self, dimension_id, chart_key, dimension=None,
+ algorithm='incremental', multiplier=1, divisor=1):
+ """
+ :param dimension:
+ :param chart_key:
+ :param dimension_id:
+ :param algorithm:
+ :param multiplier:
+ :param divisor:
+ :return:
+ """
+
+ self.data[dimension_id] = 0
+ # SET method check if dim in _dimensions
+ self.conf['_dimensions'].append(dimension_id)
+ # UPDATE method do SET only if dim in definitions
+ dimension_list = list(map(str, [dimension_id,
+ dimension if dimension else dimension_id,
+ algorithm,
+ multiplier,
+ divisor]))
+ self.definitions[chart_key]['lines'].append(dimension_list)
+ job_name = find_job_name(self.conf['override_name'], self.conf['name'])
+ opts = self.definitions[chart_key]['options']
+ chart = 'CHART %s.%s "" "%s" %s "%s" %s %s 60000 %s\n' % (job_name, chart_key,
+ opts[1], opts[2], opts[3],
+ opts[4], opts[5], self.conf['update_every'])
+ print(chart + "DIMENSION %s\n" % ' '.join(dimension_list))
+
+ def get_last_line(self):
+ """
+ Reads last line from the log file
+ :return: str:
+ """
# Read last line (or first if there is only one line)
- with open(self.log_path, 'rb') as logs:
+ with open(self.conf['path'], 'rb') as logs:
logs.seek(-2, 2)
while logs.read(1) != b'\n':
logs.seek(-2, 1)
if logs.tell() == 0:
break
last_line = logs.readline()
-
try:
- last_line = last_line.decode()
+ return last_line.decode()
except UnicodeDecodeError:
try:
- last_line = last_line.decode(encoding='utf-8')
+ return last_line.decode(encoding='utf-8')
except (TypeError, UnicodeDecodeError) as error:
- self.error(str(error))
+ msg.error('web_log', str(error))
return False
- if self.log_type == 'web_access':
- self.unique_all_time = list() # sorted list of unique IPs
- self.detailed_response_codes = self.configuration.get('detailed_response_codes', True)
- self.detailed_response_aggregate = self.configuration.get('detailed_response_aggregate', True)
- self.all_time = self.configuration.get('all_time', True)
+ @staticmethod
+ def error(*params):
+ msg.error('web_log', ' '.join(map(str, params)))
- # Custom_log_format or predefined log format.
- if self.custom_log_format:
- match_dict, error = self.find_regex_custom(last_line)
- else:
- match_dict, error = self.find_regex(last_line)
+ @staticmethod
+ def info(*params):
+ msg.info('web_log', ' '.join(map(str, params)))
- # "match_dict" is None if there are any problems
- if match_dict is None:
- self.error(str(error))
- return False
- # self.url_pattern check
- if self.url_pattern:
- self.url_pattern = check_req_per_url_pattern('rpu', self.url_pattern)
+class Web(Mixin):
+ def __init__(self, configuration):
+ self.conf = configuration
+ self.pre_filter = check_patterns('filter', self.conf.get('filter'))
+ self.storage = dict()
+ self.data = {'bytes_sent': 0, 'resp_length': 0, 'resp_time_min': 0, 'resp_time_max': 0,
+ 'resp_time_avg': 0, 'resp_time_upstream_min': 0, 'resp_time_upstream_max': 0,
+ 'resp_time_upstream_avg': 0, 'unique_cur_ipv4': 0, 'unique_cur_ipv6': 0, '2xx': 0,
+ '5xx': 0, '3xx': 0, '4xx': 0, '1xx': 0, '0xx': 0, 'unmatched': 0, 'req_ipv4': 0,
+ 'req_ipv6': 0, 'unique_tot_ipv4': 0, 'unique_tot_ipv6': 0, 'successful_requests': 0,
+ 'redirects': 0, 'bad_requests': 0, 'server_errors': 0, 'other_requests': 0, 'GET': 0}
- self.create_access_charts(match_dict) # Create charts
- self._get_data = self._get_access_data # _get_data assignment
- else:
- self.error('Not implemented')
+ def check(self):
+ last_line = self.get_last_line()
+ if not last_line:
return False
-
- # Double check
- if not self.regex:
- self.error('That can not happen, but it happened. "regex" is None')
-
- self.info('Collected data: %s' % list(match_dict.keys()))
- return True
-
- def find_regex_custom(self, last_line):
- """
- :param last_line: str: literally last line from log file
- :return: tuple where:
- [0]: dict or None: match_dict or None
- [1]: str: error description
-
- We are here only if "custom_log_format" is in logs. We need to make sure:
- 1. "custom_log_format" is a dict
- 2. "pattern" in "custom_log_format" and pattern is <str> instance
- 3. if "time_multiplier" is in "custom_log_format" it must be <int> instance
-
- If all parameters is ok we need to make sure:
- 1. Pattern search is success
- 2. Pattern search contains named subgroups (?P<subgroup_name>) (= "match_dict")
-
- If pattern search is success we need to make sure:
- 1. All mandatory keys ['address', 'code', 'bytes_sent', 'method', 'url'] are in "match_dict"
-
- If this is True we need to make sure:
- 1. All mandatory key values from "match_dict" have the correct format
- ("code" is integer, "method" is uppercase word, etc)
-
- If non mandatory keys in "match_dict" we need to make sure:
- 1. All non mandatory key values from match_dict ['resp_length', 'resp_time'] have the correct format
- ("resp_length" is integer or "-", "resp_time" is integer or float)
-
- """
- if not hasattr(self.custom_log_format, 'keys'):
- return find_regex_return(msg='Custom log: "custom_log_format" is not a <dict>')
-
- pattern = self.custom_log_format.get('pattern')
- if not (pattern and isinstance(pattern, str)):
- return find_regex_return(msg='Custom log: "pattern" option is not specified or type is not <str>')
-
- resp_time_func = self.custom_log_format.get('time_multiplier') or 0
-
- if not isinstance(resp_time_func, int):
- return find_regex_return(msg='Custom log: "time_multiplier" is not an integer')
-
- try:
- regex = re.compile(pattern)
- except re.error as error:
- return find_regex_return(msg='Pattern compile error: %s' % str(error))
-
- match = regex.search(last_line)
- if match:
- match_dict = match.groupdict() or None
+ # Custom_log_format or predefined log format.
+ if self.conf.get('custom_log_format'):
+ match_dict, error = self.find_regex_custom(last_line)
else:
- return find_regex_return(msg='Custom log: pattern search FAILED')
+ match_dict, error = self.find_regex(last_line)
+ # "match_dict" is None if there are any problems
if match_dict is None:
- find_regex_return(msg='Custom log: search OK but contains no named subgroups'
- ' (you need to use ?P<subgroup_name>)')
- else:
- mandatory_dict = {'address': r'[\da-f.:]+',
- 'code': r'[1-9]\d{2}',
- 'method': r'[A-Z]+',
- 'bytes_sent': r'\d+|-'}
- optional_dict = {'resp_length': r'\d+',
- 'resp_time': r'[\d.]+',
- 'http_version': r'\d\.\d'}
-
- mandatory_values = set(mandatory_dict) - set(match_dict)
- if mandatory_values:
- return find_regex_return(msg='Custom log: search OK but some mandatory keys (%s) are missing'
- % list(mandatory_values))
- else:
- for key in mandatory_dict:
- if not re.search(mandatory_dict[key], match_dict[key]):
- return find_regex_return(msg='Custom log: can\'t parse "%s": %s'
- % (key, match_dict[key]))
-
- optional_values = set(optional_dict) & set(match_dict)
- for key in optional_values:
- if not re.search(optional_dict[key], match_dict[key]):
- return find_regex_return(msg='Custom log: can\'t parse "%s": %s'
- % (key, match_dict[key]))
-
- dot_in_time = '.' in match_dict.get('resp_time', '')
- if dot_in_time:
- self.resp_time_func = lambda time: time * (resp_time_func or 1000000)
- else:
- self.resp_time_func = lambda time: time * (resp_time_func or 1)
-
- self.regex = regex
- return find_regex_return(match_dict=match_dict)
-
- def find_regex(self, last_line):
- """
- :param last_line: str: literally last line from log file
- :return: tuple where:
- [0]: dict or None: match_dict or None
- [1]: str: error description
- We need to find appropriate pattern for current log file
- All logic is do a regex search through the string for all predefined patterns
- until we find something or fail.
- """
- # REGEX: 1.IPv4 address 2.HTTP method 3. URL 4. Response code
- # 5. Bytes sent 6. Response length 7. Response process time
- acs_default = re.compile(r'(?P<address>[\da-f.:]+)'
- r' -.*?"(?P<method>[A-Z]+)'
- r' (?P<url>[^ ]+)'
- r' [A-Z]+/(?P<http_version>\d\.\d)"'
- r' (?P<code>[1-9]\d{2})'
- r' (?P<bytes_sent>\d+|-)')
-
- acs_apache_ext_insert = re.compile(r'(?P<address>[\da-f.:]+)'
- r' -.*?"(?P<method>[A-Z]+)'
- r' (?P<url>[^ ]+)'
- r' [A-Z]+/(?P<http_version>\d\.\d)"'
- r' (?P<code>[1-9]\d{2})'
- r' (?P<bytes_sent>\d+|-)'
- r' (?P<resp_length>\d+)'
- r' (?P<resp_time>\d+) ')
-
- acs_apache_ext_append = re.compile(r'(?P<address>[\da-f.:]+)'
- r' -.*?"(?P<method>[A-Z]+)'
- r' (?P<url>[^ ]+)'
- r' [A-Z]+/(?P<http_version>\d\.\d)"'
- r' (?P<code>[1-9]\d{2})'
- r' (?P<bytes_sent>\d+|-)'
- r' .*?'
- r' (?P<resp_length>\d+)'
- r' (?P<resp_time>\d+)'
- r'(?: |$)')
-
- acs_nginx_ext_insert = re.compile(r'(?P<address>[\da-f.:]+)'
- r' -.*?"(?P<method>[A-Z]+)'
- r' (?P<url>[^ ]+)'
- r' [A-Z]+/(?P<http_version>\d\.\d)"'
- r' (?P<code>[1-9]\d{2})'
- r' (?P<bytes_sent>\d+)'
- r' (?P<resp_length>\d+)'
- r' (?P<resp_time>\d+\.\d+) ')
-
- acs_nginx_ext_append = re.compile(r'(?P<address>[\da-f.:]+)'
- r' -.*?"(?P<method>[A-Z]+)'
- r' (?P<url>[^ ]+)'
- r' [A-Z]+/(?P<http_version>\d\.\d)"'
- r' (?P<code>[1-9]\d{2})'
- r' (?P<bytes_sent>\d+)'
- r' .*?'
- r' (?P<resp_length>\d+)'
- r' (?P<resp_time>\d+\.\d+)')
-
- def func_usec(time):
- return time
-
- def func_sec(time):
- return time * 1000000
-
- r_regex = [acs_apache_ext_insert, acs_apache_ext_append, acs_nginx_ext_insert,
- acs_nginx_ext_append, acs_default]
- r_function = [func_usec, func_usec, func_sec, func_sec, func_usec]
- regex_function = zip(r_regex, r_function)
-
- match_dict = dict()
- for regex, function in regex_function:
- match = regex.search(last_line)
- if match:
- self.regex = regex
- self.resp_time_func = function
- match_dict = match.groupdict()
- break
+ self.error(str(error))
+ return False
+ self.storage['unique_all_time'] = list()
+ self.storage['url_pattern'] = check_patterns('url_pattern', self.conf.get('categories'))
+ self.storage['user_pattern'] = check_patterns('user_pattern', self.conf.get('user_defined'))
- return find_regex_return(match_dict=match_dict or None,
- msg='Unknown log format. You need to use "custom_log_format" feature.')
+ self.create_web_charts(match_dict) # Create charts
+ self.info('Collected data: %s' % list(match_dict.keys()))
+ return True
- def create_access_charts(self, match_dict):
+ def create_web_charts(self, match_dict):
"""
:param match_dict: dict: regex.search.groupdict(). Ex. {'address': '127.0.0.1', 'code': '200', 'method': 'GET'}
:return:
- Create additional charts depending on the 'match_dict' keys and configuration file options
- 1. 'time_response' chart is removed if there is no 'resp_time' in match_dict.
- 2. Other stuff is just remove/add chart depending on yes/no in conf
+ Create/remove additional charts depending on the 'match_dict' keys and configuration file options
"""
+ self.order = ORDER_WEB[:]
+ self.definitions = deepcopy(CHARTS_WEB)
- def find_job_name(override_name, name):
- """
- :param override_name: str: 'name' var from configuration file
- :param name: str: 'job_name' from configuration file
- :return: str: new job name
- We need this for dynamic charts. Actually same logic as in python.d.plugin.
- """
- add_to_name = override_name or name
- if add_to_name:
- return '_'.join(['web_log', re.sub('\s+', '_', add_to_name)])
- else:
- return 'web_log'
-
- self.order = ORDER[:]
- self.definitions = deepcopy(CHARTS)
-
- job_name = find_job_name(self.override_name, self.name)
-
- self.http_method_chart = 'CHART %s.http_method' \
- ' "" "Requests Per HTTP Method" requests/s "http methods"' \
- ' web_log.http_method stacked 11 %s\n' \
- 'DIMENSION GET GET incremental\n' % (job_name, self.update_every)
- self.http_version_chart = 'CHART %s.http_version' \
- ' "" "Requests Per HTTP Version" requests/s "http versions"' \
- ' web_log.http_version stacked 12 %s\n' % (job_name, self.update_every)
-
- # Remove 'request_time' chart from ORDER if resp_time not in match_dict
if 'resp_time' not in match_dict:
self.order.remove('response_time')
- # Remove 'clients_all' chart from ORDER if specified in the configuration
- if not self.all_time:
+ if 'resp_time_upstream' not in match_dict:
+ self.order.remove('response_time_upstream')
+
+ if not self.conf.get('all_time', True):
self.order.remove('clients_all')
+
# Add 'detailed_response_codes' chart if specified in the configuration
- if self.detailed_response_codes:
- self.detailed_chart = list()
- for prio, add_to_dim in enumerate(DET_RESP_AGGR):
- self.detailed_chart.append('CHART %s.detailed_response_codes%s ""'
- ' "Detailed Response Codes %s" requests/s responses'
- ' web_log.detailed_response_codes%s stacked %s %s\n'
- % (job_name, add_to_dim, add_to_dim[1:], add_to_dim,
- str(prio), self.update_every))
-
- codes = DET_RESP_AGGR[:1] if self.detailed_response_aggregate else DET_RESP_AGGR[1:]
+ if self.conf.get('detailed_response_codes', True):
+ codes = DET_RESP_AGGR[:1] if self.conf.get('detailed_response_aggregate', True) else DET_RESP_AGGR[1:]
for code in codes:
self.order.append('detailed_response_codes%s' % code)
- self.definitions['detailed_response_codes%s' % code] = {'options':
- [None,
- 'Detailed Response Codes %s' % code[1:],
- 'requests/s',
- 'responses',
- 'web_log.detailed_response_codes%s' % code,
- 'stacked'],
- 'lines': []}
+ self.definitions['detailed_response_codes%s' % code] \
+ = {'options': [None, 'Detailed Response Codes %s' % code[1:], 'requests/s', 'responses',
+ 'web_log.detailed_response_codes%s' % code, 'stacked'],
+ 'lines': []}
# Add 'requests_per_url' chart if specified in the configuration
- if self.url_pattern:
- self.definitions['requests_per_url'] = {'options': [None, 'Requests Per Url', 'requests/s',
- 'urls', 'web_log.requests_per_url', 'stacked'],
- 'lines': [['rpu_other', 'other', 'incremental']]}
- for elem in self.url_pattern:
- self.definitions['requests_per_url']['lines'].append([elem.description, elem.description[4:],
+ if self.storage['url_pattern']:
+ for elem in self.storage['url_pattern']:
+ self.definitions['requests_per_url']['lines'].append([elem.description,
+ elem.description[12:],
'incremental'])
- self.data.update({elem.description: 0})
- self.data.update({'rpu_other': 0})
+ self.data[elem.description] = 0
+ self.data['url_pattern_other'] = 0
else:
self.order.remove('requests_per_url')
- def add_new_dimension(self, dimension, line_list, chart_string, key):
- """
- :param dimension: str: response status code. Ex.: '202', '499'
- :param line_list: list: Ex.: ['202', '202', 'incremental']
- :param chart_string: Current string we need to pass to netdata to rebuild the chart
- :param key: str: CHARTS dict key (chart name). Ex.: 'response_time'
- :return: str: new chart string = previous + new dimensions
- """
- self.data.update({dimension: 0})
- # SET method check if dim in _dimensions
- self._dimensions.append(dimension)
- # UPDATE method do SET only if dim in definitions
- self.definitions[key]['lines'].append(line_list)
- chart = chart_string
- chart += "%s %s\n" % ('DIMENSION', ' '.join(line_list))
- print(chart)
- return chart
+ # Add 'requests_per_user_defined' chart if specified in the configuration
+ if self.storage['user_pattern'] and 'user_defined' in match_dict:
+ for elem in self.storage['user_pattern']:
+ self.definitions['requests_per_user_defined']['lines'].append([elem.description,
+ elem.description[13:],
+ 'incremental'])
+ self.data[elem.description] = 0
+ self.data['user_pattern_other'] = 0
+ else:
+ self.order.remove('requests_per_user_defined')
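The slicing of DET_RESP_AGGR above is easier to read against the constant's assumed shape (the real list is defined near the top of the module; the suffixes below are an illustration): index 0 drives the single aggregated chart, the remaining entries the per-family charts.

```python
# Assumed shape of DET_RESP_AGGR -- index 0 is the aggregated chart,
# indices 1..6 the per-family charts ('_other' collects 6xx and above).
DET_RESP_AGGR = ['', '_1xx', '_2xx', '_3xx', '_4xx', '_5xx', '_other']

aggregate = False
codes = DET_RESP_AGGR[:1] if aggregate else DET_RESP_AGGR[1:]
print(['detailed_response_codes%s' % c for c in codes])
# ['detailed_response_codes_1xx', ..., 'detailed_response_codes_other']
```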
- def _get_access_data(self):
+ def get_data(self, raw_data=None):
"""
- Parse new log lines
+ Parses new log lines
:return: dict OR None
None if raw data is None (the _get_raw_data method failed).
In all other cases - a dict.
"""
- raw = self._get_raw_data()
- if raw is None:
- return None
+ if not raw_data:
+ return None if raw_data is None else self.data
+
+ filtered_data = self.filter_data(raw_data=raw_data)
+
+ unique_current = set()
+ timings = defaultdict(lambda: dict(minimum=None, maximum=0, summary=0, count=0))
- request_time, unique_current = list(), list()
- request_counter = {'count': 0, 'sum': 0}
ip_address_counter = {'unique_cur_ip': 0}
- for line in raw:
- match = self.regex.search(line)
+ for line in filtered_data:
+ match = self.storage['regex'].search(line)
if match:
match_dict = match.groupdict()
try:
- code = ''.join([match_dict['code'][0], 'xx'])
+ code = match_dict['code'][0] + 'xx'
self.data[code] += 1
except KeyError:
self.data['0xx'] += 1
# detailed response code
- if self.detailed_response_codes:
- self._get_data_detailed_response_codes(match_dict['code'])
+ if self.conf.get('detailed_response_codes', True):
+ self.get_data_per_response_codes_detailed(code=match_dict['code'])
# response statuses
- self._get_data_statuses(match_dict['code'])
+ self.get_data_per_statuses(code=match_dict['code'])
# requests per url
- if self.url_pattern:
- self._get_data_per_url(match_dict['url'])
+ if self.storage['url_pattern']:
+ self.get_data_per_pattern(row=match_dict['url'],
+ other='url_pattern_other',
+ pattern=self.storage['url_pattern'])
+ # requests per user defined pattern
+ if self.storage['user_pattern'] and 'user_defined' in match_dict:
+ self.get_data_per_pattern(row=match_dict['user_defined'],
+ other='user_pattern_other',
+ pattern=self.storage['user_pattern'])
# requests per http method
- self._get_data_http_method(match_dict['method'])
+ if match_dict['method'] not in self.data:
+ self.add_new_dimension(dimension_id=match_dict['method'],
+ chart_key='http_method')
+ self.data[match_dict['method']] += 1
# requests per http version
if 'http_version' in match_dict:
- self._get_data_http_version(match_dict['http_version'])
+ dim_id = match_dict['http_version'].replace('.', '_')
+ if dim_id not in self.data:
+ self.add_new_dimension(dimension_id=dim_id,
+ chart_key='http_version',
+ dimension=match_dict['http_version'])
+ self.data[dim_id] += 1
# bandwidth sent
bytes_sent = match_dict['bytes_sent'] if '-' not in match_dict['bytes_sent'] else 0
self.data['bytes_sent'] += int(bytes_sent)
@@ -487,92 +520,245 @@ class Service(LogService):
if 'resp_length' in match_dict:
self.data['resp_length'] += int(match_dict['resp_length'])
if 'resp_time' in match_dict:
- resp_time = self.resp_time_func(float(match_dict['resp_time']))
- bisect.insort_left(request_time, resp_time)
- request_counter['count'] += 1
- request_counter['sum'] += resp_time
+ get_timings(timings=timings['resp_time'],
+ time=self.storage['func_resp_time'](float(match_dict['resp_time'])))
+ if 'resp_time_upstream' in match_dict and match_dict['resp_time_upstream'] != '-':
+ get_timings(timings=timings['resp_time_upstream'],
+ time=self.storage['func_resp_time'](float(match_dict['resp_time_upstream'])))
# requests per ip proto
proto = 'ipv4' if '.' in match_dict['address'] else 'ipv6'
self.data['req_' + proto] += 1
# unique clients ips
- if address_not_in_pool(self.unique_all_time, match_dict['address'],
- self.data['unique_tot_ipv4'] + self.data['unique_tot_ipv6']):
+ if address_not_in_pool(pool=self.storage['unique_all_time'],
+ address=match_dict['address'],
+ pool_size=self.data['unique_tot_ipv4'] + self.data['unique_tot_ipv6']):
self.data['unique_tot_' + proto] += 1
- if address_not_in_pool(unique_current, match_dict['address'], ip_address_counter['unique_cur_ip']):
+ if match_dict['address'] not in unique_current:
self.data['unique_cur_' + proto] += 1
- ip_address_counter['unique_cur_ip'] += 1
+ unique_current.add(match_dict['address'])
else:
self.data['unmatched'] += 1
# timings
- if request_time:
- self.data['resp_time_min'] += int(request_time[0])
- self.data['resp_time_avg'] += int(round(float(request_counter['sum']) / request_counter['count']))
- self.data['resp_time_max'] += int(request_time[-1])
+ for elem in timings:
+ self.data[elem + '_min'] += timings[elem]['minimum']
+ self.data[elem + '_avg'] += timings[elem]['summary'] / timings[elem]['count']
+ self.data[elem + '_max'] += timings[elem]['maximum']
return self.data
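The 'Nxx' bucketing inside the loop is compact enough to miss: the first digit of the status code picks the family, and a KeyError (unknown family, or no 'code' group at all) falls back to '0xx'. A standalone sketch, not the plugin's actual method:

```python
def bucket_code(match_dict, data):
    # '404' -> '4xx'; unknown families and a missing 'code' fall into '0xx'
    try:
        data[match_dict['code'][0] + 'xx'] += 1
    except KeyError:
        data['0xx'] += 1

data = {'0xx': 0, '1xx': 0, '2xx': 0, '3xx': 0, '4xx': 0, '5xx': 0}
bucket_code({'code': '404'}, data)   # data['4xx'] == 1
bucket_code({'code': '999'}, data)   # '9xx' not preset -> data['0xx'] == 1
```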
- def _get_data_detailed_response_codes(self, code):
+ def find_regex(self, last_line):
"""
- :param code: str: CODE from parsed line. Ex.: '202, '499'
- :return:
- Calls add_new_dimension method If the value is found for the first time
+ :param last_line: str: the literal last line of the log file
+ :return: tuple where:
+ [0]: dict or None: match_dict or None
+ [1]: str: error description
+ We need to find an appropriate pattern for the current log file.
+ The logic is to run a regex search through the string over all predefined patterns
+ until one of them matches or we fail.
"""
- if code not in self.data:
- if self.detailed_response_aggregate:
- chart_string_copy = self.detailed_chart[0]
- self.detailed_chart[0] = self.add_new_dimension(code, [code, code, 'incremental'],
- chart_string_copy, 'detailed_response_codes')
- else:
- code_index = int(code[0]) if int(code[0]) < 6 else 6
- chart_string_copy = self.detailed_chart[code_index]
- chart_name = 'detailed_response_codes' + DET_RESP_AGGR[code_index]
- self.detailed_chart[code_index] = self.add_new_dimension(code, [code, code, 'incremental'],
- chart_string_copy, chart_name)
- self.data[code] += 1
+ # REGEX: 1.IPv4 address 2.HTTP method 3. URL 4. Response code
+ # 5. Bytes sent 6. Response length 7. Response process time
+ default = re.compile(r'(?P<address>[\da-f.:]+)'
+ r' -.*?"(?P<method>[A-Z]+)'
+ r' (?P<url>[^ ]+)'
+ r' [A-Z]+/(?P<http_version>\d\.\d)"'
+ r' (?P<code>[1-9]\d{2})'
+ r' (?P<bytes_sent>\d+|-)')
+
+ apache_ext_insert = re.compile(r'(?P<address>[\da-f.:]+)'
+ r' -.*?"(?P<method>[A-Z]+)'
+ r' (?P<url>[^ ]+)'
+ r' [A-Z]+/(?P<http_version>\d\.\d)"'
+ r' (?P<code>[1-9]\d{2})'
+ r' (?P<bytes_sent>\d+|-)'
+ r' (?P<resp_length>\d+)'
+ r' (?P<resp_time>\d+) ')
+
+ apache_ext_append = re.compile(r'(?P<address>[\da-f.:]+)'
+ r' -.*?"(?P<method>[A-Z]+)'
+ r' (?P<url>[^ ]+)'
+ r' [A-Z]+/(?P<http_version>\d\.\d)"'
+ r' (?P<code>[1-9]\d{2})'
+ r' (?P<bytes_sent>\d+|-)'
+ r' .*?'
+ r' (?P<resp_length>\d+)'
+ r' (?P<resp_time>\d+)'
+ r'(?: |$)')
+
+ nginx_ext_insert = re.compile(r'(?P<address>[\da-f.:]+)'
+ r' -.*?"(?P<method>[A-Z]+)'
+ r' (?P<url>[^ ]+)'
+ r' [A-Z]+/(?P<http_version>\d\.\d)"'
+ r' (?P<code>[1-9]\d{2})'
+ r' (?P<bytes_sent>\d+)'
+ r' (?P<resp_length>\d+)'
+ r' (?P<resp_time>\d+\.\d+) ')
+
+ nginx_ext2_insert = re.compile(r'(?P<address>[\da-f.:]+)'
+ r' -.*?"(?P<method>[A-Z]+)'
+ r' (?P<url>[^ ]+)'
+ r' [A-Z]+/(?P<http_version>\d\.\d)"'
+ r' (?P<code>[1-9]\d{2})'
+ r' (?P<bytes_sent>\d+)'
+ r' (?P<resp_length>\d+)'
+ r' (?P<resp_time>\d+\.\d+)'
+ r' (?P<resp_time_upstream>[\d.-]+) ')
+
+ nginx_ext_append = re.compile(r'(?P<address>[\da-f.:]+)'
+ r' -.*?"(?P<method>[A-Z]+)'
+ r' (?P<url>[^ ]+)'
+ r' [A-Z]+/(?P<http_version>\d\.\d)"'
+ r' (?P<code>[1-9]\d{2})'
+ r' (?P<bytes_sent>\d+)'
+ r' .*?'
+ r' (?P<resp_length>\d+)'
+ r' (?P<resp_time>\d+\.\d+)')
+
+ def func_usec(time):
+ return time
+
+ def func_sec(time):
+ return time * 1000000
+
+ r_regex = [apache_ext_insert, apache_ext_append,
+ nginx_ext2_insert, nginx_ext_insert, nginx_ext_append,
+ default]
+ r_function = [func_usec, func_usec, func_sec, func_sec, func_sec, func_usec]
+ regex_function = zip(r_regex, r_function)
+
+ match_dict = dict()
+ for regex, func in regex_function:
+ match = regex.search(last_line)
+ if match:
+ self.storage['regex'] = regex
+ self.storage['func_resp_time'] = func
+ match_dict = match.groupdict()
+ break
+
+ return find_regex_return(match_dict=match_dict or None,
+ msg='Unknown log format. You need to use "custom_log_format" feature.')
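To make the pattern selection concrete, here is a hypothetical nginx "extended" access-log line run against nginx_ext_insert from the list above. Note the pairing with func_sec in r_function: nginx logs $request_time in seconds, so the plugin multiplies by 1,000,000 to normalize to microseconds.

```python
import re

# Same pattern as nginx_ext_insert above; the log line is invented.
nginx_ext_insert = re.compile(r'(?P<address>[\da-f.:]+)'
                              r' -.*?"(?P<method>[A-Z]+)'
                              r' (?P<url>[^ ]+)'
                              r' [A-Z]+/(?P<http_version>\d\.\d)"'
                              r' (?P<code>[1-9]\d{2})'
                              r' (?P<bytes_sent>\d+)'
                              r' (?P<resp_length>\d+)'
                              r' (?P<resp_time>\d+\.\d+) ')

line = '203.0.113.7 - - [10/May/2017:10:00:00 +0000] "GET /index.html HTTP/1.1" 200 512 512 0.005 '
print(nginx_ext_insert.search(line).groupdict()['resp_time'])  # '0.005' (seconds)
```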
- def _get_data_http_method(self, method):
+ def find_regex_custom(self, last_line):
"""
- :param method: str: METHOD from parsed line. Ex.: 'GET', 'POST'
- :return:
- Calls add_new_dimension method If the value is found for the first time
+ :param last_line: str: the literal last line of the log file
+ :return: tuple where:
+ [0]: dict or None: match_dict or None
+ [1]: str: error description
+
+ We are here only if "custom_log_format" is present in the job configuration. We need to make sure:
+ 1. "custom_log_format" is a dict
+ 2. "pattern" is in "custom_log_format" and is a <str> instance
+ 3. if "time_multiplier" is in "custom_log_format", it must be an <int> instance
+
+ If all parameters are OK, we need to make sure:
+ 1. The pattern search succeeds
+ 2. The pattern search contains named subgroups (?P<subgroup_name>) (= "match_dict")
+
+ If the pattern search succeeds, we need to make sure:
+ 1. All mandatory keys ['address', 'code', 'bytes_sent', 'method', 'url'] are in "match_dict"
+
+ If this is True, we need to make sure:
+ 1. All mandatory key values from "match_dict" have the correct format
+ ("code" is an integer, "method" is an uppercase word, etc.)
+
+ If non-mandatory keys are present in "match_dict", we need to make sure:
+ 1. All non-mandatory key values from match_dict ['resp_length', 'resp_time'] have the correct format
+ ("resp_length" is an integer or "-", "resp_time" is an integer or float)
+
"""
- if method not in self.data:
- chart_string_copy = self.http_method_chart
- self.http_method_chart = self.add_new_dimension(method, [method, method, 'incremental'],
- chart_string_copy, 'http_method')
- self.data[method] += 1
+ if not hasattr(self.conf.get('custom_log_format'), 'keys'):
+ return find_regex_return(msg='Custom log: "custom_log_format" is not a <dict>')
+
+ pattern = self.conf.get('custom_log_format', dict()).get('pattern')
+ if not (pattern and isinstance(pattern, str)):
+ return find_regex_return(msg='Custom log: "pattern" option is not specified or type is not <str>')
+
+ resp_time_func = self.conf.get('custom_log_format', dict()).get('time_multiplier') or 0
+
+ if not isinstance(resp_time_func, int):
+ return find_regex_return(msg='Custom log: "time_multiplier" is not an integer')
+
+ try:
+ regex = re.compile(pattern)
+ except re.error as error:
+ return find_regex_return(msg='Pattern compile error: %s' % str(error))
+ match = regex.search(last_line)
+ if not match:
+ return find_regex_return(msg='Custom log: pattern search FAILED')
- def _get_data_http_version(self, http_version):
+ match_dict = match.groupdict() or None
+ if match_dict is None:
+ return find_regex_return(msg='Custom log: search OK but contains no named subgroups'
+ ' (you need to use ?P<subgroup_name>)')
+ mandatory_dict = {'address': r'[\da-f.:]+',
+ 'code': r'[1-9]\d{2}',
+ 'method': r'[A-Z]+',
+ 'bytes_sent': r'\d+|-'}
+ optional_dict = {'resp_length': r'\d+',
+ 'resp_time': r'[\d.]+',
+ 'resp_time_upstream': r'[\d.-]+',
+ 'http_version': r'\d\.\d'}
+
+ mandatory_values = set(mandatory_dict) - set(match_dict)
+ if mandatory_values:
+ return find_regex_return(msg='Custom log: search OK but some mandatory keys (%s) are missing'
+ % list(mandatory_values))
+ for key in mandatory_dict:
+ if not re.search(mandatory_dict[key], match_dict[key]):
+ return find_regex_return(msg='Custom log: can\'t parse "%s": %s'
+ % (key, match_dict[key]))
+
+ optional_values = set(optional_dict) & set(match_dict)
+ for key in optional_values:
+ if not re.search(optional_dict[key], match_dict[key]):
+ return find_regex_return(msg='Custom log: can\'t parse "%s": %s'
+ % (key, match_dict[key]))
+
+ dot_in_time = '.' in match_dict.get('resp_time', '')
+ if dot_in_time:
+ self.storage['func_resp_time'] = lambda time: time * (resp_time_func or 1000000)
+ else:
+ self.storage['func_resp_time'] = lambda time: time * (resp_time_func or 1)
+
+ self.storage['regex'] = regex
+ return find_regex_return(match_dict=match_dict)
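For reference, a hypothetical "custom_log_format" job option that passes every check above — the pattern compiles, defines the mandatory named groups, and uses an integer "time_multiplier":

```python
# Hypothetical custom_log_format entry; the group names are the ones the
# plugin requires, the pattern itself is only an illustration.
custom_log_format = {
    'pattern': r'(?P<address>[\da-f.:]+) .*? "(?P<method>[A-Z]+) (?P<url>[^ ]+)'
               r' [A-Z]+/\d\.\d" (?P<code>[1-9]\d{2}) (?P<bytes_sent>\d+|-)'
               r' (?P<resp_time>\d+)',
    'time_multiplier': 1,  # resp_time has no dot, so it is multiplied by 1
}
```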
+
+ def get_data_per_response_codes_detailed(self, code):
"""
- :param http_version: str: METHOD from parsed line. Ex.: '1.1', '1.0'
+ :param code: str: CODE from parsed line. Ex.: '202', '499'
:return:
Calls the add_new_dimension method if the value is seen for the first time
"""
- http_version_dim_id = http_version.replace('.', '_')
- if http_version_dim_id not in self.data:
- chart_string_copy = self.http_version_chart
- self.http_version_chart = self.add_new_dimension(http_version_dim_id,
- [http_version_dim_id, http_version, 'incremental'],
- chart_string_copy, 'http_version')
- self.data[http_version_dim_id] += 1
-
- def _get_data_per_url(self, url):
+ if code not in self.data:
+ if self.conf.get('detailed_response_aggregate', True):
+ self.add_new_dimension(dimension_id=code,
+ chart_key='detailed_response_codes')
+ else:
+ code_index = int(code[0]) if int(code[0]) < 6 else 6
+ chart_key = 'detailed_response_codes' + DET_RESP_AGGR[code_index]
+ self.add_new_dimension(dimension_id=code,
+ chart_key=chart_key)
+ self.data[code] += 1
+
+ def get_data_per_pattern(self, row, other, pattern):
"""
- :param url: str: URL from parsed line
+ :param row: str: field value to match (e.g. the request URL)
+ :param other: str: fallback counter key for rows matching no pattern
+ :param pattern: list of named tuples: [(description, func), ...]
:return:
Scan through the string looking for the first location where one of the
user-defined patterns produces a match
"""
match = None
- for elem in self.url_pattern:
- if elem.pattern.search(url):
+ for elem in pattern:
+ if elem.func(row):
self.data[elem.description] += 1
match = True
break
if not match:
- self.data['rpu_other'] += 1
+ self.data[other] += 1
- def _get_data_statuses(self, code):
+ def get_data_per_statuses(self, code):
"""
:param code: str: response status code. Ex.: '202', '499'
:return:
@@ -590,23 +776,209 @@ class Service(LogService):
self.data['other_requests'] += 1
+class ApacheCache:
+ def __init__(self, configuration):
+ self.conf = configuration
+ self.order = ORDER_APACHE_CACHE
+ self.definitions = CHARTS_APACHE_CACHE
+
+ @staticmethod
+ def check():
+ return True
+
+ @staticmethod
+ def get_data(raw_data=None):
+ data = dict(hit=0, miss=0, other=0)
+ if not raw_data:
+ return None if raw_data is None else data
+
+ for line in raw_data:
+ if 'cache hit' in line:
+ data['hit'] += 1
+ elif 'cache miss' in line:
+ data['miss'] += 1
+ else:
+ data['other'] += 1
+ return data
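ApacheCache classification is plain substring matching; two hypothetical cache-log lines exercise the branches:

```python
raw_data = [
    '[Wed May 10 10:00:00 2017] cache hit: URL /index.html',     # invented line
    '[Wed May 10 10:00:01 2017] cache miss: URL /missing.html',  # invented line
]
data = dict(hit=0, miss=0, other=0)
for line in raw_data:
    if 'cache hit' in line:
        data['hit'] += 1
    elif 'cache miss' in line:
        data['miss'] += 1
    else:
        data['other'] += 1
print(data)  # {'hit': 1, 'miss': 1, 'other': 0}
```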
+
+
+class Squid(Mixin):
+ def __init__(self, configuration):
+ self.conf = configuration
+ self.order = ORDER_SQUID
+ self.definitions = CHARTS_SQUID
+ self.pre_filter = check_patterns('filter', self.conf.get('filter'))
+ self.storage = dict()
+ self.data = {'duration_max': 0, 'duration_avg': 0, 'duration_min': 0, 'bytes': 0,
+ '0xx': 0, '1xx': 0, '2xx': 0, '3xx': 0, '4xx': 0, '5xx': 0,
+ 'other': 0, 'unmatched': 0, 'unique_ipv4': 0, 'unique_ipv6': 0,
+ 'unique_tot_ipv4': 0, 'unique_tot_ipv6': 0, 'successful_requests': 0,
+ 'redirects': 0, 'bad_requests': 0, 'server_errors': 0, 'other_requests': 0
+ }
+
+ def check(self):
+ last_line = self.get_last_line()
+ if not last_line:
+ return False
+ self.storage['unique_all_time'] = list()
+ self.storage['regex'] = re.compile(r'[0-9.]+\s+(?P<duration>[0-9]+)'
+ r' (?P<client_address>[\da-f.:]+)'
+ r' (?P<squid_code>[A-Z_]+)/'
+ r'(?P<http_code>[0-9]+)'
+ r' (?P<bytes>[0-9]+)'
+ r' (?P<method>[A-Z_]+)'
+ r' (?P<url>[^ ]+)'
+ r' (?P<user>[^ ]+)'
+ r' (?P<hier_code>[A-Z_]+)/[\da-f.:-]+'
+ r' (?P<mime_type>[^\n]+)')
+
+ match = self.storage['regex'].search(last_line)
+ if not match:
+ self.error('Regex does not match (%s)' % self.storage['regex'].pattern)
+ return False
+ self.storage['dynamic'] = {
+ 'http_code':
+ {'chart': 'squid_detailed_response_codes',
+ 'func_dim_id': None,
+ 'func_dim': None},
+ 'hier_code': {
+ 'chart': 'squid_hier_code',
+ 'func_dim_id': None,
+ 'func_dim': lambda v: v.replace('HIER_', '')},
+ 'method': {
+ 'chart': 'squid_method',
+ 'func_dim_id': None,
+ 'func_dim': None},
+ 'mime_type': {
+ 'chart': 'squid_mime_type',
+ 'func_dim_id': lambda v: v.split('/')[0],
+ 'func_dim': None}}
+ return True
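A hypothetical squid native access.log line against the pattern above; note that the regex expects single literal spaces between fields (only the leading timestamp/duration gap is flexible via \s+):

```python
import re

# Same pattern as in check(); the log line is invented for illustration.
squid_re = re.compile(r'[0-9.]+\s+(?P<duration>[0-9]+)'
                      r' (?P<client_address>[\da-f.:]+)'
                      r' (?P<squid_code>[A-Z_]+)/'
                      r'(?P<http_code>[0-9]+)'
                      r' (?P<bytes>[0-9]+)'
                      r' (?P<method>[A-Z_]+)'
                      r' (?P<url>[^ ]+)'
                      r' (?P<user>[^ ]+)'
                      r' (?P<hier_code>[A-Z_]+)/[\da-f.:-]+'
                      r' (?P<mime_type>[^\n]+)')

line = ('1494928000.123    45 192.0.2.10 TCP_MISS/200 2048 GET'
        ' http://example.com/ - HIER_DIRECT/93.184.216.34 text/html')
match = squid_re.search(line)
print(match.group('squid_code'), match.group('duration'))  # TCP_MISS 45
```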
+
+ def get_data(self, raw_data=None):
+ if not raw_data:
+ return None if raw_data is None else self.data
+
+ filtered_data = self.filter_data(raw_data=raw_data)
+
+ unique_ip = set()
+ timings = defaultdict(lambda: dict(minimum=None, maximum=0, summary=0, count=0))
+
+ for row in filtered_data:
+ match = self.storage['regex'].search(row)
+ if match:
+ match = match.groupdict()
+ if match['duration'] != '0':
+ get_timings(timings=timings['duration'], time=float(match['duration']) * 1000)
+ try:
+ self.data[match['http_code'][0] + 'xx'] += 1
+ except KeyError:
+ self.data['other'] += 1
+
+ self.get_data_per_statuses(match['http_code'])
+
+ self.get_data_per_squid_code(match['squid_code'])
+
+ self.data['bytes'] += int(match['bytes'])
+
+ proto = 'ipv4' if '.' in match['client_address'] else 'ipv6'
+ # unique clients ips
+ if address_not_in_pool(pool=self.storage['unique_all_time'],
+ address=match['client_address'],
+ pool_size=self.data['unique_tot_ipv4'] + self.data['unique_tot_ipv6']):
+ self.data['unique_tot_' + proto] += 1
+
+ if match['client_address'] not in unique_ip:
+ self.data['unique_' + proto] += 1
+ unique_ip.add(match['client_address'])
+
+ for key, values in self.storage['dynamic'].items():
+ if match[key] == '-':
+ continue
+ dimension_id = values['func_dim_id'](match[key]) if values['func_dim_id'] else match[key]
+ if dimension_id not in self.data:
+ dimension = values['func_dim'](match[key]) if values['func_dim'] else dimension_id
+ self.add_new_dimension(dimension_id=dimension_id,
+ chart_key=values['chart'],
+ dimension=dimension)
+ self.data[dimension_id] += 1
+ else:
+ self.data['unmatched'] += 1
+
+ for elem in timings:
+ self.data[elem + '_min'] += timings[elem]['minimum']
+ self.data[elem + '_avg'] += timings[elem]['summary'] / timings[elem]['count']
+ self.data[elem + '_max'] += timings[elem]['maximum']
+ return self.data
+
+ def get_data_per_statuses(self, code):
+ """
+ :param code: str: response status code. Ex.: '202', '499'
+ :return:
+ """
+ code_class = code[0]
+ if code_class == '2' or code == '304' or code_class == '1' or code == '000':
+ self.data['successful_requests'] += 1
+ elif code_class == '3':
+ self.data['redirects'] += 1
+ elif code_class == '4':
+ self.data['bad_requests'] += 1
+ elif code_class == '5' or code_class == '6':
+ self.data['server_errors'] += 1
+ else:
+ self.data['other_requests'] += 1
+
+ def get_data_per_squid_code(self, code):
+ """
+ :param code: str: squid response code. Ex.: 'TCP_MISS', 'TCP_MISS_ABORTED'
+ :return:
+ """
+ if code not in self.data:
+ self.add_new_dimension(dimension_id=code, chart_key='squid_code')
+ self.data[code] += 1
+ if '_' not in code:
+ return
+ for tag in code.split('_'):
+ try:
+ chart_key = SQUID_CODES[tag]
+ except KeyError:
+ continue
+ if tag not in self.data:
+ self.add_new_dimension(dimension_id=tag, chart_key=chart_key)
+ self.data[tag] += 1
+
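Each underscore-separated tag of the squid result code is looked up in SQUID_CODES (defined earlier in the module) so a single code can increment several charts; the mapping entries below are illustrative assumptions only:

```python
# Illustrative subset -- the real mapping is the module-level SQUID_CODES.
SQUID_CODES = {'TCP': 'squid_transport_methods', 'MISS': 'squid_cache_events'}

for tag in 'TCP_MISS_ABORTED'.split('_'):
    chart_key = SQUID_CODES.get(tag)
    if chart_key is None:
        continue  # unknown tags (here: 'ABORTED') are simply skipped
    print(tag, '->', chart_key)
```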
+
+def get_timings(timings, time):
+ """
+ :param timings: dict: accumulator with 'minimum', 'maximum', 'summary' and 'count' keys
+ :param time: float: next time sample to fold into the accumulator
+ :return: None (updates 'timings' in place)
+ """
+ if timings['minimum'] is None:
+ timings['minimum'] = time
+ if time > timings['maximum']:
+ timings['maximum'] = time
+ elif time < timings['minimum']:
+ timings['minimum'] = time
+ timings['summary'] += time
+ timings['count'] += 1
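get_timings() is a running min/max/sum/count accumulator; with the defaultdict used by both get_data() implementations it can be exercised standalone (assuming get_timings as defined above):

```python
from collections import defaultdict

timings = defaultdict(lambda: dict(minimum=None, maximum=0, summary=0, count=0))
for sample in (12.0, 3.0, 7.5):
    get_timings(timings=timings['resp_time'], time=sample)

t = timings['resp_time']
print(t['minimum'], t['maximum'], t['summary'] / t['count'])  # 3.0 12.0 7.5
```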
+
+
def address_not_in_pool(pool, address, pool_size):
"""
:param pool: list of ip addresses
:param address: ip address
:param pool_size: current pool size
- :return: True if address not in pool. False if address in pool.
+ :return: True if address not in pool. False otherwise.
"""
index = bisect.bisect_left(pool, address)
if index < pool_size:
if pool[index] == address:
return False
- else:
- bisect.insort_left(pool, address)
- return True
- else:
bisect.insort_left(pool, address)
return True
+ bisect.insort_left(pool, address)
+ return True
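address_not_in_pool() keeps the pool sorted, so membership tests stay O(log n) via bisect while the same list doubles as the insert target; a short sketch (assuming the function as defined above):

```python
pool = []
for ip in ('10.0.0.2', '10.0.0.1', '10.0.0.2'):
    is_new = address_not_in_pool(pool=pool, address=ip, pool_size=len(pool))
    print(ip, is_new)
# 10.0.0.2 True
# 10.0.0.1 True
# 10.0.0.2 False  (already in the sorted pool)
```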
def find_regex_return(match_dict=None, msg='Generic error message'):
@@ -618,36 +990,53 @@ def find_regex_return(match_dict=None, msg='Generic error message'):
return match_dict, msg
-def check_req_per_url_pattern(string, url_pattern):
+def check_patterns(string, dimension_regex_dict):
"""
:param string: str: prefix used to build dimension descriptions (e.g. 'url_pattern')
- :param url_pattern: dict: ex. {'dim1': 'pattern1>', 'dim2': '<pattern2>'}
+ :param dimension_regex_dict: dict: ex. {'dim1': '<pattern1>', 'dim2': '<pattern2>'}
:return: list of named tuples or None:
We need to make sure all patterns are valid regular expressions
"""
- if not hasattr(url_pattern, 'keys'):
+ if not hasattr(dimension_regex_dict, 'keys'):
return None
result = list()
- def is_valid_pattern(pattern):
+ def valid_pattern(pattern):
"""
:param pattern: str
:return: re.compile(pattern) or False
"""
if not isinstance(pattern, str):
return False
- else:
- try:
- compile_pattern = re.compile(pattern)
- except re.error:
- return False
- else:
- return compile_pattern
+ try:
+ return re.compile(pattern)
+ except re.error:
+ return False
- for dimension, regex in url_pattern.items():
- valid_pattern = is_valid_pattern(regex)
- if isinstance(dimension, str) and valid_pattern:
- result.append(NAMED_URL_PATTERN(description='_'.join([string, dimension]), pattern=valid_pattern))
+ def func_search(pattern):
+ def closure(v):
+ return pattern.search(v)
+
+ return closure
+ for dimension, regex in dimension_regex_dict.items():
+ valid = valid_pattern(regex)
+ if isinstance(dimension, str) and valid:
+ func = func_search(valid)
+ result.append(NAMED_PATTERN(description='_'.join([string, dimension]),
+ func=func))
return result or None
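check_patterns() turns a {dimension: regex} mapping into named tuples whose func is a bound pattern.search; assuming the module-level NAMED_PATTERN namedtuple has 'description' and 'func' fields, usage looks like:

```python
patterns = check_patterns('url_pattern', {'static': r'^/static/', 'api': r'^/api/'})
for p in patterns:
    print(p.description, bool(p.func('/static/logo.png')))
# url_pattern_static True
# url_pattern_api False
# (iteration order may vary on Python < 3.7)
```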
+
+
+def find_job_name(override_name, name):
+ """
+ :param override_name: str: 'name' var from configuration file
+ :param name: str: 'job_name' from configuration file
+ :return: str: new job name
+ We need this for dynamic charts. It is the same logic as in python.d.plugin.
+ """
+ add_to_name = override_name or name
+ if add_to_name:
+ return '_'.join(['web_log', re.sub(r'\s+', '_', add_to_name)])
+ return 'web_log'
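A quick check of the naming rule:

```python
print(find_job_name('nginx access', None))  # web_log_nginx_access
print(find_job_name(None, None))            # web_log
```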
diff --git a/src/Makefile.am b/src/Makefile.am
index 1c1dd338..601d3204 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -44,6 +44,8 @@ netdata_SOURCES = \
appconfig.h \
avl.c \
avl.h \
+ backend_prometheus.c \
+ backend_prometheus.h \
backends.c \
backends.h \
clocks.c \
@@ -119,6 +121,10 @@ netdata_SOURCES = \
simple_pattern.h \
socket.c \
socket.h \
+ statistical.c \
+ statistical.h \
+ statsd.c \
+ statsd.h \
storage_number.c \
storage_number.h \
sys_devices_system_edac_mc.c \
@@ -146,6 +152,13 @@ netdata_SOURCES += \
plugin_freebsd.c \
plugin_freebsd.h \
freebsd_sysctl.c \
+ freebsd_getmntinfo.c \
+ freebsd_getifaddrs.c \
+ freebsd_devstat.c \
+ zfs_common.c \
+ zfs_common.h \
+ freebsd_kstat_zfs.c \
+ freebsd_ipfw.c \
$(NULL)
else
if MACOS
@@ -178,6 +191,9 @@ netdata_SOURCES += \
proc_net_softnet_stat.c \
proc_net_stat_conntrack.c \
proc_net_stat_synproxy.c \
+ zfs_common.c \
+ zfs_common.h \
+ proc_spl_kstat_zfs.c \
proc_stat.c \
proc_sys_kernel_random_entropy_avail.c \
proc_vmstat.c \
diff --git a/src/Makefile.in b/src/Makefile.in
index d7229d18..3ce869b0 100644
--- a/src/Makefile.in
+++ b/src/Makefile.in
@@ -1,9 +1,8 @@
-# Makefile.in generated by automake 1.11.3 from Makefile.am.
+# Makefile.in generated by automake 1.14.1 from Makefile.am.
# @configure_input@
-# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
-# 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 Free Software
-# Foundation, Inc.
+# Copyright (C) 1994-2013 Free Software Foundation, Inc.
+
# This Makefile.in is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
@@ -17,6 +16,51 @@
VPATH = @srcdir@
+am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
pkgdatadir = $(datadir)/@PACKAGE@
pkgincludedir = $(includedir)/@PACKAGE@
pkglibdir = $(libdir)/@PACKAGE@
@@ -43,6 +87,13 @@ plugins_PROGRAMS = $(am__EXEEXT_1) $(am__EXEEXT_2)
@FREEBSD_TRUE@ plugin_freebsd.c \
@FREEBSD_TRUE@ plugin_freebsd.h \
@FREEBSD_TRUE@ freebsd_sysctl.c \
+@FREEBSD_TRUE@ freebsd_getmntinfo.c \
+@FREEBSD_TRUE@ freebsd_getifaddrs.c \
+@FREEBSD_TRUE@ freebsd_devstat.c \
+@FREEBSD_TRUE@ zfs_common.c \
+@FREEBSD_TRUE@ zfs_common.h \
+@FREEBSD_TRUE@ freebsd_kstat_zfs.c \
+@FREEBSD_TRUE@ freebsd_ipfw.c \
@FREEBSD_TRUE@ $(NULL)
@FREEBSD_FALSE@@MACOS_TRUE@am__append_4 = \
@@ -74,6 +125,9 @@ plugins_PROGRAMS = $(am__EXEEXT_1) $(am__EXEEXT_2)
@FREEBSD_FALSE@@MACOS_FALSE@ proc_net_softnet_stat.c \
@FREEBSD_FALSE@@MACOS_FALSE@ proc_net_stat_conntrack.c \
@FREEBSD_FALSE@@MACOS_FALSE@ proc_net_stat_synproxy.c \
+@FREEBSD_FALSE@@MACOS_FALSE@ zfs_common.c \
+@FREEBSD_FALSE@@MACOS_FALSE@ zfs_common.h \
+@FREEBSD_FALSE@@MACOS_FALSE@ proc_spl_kstat_zfs.c \
@FREEBSD_FALSE@@MACOS_FALSE@ proc_stat.c \
@FREEBSD_FALSE@@MACOS_FALSE@ proc_sys_kernel_random_entropy_avail.c \
@FREEBSD_FALSE@@MACOS_FALSE@ proc_vmstat.c \
@@ -86,9 +140,9 @@ plugins_PROGRAMS = $(am__EXEEXT_1) $(am__EXEEXT_2)
@FREEBSD_TRUE@ $(NULL)
subdir = src
-DIST_COMMON = $(dist_cache_DATA) $(dist_log_DATA) \
- $(dist_registry_DATA) $(dist_varlib_DATA) \
- $(srcdir)/Makefile.am $(srcdir)/Makefile.in
+DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \
+ $(top_srcdir)/depcomp $(dist_cache_DATA) $(dist_log_DATA) \
+ $(dist_registry_DATA) $(dist_varlib_DATA)
ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
am__aclocal_m4_deps = $(top_srcdir)/m4/ax_c___atomic.m4 \
$(top_srcdir)/m4/ax_c__generic.m4 $(top_srcdir)/m4/ax_c_lto.m4 \
@@ -127,8 +181,9 @@ freeipmi_plugin_OBJECTS = $(am_freeipmi_plugin_OBJECTS)
freeipmi_plugin_DEPENDENCIES = $(am__DEPENDENCIES_1)
am__netdata_SOURCES_DIST = adaptive_resortable_list.c \
adaptive_resortable_list.h appconfig.c appconfig.h avl.c avl.h \
- backends.c backends.h clocks.c clocks.h common.c common.h \
- daemon.c daemon.h dictionary.c dictionary.h eval.c eval.h \
+ backend_prometheus.c backend_prometheus.h backends.c \
+ backends.h clocks.c clocks.h common.c common.h daemon.c \
+ daemon.h dictionary.c dictionary.h eval.c eval.h \
global_statistics.c global_statistics.h health.c health.h \
health_config.c health_json.c health_log.c inlined.h locks.h \
log.c log.h main.c main.h plugin_checks.c plugin_checks.h \
@@ -144,25 +199,34 @@ am__netdata_SOURCES_DIST = adaptive_resortable_list.c \
rrdcalctemplate.c rrddim.c rrddimvar.c rrdfamily.c rrdhost.c \
rrdpush.c rrdpush.h rrdset.c rrdsetvar.c rrdvar.c \
simple_pattern.c simple_pattern.h socket.c socket.h \
- storage_number.c storage_number.h sys_devices_system_edac_mc.c \
+ statistical.c statistical.h statsd.c statsd.h storage_number.c \
+ storage_number.h sys_devices_system_edac_mc.c \
sys_devices_system_node.c sys_fs_cgroup.c unit_test.c \
unit_test.h url.c url.h web_api_old.c web_api_old.h \
web_api_v1.c web_api_v1.h web_buffer.c web_buffer.h \
web_buffer_svg.c web_buffer_svg.h web_client.c web_client.h \
web_server.c web_server.h plugin_freebsd.c plugin_freebsd.h \
- freebsd_sysctl.c plugin_macos.c plugin_macos.h macos_sysctl.c \
- macos_mach_smi.c macos_fw.c ipc.c ipc.h plugin_proc.c \
- plugin_proc.h plugin_proc_diskspace.c plugin_proc_diskspace.h \
+ freebsd_sysctl.c freebsd_getmntinfo.c freebsd_getifaddrs.c \
+ freebsd_devstat.c zfs_common.c zfs_common.h \
+ freebsd_kstat_zfs.c freebsd_ipfw.c plugin_macos.c \
+ plugin_macos.h macos_sysctl.c macos_mach_smi.c macos_fw.c \
+ ipc.c ipc.h plugin_proc.c plugin_proc.h \
+ plugin_proc_diskspace.c plugin_proc_diskspace.h \
proc_diskstats.c proc_interrupts.c proc_softirqs.c \
proc_loadavg.c proc_meminfo.c proc_net_dev.c \
proc_net_ip_vs_stats.c proc_net_netstat.c proc_net_rpc_nfs.c \
proc_net_rpc_nfsd.c proc_net_snmp.c proc_net_snmp6.c \
proc_net_softnet_stat.c proc_net_stat_conntrack.c \
- proc_net_stat_synproxy.c proc_stat.c \
+ proc_net_stat_synproxy.c proc_spl_kstat_zfs.c proc_stat.c \
proc_sys_kernel_random_entropy_avail.c proc_vmstat.c \
proc_uptime.c sys_kernel_mm_ksm.c
@FREEBSD_TRUE@am__objects_2 = plugin_freebsd.$(OBJEXT) \
-@FREEBSD_TRUE@ freebsd_sysctl.$(OBJEXT)
+@FREEBSD_TRUE@ freebsd_sysctl.$(OBJEXT) \
+@FREEBSD_TRUE@ freebsd_getmntinfo.$(OBJEXT) \
+@FREEBSD_TRUE@ freebsd_getifaddrs.$(OBJEXT) \
+@FREEBSD_TRUE@ freebsd_devstat.$(OBJEXT) zfs_common.$(OBJEXT) \
+@FREEBSD_TRUE@ freebsd_kstat_zfs.$(OBJEXT) \
+@FREEBSD_TRUE@ freebsd_ipfw.$(OBJEXT)
@FREEBSD_FALSE@@MACOS_TRUE@am__objects_3 = plugin_macos.$(OBJEXT) \
@FREEBSD_FALSE@@MACOS_TRUE@ macos_sysctl.$(OBJEXT) \
@FREEBSD_FALSE@@MACOS_TRUE@ macos_mach_smi.$(OBJEXT) \
@@ -185,15 +249,17 @@ am__netdata_SOURCES_DIST = adaptive_resortable_list.c \
@FREEBSD_FALSE@@MACOS_FALSE@ proc_net_softnet_stat.$(OBJEXT) \
@FREEBSD_FALSE@@MACOS_FALSE@ proc_net_stat_conntrack.$(OBJEXT) \
@FREEBSD_FALSE@@MACOS_FALSE@ proc_net_stat_synproxy.$(OBJEXT) \
+@FREEBSD_FALSE@@MACOS_FALSE@ zfs_common.$(OBJEXT) \
+@FREEBSD_FALSE@@MACOS_FALSE@ proc_spl_kstat_zfs.$(OBJEXT) \
@FREEBSD_FALSE@@MACOS_FALSE@ proc_stat.$(OBJEXT) \
@FREEBSD_FALSE@@MACOS_FALSE@ proc_sys_kernel_random_entropy_avail.$(OBJEXT) \
@FREEBSD_FALSE@@MACOS_FALSE@ proc_vmstat.$(OBJEXT) \
@FREEBSD_FALSE@@MACOS_FALSE@ proc_uptime.$(OBJEXT) \
@FREEBSD_FALSE@@MACOS_FALSE@ sys_kernel_mm_ksm.$(OBJEXT)
am_netdata_OBJECTS = adaptive_resortable_list.$(OBJEXT) \
- appconfig.$(OBJEXT) avl.$(OBJEXT) backends.$(OBJEXT) \
- clocks.$(OBJEXT) common.$(OBJEXT) daemon.$(OBJEXT) \
- dictionary.$(OBJEXT) eval.$(OBJEXT) \
+ appconfig.$(OBJEXT) avl.$(OBJEXT) backend_prometheus.$(OBJEXT) \
+ backends.$(OBJEXT) clocks.$(OBJEXT) common.$(OBJEXT) \
+ daemon.$(OBJEXT) dictionary.$(OBJEXT) eval.$(OBJEXT) \
global_statistics.$(OBJEXT) health.$(OBJEXT) \
health_config.$(OBJEXT) health_json.$(OBJEXT) \
health_log.$(OBJEXT) log.$(OBJEXT) main.$(OBJEXT) \
@@ -210,6 +276,7 @@ am_netdata_OBJECTS = adaptive_resortable_list.$(OBJEXT) \
rrddimvar.$(OBJEXT) rrdfamily.$(OBJEXT) rrdhost.$(OBJEXT) \
rrdpush.$(OBJEXT) rrdset.$(OBJEXT) rrdsetvar.$(OBJEXT) \
rrdvar.$(OBJEXT) simple_pattern.$(OBJEXT) socket.$(OBJEXT) \
+ statistical.$(OBJEXT) statsd.$(OBJEXT) \
storage_number.$(OBJEXT) sys_devices_system_edac_mc.$(OBJEXT) \
sys_devices_system_node.$(OBJEXT) sys_fs_cgroup.$(OBJEXT) \
unit_test.$(OBJEXT) url.$(OBJEXT) web_api_old.$(OBJEXT) \
@@ -220,18 +287,43 @@ am_netdata_OBJECTS = adaptive_resortable_list.$(OBJEXT) \
netdata_OBJECTS = $(am_netdata_OBJECTS)
netdata_DEPENDENCIES = $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \
$(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1)
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir)
depcomp = $(SHELL) $(top_srcdir)/depcomp
am__depfiles_maybe = depfiles
am__mv = mv -f
COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \
$(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
+AM_V_CC = $(am__v_CC_@AM_V@)
+am__v_CC_ = $(am__v_CC_@AM_DEFAULT_V@)
+am__v_CC_0 = @echo " CC " $@;
+am__v_CC_1 =
CCLD = $(CC)
LINK = $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@
+AM_V_CCLD = $(am__v_CCLD_@AM_V@)
+am__v_CCLD_ = $(am__v_CCLD_@AM_DEFAULT_V@)
+am__v_CCLD_0 = @echo " CCLD " $@;
+am__v_CCLD_1 =
SOURCES = $(apps_plugin_SOURCES) $(freeipmi_plugin_SOURCES) \
$(netdata_SOURCES)
DIST_SOURCES = $(am__apps_plugin_SOURCES_DIST) \
$(freeipmi_plugin_SOURCES) $(am__netdata_SOURCES_DIST)
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
am__vpath_adj = case $$p in \
$(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
@@ -261,11 +353,29 @@ am__uninstall_files_from_dir = { \
}
DATA = $(dist_cache_DATA) $(dist_log_DATA) $(dist_registry_DATA) \
$(dist_varlib_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+# Read a list of newline-separated strings from the standard input,
+# and print each of them once, without duplicates. Input order is
+# *not* preserved.
+am__uniquify_input = $(AWK) '\
+ BEGIN { nonempty = 0; } \
+ { items[$$0] = 1; nonempty = 1; } \
+ END { if (nonempty) { for (i in items) print i; }; } \
+'
+# Make sure the list of sources is unique. This is necessary because,
+# e.g., the same source file might be shared among _SOURCES variables
+# for different programs/libraries.
+am__define_uniq_tagged_files = \
+ list='$(am__tagged_files)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | $(am__uniquify_input)`
ETAGS = etags
CTAGS = ctags
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
AUTOCONF = @AUTOCONF@
AUTOHEADER = @AUTOHEADER@
AUTOMAKE = @AUTOMAKE@
@@ -437,8 +547,9 @@ dist_registry_DATA = .keep
dist_log_DATA = .keep
netdata_SOURCES = adaptive_resortable_list.c \
adaptive_resortable_list.h appconfig.c appconfig.h avl.c avl.h \
- backends.c backends.h clocks.c clocks.h common.c common.h \
- daemon.c daemon.h dictionary.c dictionary.h eval.c eval.h \
+ backend_prometheus.c backend_prometheus.h backends.c \
+ backends.h clocks.c clocks.h common.c common.h daemon.c \
+ daemon.h dictionary.c dictionary.h eval.c eval.h \
global_statistics.c global_statistics.h health.c health.h \
health_config.c health_json.c health_log.c inlined.h locks.h \
log.c log.h main.c main.h plugin_checks.c plugin_checks.h \
@@ -454,7 +565,8 @@ netdata_SOURCES = adaptive_resortable_list.c \
rrdcalctemplate.c rrddim.c rrddimvar.c rrdfamily.c rrdhost.c \
rrdpush.c rrdpush.h rrdset.c rrdsetvar.c rrdvar.c \
simple_pattern.c simple_pattern.h socket.c socket.h \
- storage_number.c storage_number.h sys_devices_system_edac_mc.c \
+ statistical.c statistical.h statsd.c statsd.h storage_number.c \
+ storage_number.h sys_devices_system_edac_mc.c \
sys_devices_system_node.c sys_fs_cgroup.c unit_test.c \
unit_test.h url.c url.h web_api_old.c web_api_old.h \
web_api_v1.c web_api_v1.h web_buffer.c web_buffer.h \
@@ -525,14 +637,18 @@ $(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
$(am__aclocal_m4_deps):
install-pluginsPROGRAMS: $(plugins_PROGRAMS)
@$(NORMAL_INSTALL)
- test -z "$(pluginsdir)" || $(MKDIR_P) "$(DESTDIR)$(pluginsdir)"
@list='$(plugins_PROGRAMS)'; test -n "$(pluginsdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(pluginsdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(pluginsdir)" || exit 1; \
+ fi; \
for p in $$list; do echo "$$p $$p"; done | \
sed 's/$(EXEEXT)$$//' | \
- while read p p1; do if test -f $$p; \
- then echo "$$p"; echo "$$p"; else :; fi; \
+ while read p p1; do if test -f $$p \
+ ; then echo "$$p"; echo "$$p"; else :; fi; \
done | \
- sed -e 'p;s,.*/,,;n;h' -e 's|.*|.|' \
+ sed -e 'p;s,.*/,,;n;h' \
+ -e 's|.*|.|' \
-e 'p;x;s,.*/,,;s/$(EXEEXT)$$//;$(transform);s/$$/$(EXEEXT)/' | \
sed 'N;N;N;s,\n, ,g' | \
$(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1 } \
@@ -553,7 +669,8 @@ uninstall-pluginsPROGRAMS:
@list='$(plugins_PROGRAMS)'; test -n "$(pluginsdir)" || list=; \
files=`for p in $$list; do echo "$$p"; done | \
sed -e 'h;s,^.*/,,;s/$(EXEEXT)$$//;$(transform)' \
- -e 's/$$/$(EXEEXT)/' `; \
+ -e 's/$$/$(EXEEXT)/' \
+ `; \
test -n "$$list" || exit 0; \
echo " ( cd '$(DESTDIR)$(pluginsdir)' && rm -f" $$files ")"; \
cd "$(DESTDIR)$(pluginsdir)" && rm -f $$files
@@ -562,14 +679,18 @@ clean-pluginsPROGRAMS:
-test -z "$(plugins_PROGRAMS)" || rm -f $(plugins_PROGRAMS)
install-sbinPROGRAMS: $(sbin_PROGRAMS)
@$(NORMAL_INSTALL)
- test -z "$(sbindir)" || $(MKDIR_P) "$(DESTDIR)$(sbindir)"
@list='$(sbin_PROGRAMS)'; test -n "$(sbindir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(sbindir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(sbindir)" || exit 1; \
+ fi; \
for p in $$list; do echo "$$p $$p"; done | \
sed 's/$(EXEEXT)$$//' | \
- while read p p1; do if test -f $$p; \
- then echo "$$p"; echo "$$p"; else :; fi; \
+ while read p p1; do if test -f $$p \
+ ; then echo "$$p"; echo "$$p"; else :; fi; \
done | \
- sed -e 'p;s,.*/,,;n;h' -e 's|.*|.|' \
+ sed -e 'p;s,.*/,,;n;h' \
+ -e 's|.*|.|' \
-e 'p;x;s,.*/,,;s/$(EXEEXT)$$//;$(transform);s/$$/$(EXEEXT)/' | \
sed 'N;N;N;s,\n, ,g' | \
$(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1 } \
@@ -590,22 +711,26 @@ uninstall-sbinPROGRAMS:
@list='$(sbin_PROGRAMS)'; test -n "$(sbindir)" || list=; \
files=`for p in $$list; do echo "$$p"; done | \
sed -e 'h;s,^.*/,,;s/$(EXEEXT)$$//;$(transform)' \
- -e 's/$$/$(EXEEXT)/' `; \
+ -e 's/$$/$(EXEEXT)/' \
+ `; \
test -n "$$list" || exit 0; \
echo " ( cd '$(DESTDIR)$(sbindir)' && rm -f" $$files ")"; \
cd "$(DESTDIR)$(sbindir)" && rm -f $$files
clean-sbinPROGRAMS:
-test -z "$(sbin_PROGRAMS)" || rm -f $(sbin_PROGRAMS)
+
apps.plugin$(EXEEXT): $(apps_plugin_OBJECTS) $(apps_plugin_DEPENDENCIES) $(EXTRA_apps_plugin_DEPENDENCIES)
@rm -f apps.plugin$(EXEEXT)
- $(LINK) $(apps_plugin_OBJECTS) $(apps_plugin_LDADD) $(LIBS)
+ $(AM_V_CCLD)$(LINK) $(apps_plugin_OBJECTS) $(apps_plugin_LDADD) $(LIBS)
+
freeipmi.plugin$(EXEEXT): $(freeipmi_plugin_OBJECTS) $(freeipmi_plugin_DEPENDENCIES) $(EXTRA_freeipmi_plugin_DEPENDENCIES)
@rm -f freeipmi.plugin$(EXEEXT)
- $(LINK) $(freeipmi_plugin_OBJECTS) $(freeipmi_plugin_LDADD) $(LIBS)
+ $(AM_V_CCLD)$(LINK) $(freeipmi_plugin_OBJECTS) $(freeipmi_plugin_LDADD) $(LIBS)
+
netdata$(EXEEXT): $(netdata_OBJECTS) $(netdata_DEPENDENCIES) $(EXTRA_netdata_DEPENDENCIES)
@rm -f netdata$(EXEEXT)
- $(LINK) $(netdata_OBJECTS) $(netdata_LDADD) $(LIBS)
+ $(AM_V_CCLD)$(LINK) $(netdata_OBJECTS) $(netdata_LDADD) $(LIBS)
mostlyclean-compile:
-rm -f *.$(OBJEXT)
@@ -617,12 +742,18 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/appconfig.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/apps_plugin.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/avl.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/backend_prometheus.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/backends.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/clocks.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/common.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/daemon.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/dictionary.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/eval.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/freebsd_devstat.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/freebsd_getifaddrs.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/freebsd_getmntinfo.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/freebsd_ipfw.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/freebsd_kstat_zfs.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/freebsd_sysctl.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/freeipmi_plugin.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/global_statistics.Po@am__quote@
@@ -662,6 +793,7 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/proc_net_stat_synproxy.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/proc_self_mountinfo.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/proc_softirqs.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/proc_spl_kstat_zfs.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/proc_stat.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/proc_sys_kernel_random_entropy_avail.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/proc_uptime.Po@am__quote@
@@ -690,6 +822,8 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/rrdvar.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/simple_pattern.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/socket.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/statistical.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/statsd.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/storage_number.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sys_devices_system_edac_mc.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sys_devices_system_node.Po@am__quote@
@@ -703,24 +837,28 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/web_buffer_svg.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/web_client.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/web_server.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/zfs_common.Po@am__quote@
.c.o:
-@am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
-@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@am__fastdepCC_TRUE@ $(AM_V_CC)$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@ $(COMPILE) -c $<
+@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(COMPILE) -c -o $@ $<
.c.obj:
-@am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'`
-@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@am__fastdepCC_TRUE@ $(AM_V_CC)$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'`
+@am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@ $(COMPILE) -c `$(CYGPATH_W) '$<'`
+@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(COMPILE) -c -o $@ `$(CYGPATH_W) '$<'`
install-dist_cacheDATA: $(dist_cache_DATA)
@$(NORMAL_INSTALL)
- test -z "$(cachedir)" || $(MKDIR_P) "$(DESTDIR)$(cachedir)"
@list='$(dist_cache_DATA)'; test -n "$(cachedir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(cachedir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(cachedir)" || exit 1; \
+ fi; \
for p in $$list; do \
if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
echo "$$d$$p"; \
@@ -737,8 +875,11 @@ uninstall-dist_cacheDATA:
dir='$(DESTDIR)$(cachedir)'; $(am__uninstall_files_from_dir)
install-dist_logDATA: $(dist_log_DATA)
@$(NORMAL_INSTALL)
- test -z "$(logdir)" || $(MKDIR_P) "$(DESTDIR)$(logdir)"
@list='$(dist_log_DATA)'; test -n "$(logdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(logdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(logdir)" || exit 1; \
+ fi; \
for p in $$list; do \
if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
echo "$$d$$p"; \
@@ -755,8 +896,11 @@ uninstall-dist_logDATA:
dir='$(DESTDIR)$(logdir)'; $(am__uninstall_files_from_dir)
install-dist_registryDATA: $(dist_registry_DATA)
@$(NORMAL_INSTALL)
- test -z "$(registrydir)" || $(MKDIR_P) "$(DESTDIR)$(registrydir)"
@list='$(dist_registry_DATA)'; test -n "$(registrydir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(registrydir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(registrydir)" || exit 1; \
+ fi; \
for p in $$list; do \
if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
echo "$$d$$p"; \
@@ -773,8 +917,11 @@ uninstall-dist_registryDATA:
dir='$(DESTDIR)$(registrydir)'; $(am__uninstall_files_from_dir)
install-dist_varlibDATA: $(dist_varlib_DATA)
@$(NORMAL_INSTALL)
- test -z "$(varlibdir)" || $(MKDIR_P) "$(DESTDIR)$(varlibdir)"
@list='$(dist_varlib_DATA)'; test -n "$(varlibdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(varlibdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(varlibdir)" || exit 1; \
+ fi; \
for p in $$list; do \
if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
echo "$$d$$p"; \
@@ -790,26 +937,15 @@ uninstall-dist_varlibDATA:
files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
dir='$(DESTDIR)$(varlibdir)'; $(am__uninstall_files_from_dir)
-ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES)
- list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
- unique=`for i in $$list; do \
- if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
- done | \
- $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
- END { if (nonempty) { for (i in files) print i; }; }'`; \
- mkid -fID $$unique
-tags: TAGS
-
-TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \
- $(TAGS_FILES) $(LISP)
+ID: $(am__tagged_files)
+ $(am__define_uniq_tagged_files); mkid -fID $$unique
+tags: tags-am
+TAGS: tags
+
+tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
set x; \
here=`pwd`; \
- list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
- unique=`for i in $$list; do \
- if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
- done | \
- $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
- END { if (nonempty) { for (i in files) print i; }; }'`; \
+ $(am__define_uniq_tagged_files); \
shift; \
if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
test -n "$$unique" || unique=$$empty_fix; \
@@ -821,15 +957,11 @@ TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \
$$unique; \
fi; \
fi
-ctags: CTAGS
-CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \
- $(TAGS_FILES) $(LISP)
- list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
- unique=`for i in $$list; do \
- if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
- done | \
- $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
- END { if (nonempty) { for (i in files) print i; }; }'`; \
+ctags: ctags-am
+
+CTAGS: ctags
+ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+ $(am__define_uniq_tagged_files); \
test -z "$(CTAGS_ARGS)$$unique" \
|| $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
$$unique
@@ -838,6 +970,21 @@ GTAGS:
here=`$(am__cd) $(top_builddir) && pwd` \
&& $(am__cd) $(top_srcdir) \
&& gtags -i $(GTAGS_ARGS) "$$here"
+cscopelist: cscopelist-am
+
+cscopelist-am: $(am__tagged_files)
+ list='$(am__tagged_files)'; \
+ case "$(srcdir)" in \
+ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \
+ *) sdir=$(subdir)/$(srcdir) ;; \
+ esac; \
+ for i in $$list; do \
+ if test -f "$$i"; then \
+ echo "$(subdir)/$$i"; \
+ else \
+ echo "$$sdir/$$i"; \
+ fi; \
+ done >> $(top_builddir)/cscope.files
distclean-tags:
-rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
@@ -986,20 +1133,20 @@ uninstall-am: uninstall-dist_cacheDATA uninstall-dist_logDATA \
.MAKE: install-am install-strip
-.PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \
- clean-pluginsPROGRAMS clean-sbinPROGRAMS ctags distclean \
- distclean-compile distclean-generic distclean-tags distdir dvi \
- dvi-am html html-am info info-am install install-am \
- install-data install-data-am install-dist_cacheDATA \
- install-dist_logDATA install-dist_registryDATA \
- install-dist_varlibDATA install-dvi install-dvi-am \
- install-exec install-exec-am install-html install-html-am \
- install-info install-info-am install-man install-pdf \
- install-pdf-am install-pluginsPROGRAMS install-ps \
+.PHONY: CTAGS GTAGS TAGS all all-am check check-am clean clean-generic \
+ clean-pluginsPROGRAMS clean-sbinPROGRAMS cscopelist-am ctags \
+ ctags-am distclean distclean-compile distclean-generic \
+ distclean-tags distdir dvi dvi-am html html-am info info-am \
+ install install-am install-data install-data-am \
+ install-dist_cacheDATA install-dist_logDATA \
+ install-dist_registryDATA install-dist_varlibDATA install-dvi \
+ install-dvi-am install-exec install-exec-am install-html \
+ install-html-am install-info install-info-am install-man \
+ install-pdf install-pdf-am install-pluginsPROGRAMS install-ps \
install-ps-am install-sbinPROGRAMS install-strip installcheck \
installcheck-am installdirs maintainer-clean \
maintainer-clean-generic mostlyclean mostlyclean-compile \
- mostlyclean-generic pdf pdf-am ps ps-am tags uninstall \
+ mostlyclean-generic pdf pdf-am ps ps-am tags tags-am uninstall \
uninstall-am uninstall-dist_cacheDATA uninstall-dist_logDATA \
uninstall-dist_registryDATA uninstall-dist_varlibDATA \
uninstall-pluginsPROGRAMS uninstall-sbinPROGRAMS
diff --git a/src/appconfig.c b/src/appconfig.c
index 71ff4b75..91c4c5c5 100644
--- a/src/appconfig.c
+++ b/src/appconfig.c
@@ -135,6 +135,7 @@ static inline struct section *appconfig_section_create(struct config *root, cons
struct section *co = callocz(1, sizeof(struct section));
co->name = strdupz(section);
co->hash = simple_hash(co->name);
+ netdata_mutex_init(&co->mutex);
avl_init_lock(&co->values_index, appconfig_option_compare);
@@ -213,7 +214,8 @@ int appconfig_move(struct config *root, const char *section_old, const char *nam
if(!co_new) co_new = appconfig_section_create(root, section_new);
config_section_wrlock(co_old);
- config_section_wrlock(co_new);
+ if(co_old != co_new)
+ config_section_wrlock(co_new);
cv_old = appconfig_option_index_find(co_old, name_old, 0);
if(!cv_old) goto cleanup;
@@ -250,7 +252,8 @@ int appconfig_move(struct config *root, const char *section_old, const char *nam
ret = 0;
cleanup:
- config_section_unlock(co_new);
+ if(co_old != co_new)
+ config_section_unlock(co_new);
config_section_unlock(co_old);
return ret;
}
@@ -294,6 +297,17 @@ long long appconfig_get_number(struct config *root, const char *section, const c
return strtoll(s, NULL, 0);
}
+long double appconfig_get_float(struct config *root, const char *section, const char *name, long double value)
+{
+ char buffer[100], *s;
+ sprintf(buffer, "%0.5Lf", value);
+
+ s = appconfig_get(root, section, name, buffer);
+ if(!s) return value;
+
+ return str2ld(s, NULL);
+}
+
int appconfig_get_boolean(struct config *root, const char *section, const char *name, int value)
{
char *s;
@@ -337,7 +351,7 @@ const char *appconfig_set_default(struct config *root, const char *section, cons
{
struct config_option *cv;
- debug(D_CONFIG, "request to set config in section '%s', name '%s', value '%s'", section, name, value);
+ debug(D_CONFIG, "request to set default config in section '%s', name '%s', value '%s'", section, name, value);
struct section *co = appconfig_section_find(root, section);
if(!co) return appconfig_set(root, section, name, value);
@@ -393,6 +407,16 @@ long long appconfig_set_number(struct config *root, const char *section, const c
return value;
}
+long double appconfig_set_float(struct config *root, const char *section, const char *name, long double value)
+{
+ char buffer[100];
+ sprintf(buffer, "%0.5Lf", value);
+
+ appconfig_set(root, section, name, buffer);
+
+ return value;
+}
+
int appconfig_set_boolean(struct config *root, const char *section, const char *name, int value)
{
char *s;
@@ -417,11 +441,11 @@ int appconfig_load(struct config *root, char *filename, int overwrite_used)
if(!filename) filename = CONFIG_DIR "/" CONFIG_FILENAME;
- debug(D_CONFIG, "Opening config file '%s'", filename);
+ debug(D_CONFIG, "CONFIG: opening config file '%s'", filename);
FILE *fp = fopen(filename, "r");
if(!fp) {
- error("Cannot open file '%s'", filename);
+ error("CONFIG: cannot open file '%s'", filename);
return 0;
}
@@ -430,8 +454,8 @@ int appconfig_load(struct config *root, char *filename, int overwrite_used)
line++;
s = trim(buffer);
- if(!s) {
- debug(D_CONFIG, "Ignoring line %d, it is empty.", line);
+ if(!s || *s == '#') {
+ debug(D_CONFIG, "CONFIG: ignoring line %d of file '%s', it is empty.", line, filename);
continue;
}
@@ -449,14 +473,14 @@ int appconfig_load(struct config *root, char *filename, int overwrite_used)
if(!co) {
// line outside a section
- error("Ignoring line %d ('%s'), it is outside all sections.", line, s);
+ error("CONFIG: ignoring line %d ('%s') of file '%s', it is outside all sections.", line, s, filename);
continue;
}
char *name = s;
char *value = strchr(s, '=');
if(!value) {
- error("Ignoring line %d ('%s'), there is no = in it.", line, s);
+ error("CONFIG: ignoring line %d ('%s') of file '%s', there is no = in it.", line, s, filename);
continue;
}
*value = '\0';
@@ -465,12 +489,12 @@ int appconfig_load(struct config *root, char *filename, int overwrite_used)
name = trim(name);
value = trim(value);
- if(!name) {
- error("Ignoring line %d, name is empty.", line);
+ if(!name || *name == '#') {
+ error("CONFIG: ignoring line %d of file '%s', name is empty.", line, filename);
continue;
}
if(!value) {
- debug(D_CONFIG, "Ignoring line %d, value is empty.", line);
+ debug(D_CONFIG, "CONFIG: ignoring line %d of file '%s', value is empty.", line, filename);
continue;
}
@@ -479,12 +503,12 @@ int appconfig_load(struct config *root, char *filename, int overwrite_used)
if(!cv) cv = appconfig_value_create(co, name, value);
else {
if(((cv->flags & CONFIG_VALUE_USED) && overwrite_used) || !(cv->flags & CONFIG_VALUE_USED)) {
- debug(D_CONFIG, "Line %d, overwriting '%s/%s'.", line, co->name, cv->name);
+ debug(D_CONFIG, "CONFIG: line %d of file '%s', overwriting '%s/%s'.", line, filename, co->name, cv->name);
freez(cv->value);
cv->value = strdupz(value);
}
else
- debug(D_CONFIG, "Ignoring line %d, '%s/%s' is already present and used.", line, co->name, cv->name);
+ debug(D_CONFIG, "CONFIG: ignoring line %d of file '%s', '%s/%s' is already present and used.", line, filename, co->name, cv->name);
}
cv->flags |= CONFIG_VALUE_LOADED;
}
@@ -531,6 +555,7 @@ void appconfig_generate(struct config *root, BUFFER *wb, int only_changed)
for(co = root->sections; co ; co = co->next) {
if(!strcmp(co->name, CONFIG_SECTION_GLOBAL)
|| !strcmp(co->name, CONFIG_SECTION_WEB)
+ || !strcmp(co->name, CONFIG_SECTION_STATSD)
|| !strcmp(co->name, CONFIG_SECTION_PLUGINS)
|| !strcmp(co->name, CONFIG_SECTION_REGISTRY)
|| !strcmp(co->name, CONFIG_SECTION_HEALTH)
@@ -558,7 +583,7 @@ void appconfig_generate(struct config *root, BUFFER *wb, int only_changed)
if(only_changed && !changed) continue;
if(!used) {
- buffer_sprintf(wb, "\n# node '%s' is not used.", co->name);
+ buffer_sprintf(wb, "\n# section '%s' is not used.", co->name);
}
buffer_sprintf(wb, "\n[%s]\n", co->name);
diff --git a/src/appconfig.h b/src/appconfig.h
index 45cc8cfd..b8c2ee80 100644
--- a/src/appconfig.h
+++ b/src/appconfig.h
@@ -5,6 +5,7 @@
#define CONFIG_SECTION_GLOBAL "global"
#define CONFIG_SECTION_WEB "web"
+#define CONFIG_SECTION_STATSD "statsd"
#define CONFIG_SECTION_PLUGINS "plugins"
#define CONFIG_SECTION_REGISTRY "registry"
#define CONFIG_SECTION_HEALTH "health"
@@ -34,12 +35,14 @@ extern int appconfig_load(struct config *root, char *filename, int overwrite_use
extern char *appconfig_get(struct config *root, const char *section, const char *name, const char *default_value);
extern long long appconfig_get_number(struct config *root, const char *section, const char *name, long long value);
+extern long double appconfig_get_float(struct config *root, const char *section, const char *name, long double value);
extern int appconfig_get_boolean(struct config *root, const char *section, const char *name, int value);
extern int appconfig_get_boolean_ondemand(struct config *root, const char *section, const char *name, int value);
extern const char *appconfig_set(struct config *root, const char *section, const char *name, const char *value);
extern const char *appconfig_set_default(struct config *root, const char *section, const char *name, const char *value);
extern long long appconfig_set_number(struct config *root, const char *section, const char *name, long long value);
+extern long double appconfig_set_float(struct config *root, const char *section, const char *name, long double value);
extern int appconfig_set_boolean(struct config *root, const char *section, const char *name, int value);
extern int appconfig_exists(struct config *root, const char *section, const char *name);
@@ -53,12 +56,14 @@ extern void appconfig_generate(struct config *root, BUFFER *wb, int only_changed
#define config_load(filename, overwrite_used) appconfig_load(&netdata_config, filename, overwrite_used)
#define config_get(section, name, default_value) appconfig_get(&netdata_config, section, name, default_value)
#define config_get_number(section, name, value) appconfig_get_number(&netdata_config, section, name, value)
+#define config_get_float(section, name, value) appconfig_get_float(&netdata_config, section, name, value)
#define config_get_boolean(section, name, value) appconfig_get_boolean(&netdata_config, section, name, value)
#define config_get_boolean_ondemand(section, name, value) appconfig_get_boolean_ondemand(&netdata_config, section, name, value)
-#define config_set(section, name, default_value) appconfig_get(&netdata_config, section, name, default_value)
+#define config_set(section, name, default_value) appconfig_set(&netdata_config, section, name, default_value)
#define config_set_default(section, name, value) appconfig_set_default(&netdata_config, section, name, value)
#define config_set_number(section, name, value) appconfig_set_number(&netdata_config, section, name, value)
+#define config_set_float(section, name, value) appconfig_set_float(&netdata_config, section, name, value)
#define config_set_boolean(section, name, value) appconfig_set_boolean(&netdata_config, section, name, value)
#define config_exists(section, name) appconfig_exists(&netdata_config, section, name)
diff --git a/src/apps_plugin.c b/src/apps_plugin.c
index b1bf06be..ecb6aaea 100644
--- a/src/apps_plugin.c
+++ b/src/apps_plugin.c
@@ -34,8 +34,7 @@
#define MAX_COMPARE_NAME 100
#define MAX_NAME 100
-#define MAX_CMDLINE 1024
-
+#define MAX_CMDLINE 16384
// ----------------------------------------------------------------------------
// the rates we are going to send to netdata will have this detail a value of:
@@ -109,11 +108,12 @@ static size_t
// metric.
// the total system time, as reported by /proc/stat
+#if (ALL_PIDS_ARE_READ_INSTANTLY == 0)
static kernel_uint_t
global_utime = 0,
global_stime = 0,
global_gtime = 0;
-
+#endif
// the normalization ratios, as calculated by normalize_utilization()
double utime_fix_ratio = 1.0,
@@ -127,7 +127,6 @@ double utime_fix_ratio = 1.0,
cminflt_fix_ratio = 1.0,
cmajflt_fix_ratio = 1.0;
-
// ----------------------------------------------------------------------------
// target
//
@@ -223,7 +222,7 @@ size_t
struct pid_stat {
int32_t pid;
char comm[MAX_COMPARE_NAME + 1];
- char cmdline[MAX_CMDLINE + 1];
+ char *cmdline;
uint32_t log_thrown;
@@ -438,7 +437,7 @@ static struct target *get_users_target(uid_t uid) {
w->idhash = simple_hash(w->id);
struct passwd *pw = getpwuid(uid);
- if(!pw)
+ if(!pw || !pw->pw_name || !*pw->pw_name)
snprintfz(w->name, MAX_NAME, "%u", uid);
else
snprintfz(w->name, MAX_NAME, "%s", pw->pw_name);
@@ -471,7 +470,7 @@ struct target *get_groups_target(gid_t gid)
w->idhash = simple_hash(w->id);
struct group *gr = getgrgid(gid);
- if(!gr)
+ if(!gr || !gr->gr_name || !*gr->gr_name)
snprintfz(w->name, MAX_NAME, "%u", gid);
else
snprintfz(w->name, MAX_NAME, "%s", gr->gr_name);
@@ -698,6 +697,7 @@ static inline void del_pid_entry(pid_t pid) {
freez(p->statm_filename);
freez(p->io_filename);
freez(p->cmdline_filename);
+ freez(p->cmdline);
freez(p);
all_pids[pid] = NULL;
@@ -768,7 +768,7 @@ static inline void assign_target_to_pid(struct pid_stat *p) {
if(unlikely(( (!w->starts_with && !w->ends_with && w->comparehash == hash && !strcmp(w->compare, p->comm))
|| (w->starts_with && !w->ends_with && !strncmp(w->compare, p->comm, w->comparelen))
|| (!w->starts_with && w->ends_with && pclen >= w->comparelen && !strcmp(w->compare, &p->comm[pclen - w->comparelen]))
- || (proc_pid_cmdline_is_needed && w->starts_with && w->ends_with && strstr(p->cmdline, w->compare))
+ || (proc_pid_cmdline_is_needed && w->starts_with && w->ends_with && p->cmdline && strstr(p->cmdline, w->compare))
))) {
if(w->target) p->target = w->target;
@@ -787,6 +787,7 @@ static inline void assign_target_to_pid(struct pid_stat *p) {
// update pids from proc
static inline int read_proc_pid_cmdline(struct pid_stat *p) {
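+ // the command line is read into a shared static scratch buffer and then
+ // strdupz()'ed into p->cmdline, so each pid holds only as much memory
+ // as its command line actually needs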
+ static char cmdline[MAX_CMDLINE + 1];
#ifdef __FreeBSD__
size_t i, bytes = MAX_CMDLINE;
@@ -796,7 +797,7 @@ static inline int read_proc_pid_cmdline(struct pid_stat *p) {
mib[1] = KERN_PROC;
mib[2] = KERN_PROC_ARGS;
mib[3] = p->pid;
- if (unlikely(sysctl(mib, 4, p->cmdline, &bytes, NULL, 0)))
+ if (unlikely(sysctl(mib, 4, cmdline, &bytes, NULL, 0)))
goto cleanup;
#else
if(unlikely(!p->cmdline_filename)) {
@@ -808,15 +809,17 @@ static inline int read_proc_pid_cmdline(struct pid_stat *p) {
int fd = open(p->cmdline_filename, O_RDONLY, 0666);
if(unlikely(fd == -1)) goto cleanup;
- ssize_t i, bytes = read(fd, p->cmdline, MAX_CMDLINE);
+ ssize_t i, bytes = read(fd, cmdline, MAX_CMDLINE);
close(fd);
if(unlikely(bytes < 0)) goto cleanup;
#endif
- p->cmdline[bytes] = '\0';
+ cmdline[bytes] = '\0';
for(i = 0; i < bytes ; i++)
- if(unlikely(!p->cmdline[i])) p->cmdline[i] = ' ';
+ if(unlikely(!cmdline[i])) cmdline[i] = ' ';
+
+ p->cmdline = strdupz(cmdline);
if(unlikely(debug))
fprintf(stderr, "Read file '%s' contents: %s\n", p->cmdline_filename, p->cmdline);
@@ -825,7 +828,7 @@ static inline int read_proc_pid_cmdline(struct pid_stat *p) {
cleanup:
// copy the command to the command line
- strncpyz(p->cmdline, p->comm, MAX_CMDLINE);
+ p->cmdline = strdupz(p->comm);
return 0;
}
@@ -1157,24 +1160,13 @@ cleanup:
#endif
}
+#if (ALL_PIDS_ARE_READ_INSTANTLY == 0)
static inline int read_proc_stat() {
-#ifdef __FreeBSD__
- long cp_time[CPUSTATES];
- static kernel_uint_t utime_raw = 0, stime_raw = 0, ntime_raw = 0;
-
- if (unlikely(CPUSTATES != 5)) {
- error("FREEBSD: There are %d CPU states (5 was expected)", CPUSTATES);
- goto cleanup;
- }
- if (unlikely(GETSYSCTL_BY_NAME("kern.cp_time", cp_time))) goto cleanup;
-#else
static char filename[FILENAME_MAX + 1] = "";
static procfile *ff = NULL;
static kernel_uint_t utime_raw = 0, stime_raw = 0, gtime_raw = 0, gntime_raw = 0, ntime_raw = 0;
-#endif
static usec_t collected_usec = 0, last_collected_usec = 0;
-#ifndef __FreeBSD__
if(unlikely(!ff)) {
snprintfz(filename, FILENAME_MAX, "%s/proc/stat", netdata_configured_host_prefix);
ff = procfile_open(filename, " \t:", PROCFILE_FLAG_DEFAULT);
@@ -1183,7 +1175,6 @@ static inline int read_proc_stat() {
ff = procfile_readall(ff);
if(unlikely(!ff)) goto cleanup;
-#endif
last_collected_usec = collected_usec;
collected_usec = now_monotonic_usec();
@@ -1193,25 +1184,13 @@ static inline int read_proc_stat() {
// temporary - it is added to global_utime below
kernel_uint_t global_ntime = 0;
-#ifdef __FreeBSD__
- incremental_rate(global_utime, utime_raw, cp_time[0], collected_usec, last_collected_usec);
- incremental_rate(global_ntime, ntime_raw, cp_time[1], collected_usec, last_collected_usec);
- incremental_rate(global_stime, stime_raw, cp_time[2], collected_usec, last_collected_usec);
-#else
incremental_rate(global_utime, utime_raw, str2kernel_uint_t(procfile_lineword(ff, 0, 1)), collected_usec, last_collected_usec);
incremental_rate(global_ntime, ntime_raw, str2kernel_uint_t(procfile_lineword(ff, 0, 2)), collected_usec, last_collected_usec);
incremental_rate(global_stime, stime_raw, str2kernel_uint_t(procfile_lineword(ff, 0, 3)), collected_usec, last_collected_usec);
incremental_rate(global_gtime, gtime_raw, str2kernel_uint_t(procfile_lineword(ff, 0, 10)), collected_usec, last_collected_usec);
-#endif
global_utime += global_ntime;
-#ifdef __FreeBSD__
- if(enable_guest_charts) {
- enable_guest_charts = 0;
- info("Guest charts aren't supported by FreeBSD");
- }
-#else
if(enable_guest_charts) {
// temporary - it is added to global_gtime below
kernel_uint_t global_gntime = 0;
@@ -1224,7 +1203,6 @@ static inline int read_proc_stat() {
// remove guest time from user time
global_utime -= (global_utime > global_gtime) ? global_gtime : global_utime;
}
-#endif
if(unlikely(global_iterations_counter == 1)) {
global_utime = 0;
@@ -1240,7 +1218,11 @@ cleanup:
global_gtime = 0;
return 0;
}
-
+#else
+static inline int read_proc_stat() {
+ return 0;
+}
+#endif
// ----------------------------------------------------------------------------
@@ -1751,7 +1733,6 @@ static inline int print_process_and_parents(struct pid_stat *p, usec_t time) {
}
static inline void print_process_tree(struct pid_stat *p, char *msg) {
- log_date(stderr);
fprintf(stderr, "%s: process %s (%d, %s) with parents:\n", msg, p->comm, p->pid, p->updated?"running":"exited");
print_process_and_parents(p, p->stat_collected_usec);
}
@@ -1860,7 +1841,6 @@ static inline void process_exited_processes() {
continue;
if(unlikely(debug)) {
- log_date(stderr);
fprintf(stderr, "Absorb %s (%d %s total resources: utime=" KERNEL_UINT_FORMAT " stime=" KERNEL_UINT_FORMAT " gtime=" KERNEL_UINT_FORMAT " minflt=" KERNEL_UINT_FORMAT " majflt=" KERNEL_UINT_FORMAT ")\n"
, p->comm
, p->pid
@@ -2620,14 +2600,13 @@ static inline void send_END(void) {
fprintf(stdout, "END\n");
}
-static usec_t send_resource_usage_to_netdata() {
+void send_resource_usage_to_netdata(usec_t dt) {
static struct timeval last = { 0, 0 };
static struct rusage me_last;
struct timeval now;
struct rusage me;
- usec_t usec;
usec_t cpuuser;
usec_t cpusyst;
@@ -2635,10 +2614,6 @@ static usec_t send_resource_usage_to_netdata() {
now_monotonic_timeval(&last);
getrusage(RUSAGE_SELF, &me_last);
- // the first time, give a zero to allow
- // netdata calibrate to the current time
- // usec = update_every * USEC_PER_SEC;
- usec = 0ULL;
cpuuser = 0;
cpusyst = 0;
}
@@ -2646,7 +2621,6 @@ static usec_t send_resource_usage_to_netdata() {
now_monotonic_timeval(&now);
getrusage(RUSAGE_SELF, &me);
- usec = dt_usec(&now, &last);
cpuuser = me.ru_utime.tv_sec * USEC_PER_SEC + me.ru_utime.tv_usec;
cpusyst = me.ru_stime.tv_sec * USEC_PER_SEC + me.ru_stime.tv_usec;
@@ -2658,38 +2632,45 @@ static usec_t send_resource_usage_to_netdata() {
if(unlikely(!created_charts)) {
created_charts = 1;
- fprintf(stdout
- , "CHART netdata.apps_cpu '' 'Apps Plugin CPU' 'milliseconds/s' apps.plugin netdata.apps_cpu stacked 140000 %1$d\n"
- "DIMENSION user '' incremental 1 1000\n"
- "DIMENSION system '' incremental 1 1000\n"
- "CHART netdata.apps_sizes '' 'Apps Plugin Files' 'files/s' apps.plugin netdata.apps_sizes line 140001 %1$d\n"
- "DIMENSION calls '' incremental 1 1\n"
- "DIMENSION files '' incremental 1 1\n"
- "DIMENSION pids '' absolute 1 1\n"
- "DIMENSION fds '' absolute 1 1\n"
- "DIMENSION targets '' absolute 1 1\n"
- "DIMENSION new_pids 'new pids' incremental 1 1\n"
- "CHART netdata.apps_fix '' 'Apps Plugin Normalization Ratios' 'percentage' apps.plugin netdata.apps_fix line 140002 %1$d\n"
- "DIMENSION utime '' absolute 1 %2$llu\n"
- "DIMENSION stime '' absolute 1 %2$llu\n"
- "DIMENSION gtime '' absolute 1 %2$llu\n"
- "DIMENSION minflt '' absolute 1 %2$llu\n"
- "DIMENSION majflt '' absolute 1 %2$llu\n"
+ fprintf(stdout,
+ "CHART netdata.apps_cpu '' 'Apps Plugin CPU' 'milliseconds/s' apps.plugin netdata.apps_cpu stacked 140000 %1$d\n"
+ "DIMENSION user '' incremental 1 1000\n"
+ "DIMENSION system '' incremental 1 1000\n"
+ "CHART netdata.apps_sizes '' 'Apps Plugin Files' 'files/s' apps.plugin netdata.apps_sizes line 140001 %1$d\n"
+ "DIMENSION calls '' incremental 1 1\n"
+ "DIMENSION files '' incremental 1 1\n"
+ "DIMENSION pids '' absolute 1 1\n"
+ "DIMENSION fds '' absolute 1 1\n"
+ "DIMENSION targets '' absolute 1 1\n"
+ "DIMENSION new_pids 'new pids' incremental 1 1\n"
+ , update_every
+ );
+
+#if (ALL_PIDS_ARE_READ_INSTANTLY == 0)
+ fprintf(stdout,
+ "CHART netdata.apps_fix '' 'Apps Plugin Normalization Ratios' 'percentage' apps.plugin netdata.apps_fix line 140002 %1$d\n"
+ "DIMENSION utime '' absolute 1 %2$llu\n"
+ "DIMENSION stime '' absolute 1 %2$llu\n"
+ "DIMENSION gtime '' absolute 1 %2$llu\n"
+ "DIMENSION minflt '' absolute 1 %2$llu\n"
+ "DIMENSION majflt '' absolute 1 %2$llu\n"
, update_every
, RATES_DETAIL
);
if(include_exited_childs)
- fprintf(stdout
- , "CHART netdata.apps_children_fix '' 'Apps Plugin Exited Children Normalization Ratios' 'percentage' apps.plugin netdata.apps_children_fix line 140003 %1$d\n"
- "DIMENSION cutime '' absolute 1 %2$llu\n"
- "DIMENSION cstime '' absolute 1 %2$llu\n"
- "DIMENSION cgtime '' absolute 1 %2$llu\n"
- "DIMENSION cminflt '' absolute 1 %2$llu\n"
- "DIMENSION cmajflt '' absolute 1 %2$llu\n"
+ fprintf(stdout,
+ "CHART netdata.apps_children_fix '' 'Apps Plugin Exited Children Normalization Ratios' 'percentage' apps.plugin netdata.apps_children_fix line 140003 %1$d\n"
+ "DIMENSION cutime '' absolute 1 %2$llu\n"
+ "DIMENSION cstime '' absolute 1 %2$llu\n"
+ "DIMENSION cgtime '' absolute 1 %2$llu\n"
+ "DIMENSION cminflt '' absolute 1 %2$llu\n"
+ "DIMENSION cmajflt '' absolute 1 %2$llu\n"
, update_every
, RATES_DETAIL
);
+#endif
+
}
fprintf(stdout,
@@ -2705,31 +2686,35 @@ static usec_t send_resource_usage_to_netdata() {
"SET targets = %zu\n"
"SET new_pids = %zu\n"
"END\n"
- "BEGIN netdata.apps_fix %llu\n"
- "SET utime = %u\n"
- "SET stime = %u\n"
- "SET gtime = %u\n"
- "SET minflt = %u\n"
- "SET majflt = %u\n"
- "END\n"
- , usec
+ , dt
, cpuuser
, cpusyst
- , usec
+ , dt
, calls_counter
, file_counter
, all_pids_count
, all_files_len
, apps_groups_targets_count
, targets_assignment_counter
- , usec
- , (unsigned int)(utime_fix_ratio * 100 * RATES_DETAIL)
- , (unsigned int)(stime_fix_ratio * 100 * RATES_DETAIL)
- , (unsigned int)(gtime_fix_ratio * 100 * RATES_DETAIL)
- , (unsigned int)(minflt_fix_ratio * 100 * RATES_DETAIL)
- , (unsigned int)(majflt_fix_ratio * 100 * RATES_DETAIL)
);
+#if (ALL_PIDS_ARE_READ_INSTANTLY == 0)
+ fprintf(stdout,
+ "BEGIN netdata.apps_fix %llu\n"
+ "SET utime = %u\n"
+ "SET stime = %u\n"
+ "SET gtime = %u\n"
+ "SET minflt = %u\n"
+ "SET majflt = %u\n"
+ "END\n"
+ , dt
+ , (unsigned int)(utime_fix_ratio * 100 * RATES_DETAIL)
+ , (unsigned int)(stime_fix_ratio * 100 * RATES_DETAIL)
+ , (unsigned int)(gtime_fix_ratio * 100 * RATES_DETAIL)
+ , (unsigned int)(minflt_fix_ratio * 100 * RATES_DETAIL)
+ , (unsigned int)(majflt_fix_ratio * 100 * RATES_DETAIL)
+ );
+
if(include_exited_childs)
fprintf(stdout,
"BEGIN netdata.apps_children_fix %llu\n"
@@ -2739,17 +2724,17 @@ static usec_t send_resource_usage_to_netdata() {
"SET cminflt = %u\n"
"SET cmajflt = %u\n"
"END\n"
- , usec
+ , dt
, (unsigned int)(cutime_fix_ratio * 100 * RATES_DETAIL)
, (unsigned int)(cstime_fix_ratio * 100 * RATES_DETAIL)
, (unsigned int)(cgtime_fix_ratio * 100 * RATES_DETAIL)
, (unsigned int)(cminflt_fix_ratio * 100 * RATES_DETAIL)
, (unsigned int)(cmajflt_fix_ratio * 100 * RATES_DETAIL)
);
-
- return usec;
+#endif
}
+#if (ALL_PIDS_ARE_READ_INSTANTLY == 0)
static void normalize_utilization(struct target *root) {
struct target *w;
@@ -2895,25 +2880,30 @@ static void normalize_utilization(struct target *root) {
);
}
}
+#else // ALL_PIDS_ARE_READ_INSTANTLY == 1
+static void normalize_utilization(struct target *root) {
+ (void)root;
+}
+#endif // ALL_PIDS_ARE_READ_INSTANTLY
-static void send_collected_data_to_netdata(struct target *root, const char *type, usec_t usec) {
+static void send_collected_data_to_netdata(struct target *root, const char *type, usec_t dt) {
struct target *w;
- send_BEGIN(type, "cpu", usec);
+ send_BEGIN(type, "cpu", dt);
for (w = root; w ; w = w->next) {
if(unlikely(w->exposed))
send_SET(w->name, (kernel_uint_t)(w->utime * utime_fix_ratio) + (kernel_uint_t)(w->stime * stime_fix_ratio) + (kernel_uint_t)(w->gtime * gtime_fix_ratio) + (include_exited_childs?((kernel_uint_t)(w->cutime * cutime_fix_ratio) + (kernel_uint_t)(w->cstime * cstime_fix_ratio) + (kernel_uint_t)(w->cgtime * cgtime_fix_ratio)):0ULL));
}
send_END();
- send_BEGIN(type, "cpu_user", usec);
+ send_BEGIN(type, "cpu_user", dt);
for (w = root; w ; w = w->next) {
if(unlikely(w->exposed))
send_SET(w->name, (kernel_uint_t)(w->utime * utime_fix_ratio) + (include_exited_childs?((kernel_uint_t)(w->cutime * cutime_fix_ratio)):0ULL));
}
send_END();
- send_BEGIN(type, "cpu_system", usec);
+ send_BEGIN(type, "cpu_system", dt);
for (w = root; w ; w = w->next) {
if(unlikely(w->exposed))
send_SET(w->name, (kernel_uint_t)(w->stime * stime_fix_ratio) + (include_exited_childs?((kernel_uint_t)(w->cstime * cstime_fix_ratio)):0ULL));
@@ -2921,7 +2911,7 @@ static void send_collected_data_to_netdata(struct target *root, const char *type
send_END();
if(show_guest_time) {
- send_BEGIN(type, "cpu_guest", usec);
+ send_BEGIN(type, "cpu_guest", dt);
for (w = root; w ; w = w->next) {
if(unlikely(w->exposed))
send_SET(w->name, (kernel_uint_t)(w->gtime * gtime_fix_ratio) + (include_exited_childs?((kernel_uint_t)(w->cgtime * cgtime_fix_ratio)):0ULL));
@@ -2929,42 +2919,42 @@ static void send_collected_data_to_netdata(struct target *root, const char *type
send_END();
}
- send_BEGIN(type, "threads", usec);
+ send_BEGIN(type, "threads", dt);
for (w = root; w ; w = w->next) {
if(unlikely(w->exposed))
send_SET(w->name, w->num_threads);
}
send_END();
- send_BEGIN(type, "processes", usec);
+ send_BEGIN(type, "processes", dt);
for (w = root; w ; w = w->next) {
if(unlikely(w->exposed))
send_SET(w->name, w->processes);
}
send_END();
- send_BEGIN(type, "mem", usec);
+ send_BEGIN(type, "mem", dt);
for (w = root; w ; w = w->next) {
if(unlikely(w->exposed))
send_SET(w->name, (w->statm_resident > w->statm_share)?(w->statm_resident - w->statm_share):0ULL);
}
send_END();
- send_BEGIN(type, "vmem", usec);
+ send_BEGIN(type, "vmem", dt);
for (w = root; w ; w = w->next) {
if(unlikely(w->exposed))
send_SET(w->name, w->statm_size);
}
send_END();
- send_BEGIN(type, "minor_faults", usec);
+ send_BEGIN(type, "minor_faults", dt);
for (w = root; w ; w = w->next) {
if(unlikely(w->exposed))
send_SET(w->name, (kernel_uint_t)(w->minflt * minflt_fix_ratio) + (include_exited_childs?((kernel_uint_t)(w->cminflt * cminflt_fix_ratio)):0ULL));
}
send_END();
- send_BEGIN(type, "major_faults", usec);
+ send_BEGIN(type, "major_faults", dt);
for (w = root; w ; w = w->next) {
if(unlikely(w->exposed))
send_SET(w->name, (kernel_uint_t)(w->majflt * majflt_fix_ratio) + (include_exited_childs?((kernel_uint_t)(w->cmajflt * cmajflt_fix_ratio)):0ULL));
@@ -2972,14 +2962,14 @@ static void send_collected_data_to_netdata(struct target *root, const char *type
send_END();
#ifndef __FreeBSD__
- send_BEGIN(type, "lreads", usec);
+ send_BEGIN(type, "lreads", dt);
for (w = root; w ; w = w->next) {
if(unlikely(w->exposed))
send_SET(w->name, w->io_logical_bytes_read);
}
send_END();
- send_BEGIN(type, "lwrites", usec);
+ send_BEGIN(type, "lwrites", dt);
for (w = root; w ; w = w->next) {
if(unlikely(w->exposed))
send_SET(w->name, w->io_logical_bytes_written);
@@ -2987,14 +2977,14 @@ static void send_collected_data_to_netdata(struct target *root, const char *type
send_END();
#endif
- send_BEGIN(type, "preads", usec);
+ send_BEGIN(type, "preads", dt);
for (w = root; w ; w = w->next) {
if(unlikely(w->exposed))
send_SET(w->name, w->io_storage_bytes_read);
}
send_END();
- send_BEGIN(type, "pwrites", usec);
+ send_BEGIN(type, "pwrites", dt);
for (w = root; w ; w = w->next) {
if(unlikely(w->exposed))
send_SET(w->name, w->io_storage_bytes_written);
@@ -3002,21 +2992,21 @@ static void send_collected_data_to_netdata(struct target *root, const char *type
send_END();
if(enable_file_charts) {
- send_BEGIN(type, "files", usec);
+ send_BEGIN(type, "files", dt);
for (w = root; w; w = w->next) {
if (unlikely(w->exposed))
send_SET(w->name, w->openfiles);
}
send_END();
- send_BEGIN(type, "sockets", usec);
+ send_BEGIN(type, "sockets", dt);
for (w = root; w; w = w->next) {
if (unlikely(w->exposed))
send_SET(w->name, w->opensockets);
}
send_END();
- send_BEGIN(type, "pipes", usec);
+ send_BEGIN(type, "pipes", dt);
for (w = root; w; w = w->next) {
if (unlikely(w->exposed))
send_SET(w->name, w->openpipes);
@@ -3472,8 +3462,9 @@ int main(int argc, char **argv) {
static int profiling_count=0;
profiling_count++;
if(unlikely(profiling_count > 1000)) exit(0);
+ usec_t dt = update_every * USEC_PER_SEC;
#else
- heartbeat_next(&hb, step);
+ usec_t dt = heartbeat_next(&hb, step);
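+ // heartbeat_next() returns the microseconds actually elapsed since the
+ // previous iteration; dt is passed to the send_* functions below, so all
+ // charts of this iteration report the same time delta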
#endif
if(!collect_data_for_all_processes()) {
@@ -3485,7 +3476,7 @@ int main(int argc, char **argv) {
calculate_netdata_statistics();
normalize_utilization(apps_groups_root_target);
- usec_t dt = send_resource_usage_to_netdata();
+ send_resource_usage_to_netdata(dt);
// this is smart enough to show only newly added apps, when needed
send_charts_updates_to_netdata(apps_groups_root_target, "apps", "Apps");
diff --git a/src/backend_prometheus.c b/src/backend_prometheus.c
new file mode 100644
index 00000000..88ec2c65
--- /dev/null
+++ b/src/backend_prometheus.c
@@ -0,0 +1,397 @@
+#include "common.h"
+
+// ----------------------------------------------------------------------------
+// PROMETHEUS
+// /api/v1/allmetrics?format=prometheus
+
+static struct prometheus_server {
+ const char *server;
+ uint32_t hash;
+ time_t last_access;
+ struct prometheus_server *next;
+} *prometheus_server_root = NULL;
+
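+// return the time a prometheus server last fetched our metrics
+// (0 if we have never seen it) and remember 'now' as its new last
+// access, so the next scrape resumes from where this one stopped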
+static inline time_t prometheus_server_last_access(const char *server, time_t now) {
+ uint32_t hash = simple_hash(server);
+
+ struct prometheus_server *ps;
+ for(ps = prometheus_server_root; ps ;ps = ps->next) {
+ if (hash == ps->hash && !strcmp(server, ps->server)) {
+ time_t last = ps->last_access;
+ ps->last_access = now;
+ return last;
+ }
+ }
+
+ ps = callocz(1, sizeof(struct prometheus_server));
+ ps->server = strdupz(server);
+ ps->hash = hash;
+ ps->last_access = now;
+ ps->next = prometheus_server_root;
+ prometheus_server_root = ps;
+
+ return 0;
+}
+
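+// copy a netdata id into a prometheus metric name:
+// any character that is not alphanumeric becomes '_'
+// e.g. "system.cpu" is copied as "system_cpu"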
+static inline size_t prometheus_name_copy(char *d, const char *s, size_t usable) {
+ size_t n;
+
+ for(n = 0; *s && n < usable ; d++, s++, n++) {
+ register char c = *s;
+
+ if(!isalnum(c)) *d = '_';
+ else *d = c;
+ }
+ *d = '\0';
+
+ return n;
+}
+
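+// copy a netdata string into a prometheus label value,
+// backslash-escaping '"', '\' and newline characters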
+static inline size_t prometheus_label_copy(char *d, const char *s, size_t usable) {
+ size_t n;
+
+ // make sure we can escape one character without overflowing the buffer
+ usable--;
+
+ for(n = 0; *s && n < usable ; d++, s++, n++) {
+ register char c = *s;
+
+ if(unlikely(c == '"' || c == '\\' || c == '\n')) {
+ *d++ = '\\';
+ n++;
+ }
+ *d = c;
+ }
+ *d = '\0';
+
+ return n;
+}
+
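+// convert netdata units into a prometheus metric name suffix:
+// a '_' is prepended and non-alphanumerics become '_', while
+// "%" becomes "_percent" and a trailing "/s" becomes "_persec"
+// e.g. "KB/s" gives "_KB_persec", "%" gives "_percent"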
+static inline char *prometheus_units_copy(char *d, const char *s, size_t usable) {
+ const char *sorig = s;
+ char *ret = d;
+ size_t n;
+
+ *d++ = '_';
+ for(n = 1; *s && n < usable ; d++, s++, n++) {
+ register char c = *s;
+
+ if(!isalnum(c)) *d = '_';
+ else *d = c;
+ }
+
+ if(n == 2 && sorig[0] == '%') {
+ n = 0;
+ d = ret;
+ s = "_percent";
+ for( ; *s && n < usable ; n++) *d++ = *s++;
+ }
+ else if(n > 3 && sorig[n-3] == '/' && sorig[n-2] == 's') {
+ n = n - 2;
+ d -= 2;
+ s = "_persec";
+ for( ; *s && n < usable ; n++) *d++ = *s++;
+ }
+
+ *d = '\0';
+
+ return ret;
+}
+
+
+#define PROMETHEUS_ELEMENT_MAX 256
+#define PROMETHEUS_LABELS_MAX 1024
+
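+// format all metrics of a host in the prometheus text format;
+// an as-collected counter produces lines like (values illustrative):
+//   netdata_system_cpu_total{chart="system.cpu",family="cpu",dimension="user"} 42 1499999999000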
+static void rrd_stats_api_v1_charts_allmetrics_prometheus(RRDHOST *host, BUFFER *wb, const char *prefix, uint32_t options, time_t after, time_t before, int allhosts, int help, int types, int names) {
+ rrdhost_rdlock(host);
+
+ char hostname[PROMETHEUS_ELEMENT_MAX + 1];
+ prometheus_label_copy(hostname, host->hostname, PROMETHEUS_ELEMENT_MAX);
+
+ char labels[PROMETHEUS_LABELS_MAX + 1] = "";
+ if(allhosts) {
+ if(host->tags && *(host->tags))
+ buffer_sprintf(wb, "netdata_host_tags{instance=\"%s\",%s} 1 %llu\n", hostname, host->tags, now_realtime_usec() / USEC_PER_MS);
+
+ snprintfz(labels, PROMETHEUS_LABELS_MAX, ",instance=\"%s\"", hostname);
+ }
+ else {
+ if(host->tags && *(host->tags))
+ buffer_sprintf(wb, "netdata_host_tags{%s} 1 %llu\n", host->tags, now_realtime_usec() / USEC_PER_MS);
+ }
+
+ // for each chart
+ RRDSET *st;
+ rrdset_foreach_read(st, host) {
+ char chart[PROMETHEUS_ELEMENT_MAX + 1];
+ char context[PROMETHEUS_ELEMENT_MAX + 1];
+ char family[PROMETHEUS_ELEMENT_MAX + 1];
+ char units[PROMETHEUS_ELEMENT_MAX + 1] = "";
+
+ prometheus_label_copy(chart, (names && st->name)?st->name:st->id, PROMETHEUS_ELEMENT_MAX);
+ prometheus_label_copy(family, st->family, PROMETHEUS_ELEMENT_MAX);
+ prometheus_name_copy(context, st->context, PROMETHEUS_ELEMENT_MAX);
+
+ if(likely(backends_can_send_rrdset(options, st))) {
+ rrdset_rdlock(st);
+
+ int as_collected = ((options & BACKEND_SOURCE_BITS) == BACKEND_SOURCE_DATA_AS_COLLECTED);
+ int homogeneous = 1;
+ if(as_collected) {
+ if(rrdset_flag_check(st, RRDSET_FLAG_HOMEGENEOUS_CHECK))
+ rrdset_update_heterogeneous_flag(st);
+
+ if(rrdset_flag_check(st, RRDSET_FLAG_HETEROGENEOUS))
+ homogeneous = 0;
+ }
+ else {
+ if((options & BACKEND_SOURCE_BITS) == BACKEND_SOURCE_DATA_AVERAGE)
+ prometheus_units_copy(units, st->units, PROMETHEUS_ELEMENT_MAX);
+ }
+
+ if(unlikely(help))
+ buffer_sprintf(wb, "\n# COMMENT %s chart \"%s\", context \"%s\", family \"%s\", units \"%s\"\n"
+ , (homogeneous)?"homogeneous":"heterogeneous"
+ , (names && st->name) ? st->name : st->id
+ , st->context
+ , st->family
+ , st->units
+ );
+
+ // for each dimension
+ RRDDIM *rd;
+ rrddim_foreach_read(rd, st) {
+ if(rd->collections_counter) {
+ char dimension[PROMETHEUS_ELEMENT_MAX + 1];
+ char *suffix = "";
+
+ if (as_collected) {
+ // we need as-collected / raw data
+
+ const char *t = "gauge", *h = "gives";
+ if(rd->algorithm == RRD_ALGORITHM_INCREMENTAL ||
+ rd->algorithm == RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL) {
+ t = "counter";
+ h = "delta gives";
+ suffix = "_total";
+ }
+
+ if(homogeneous) {
+ // all the dimensions of the chart have the same algorithm, multiplier and divisor
+ // we add all dimensions as labels
+
+ prometheus_label_copy(dimension, (names && rd->name) ? rd->name : rd->id, PROMETHEUS_ELEMENT_MAX);
+
+ if(unlikely(help))
+ buffer_sprintf(wb
+ , "# COMMENT %s_%s%s: chart \"%s\", context \"%s\", family \"%s\", dimension \"%s\", value * " COLLECTED_NUMBER_FORMAT " / " COLLECTED_NUMBER_FORMAT " %s %s (%s)\n"
+ , prefix
+ , context
+ , suffix
+ , (names && st->name) ? st->name : st->id
+ , st->context
+ , st->family
+ , (names && rd->name) ? rd->name : rd->id
+ , rd->multiplier
+ , rd->divisor
+ , h
+ , st->units
+ , t
+ );
+
+ if(unlikely(types))
+ buffer_sprintf(wb, "# COMMENT TYPE %s_%s%s %s\n"
+ , prefix
+ , context
+ , suffix
+ , t
+ );
+
+ buffer_sprintf(wb
+ , "%s_%s%s{chart=\"%s\",family=\"%s\",dimension=\"%s\"%s} " COLLECTED_NUMBER_FORMAT " %llu\n"
+ , prefix
+ , context
+ , suffix
+ , chart
+ , family
+ , dimension
+ , labels
+ , rd->last_collected_value
+ , timeval_msec(&rd->last_collected_time)
+ );
+ }
+ else {
+ // the dimensions of the chart do not have the same algorithm, multiplier or divisor
+ // we create a metric per dimension
+
+ prometheus_name_copy(dimension, (names && rd->name) ? rd->name : rd->id, PROMETHEUS_ELEMENT_MAX);
+
+ if(unlikely(help))
+ buffer_sprintf(wb
+ , "# COMMENT %s_%s_%s%s: chart \"%s\", context \"%s\", family \"%s\", dimension \"%s\", value * " COLLECTED_NUMBER_FORMAT " / " COLLECTED_NUMBER_FORMAT " %s %s (%s)\n"
+ , prefix
+ , context
+ , dimension
+ , suffix
+ , (names && st->name) ? st->name : st->id
+ , st->context
+ , st->family
+ , (names && rd->name) ? rd->name : rd->id
+ , rd->multiplier
+ , rd->divisor
+ , h
+ , st->units
+ , t
+ );
+
+ if(unlikely(types))
+ buffer_sprintf(wb, "# COMMENT TYPE %s_%s_%s%s %s\n"
+ , prefix
+ , context
+ , dimension
+ , suffix
+ , t
+ );
+
+ buffer_sprintf(wb
+ , "%s_%s_%s%s{chart=\"%s\",family=\"%s\"%s} " COLLECTED_NUMBER_FORMAT " %llu\n"
+ , prefix
+ , context
+ , dimension
+ , suffix
+ , chart
+ , family
+ , labels
+ , rd->last_collected_value
+ , timeval_msec(&rd->last_collected_time)
+ );
+ }
+ }
+ else {
+ // we need average or sum of the data
+
+ time_t first_t = after, last_t = before;
+ calculated_number value = backend_calculate_value_from_stored_data(st, rd, after, before, options, &first_t, &last_t);
+
+ if(!isnan(value) && !isinf(value)) {
+
+ if((options & BACKEND_SOURCE_BITS) == BACKEND_SOURCE_DATA_AVERAGE)
+ suffix = "_average";
+ else if((options & BACKEND_SOURCE_BITS) == BACKEND_SOURCE_DATA_SUM)
+ suffix = "_sum";
+
+ prometheus_label_copy(dimension, (names && rd->name) ? rd->name : rd->id, PROMETHEUS_ELEMENT_MAX);
+
+ if (unlikely(help))
+ buffer_sprintf(wb, "# COMMENT %s_%s%s%s: dimension \"%s\", value is %s, gauge, dt %llu to %llu inclusive\n"
+ , prefix
+ , context
+ , units
+ , suffix
+ , (names && rd->name) ? rd->name : rd->id
+ , st->units
+ , (unsigned long long)first_t
+ , (unsigned long long)last_t
+ );
+
+ if (unlikely(types))
+ buffer_sprintf(wb, "# COMMENT TYPE %s_%s%s%s gauge\n"
+ , prefix
+ , context
+ , units
+ , suffix
+ );
+
+ buffer_sprintf(wb, "%s_%s%s%s{chart=\"%s\",family=\"%s\",dimension=\"%s\"%s} " CALCULATED_NUMBER_FORMAT " %llu\n"
+ , prefix
+ , context
+ , units
+ , suffix
+ , chart
+ , family
+ , dimension
+ , labels
+ , value
+ , last_t * MSEC_PER_SEC
+ );
+ }
+ }
+ }
+ }
+
+ rrdset_unlock(st);
+ }
+ }
+
+ rrdhost_unlock(host);
+}
+
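+// decide the timeframe to send: normally since this server was last
+// seen, so every prometheus server gets exactly the data collected
+// after its previous scrape; a server seen for the first time gets
+// a single backend_update_every window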
+static inline time_t prometheus_preparation(RRDHOST *host, BUFFER *wb, uint32_t options, const char *server, time_t now, int help) {
+ if(!server || !*server) server = "default";
+
+ time_t after = prometheus_server_last_access(server, now);
+
+ int first_seen = 0;
+ if(!after) {
+ after = now - backend_update_every;
+ first_seen = 1;
+ }
+
+ if(after > now) {
+ // oops! this should never happen
+ after = now - backend_update_every;
+ }
+
+ if(help) {
+ int show_range = 1;
+ char *mode;
+ if((options & BACKEND_SOURCE_BITS) == BACKEND_SOURCE_DATA_AS_COLLECTED) {
+ mode = "as collected";
+ show_range = 0;
+ }
+ else if((options & BACKEND_SOURCE_BITS) == BACKEND_SOURCE_DATA_AVERAGE)
+ mode = "average";
+ else if((options & BACKEND_SOURCE_BITS) == BACKEND_SOURCE_DATA_SUM)
+ mode = "sum";
+ else
+ mode = "unknown";
+
+ buffer_sprintf(wb, "# COMMENT netdata \"%s\" to %sprometheus \"%s\", source \"%s\", last seen %lu %s"
+ , host->hostname
+ , (first_seen)?"FIRST SEEN ":""
+ , server
+ , mode
+ , (unsigned long)((first_seen)?0:(now - after))
+ , (first_seen)?"never":"seconds ago"
+ );
+
+ if(show_range)
+ buffer_sprintf(wb, ", time range %lu to %lu", (unsigned long)after, (unsigned long)now);
+
+ buffer_strcat(wb, "\n\n");
+ }
+
+ return after;
+}
+
+void rrd_stats_api_v1_charts_allmetrics_prometheus_single_host(RRDHOST *host, BUFFER *wb, const char *server, const char *prefix, uint32_t options, int help, int types, int names) {
+ time_t before = now_realtime_sec();
+
+ // we start at the point we had stopped before
+ time_t after = prometheus_preparation(host, wb, options, server, before, help);
+
+ rrd_stats_api_v1_charts_allmetrics_prometheus(host, wb, prefix, options, after, before, 0, help, types, names);
+}
+
+void rrd_stats_api_v1_charts_allmetrics_prometheus_all_hosts(RRDHOST *host, BUFFER *wb, const char *server, const char *prefix, uint32_t options, int help, int types, int names) {
+ time_t before = now_realtime_sec();
+
+ // we start at the point we had stopped before
+ time_t after = prometheus_preparation(host, wb, options, server, before, help);
+
+ rrd_rdlock();
+ rrdhost_foreach_read(host) {
+ rrd_stats_api_v1_charts_allmetrics_prometheus(host, wb, prefix, options, after, before, 1, help, types, names);
+ }
+ rrd_unlock();
+}
diff --git a/src/backend_prometheus.h b/src/backend_prometheus.h
new file mode 100644
index 00000000..53dddb0d
--- /dev/null
+++ b/src/backend_prometheus.h
@@ -0,0 +1,11 @@
+//
+// Created by costa on 09/07/17.
+//
+
+#ifndef NETDATA_BACKEND_PROMETHEUS_H
+#define NETDATA_BACKEND_PROMETHEUS_H
+
+extern void rrd_stats_api_v1_charts_allmetrics_prometheus_single_host(RRDHOST *host, BUFFER *wb, const char *server, const char *prefix, uint32_t options, int help, int types, int names);
+extern void rrd_stats_api_v1_charts_allmetrics_prometheus_all_hosts(RRDHOST *host, BUFFER *wb, const char *server, const char *prefix, uint32_t options, int help, int types, int names);
+
+#endif //NETDATA_BACKEND_PROMETHEUS_H
diff --git a/src/backends.c b/src/backends.c
index 3e385cab..eed4ab40 100644
--- a/src/backends.c
+++ b/src/backends.c
@@ -22,47 +22,81 @@
// 5. repeats the above forever.
//
-#define BACKEND_SOURCE_DATA_AS_COLLECTED 0x00000001
-#define BACKEND_SOURCE_DATA_AVERAGE 0x00000002
-#define BACKEND_SOURCE_DATA_SUM 0x00000004
-
+const char *backend_prefix = "netdata";
+int backend_send_names = 1;
+int backend_update_every = 10;
+uint32_t backend_options = BACKEND_SOURCE_DATA_AVERAGE;
// ----------------------------------------------------------------------------
// helper functions for backends
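+// copy a netdata id into a backend metric name: dots are kept (they
+// separate the components of graphite/opentsdb metric paths), while
+// any other non-alphanumeric character becomes '_'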
+static inline size_t backend_name_copy(char *d, const char *s, size_t usable) {
+ size_t n;
+
+ for(n = 0; *s && n < usable ; d++, s++, n++) {
+ char c = *s;
+
+ if(c != '.' && !isalnum(c)) *d = '_';
+ else *d = c;
+ }
+ *d = '\0';
+
+ return n;
+}
+
// calculate the SUM or AVERAGE of a dimension, for any timeframe
// may return NAN if the database does not have any value in the given timeframe
-static inline calculated_number backend_calculate_value_from_stored_data(
+inline calculated_number backend_calculate_value_from_stored_data(
RRDSET *st // the chart
, RRDDIM *rd // the dimension
, time_t after // the start timestamp
, time_t before // the end timestamp
, uint32_t options // BACKEND_SOURCE_* bitmap
+ , time_t *first_timestamp // the first point of the database used in this response
+ , time_t *last_timestamp // the timestamp that should be reported to backend
) {
// find the edges of the rrd database for this chart
time_t first_t = rrdset_first_entry_t(st);
time_t last_t = rrdset_last_entry_t(st);
+ time_t update_every = st->update_every;
- if(unlikely(before < first_t || after > last_t))
- // the chart has not been updated in the wanted timeframe
- return NAN;
+ // step back a little, to make sure we have complete data collection
+ // for all metrics
+ after -= update_every * 2;
+ before -= update_every * 2;
// align the time-frame
- // for 'after' also skip the first value by adding st->update_every
- after = after - after % st->update_every + st->update_every;
- before = before - before % st->update_every;
+ after = after - (after % update_every);
+ before = before - (before % update_every);
- if(unlikely(after < first_t))
- after = first_t;
+ // for 'before', lose another iteration
+ // the latest point will be reported the next time
+ before -= update_every;
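+ // e.g. with update_every = 10 and a request for 60..120:
+ // after = 60 - 20 = 40, before = 120 - 20 = 100 (both already aligned),
+ // then before -= 10, so the collected timeframe becomes 40..90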
if(unlikely(after > before))
- // this can happen when st->update_every > before - after
- before = after;
+ // this can happen when update_every > before - after
+ after = before;
+
+ if(unlikely(after < first_t))
+ after = first_t;
if(unlikely(before > last_t))
before = last_t;
+ if(unlikely(before < first_t || after > last_t)) {
+ // the chart has not been updated in the wanted timeframe
+ debug(D_BACKEND, "BACKEND: %s.%s.%s: aligned timeframe %lu to %lu is outside the chart's database range %lu to %lu",
+ st->rrdhost->hostname, st->id, rd->id,
+ (unsigned long)after, (unsigned long)before,
+ (unsigned long)first_t, (unsigned long)last_t
+ );
+ return NAN;
+ }
+
+ *first_timestamp = after;
+ *last_timestamp = before;
+
size_t counter = 0;
calculated_number sum = 0;
@@ -88,10 +122,15 @@ static inline calculated_number backend_calculate_value_from_stored_data(
counter++;
}
- if(unlikely(!counter))
+ if(unlikely(!counter)) {
+ debug(D_BACKEND, "BACKEND: %s.%s.%s: no values stored in database for range %lu to %lu",
+ st->rrdhost->hostname, st->id, rd->id,
+ (unsigned long)after, (unsigned long)before
+ );
return NAN;
+ }
- if(unlikely(options & BACKEND_SOURCE_DATA_SUM))
+ if(unlikely((options & BACKEND_SOURCE_BITS) == BACKEND_SOURCE_DATA_SUM))
return sum;
return sum / (calculated_number)counter;
@@ -113,7 +152,7 @@ static inline int discard_response(BUFFER *b, const char *backend) {
}
*d = '\0';
- info("Received %zu bytes from %s backend. Ignoring them. Sample: '%s'", buffer_strlen(b), backend, sample);
+ info("BACKEND: received %zu bytes from %s backend. Ignoring them. Sample: '%s'", buffer_strlen(b), backend, sample);
buffer_flush(b);
return 0;
}
@@ -138,13 +177,18 @@ static inline int format_dimension_collected_graphite_plaintext(
(void)before;
(void)options;
+ char chart_name[RRD_ID_LENGTH_MAX + 1];
+ char dimension_name[RRD_ID_LENGTH_MAX + 1];
+ backend_name_copy(chart_name, (backend_send_names && st->name)?st->name:st->id, RRD_ID_LENGTH_MAX);
+ backend_name_copy(dimension_name, (backend_send_names && rd->name)?rd->name:rd->id, RRD_ID_LENGTH_MAX);
+
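+ // produces lines like (values illustrative):
+ //   netdata.myhost.system.cpu.user 42 1499999999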
buffer_sprintf(
b
, "%s.%s.%s.%s " COLLECTED_NUMBER_FORMAT " %u\n"
, prefix
, hostname
- , st->id
- , rd->id
+ , chart_name
+ , dimension_name
, rd->last_collected_value
, (uint32_t)rd->last_collected_time.tv_sec
);
@@ -165,7 +209,13 @@ static inline int format_dimension_stored_graphite_plaintext(
) {
(void)host;
- calculated_number value = backend_calculate_value_from_stored_data(st, rd, after, before, options);
+ char chart_name[RRD_ID_LENGTH_MAX + 1];
+ char dimension_name[RRD_ID_LENGTH_MAX + 1];
+ backend_name_copy(chart_name, (backend_send_names && st->name)?st->name:st->id, RRD_ID_LENGTH_MAX);
+ backend_name_copy(dimension_name, (backend_send_names && rd->name)?rd->name:rd->id, RRD_ID_LENGTH_MAX);
+
+ time_t first_t = after, last_t = before;
+ calculated_number value = backend_calculate_value_from_stored_data(st, rd, after, before, options, &first_t, &last_t);
if(!isnan(value)) {
@@ -174,10 +224,10 @@ static inline int format_dimension_stored_graphite_plaintext(
, "%s.%s.%s.%s " CALCULATED_NUMBER_FORMAT " %u\n"
, prefix
, hostname
- , st->id
- , rd->id
+ , chart_name
+ , dimension_name
, value
- , (uint32_t) before
+ , (uint32_t) last_t
);
return 1;
@@ -209,15 +259,22 @@ static inline int format_dimension_collected_opentsdb_telnet(
(void)before;
(void)options;
+ char chart_name[RRD_ID_LENGTH_MAX + 1];
+ char dimension_name[RRD_ID_LENGTH_MAX + 1];
+ backend_name_copy(chart_name, (backend_send_names && st->name)?st->name:st->id, RRD_ID_LENGTH_MAX);
+ backend_name_copy(dimension_name, (backend_send_names && rd->name)?rd->name:rd->id, RRD_ID_LENGTH_MAX);
+
buffer_sprintf(
b
- , "put %s.%s.%s %u " COLLECTED_NUMBER_FORMAT " host=%s\n"
+ , "put %s.%s.%s %u " COLLECTED_NUMBER_FORMAT " host=%s%s%s\n"
, prefix
- , st->id
- , rd->id
+ , chart_name
+ , dimension_name
, (uint32_t)rd->last_collected_time.tv_sec
, rd->last_collected_value
, hostname
+ , (host->tags)?" ":""
+ , (host->tags)?host->tags:""
);
return 1;
@@ -236,19 +293,27 @@ static inline int format_dimension_stored_opentsdb_telnet(
) {
(void)host;
- calculated_number value = backend_calculate_value_from_stored_data(st, rd, after, before, options);
+ time_t first_t = after, last_t = before;
+ calculated_number value = backend_calculate_value_from_stored_data(st, rd, after, before, options, &first_t, &last_t);
+
+ char chart_name[RRD_ID_LENGTH_MAX + 1];
+ char dimension_name[RRD_ID_LENGTH_MAX + 1];
+ backend_name_copy(chart_name, (backend_send_names && st->name)?st->name:st->id, RRD_ID_LENGTH_MAX);
+ backend_name_copy(dimension_name, (backend_send_names && rd->name)?rd->name:rd->id, RRD_ID_LENGTH_MAX);
if(!isnan(value)) {
buffer_sprintf(
b
- , "put %s.%s.%s %u " CALCULATED_NUMBER_FORMAT " host=%s\n"
+ , "put %s.%s.%s %u " CALCULATED_NUMBER_FORMAT " host=%s%s%s\n"
, prefix
- , st->id
- , rd->id
- , (uint32_t) before
+ , chart_name
+ , dimension_name
+ , (uint32_t) last_t
, value
, hostname
+ , (host->tags)?" ":""
+ , (host->tags)?host->tags:""
);
return 1;
@@ -329,7 +394,8 @@ static inline int format_dimension_stored_json_plaintext(
) {
(void)host;
- calculated_number value = backend_calculate_value_from_stored_data(st, rd, after, before, options);
+ time_t first_t = after, last_t = before;
+ calculated_number value = backend_calculate_value_from_stored_data(st, rd, after, before, options, &first_t, &last_t);
if(!isnan(value)) {
buffer_sprintf(b, "{"
@@ -362,7 +428,7 @@ static inline int format_dimension_stored_json_plaintext(
rd->name,
value,
- (uint32_t)before
+ (uint32_t) last_t
);
return 1;
@@ -378,6 +444,56 @@ static inline int process_json_response(BUFFER *b) {
// ----------------------------------------------------------------------------
// the backend thread
+static SIMPLE_PATTERN *charts_pattern = NULL;
+
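+// decide whether a chart should be sent to the backend; the result of
+// the pattern match is cached on the chart (RRDSET_FLAG_BACKEND_SEND /
+// RRDSET_FLAG_BACKEND_IGNORE), so the pattern is evaluated only once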
+inline int backends_can_send_rrdset(uint32_t options, RRDSET *st) {
+ if(unlikely(rrdset_flag_check(st, RRDSET_FLAG_BACKEND_IGNORE)))
+ return 0;
+
+ if(unlikely(!rrdset_flag_check(st, RRDSET_FLAG_BACKEND_SEND))) {
+ // we have not checked this chart
+ if(simple_pattern_matches(charts_pattern, st->id) || simple_pattern_matches(charts_pattern, st->name))
+ rrdset_flag_set(st, RRDSET_FLAG_BACKEND_SEND);
+ else {
+ rrdset_flag_set(st, RRDSET_FLAG_BACKEND_IGNORE);
+ debug(D_BACKEND, "BACKEND: not sending chart '%s' of host '%s', because it is disabled for backends.", st->id, st->rrdhost->hostname);
+ return 0;
+ }
+ }
+
+ if(unlikely(!rrdset_is_available_for_backends(st))) {
+ debug(D_BACKEND, "BACKEND: not sending chart '%s' of host '%s', because it is not available for backends.", st->id, st->rrdhost->hostname);
+ return 0;
+ }
+
+ if(unlikely(st->rrd_memory_mode == RRD_MEMORY_MODE_NONE && !((options & BACKEND_SOURCE_BITS) == BACKEND_SOURCE_DATA_AS_COLLECTED))) {
+ debug(D_BACKEND, "BACKEND: not sending chart '%s' of host '%s' because its memory mode is '%s' and the backend requires database access.", st->id, st->rrdhost->hostname, rrd_memory_mode_name(st->rrdhost->rrd_memory_mode));
+ return 0;
+ }
+
+ return 1;
+}
+
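+// map the "data source" configuration string to a BACKEND_SOURCE_*
+// flag, clearing any previously set source bits of 'mode' while
+// keeping all its other bits intact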
+inline uint32_t backend_parse_data_source(const char *source, uint32_t mode) {
+ if(!strcmp(source, "raw") || !strcmp(source, "as collected") || !strcmp(source, "as-collected") || !strcmp(source, "as_collected") || !strcmp(source, "ascollected")) {
+ mode |= BACKEND_SOURCE_DATA_AS_COLLECTED;
+ mode &= ~(BACKEND_SOURCE_BITS ^ BACKEND_SOURCE_DATA_AS_COLLECTED);
+ }
+ else if(!strcmp(source, "average")) {
+ mode |= BACKEND_SOURCE_DATA_AVERAGE;
+ mode &= ~(BACKEND_SOURCE_BITS ^ BACKEND_SOURCE_DATA_AVERAGE);
+ }
+ else if(!strcmp(source, "sum") || !strcmp(source, "volume")) {
+ mode |= BACKEND_SOURCE_DATA_SUM;
+ mode &= ~(BACKEND_SOURCE_BITS ^ BACKEND_SOURCE_DATA_SUM);
+ }
+ else {
+ error("BACKEND: invalid data source method '%s'.", source);
+ }
+
+ return mode;
+}
+
void *backends_main(void *ptr) {
int default_port = 0;
int sock = -1;
@@ -387,13 +503,13 @@ void *backends_main(void *ptr) {
int (*backend_request_formatter)(BUFFER *, const char *, RRDHOST *, const char *, RRDSET *, RRDDIM *, time_t, time_t, uint32_t) = NULL;
int (*backend_response_checker)(BUFFER *) = NULL;
- info("BACKEND thread created with task id %d", gettid());
+ info("BACKEND: thread created with task id %d", gettid());
if(pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, NULL) != 0)
- error("Cannot set pthread cancel type to DEFERRED.");
+ error("BACKEND: cannot set pthread cancel type to DEFERRED.");
if(pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL) != 0)
- error("Cannot set pthread cancel state to ENABLE.");
+ error("BACKEND: cannot set pthread cancel state to ENABLE.");
// ------------------------------------------------------------------------
// collect configuration options
@@ -402,45 +518,35 @@ void *backends_main(void *ptr) {
.tv_sec = 0,
.tv_usec = 0
};
- uint32_t options;
int enabled = config_get_boolean(CONFIG_SECTION_BACKEND, "enabled", 0);
const char *source = config_get(CONFIG_SECTION_BACKEND, "data source", "average");
const char *type = config_get(CONFIG_SECTION_BACKEND, "type", "graphite");
const char *destination = config_get(CONFIG_SECTION_BACKEND, "destination", "localhost");
- const char *prefix = config_get(CONFIG_SECTION_BACKEND, "prefix", "netdata");
+ backend_prefix = config_get(CONFIG_SECTION_BACKEND, "prefix", "netdata");
const char *hostname = config_get(CONFIG_SECTION_BACKEND, "hostname", localhost->hostname);
- int frequency = (int)config_get_number(CONFIG_SECTION_BACKEND, "update every", 10);
+ backend_update_every = (int)config_get_number(CONFIG_SECTION_BACKEND, "update every", backend_update_every);
int buffer_on_failures = (int)config_get_number(CONFIG_SECTION_BACKEND, "buffer on failures", 10);
- long timeoutms = config_get_number(CONFIG_SECTION_BACKEND, "timeout ms", frequency * 2 * 1000);
+ long timeoutms = config_get_number(CONFIG_SECTION_BACKEND, "timeout ms", backend_update_every * 2 * 1000);
+ backend_send_names = config_get_boolean(CONFIG_SECTION_BACKEND, "send names instead of ids", backend_send_names);
+
+ charts_pattern = simple_pattern_create(config_get(CONFIG_SECTION_BACKEND, "send charts matching", "*"), SIMPLE_PATTERN_EXACT);
+
// ------------------------------------------------------------------------
// validate configuration options
// and prepare for sending data to our backend
- if(!enabled || frequency < 1)
- goto cleanup;
-
- if(!strcmp(source, "as collected")) {
- options = BACKEND_SOURCE_DATA_AS_COLLECTED;
- }
- else if(!strcmp(source, "average")) {
- options = BACKEND_SOURCE_DATA_AVERAGE;
- }
- else if(!strcmp(source, "sum") || !strcmp(source, "volume")) {
- options = BACKEND_SOURCE_DATA_SUM;
- }
- else {
- error("Invalid data source method '%s' for backend given. Disabling backed.", source);
- goto cleanup;
- }
+ backend_options = backend_parse_data_source(source, backend_options);
if(timeoutms < 1) {
- error("BACKED invalid timeout %ld ms given. Assuming %d ms.", timeoutms, frequency * 2 * 1000);
- timeoutms = frequency * 2 * 1000;
+ error("BACKEND: invalid timeout %ld ms given. Assuming %d ms.", timeoutms, backend_update_every * 2 * 1000);
+ timeoutms = backend_update_every * 2 * 1000;
}
timeout.tv_sec = (timeoutms * 1000) / 1000000;
timeout.tv_usec = (timeoutms * 1000) % 1000000;
+ if(!enabled || backend_update_every < 1)
+ goto cleanup;
// ------------------------------------------------------------------------
// select the backend type
@@ -450,7 +556,7 @@ void *backends_main(void *ptr) {
default_port = 2003;
backend_response_checker = process_graphite_response;
- if(options == BACKEND_SOURCE_DATA_AS_COLLECTED)
+ if((backend_options & BACKEND_SOURCE_BITS) == BACKEND_SOURCE_DATA_AS_COLLECTED)
backend_request_formatter = format_dimension_collected_graphite_plaintext;
else
backend_request_formatter = format_dimension_stored_graphite_plaintext;
@@ -461,7 +567,7 @@ void *backends_main(void *ptr) {
default_port = 4242;
backend_response_checker = process_opentsdb_response;
- if(options == BACKEND_SOURCE_DATA_AS_COLLECTED)
+ if((backend_options & BACKEND_SOURCE_BITS) == BACKEND_SOURCE_DATA_AS_COLLECTED)
backend_request_formatter = format_dimension_collected_opentsdb_telnet;
else
backend_request_formatter = format_dimension_stored_opentsdb_telnet;
@@ -472,19 +578,19 @@ void *backends_main(void *ptr) {
default_port = 5448;
backend_response_checker = process_json_response;
- if (options == BACKEND_SOURCE_DATA_AS_COLLECTED)
+ if ((backend_options & BACKEND_SOURCE_BITS) == BACKEND_SOURCE_DATA_AS_COLLECTED)
backend_request_formatter = format_dimension_collected_json_plaintext;
else
backend_request_formatter = format_dimension_stored_json_plaintext;
}
else {
- error("Unknown backend type '%s'", type);
+ error("BACKEND: Unknown backend type '%s'", type);
goto cleanup;
}
if(backend_request_formatter == NULL || backend_response_checker == NULL) {
- error("backend is misconfigured - disabling it.");
+ error("BACKEND: backend is misconfigured - disabling it.");
goto cleanup;
}
@@ -509,18 +615,18 @@ void *backends_main(void *ptr) {
chart_backend_reconnects = 0,
chart_backend_latency = 0;
- RRDSET *chart_metrics = rrdset_create_localhost("netdata", "backend_metrics", NULL, "backend", NULL, "Netdata Buffered Metrics", "metrics", 130600, frequency, RRDSET_TYPE_LINE);
+ RRDSET *chart_metrics = rrdset_create_localhost("netdata", "backend_metrics", NULL, "backend", NULL, "Netdata Buffered Metrics", "metrics", 130600, backend_update_every, RRDSET_TYPE_LINE);
rrddim_add(chart_metrics, "buffered", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
rrddim_add(chart_metrics, "lost", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
rrddim_add(chart_metrics, "sent", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- RRDSET *chart_bytes = rrdset_create_localhost("netdata", "backend_bytes", NULL, "backend", NULL, "Netdata Backend Data Size", "KB", 130610, frequency, RRDSET_TYPE_AREA);
+ RRDSET *chart_bytes = rrdset_create_localhost("netdata", "backend_bytes", NULL, "backend", NULL, "Netdata Backend Data Size", "KB", 130610, backend_update_every, RRDSET_TYPE_AREA);
rrddim_add(chart_bytes, "buffered", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
rrddim_add(chart_bytes, "lost", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
rrddim_add(chart_bytes, "sent", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
rrddim_add(chart_bytes, "received", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
- RRDSET *chart_ops = rrdset_create_localhost("netdata", "backend_ops", NULL, "backend", NULL, "Netdata Backend Operations", "operations", 130630, frequency, RRDSET_TYPE_LINE);
+ RRDSET *chart_ops = rrdset_create_localhost("netdata", "backend_ops", NULL, "backend", NULL, "Netdata Backend Operations", "operations", 130630, backend_update_every, RRDSET_TYPE_LINE);
rrddim_add(chart_ops, "write", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
rrddim_add(chart_ops, "discard", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
rrddim_add(chart_ops, "reconnect", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
@@ -534,11 +640,11 @@ void *backends_main(void *ptr) {
*
* issue #1432 and https://www.softlab.ntua.gr/facilities/documentation/unix/unix-socket-faq/unix-socket-faq-2.html
*
- RRDSET *chart_latency = rrdset_create_localhost("netdata", "backend_latency", NULL, "backend", NULL, "Netdata Backend Latency", "ms", 130620, frequency, RRDSET_TYPE_AREA);
+ RRDSET *chart_latency = rrdset_create_localhost("netdata", "backend_latency", NULL, "backend", NULL, "Netdata Backend Latency", "ms", 130620, backend_update_every, RRDSET_TYPE_AREA);
rrddim_add(chart_latency, "latency", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE);
*/
- RRDSET *chart_rusage = rrdset_create_localhost("netdata", "backend_thread_cpu", NULL, "backend", NULL, "NetData Backend Thread CPU usage", "milliseconds/s", 130630, frequency, RRDSET_TYPE_STACKED);
+ RRDSET *chart_rusage = rrdset_create_localhost("netdata", "backend_thread_cpu", NULL, "backend", NULL, "NetData Backend Thread CPU usage", "milliseconds/s", 130630, backend_update_every, RRDSET_TYPE_STACKED);
rrddim_add(chart_rusage, "user", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL);
rrddim_add(chart_rusage, "system", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL);
@@ -546,9 +652,9 @@ void *backends_main(void *ptr) {
// ------------------------------------------------------------------------
// prepare the backend main loop
- info("BACKEND configured ('%s' on '%s' sending '%s' data, every %d seconds, as host '%s', with prefix '%s')", type, destination, source, frequency, hostname, prefix);
+ info("BACKEND: configured ('%s' on '%s' sending '%s' data, every %d seconds, as host '%s', with prefix '%s')", type, destination, source, backend_update_every, hostname, backend_prefix);
- usec_t step_ut = frequency * USEC_PER_SEC;
+ usec_t step_ut = backend_update_every * USEC_PER_SEC;
time_t after = now_realtime_sec();
int failures = 0;
heartbeat_t hb;
@@ -558,9 +664,10 @@ void *backends_main(void *ptr) {
// ------------------------------------------------------------------------
// Wait for the next iteration point.
+
heartbeat_next(&hb, step_ut);
time_t before = now_realtime_sec();
-
+ debug(D_BACKEND, "BACKEND: preparing buffer for timeframe %lu to %lu", (unsigned long)after, (unsigned long)before);
// ------------------------------------------------------------------------
// add to the buffer the data we need to send to the backend
@@ -568,33 +675,59 @@ void *backends_main(void *ptr) {
int pthreadoldcancelstate;
if(unlikely(pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &pthreadoldcancelstate) != 0))
- error("Cannot set pthread cancel state to DISABLE.");
+ error("BACKEND: cannot set pthread cancel state to DISABLE.");
+
+ size_t count_hosts = 0;
+ size_t count_charts_total = 0;
+ size_t count_dims_total = 0;
rrd_rdlock();
RRDHOST *host;
rrdhost_foreach_read(host) {
- if(host->rrd_memory_mode == RRD_MEMORY_MODE_NONE)
- continue;
-
rrdhost_rdlock(host);
+ count_hosts++;
+ size_t count_charts = 0;
+ size_t count_dims = 0;
+ size_t count_dims_skipped = 0;
+
+ const char *__hostname = (host == localhost)?hostname:host->hostname;
+
RRDSET *st;
rrdset_foreach_read(st, host) {
- rrdset_rdlock(st);
+ if(likely(backends_can_send_rrdset(backend_options, st))) {
+ rrdset_rdlock(st);
+
+ count_charts++;
+
+ RRDDIM *rd;
+ rrddim_foreach_read(rd, st) {
+ if (likely(rd->last_collected_time.tv_sec >= after)) {
+ chart_buffered_metrics += backend_request_formatter(b, backend_prefix, host, __hostname, st, rd, after, before, backend_options);
+ count_dims++;
+ }
+ else {
+ debug(D_BACKEND, "BACKEND: not sending dimension '%s' of chart '%s' from host '%s', its last data collection (%lu) is not within our timeframe (%lu to %lu)", rd->id, st->id, __hostname, (unsigned long)rd->last_collected_time.tv_sec, (unsigned long)after, (unsigned long)before);
+ count_dims_skipped++;
+ }
+ }
- RRDDIM *rd;
- rrddim_foreach_read(rd, st) {
- if(rd->last_collected_time.tv_sec >= after)
- chart_buffered_metrics += backend_request_formatter(b, prefix, host, (host == localhost)?hostname:host->hostname, st, rd, after, before, options);
+ rrdset_unlock(st);
}
- rrdset_unlock(st);
}
+
+ debug(D_BACKEND, "BACKEND: sending host '%s', metrics of %zu dimensions, of %zu charts. Skipped %zu dimensions.", __hostname, count_dims, count_charts, count_dims_skipped);
+ count_charts_total += count_charts;
+ count_dims_total += count_dims;
+
rrdhost_unlock(host);
}
rrd_unlock();
+ debug(D_BACKEND, "BACKEND: buffer has %zu bytes, added metrics for %zu dimensions, of %zu charts, from %zu hosts", buffer_strlen(b), count_dims_total, count_charts_total, count_hosts);
+
if(unlikely(pthread_setcancelstate(pthreadoldcancelstate, NULL) != 0))
- error("Cannot set pthread cancel state to RESTORE (%d).", pthreadoldcancelstate);
+ error("BACKEND: cannot set pthread cancel state to RESTORE (%d).", pthreadoldcancelstate);
// ------------------------------------------------------------------------
@@ -639,14 +772,14 @@ void *backends_main(void *ptr) {
chart_receptions++;
}
else if(r == 0) {
- error("Backend '%s' closed the socket", destination);
+ error("BACKEND: '%s' closed the socket", destination);
close(sock);
sock = -1;
}
else {
// failed to receive data
if(errno != EAGAIN && errno != EWOULDBLOCK) {
- error("Cannot receive data from backend '%s'.", destination);
+ error("BACKEND: cannot receive data from backend '%s'.", destination);
}
}
}
@@ -698,7 +831,7 @@ void *backends_main(void *ptr) {
}
else {
// oops! we couldn't send (all or some of the) data
- error("Failed to write data to database backend '%s'. Willing to write %zu bytes, wrote %zd bytes. Will re-connect.", destination, len, written);
+ error("BACKEND: failed to write data to database backend '%s'. Willing to write %zu bytes, wrote %zd bytes. Will re-connect.", destination, len, written);
chart_transmission_failures++;
if(written != -1)
@@ -713,7 +846,7 @@ void *backends_main(void *ptr) {
}
}
else {
- error("Failed to update database backend '%s'", destination);
+ error("BACKEND: failed to update database backend '%s'", destination);
chart_transmission_failures++;
// increment the counter we check for data loss
@@ -723,7 +856,7 @@ void *backends_main(void *ptr) {
if(failures > buffer_on_failures) {
// too bad! we are going to lose data
chart_lost_bytes += buffer_strlen(b);
- error("Reached %d backend failures. Flushing buffers to protect this host - this results in data loss on back-end server '%s'", failures, destination);
+ error("BACKEND: reached %d backend failures. Flushing buffers to protect this host - this results in data loss on back-end server '%s'", failures, destination);
buffer_flush(b);
failures = 0;
chart_data_lost_events++;
@@ -781,7 +914,7 @@ cleanup:
buffer_free(b);
buffer_free(response);
- info("BACKEND thread exiting");
+ info("BACKEND: thread exiting");
static_thread->enabled = 0;
pthread_exit(NULL);
diff --git a/src/backends.h b/src/backends.h
index 61122a1d..e882f3db 100644
--- a/src/backends.h
+++ b/src/backends.h
@@ -1,6 +1,30 @@
#ifndef NETDATA_BACKENDS_H
#define NETDATA_BACKENDS_H 1
-void *backends_main(void *ptr);
+#define BACKEND_SOURCE_DATA_AS_COLLECTED 0x00000001
+#define BACKEND_SOURCE_DATA_AVERAGE 0x00000002
+#define BACKEND_SOURCE_DATA_SUM 0x00000004
+
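+// BACKEND_SOURCE_BITS masks out the mutually exclusive data-source mode bits
+// kept in backend_options.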
+#define BACKEND_SOURCE_BITS (BACKEND_SOURCE_DATA_AS_COLLECTED|BACKEND_SOURCE_DATA_AVERAGE|BACKEND_SOURCE_DATA_SUM)
+
+extern int backend_send_names;
+extern int backend_update_every;
+extern uint32_t backend_options;
+extern const char *backend_prefix;
+
+extern void *backends_main(void *ptr);
+
+extern int backends_can_send_rrdset(uint32_t options, RRDSET *st);
+extern uint32_t backend_parse_data_source(const char *source, uint32_t mode);
+
+extern calculated_number backend_calculate_value_from_stored_data(
+ RRDSET *st // the chart
+ , RRDDIM *rd // the dimension
+ , time_t after // the start timestamp
+ , time_t before // the end timestamp
+ , uint32_t options // BACKEND_SOURCE_* bitmap
+ , time_t *first_timestamp // the timestamp of the first point used in this response
+ , time_t *last_timestamp // the timestamp that should be reported to backend
+);
#endif /* NETDATA_BACKENDS_H */
diff --git a/src/clocks.c b/src/clocks.c
index 879ebf91..8f2aa740 100644
--- a/src/clocks.c
+++ b/src/clocks.c
@@ -115,8 +115,8 @@ usec_t heartbeat_next(heartbeat_t *hb, usec_t tick)
if(likely(*hb != 0ULL)) {
usec_t dt = now - *hb;
*hb = now;
- if(unlikely(dt / tick > 1))
- error("heartbeat missed between %llu usec and %llu usec", *hb, now);
+ if(unlikely(dt >= tick + tick / 2))
+ error("heartbeat missed %llu microseconds", dt - tick);
return dt;
}
else {
diff --git a/src/clocks.h b/src/clocks.h
index 197b5431..ca571525 100644
--- a/src/clocks.h
+++ b/src/clocks.h
@@ -55,6 +55,8 @@ typedef usec_t heartbeat_t;
#define USEC_PER_SEC 1000000ULL
#define MSEC_PER_SEC 1000ULL
+#define USEC_PER_MS 1000ULL
+
#ifndef HAVE_CLOCK_GETTIME
/* Fallback function for POSIX.1-2001 clock_gettime() function.
*
diff --git a/src/common.c b/src/common.c
index 88fcf85b..aa75c198 100644
--- a/src/common.c
+++ b/src/common.c
@@ -226,6 +226,13 @@ void json_escape_string(char *dst, const char *src, size_t size) {
*d = '\0';
}
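+// json_fix_string() sanitizes a string in place so it can be embedded in JSON
+// without escaping: backslashes become slashes and double quotes become single
+// quotes, e.g.  say "hi" c:\tmp  ->  say 'hi' c:/tmp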
+void json_fix_string(char *s) {
+ for( ; *s ;s++) {
+ if(unlikely(*s == '\\')) *s = '/';
+ else if(unlikely(*s == '"')) *s = '\'';
+ }
+}
+
int sleep_usec(usec_t usec) {
#ifndef NETDATA_WITH_USLEEP
@@ -895,9 +902,8 @@ char *mystrsep(char **ptr, char *s) {
char *trim(char *s) {
// skip leading spaces
- // and 'comments' as well!?
while (*s && isspace(*s)) s++;
- if (!*s || *s == '#') return NULL;
+ if (!*s) return NULL;
// skip tailing spaces
// this way is way faster. Writes only one NUL char.
@@ -913,105 +919,163 @@ char *trim(char *s) {
return s;
}
-void *mymmap(const char *filename, size_t size, int flags, int ksm) {
-#ifndef MADV_MERGEABLE
- (void)ksm;
-#endif
- static int log_madvise_1 = 1;
-#ifdef MADV_MERGEABLE
- static int log_madvise_2 = 1, log_madvise_3 = 1;
-#endif
- void *mem = NULL;
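+// trim_all(): like trim(), but also squeezes internal runs of whitespace down
+// to a single space, e.g. "  foo \t bar  " becomes "foo bar"; returns NULL
+// when nothing but whitespace remains.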
+inline char *trim_all(char *buffer) {
+ char *d = buffer, *s = buffer;
+
+ // skip spaces
+ while(isspace(*s)) s++;
+
+ while(*s) {
+ // copy the non-space part
+ while(*s && !isspace(*s)) *d++ = *s++;
+
+ // add a space if we have to
+ if(*s && isspace(*s)) {
+ *d++ = ' ';
+ s++;
+ }
+
+ // skip spaces
+ while(isspace(*s)) s++;
+ }
+
+ *d = '\0';
+
+ if(d > buffer) {
+ d--;
+ if(isspace(*d)) *d = '\0';
+ }
+
+ if(!buffer[0]) return NULL;
+ return buffer;
+}
+
+static int memory_file_open(const char *filename, size_t size) {
+ // info("memory_file_open('%s', %zu", filename, size);
- errno = 0;
int fd = open(filename, O_RDWR | O_CREAT | O_NOATIME, 0664);
if (fd != -1) {
if (lseek(fd, size, SEEK_SET) == (off_t) size) {
if (write(fd, "", 1) == 1) {
if (ftruncate(fd, size))
error("Cannot truncate file '%s' to size %zu. Will use the larger file.", filename, size);
+ }
+ else error("Cannot write to file '%s' at position %zu.", filename, size);
+ }
+ else error("Cannot seek file '%s' to size %zu.", filename, size);
+ }
+ else error("Cannot create/open file '%s'.", filename);
-#ifdef MADV_MERGEABLE
- if (flags & MAP_SHARED || !enable_ksm || !ksm) {
-#endif
- mem = mmap(NULL, size, PROT_READ | PROT_WRITE, flags, fd, 0);
- if (mem == MAP_FAILED) {
- error("Cannot allocate SHARED memory for file '%s'.", filename);
- mem = NULL;
- }
- else {
+ return fd;
+}
+
+// mmap_shared is used for memory mode = map
+static void *memory_file_mmap(const char *filename, size_t size, int flags) {
+ // info("memory_file_mmap('%s', %zu", filename, size);
+ static int log_madvise = 1;
+
+ int fd = -1;
+ if(filename) {
+ fd = memory_file_open(filename, size);
+ if(fd == -1) return MAP_FAILED;
+ }
+
+ void *mem = mmap(NULL, size, PROT_READ | PROT_WRITE, flags, fd, 0);
+ if (mem != MAP_FAILED) {
#ifdef NETDATA_LOG_ALLOCATIONS
- mmap_accounting(size);
+ mmap_accounting(size);
#endif
- int advise = MADV_SEQUENTIAL | MADV_DONTFORK;
- if (flags & MAP_SHARED) advise |= MADV_WILLNEED;
-
- if (madvise(mem, size, advise) != 0 && log_madvise_1) {
- error("Cannot advise the kernel about the memory usage of file '%s'.", filename);
- log_madvise_1--;
- }
- }
+ int advise = MADV_SEQUENTIAL | MADV_DONTFORK;
+ if (flags & MAP_SHARED) advise |= MADV_WILLNEED;
+
+ if (madvise(mem, size, advise) != 0 && log_madvise) {
+ error("Cannot advise the kernel about shared memory usage.");
+ log_madvise--;
+ }
+ }
+
+ if(fd != -1)
+ close(fd);
+
+ return mem;
+}
+
#ifdef MADV_MERGEABLE
- }
- else {
-/*
- // test - load the file into memory
- mem = calloc(1, size);
- if(mem) {
- if(lseek(fd, 0, SEEK_SET) == 0) {
- if(read(fd, mem, size) != (ssize_t)size)
- error("Cannot read from file '%s'", filename);
- }
- else
- error("Cannot seek to beginning of file '%s'.", filename);
- }
-*/
- mem = mmap(NULL, size, PROT_READ | PROT_WRITE, flags | MAP_ANONYMOUS, -1, 0);
- if (mem == MAP_FAILED) {
- error("Cannot allocate PRIVATE ANONYMOUS memory for KSM for file '%s'.", filename);
- mem = NULL;
- }
- else {
+static void *memory_file_mmap_ksm(const char *filename, size_t size, int flags) {
+ // info("memory_file_mmap_ksm('%s', %zu", filename, size);
+ static int log_madvise_2 = 1, log_madvise_3 = 1;
+
+ int fd = -1;
+ if(filename) {
+ fd = memory_file_open(filename, size);
+ if(fd == -1) return MAP_FAILED;
+ }
+
+ void *mem = mmap(NULL, size, PROT_READ | PROT_WRITE, flags | MAP_ANONYMOUS, -1, 0);
+ if (mem != MAP_FAILED) {
#ifdef NETDATA_LOG_ALLOCATIONS
- mmap_accounting(size);
-#endif
- if (lseek(fd, 0, SEEK_SET) == 0) {
- if (read(fd, mem, size) != (ssize_t) size)
- error("Cannot read from file '%s'", filename);
- } else
- error("Cannot seek to beginning of file '%s'.", filename);
-
- // don't use MADV_SEQUENTIAL|MADV_DONTFORK, they disable MADV_MERGEABLE
- if (madvise(mem, size, MADV_SEQUENTIAL | MADV_DONTFORK) != 0 && log_madvise_2) {
- error("Cannot advise the kernel about the memory usage (MADV_SEQUENTIAL|MADV_DONTFORK) of file '%s'.",
- filename);
- log_madvise_2--;
- }
-
- if (madvise(mem, size, MADV_MERGEABLE) != 0 && log_madvise_3) {
- error("Cannot advise the kernel about the memory usage (MADV_MERGEABLE) of file '%s'.",
- filename);
- log_madvise_3--;
- }
- }
- }
+ mmap_accounting(size);
#endif
+ if(fd != -1) {
+ if (lseek(fd, 0, SEEK_SET) == 0) {
+ if (read(fd, mem, size) != (ssize_t) size)
+ error("Cannot read from file '%s'", filename);
}
- else
- error("Cannot write to file '%s' at position %zu.", filename, size);
+ else error("Cannot seek to beginning of file '%s'.", filename);
}
- else
- error("Cannot seek file '%s' to size %zu.", filename, size);
- close(fd);
+ // don't use MADV_SEQUENTIAL|MADV_DONTFORK, they disable MADV_MERGEABLE
+ if (madvise(mem, size, MADV_SEQUENTIAL | MADV_DONTFORK) != 0 && log_madvise_2) {
+ error("Cannot advise the kernel about the memory usage (MADV_SEQUENTIAL|MADV_DONTFORK) of file '%s'.", filename);
+ log_madvise_2--;
+ }
+
+ if (madvise(mem, size, MADV_MERGEABLE) != 0 && log_madvise_3) {
+ error("Cannot advise the kernel about the memory usage (MADV_MERGEABLE) of file '%s'.", filename);
+ log_madvise_3--;
+ }
}
+
+ if(fd != -1)
+ close(fd);
+
+ return mem;
+}
+#else
+static void *memory_file_mmap_ksm(const char *filename, size_t size, int flags) {
+ // info("memory_file_mmap_ksm FALLBACK ('%s', %zu", filename, size);
+
+ if(filename)
+ return memory_file_mmap(filename, size, flags);
+
+ // when KSM is not available and no filename is given (memory mode = ram),
+ // we just report failure
+ return MAP_FAILED;
+}
+#endif
+
+void *mymmap(const char *filename, size_t size, int flags, int ksm) {
+ void *mem = NULL;
+
+ if (filename && (flags & MAP_SHARED || !enable_ksm || !ksm))
+ // memory mode = map | save
+ // when KSM is not enabled
+ // MAP_SHARED is used for memory mode = map (no KSM possible)
+ mem = memory_file_mmap(filename, size, flags);
+
else
- error("Cannot create/open file '%s'.", filename);
+ // memory mode = save | ram
+ // when KSM is enabled
+ // for memory mode = ram, the filename is NULL
+ mem = memory_file_mmap_ksm(filename, size, flags);
+ if(mem == MAP_FAILED) return NULL;
+
+ errno = 0;
return mem;
}
-int savememory(const char *filename, void *mem, size_t size) {
+int memory_file_save(const char *filename, void *mem, size_t size) {
char tmpfilename[FILENAME_MAX + 1];
snprintfz(tmpfilename, FILENAME_MAX, "%s.%ld.tmp", filename, (long) getpid());
@@ -1228,3 +1292,47 @@ unsigned long end_tsc(void) {
return (((unsigned long)d << 32) | (unsigned long)a) - tsc;
}
*/
+
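+// recursively_delete_dir() returns the number of filesystem entries removed
+// (including the directory itself), or -1 when the directory cannot be read.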
+int recursively_delete_dir(const char *path, const char *reason) {
+ DIR *dir = opendir(path);
+ if(!dir) {
+ error("Cannot read %s directory to be deleted '%s'", reason?reason:"", path);
+ return -1;
+ }
+
+ int ret = 0;
+ struct dirent *de = NULL;
+ while((de = readdir(dir))) {
+ if(de->d_type == DT_DIR
+ && (
+ (de->d_name[0] == '.' && de->d_name[1] == '\0')
+ || (de->d_name[0] == '.' && de->d_name[1] == '.' && de->d_name[2] == '\0')
+ ))
+ continue;
+
+ char fullpath[FILENAME_MAX + 1];
+ snprintfz(fullpath, FILENAME_MAX, "%s/%s", path, de->d_name);
+
+ if(de->d_type == DT_DIR) {
+ int r = recursively_delete_dir(fullpath, reason);
+ if(r > 0) ret += r;
+ continue;
+ }
+
+ info("Deleting %s file '%s'", reason?reason:"", fullpath);
+ if(unlikely(unlink(fullpath) == -1))
+ error("Cannot delete %s file '%s'", reason?reason:"", fullpath);
+ else
+ ret++;
+ }
+
+ info("Deleting empty directory '%s'", path);
+ if(unlikely(rmdir(path) == -1))
+ error("Cannot delete empty directory '%s'", path);
+ else
+ ret++;
+
+ closedir(dir);
+
+ return ret;
+}
diff --git a/src/common.h b/src/common.h
index b82c078f..efeebf16 100644
--- a/src/common.h
+++ b/src/common.h
@@ -40,6 +40,7 @@
#include <strings.h>
#include <arpa/inet.h>
#include <netinet/tcp.h>
+#include <sys/ioctl.h>
#ifdef HAVE_NETINET_IN_H
#include <netinet/in.h>
@@ -201,12 +202,14 @@
#define NETDATA_OS_TYPE "linux"
#endif /* __FreeBSD__, __APPLE__*/
+#include "statistical.h"
#include "socket.h"
#include "eval.h"
#include "health.h"
#include "rrd.h"
#include "plugin_tc.h"
#include "plugins_d.h"
+#include "statsd.h"
#include "rrd2json.h"
#include "rrd2json_api_old.h"
#include "web_client.h"
@@ -217,6 +220,7 @@
#include "unit_test.h"
#include "ipc.h"
#include "backends.h"
+#include "backend_prometheus.h"
#include "inlined.h"
#include "adaptive_resortable_list.h"
#include "rrdpush.h"
@@ -238,7 +242,8 @@ extern void netdata_fix_chart_name(char *s);
extern void strreverse(char* begin, char* end);
extern char *mystrsep(char **ptr, char *s);
-extern char *trim(char *s);
+extern char *trim(char *s); // remove leading and trailing spaces; may return NULL
+extern char *trim_all(char *buffer); // like trim(), but also remove duplicate spaces inside the string; may return NULL
extern int vsnprintfz(char *dst, size_t n, const char *fmt, va_list args);
extern int snprintfz(char *dst, size_t n, const char *fmt, ...) PRINTFLIKE(3, 4);
@@ -265,9 +270,10 @@ extern void freez(void *ptr);
#endif
extern void json_escape_string(char *dst, const char *src, size_t size);
+extern void json_fix_string(char *s);
extern void *mymmap(const char *filename, size_t size, int flags, int ksm);
-extern int savememory(const char *filename, void *mem, size_t size);
+extern int memory_file_save(const char *filename, void *mem, size_t size);
extern int fd_is_valid(int fd);
@@ -289,6 +295,8 @@ extern pid_t get_system_pid_max(void);
extern unsigned int hz;
extern void get_system_HZ(void);
+extern int recursively_delete_dir(const char *path, const char *reason);
+
extern volatile sig_atomic_t netdata_exit;
extern const char *os_type;
diff --git a/src/daemon.c b/src/daemon.c
index 42b04c40..bc02446e 100644
--- a/src/daemon.c
+++ b/src/daemon.c
@@ -73,8 +73,9 @@ void create_needed_dir(const char *dir, uid_t uid, gid_t gid)
error("Cannot create directory '%s'", dir);
}
-int become_user(const char *username, int pid_fd)
-{
+int become_user(const char *username, int pid_fd) {
+ int am_i_root = (getuid() == 0)?1:0;
+
struct passwd *pw = getpwnam(username);
if(!pw) {
error("User %s is not present.", username);
@@ -94,12 +95,12 @@ int become_user(const char *username, int pid_fd)
int ngroups = (int)sysconf(_SC_NGROUPS_MAX);
gid_t *supplementary_groups = NULL;
- if(ngroups) {
+ if(ngroups > 0) {
supplementary_groups = mallocz(sizeof(gid_t) * ngroups);
if(getgrouplist(username, gid, supplementary_groups, &ngroups) == -1) {
- error("Cannot get supplementary groups of user '%s'.", username);
- freez(supplementary_groups);
- supplementary_groups = NULL;
+ if(am_i_root)
+ error("Cannot get supplementary groups of user '%s'.", username);
+
ngroups = 0;
}
}
@@ -109,14 +110,17 @@ int become_user(const char *username, int pid_fd)
chown_open_file(stdaccess_fd, uid, gid);
chown_open_file(pid_fd, uid, gid);
- if(supplementary_groups && ngroups) {
- if(setgroups(ngroups, supplementary_groups) == -1)
- error("Cannot set supplementary groups for user '%s'", username);
-
- freez(supplementary_groups);
+ if(supplementary_groups && ngroups > 0) {
+ if(setgroups((size_t)ngroups, supplementary_groups) == -1) {
+ if(am_i_root)
+ error("Cannot set supplementary groups for user '%s'", username);
+ }
ngroups = 0;
}
+ if(supplementary_groups)
+ freez(supplementary_groups);
+
#ifdef __APPLE__
if(setregid(gid, gid) != 0) {
#else
@@ -155,22 +159,42 @@ int become_user(const char *username, int pid_fd)
return(0);
}
+#ifndef OOM_SCORE_ADJ_MAX
+#define OOM_SCORE_ADJ_MAX 1000
+#endif
+#ifndef OOM_SCORE_ADJ_MIN
+#define OOM_SCORE_ADJ_MIN -1000
+#endif
+
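+// netdata defaults to the maximum OOM score, volunteering to be the first
+// process the kernel kills under memory pressure.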
static void oom_score_adj(void) {
- int score = (int)config_get_number(CONFIG_SECTION_GLOBAL, "OOM score", 1000);
+ char buf[10 + 1];
+ snprintfz(buf, 10, "%d", OOM_SCORE_ADJ_MAX);
+
+ // check the environment
+ char *s = getenv("OOMScoreAdjust");
+ if(!s || !*s) s = buf;
+
+ // check netdata.conf configuration
+ s = config_get(CONFIG_SECTION_GLOBAL, "OOM score", s);
+ if(!s || !*s) s = buf;
+
+ if(!isdigit(*s) && *s != '-' && *s != '+') {
+ info("Out-Of-Memory score not changed due to setting: '%s'", s);
+ return;
+ }
int done = 0;
int fd = open("/proc/self/oom_score_adj", O_WRONLY);
if(fd != -1) {
- char buf[10 + 1];
- ssize_t len = snprintfz(buf, 10, "%d", score);
+ ssize_t len = strlen(s);
- if(len > 0 && write(fd, buf, (size_t)len) == len) done = 1;
+ if(len > 0 && write(fd, s, (size_t)len) == len) done = 1;
close(fd);
}
if(!done)
- error("Cannot adjust my Out-Of-Memory score to %d.", score);
+ error("Cannot adjust my Out-Of-Memory score to '%s'.", s);
else
- debug(D_SYSTEM, "Adjusted my Out-Of-Memory score to %d.", score);
+ info("Adjusted my Out-Of-Memory score to '%s'.", s);
}
static void process_nice_level(void) {
diff --git a/src/eval.c b/src/eval.c
index 122959ce..9248109b 100644
--- a/src/eval.c
+++ b/src/eval.c
@@ -723,7 +723,7 @@ static inline int parse_variable(const char **string, char *buffer, size_t len)
static inline int parse_constant(const char **string, calculated_number *number) {
char *end = NULL;
- calculated_number n = strtold(*string, &end);
+ calculated_number n = str2ld(*string, &end);
if(unlikely(!end || *string == end)) {
*number = 0;
return 0;
diff --git a/src/freebsd_devstat.c b/src/freebsd_devstat.c
new file mode 100644
index 00000000..5b0687d5
--- /dev/null
+++ b/src/freebsd_devstat.c
@@ -0,0 +1,662 @@
+#include "common.h"
+
+#include <sys/devicestat.h>
+
+struct disk {
+ char *name;
+ uint32_t hash;
+ size_t len;
+
+ // flags
+ int configured;
+ int enabled;
+ int updated;
+
+ int do_io;
+ int do_ops;
+ int do_qops;
+ int do_util;
+ int do_iotime;
+ int do_await;
+ int do_avagsz;
+ int do_svctm;
+
+
+ // data for differential charts
+
+ struct prev_dstat {
+ collected_number bytes_read;
+ collected_number bytes_write;
+ collected_number operations_read;
+ collected_number operations_write;
+ collected_number duration_read_ms;
+ collected_number duration_write_ms;
+ collected_number busy_time_ms;
+ } prev_dstat;
+
+ // charts and dimensions
+
+ RRDSET *st_io;
+ RRDDIM *rd_io_in;
+ RRDDIM *rd_io_out;
+
+ RRDSET *st_ops;
+ RRDDIM *rd_ops_in;
+ RRDDIM *rd_ops_out;
+
+ RRDSET *st_qops;
+ RRDDIM *rd_qops;
+
+ RRDSET *st_util;
+ RRDDIM *rd_util;
+
+ RRDSET *st_iotime;
+ RRDDIM *rd_iotime_in;
+ RRDDIM *rd_iotime_out;
+
+ RRDSET *st_await;
+ RRDDIM *rd_await_in;
+ RRDDIM *rd_await_out;
+
+ RRDSET *st_avagsz;
+ RRDDIM *rd_avagsz_in;
+ RRDDIM *rd_avagsz_out;
+
+ RRDSET *st_svctm;
+ RRDDIM *rd_svctm;
+
+ struct disk *next;
+};
+
+static struct disk *disks_root = NULL, *disks_last_used = NULL;
+
+static size_t disks_added = 0, disks_found = 0;
+
+static void disk_free(struct disk *dm) {
+ if (likely(dm->st_io))
+ rrdset_is_obsolete(dm->st_io);
+ if (likely(dm->st_ops))
+ rrdset_is_obsolete(dm->st_ops);
+ if (likely(dm->st_qops))
+ rrdset_is_obsolete(dm->st_qops);
+ if (likely(dm->st_util))
+ rrdset_is_obsolete(dm->st_util);
+ if (likely(dm->st_iotime))
+ rrdset_is_obsolete(dm->st_iotime);
+ if (likely(dm->st_await))
+ rrdset_is_obsolete(dm->st_await);
+ if (likely(dm->st_avagsz))
+ rrdset_is_obsolete(dm->st_avagsz);
+ if (likely(dm->st_svctm))
+ rrdset_is_obsolete(dm->st_svctm);
+
+ disks_added--;
+ freez(dm->name);
+ freez(dm);
+}
+
+static void disks_cleanup() {
+ if (likely(disks_found == disks_added)) return;
+
+ struct disk *dm = disks_root, *last = NULL;
+ while(dm) {
+ if (unlikely(!dm->updated)) {
+ // info("Removing disk '%s', linked after '%s'", dm->name, last?last->name:"ROOT");
+
+ if (disks_last_used == dm)
+ disks_last_used = last;
+
+ struct disk *t = dm;
+
+ if (dm == disks_root || !last)
+ disks_root = dm = dm->next;
+
+ else
+ last->next = dm = dm->next;
+
+ t->next = NULL;
+ disk_free(t);
+ }
+ else {
+ last = dm;
+ dm->updated = 0;
+ dm = dm->next;
+ }
+ }
+}
+
+static struct disk *get_disk(const char *name) {
+ struct disk *dm;
+
+ uint32_t hash = simple_hash(name);
+
+ // search it, from the last position to the end
+ for(dm = disks_last_used ; dm ; dm = dm->next) {
+ if (unlikely(hash == dm->hash && !strcmp(name, dm->name))) {
+ disks_last_used = dm->next;
+ return dm;
+ }
+ }
+
+ // search it from the beginning to the last position we used
+ for(dm = disks_root ; dm != disks_last_used ; dm = dm->next) {
+ if (unlikely(hash == dm->hash && !strcmp(name, dm->name))) {
+ disks_last_used = dm->next;
+ return dm;
+ }
+ }
+
+ // create a new one
+ dm = callocz(1, sizeof(struct disk));
+ dm->name = strdupz(name);
+ dm->hash = simple_hash(dm->name);
+ dm->len = strlen(dm->name);
+ disks_added++;
+
+ // link it to the end
+ if (disks_root) {
+ struct disk *e;
+ for(e = disks_root; e->next ; e = e->next) ;
+ e->next = dm;
+ }
+ else
+ disks_root = dm;
+
+ return dm;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// kern.devstat
+
+int do_kern_devstat(int update_every, usec_t dt) {
+
+#define DEFAULT_EXCLUDED_DISKS ""
+#define CONFIG_SECTION_KERN_DEVSTAT "plugin:freebsd:kern.devstat"
+#define BINTIME_SCALE 5.42101086242752217003726400434970855712890625e-17 // this is 1000/2^64
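+// (struct bintime fractions are in units of 1/2^64 seconds; multiplying by
+// 1000/2^64 converts a fraction directly to milliseconds)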
+
+ static int enable_new_disks = -1;
+ static int enable_pass_devices = -1, do_system_io = -1, do_io = -1, do_ops = -1, do_qops = -1, do_util = -1,
+ do_iotime = -1, do_await = -1, do_avagsz = -1, do_svctm = -1;
+ static SIMPLE_PATTERN *excluded_disks = NULL;
+
+ if (unlikely(enable_new_disks == -1)) {
+ enable_new_disks = config_get_boolean_ondemand(CONFIG_SECTION_KERN_DEVSTAT,
+ "enable new disks detected at runtime", CONFIG_BOOLEAN_AUTO);
+
+ enable_pass_devices = config_get_boolean_ondemand(CONFIG_SECTION_KERN_DEVSTAT,
+ "performance metrics for pass devices", CONFIG_BOOLEAN_AUTO);
+
+ do_system_io = config_get_boolean_ondemand(CONFIG_SECTION_KERN_DEVSTAT, "total bandwidth for all disks",
+ CONFIG_BOOLEAN_YES);
+
+ do_io = config_get_boolean_ondemand(CONFIG_SECTION_KERN_DEVSTAT, "bandwidth for all disks",
+ CONFIG_BOOLEAN_AUTO);
+ do_ops = config_get_boolean_ondemand(CONFIG_SECTION_KERN_DEVSTAT, "operations for all disks",
+ CONFIG_BOOLEAN_AUTO);
+ do_qops = config_get_boolean_ondemand(CONFIG_SECTION_KERN_DEVSTAT, "queued operations for all disks",
+ CONFIG_BOOLEAN_AUTO);
+ do_util = config_get_boolean_ondemand(CONFIG_SECTION_KERN_DEVSTAT, "utilization percentage for all disks",
+ CONFIG_BOOLEAN_AUTO);
+ do_iotime = config_get_boolean_ondemand(CONFIG_SECTION_KERN_DEVSTAT, "i/o time for all disks",
+ CONFIG_BOOLEAN_AUTO);
+ do_await = config_get_boolean_ondemand(CONFIG_SECTION_KERN_DEVSTAT, "average completed i/o time for all disks",
+ CONFIG_BOOLEAN_AUTO);
+ do_avagsz = config_get_boolean_ondemand(CONFIG_SECTION_KERN_DEVSTAT, "average completed i/o bandwidth for all disks",
+ CONFIG_BOOLEAN_AUTO);
+ do_svctm = config_get_boolean_ondemand(CONFIG_SECTION_KERN_DEVSTAT, "average service time for all disks",
+ CONFIG_BOOLEAN_AUTO);
+
+ excluded_disks = simple_pattern_create(
+ config_get(CONFIG_SECTION_KERN_DEVSTAT, "disable by default disks matching", DEFAULT_EXCLUDED_DISKS)
+ , SIMPLE_PATTERN_EXACT
+ );
+ }
+
+ if (likely(do_system_io || do_io || do_ops || do_qops || do_util || do_iotime || do_await || do_avagsz || do_svctm)) {
+ static int mib_numdevs[3] = {0, 0, 0};
+ int numdevs;
+ int common_error = 0;
+
+ if (unlikely(GETSYSCTL_SIMPLE("kern.devstat.numdevs", mib_numdevs, numdevs))) {
+ common_error = 1;
+ } else {
+ static int mib_devstat[3] = {0, 0, 0};
+ static void *devstat_data = NULL;
+ static int old_numdevs = 0;
+
+ if (unlikely(numdevs != old_numdevs)) {
+ devstat_data = reallocz(devstat_data, sizeof(long) + sizeof(struct devstat) *
+ numdevs); // a generation number precedes the devstat structures
+ old_numdevs = numdevs;
+ }
+ if (unlikely(GETSYSCTL_WSIZE("kern.devstat.all", mib_devstat, devstat_data,
+ sizeof(long) + sizeof(struct devstat) * numdevs))) {
+ common_error = 1;
+ } else {
+ struct devstat *dstat;
+ int i;
+ collected_number total_disk_kbytes_read = 0;
+ collected_number total_disk_kbytes_write = 0;
+
+ disks_found = 0;
+
+ dstat = devstat_data + sizeof(long); // skip generation number
+
+ for (i = 0; i < numdevs; i++) {
+ if (likely(do_system_io)) {
+ if (((dstat[i].device_type & DEVSTAT_TYPE_MASK) == DEVSTAT_TYPE_DIRECT) || ((dstat[i].device_type & DEVSTAT_TYPE_MASK) == DEVSTAT_TYPE_STORARRAY)) {
+ total_disk_kbytes_read += dstat[i].bytes[DEVSTAT_READ] / KILO_FACTOR;
+ total_disk_kbytes_write += dstat[i].bytes[DEVSTAT_WRITE] / KILO_FACTOR;
+ }
+ }
+
+ if (unlikely(!enable_pass_devices))
+ if ((dstat[i].device_type & DEVSTAT_TYPE_PASS) == DEVSTAT_TYPE_PASS)
+ continue;
+
+ if (((dstat[i].device_type & DEVSTAT_TYPE_MASK) == DEVSTAT_TYPE_DIRECT) || ((dstat[i].device_type & DEVSTAT_TYPE_MASK) == DEVSTAT_TYPE_STORARRAY)) {
+ char disk[DEVSTAT_NAME_LEN + MAX_INT_DIGITS + 1];
+ struct cur_dstat {
+ collected_number duration_read_ms;
+ collected_number duration_write_ms;
+ collected_number busy_time_ms;
+ } cur_dstat;
+
+ sprintf(disk, "%s%d", dstat[i].device_name, dstat[i].unit_number);
+
+ struct disk *dm = get_disk(disk);
+ dm->updated = 1;
+ disks_found++;
+
+ if(unlikely(!dm->configured)) {
+ char var_name[4096 + 1];
+
+ // this is the first time we see this disk
+
+ // remember we configured it
+ dm->configured = 1;
+
+ dm->enabled = enable_new_disks;
+
+ if (likely(dm->enabled))
+ dm->enabled = !simple_pattern_matches(excluded_disks, disk);
+
+ snprintfz(var_name, 4096, "%s:%s", CONFIG_SECTION_KERN_DEVSTAT, disk);
+ dm->enabled = config_get_boolean_ondemand(var_name, "enabled", dm->enabled);
+
+ dm->do_io = config_get_boolean_ondemand(var_name, "bandwidth", do_io);
+ dm->do_ops = config_get_boolean_ondemand(var_name, "operations", do_ops);
+ dm->do_qops = config_get_boolean_ondemand(var_name, "queued operations", do_qops);
+ dm->do_util = config_get_boolean_ondemand(var_name, "utilization percentage", do_util);
+ dm->do_iotime = config_get_boolean_ondemand(var_name, "i/o time", do_iotime);
+ dm->do_await = config_get_boolean_ondemand(var_name, "average completed i/o time",
+ do_await);
+ dm->do_avagsz = config_get_boolean_ondemand(var_name, "average completed i/o bandwidth",
+ do_avagsz);
+ dm->do_svctm = config_get_boolean_ondemand(var_name, "average service time", do_svctm);
+
+ // initialise data for differential charts
+
+ dm->prev_dstat.bytes_read = dstat[i].bytes[DEVSTAT_READ];
+ dm->prev_dstat.bytes_write = dstat[i].bytes[DEVSTAT_WRITE];
+ dm->prev_dstat.operations_read = dstat[i].operations[DEVSTAT_READ];
+ dm->prev_dstat.operations_write = dstat[i].operations[DEVSTAT_WRITE];
+ dm->prev_dstat.duration_read_ms = dstat[i].duration[DEVSTAT_READ].sec * 1000
+ + dstat[i].duration[DEVSTAT_READ].frac * BINTIME_SCALE;
+ dm->prev_dstat.duration_write_ms = dstat[i].duration[DEVSTAT_WRITE].sec * 1000
+ + dstat[i].duration[DEVSTAT_WRITE].frac * BINTIME_SCALE;
+ dm->prev_dstat.busy_time_ms = dstat[i].busy_time.sec * 1000
+ + dstat[i].busy_time.frac * BINTIME_SCALE;
+ }
+
+ cur_dstat.duration_read_ms = dstat[i].duration[DEVSTAT_READ].sec * 1000
+ + dstat[i].duration[DEVSTAT_READ].frac * BINTIME_SCALE;
+ cur_dstat.duration_write_ms = dstat[i].duration[DEVSTAT_WRITE].sec * 1000
+ + dstat[i].duration[DEVSTAT_WRITE].frac * BINTIME_SCALE;
+ cur_dstat.busy_time_ms = dstat[i].busy_time.sec * 1000 + dstat[i].busy_time.frac * BINTIME_SCALE;
+
+ // --------------------------------------------------------------------
+
+ if(dm->do_io == CONFIG_BOOLEAN_YES || (dm->do_io == CONFIG_BOOLEAN_AUTO &&
+ (dstat[i].bytes[DEVSTAT_READ] || dstat[i].bytes[DEVSTAT_WRITE]))) {
+ if (unlikely(!dm->st_io)) {
+ dm->st_io = rrdset_create_localhost("disk",
+ disk,
+ NULL,
+ disk,
+ "disk.io",
+ "Disk I/O Bandwidth",
+ "kilobytes/s",
+ 2000,
+ update_every,
+ RRDSET_TYPE_AREA
+ );
+
+ dm->rd_io_in = rrddim_add(dm->st_io, "reads", NULL, 1, KILO_FACTOR,
+ RRD_ALGORITHM_INCREMENTAL);
+ dm->rd_io_out = rrddim_add(dm->st_io, "writes", NULL, -1, KILO_FACTOR,
+ RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(dm->st_io);
+
+ rrddim_set_by_pointer(dm->st_io, dm->rd_io_in, dstat[i].bytes[DEVSTAT_READ]);
+ rrddim_set_by_pointer(dm->st_io, dm->rd_io_out, dstat[i].bytes[DEVSTAT_WRITE]);
+ rrdset_done(dm->st_io);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(dm->do_ops == CONFIG_BOOLEAN_YES || (dm->do_ops == CONFIG_BOOLEAN_AUTO &&
+ (dstat[i].operations[DEVSTAT_READ] || dstat[i].operations[DEVSTAT_WRITE]))) {
+ if (unlikely(!dm->st_ops)) {
+ dm->st_ops = rrdset_create_localhost("disk_ops",
+ disk,
+ NULL,
+ disk,
+ "disk.ops",
+ "Disk Completed I/O Operations",
+ "operations/s",
+ 2001,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(dm->st_ops, RRDSET_FLAG_DETAIL);
+
+ dm->rd_ops_in = rrddim_add(dm->st_ops, "reads", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ dm->rd_ops_out = rrddim_add(dm->st_ops, "writes", NULL, -1, 1,
+ RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(dm->st_ops);
+
+ rrddim_set_by_pointer(dm->st_ops, dm->rd_ops_in, dstat[i].operations[DEVSTAT_READ]);
+ rrddim_set_by_pointer(dm->st_ops, dm->rd_ops_out, dstat[i].operations[DEVSTAT_WRITE]);
+ rrdset_done(dm->st_ops);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(dm->do_qops == CONFIG_BOOLEAN_YES || (dm->do_qops == CONFIG_BOOLEAN_AUTO &&
+ (dstat[i].start_count || dstat[i].end_count))) {
+ if (unlikely(!dm->st_qops)) {
+ dm->st_qops = rrdset_create_localhost("disk_qops",
+ disk,
+ NULL,
+ disk,
+ "disk.qops",
+ "Disk Current I/O Operations",
+ "operations",
+ 2002,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(dm->st_qops, RRDSET_FLAG_DETAIL);
+
+ dm->rd_qops = rrddim_add(dm->st_qops, "operations", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ } else
+ rrdset_next(dm->st_qops);
+
+ rrddim_set_by_pointer(dm->st_qops, dm->rd_qops, dstat[i].start_count - dstat[i].end_count);
+ rrdset_done(dm->st_qops);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(dm->do_util == CONFIG_BOOLEAN_YES || (dm->do_util == CONFIG_BOOLEAN_AUTO &&
+ cur_dstat.busy_time_ms)) {
+ if (unlikely(!dm->st_util)) {
+ dm->st_util = rrdset_create_localhost("disk_util",
+ disk,
+ NULL,
+ disk,
+ "disk.util",
+ "Disk Utilization Time",
+ "% of time working",
+ 2004,
+ update_every,
+ RRDSET_TYPE_AREA
+ );
+
+ rrdset_flag_set(dm->st_util, RRDSET_FLAG_DETAIL);
+
+ dm->rd_util = rrddim_add(dm->st_util, "utilization", NULL, 1, 10,
+ RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(dm->st_util);
+
+ rrddim_set_by_pointer(dm->st_util, dm->rd_util, cur_dstat.busy_time_ms);
+ rrdset_done(dm->st_util);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(dm->do_iotime == CONFIG_BOOLEAN_YES || (dm->do_iotime == CONFIG_BOOLEAN_AUTO &&
+ (cur_dstat.duration_read_ms || cur_dstat.duration_write_ms))) {
+ if (unlikely(!dm->st_iotime)) {
+ dm->st_iotime = rrdset_create_localhost("disk_iotime",
+ disk,
+ NULL,
+ disk,
+ "disk.iotime",
+ "Disk Total I/O Time",
+ "milliseconds/s",
+ 2022,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(dm->st_iotime, RRDSET_FLAG_DETAIL);
+
+ dm->rd_iotime_in = rrddim_add(dm->st_iotime, "reads", NULL, 1, 1,
+ RRD_ALGORITHM_INCREMENTAL);
+ dm->rd_iotime_out = rrddim_add(dm->st_iotime, "writes", NULL, -1, 1,
+ RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(dm->st_iotime);
+
+ rrddim_set_by_pointer(dm->st_iotime, dm->rd_iotime_in, cur_dstat.duration_read_ms);
+ rrddim_set_by_pointer(dm->st_iotime, dm->rd_iotime_out, cur_dstat.duration_write_ms);
+ rrdset_done(dm->st_iotime);
+ }
+
+ // --------------------------------------------------------------------
+ // calculate differential charts
+ // only if this is not the first time we run
+
+ if (likely(dt)) {
+
+ // --------------------------------------------------------------------
+
+ if(dm->do_await == CONFIG_BOOLEAN_YES || (dm->do_await == CONFIG_BOOLEAN_AUTO &&
+ (dstat[i].operations[DEVSTAT_READ] || dstat[i].operations[DEVSTAT_WRITE]))) {
+ if (unlikely(!dm->st_await)) {
+ dm->st_await = rrdset_create_localhost("disk_await",
+ disk,
+ NULL,
+ disk,
+ "disk.await",
+ "Average Completed I/O Operation Time",
+ "ms per operation",
+ 2005,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(dm->st_await, RRDSET_FLAG_DETAIL);
+
+ dm->rd_await_in = rrddim_add(dm->st_await, "reads", NULL, 1, 1,
+ RRD_ALGORITHM_ABSOLUTE);
+ dm->rd_await_out = rrddim_add(dm->st_await, "writes", NULL, -1, 1,
+ RRD_ALGORITHM_ABSOLUTE);
+ } else
+ rrdset_next(dm->st_await);
+
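+ // await = delta(summed i/o duration) / delta(completed operations),
+ // guarded against division by zero when no operations completed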
+ rrddim_set_by_pointer(dm->st_await, dm->rd_await_in,
+ (dstat[i].operations[DEVSTAT_READ] -
+ dm->prev_dstat.operations_read) ?
+ (cur_dstat.duration_read_ms - dm->prev_dstat.duration_read_ms) /
+ (dstat[i].operations[DEVSTAT_READ] -
+ dm->prev_dstat.operations_read) :
+ 0);
+ rrddim_set_by_pointer(dm->st_await, dm->rd_await_out,
+ (dstat[i].operations[DEVSTAT_WRITE] -
+ dm->prev_dstat.operations_write) ?
+ (cur_dstat.duration_write_ms - dm->prev_dstat.duration_write_ms) /
+ (dstat[i].operations[DEVSTAT_WRITE] -
+ dm->prev_dstat.operations_write) :
+ 0);
+ rrdset_done(dm->st_await);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(dm->do_avagsz == CONFIG_BOOLEAN_YES || (dm->do_avagsz == CONFIG_BOOLEAN_AUTO &&
+ (dstat[i].operations[DEVSTAT_READ] || dstat[i].operations[DEVSTAT_WRITE]))) {
+ if (unlikely(!dm->st_avagsz)) {
+ dm->st_avagsz = rrdset_create_localhost("disk_avgsz",
+ disk,
+ NULL,
+ disk,
+ "disk.avgsz",
+ "Average Completed I/O Operation Bandwidth",
+ "kilobytes per operation",
+ 2006,
+ update_every,
+ RRDSET_TYPE_AREA
+ );
+
+ rrdset_flag_set(dm->st_avagsz, RRDSET_FLAG_DETAIL);
+
+ dm->rd_avagsz_in = rrddim_add(dm->st_avagsz, "reads", NULL, 1, KILO_FACTOR,
+ RRD_ALGORITHM_ABSOLUTE);
+ dm->rd_avagsz_out = rrddim_add(dm->st_avagsz, "writes", NULL, -1, KILO_FACTOR,
+ RRD_ALGORITHM_ABSOLUTE);
+ } else
+ rrdset_next(dm->st_avagsz);
+
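+ // avgsz = delta(bytes transferred) / delta(completed operations);
+ // the dimension divisor (KILO_FACTOR) renders it in kilobytes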
+ rrddim_set_by_pointer(dm->st_avagsz, dm->rd_avagsz_in,
+ (dstat[i].operations[DEVSTAT_READ] -
+ dm->prev_dstat.operations_read) ?
+ (dstat[i].bytes[DEVSTAT_READ] - dm->prev_dstat.bytes_read) /
+ (dstat[i].operations[DEVSTAT_READ] -
+ dm->prev_dstat.operations_read) :
+ 0);
+ rrddim_set_by_pointer(dm->st_avagsz, dm->rd_avagsz_out,
+ (dstat[i].operations[DEVSTAT_WRITE] -
+ dm->prev_dstat.operations_write) ?
+ (dstat[i].bytes[DEVSTAT_WRITE] - dm->prev_dstat.bytes_write) /
+ (dstat[i].operations[DEVSTAT_WRITE] -
+ dm->prev_dstat.operations_write) :
+ 0);
+ rrdset_done(dm->st_avagsz);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(dm->do_svctm == CONFIG_BOOLEAN_YES || (dm->do_svctm == CONFIG_BOOLEAN_AUTO &&
+ (dstat[i].operations[DEVSTAT_READ] || dstat[i].operations[DEVSTAT_WRITE]))) {
+ if (unlikely(!dm->st_svctm)) {
+ dm->st_svctm = rrdset_create_localhost("disk_svctm",
+ disk,
+ NULL,
+ disk,
+ "disk.svctm",
+ "Average Service Time",
+ "ms per operation",
+ 2007,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(dm->st_svctm, RRDSET_FLAG_DETAIL);
+
+ dm->rd_svctm = rrddim_add(dm->st_svctm, "svctm", NULL, 1, 1,
+ RRD_ALGORITHM_ABSOLUTE);
+ } else
+ rrdset_next(dm->st_svctm);
+
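+ // svctm = delta(busy time) / delta(total read+write operations)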
+ rrddim_set_by_pointer(dm->st_svctm, dm->rd_svctm,
+ ((dstat[i].operations[DEVSTAT_READ] - dm->prev_dstat.operations_read) +
+ (dstat[i].operations[DEVSTAT_WRITE] - dm->prev_dstat.operations_write)) ?
+ (cur_dstat.busy_time_ms - dm->prev_dstat.busy_time_ms) /
+ ((dstat[i].operations[DEVSTAT_READ] - dm->prev_dstat.operations_read) +
+ (dstat[i].operations[DEVSTAT_WRITE] - dm->prev_dstat.operations_write)) :
+ 0);
+ rrdset_done(dm->st_svctm);
+ }
+
+ // --------------------------------------------------------------------
+
+ dm->prev_dstat.bytes_read = dstat[i].bytes[DEVSTAT_READ];
+ dm->prev_dstat.bytes_write = dstat[i].bytes[DEVSTAT_WRITE];
+ dm->prev_dstat.operations_read = dstat[i].operations[DEVSTAT_READ];
+ dm->prev_dstat.operations_write = dstat[i].operations[DEVSTAT_WRITE];
+ dm->prev_dstat.duration_read_ms = cur_dstat.duration_read_ms;
+ dm->prev_dstat.duration_write_ms = cur_dstat.duration_write_ms;
+ dm->prev_dstat.busy_time_ms = cur_dstat.busy_time_ms;
+ }
+ }
+ }
+
+ // --------------------------------------------------------------------
+
+ if (likely(do_system_io)) {
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_in = NULL, *rd_out = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost("system",
+ "io",
+ NULL,
+ "disk",
+ NULL,
+ "Disk I/O",
+ "kilobytes/s",
+ 150,
+ update_every,
+ RRDSET_TYPE_AREA
+ );
+
+ rd_in = rrddim_add(st, "in", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_out = rrddim_add(st, "out", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_in, total_disk_kbytes_read);
+ rrddim_set_by_pointer(st, rd_out, total_disk_kbytes_write);
+ rrdset_done(st);
+ }
+ }
+ }
+ if (unlikely(common_error)) {
+ do_system_io = 0;
+ error("DISABLED: system.io chart");
+ do_io = 0;
+ error("DISABLED: disk.* charts");
+ do_ops = 0;
+ error("DISABLED: disk_ops.* charts");
+ do_qops = 0;
+ error("DISABLED: disk_qops.* charts");
+ do_util = 0;
+ error("DISABLED: disk_util.* charts");
+ do_iotime = 0;
+ error("DISABLED: disk_iotime.* charts");
+ do_await = 0;
+ error("DISABLED: disk_await.* charts");
+ do_avagsz = 0;
+ error("DISABLED: disk_avgsz.* charts");
+ do_svctm = 0;
+ error("DISABLED: disk_svctm.* charts");
+ error("DISABLED: kern.devstat module");
+ return 1;
+ }
+ } else {
+ error("DISABLED: kern.devstat module");
+ return 1;
+ }
+
+ disks_cleanup();
+
+ return 0;
+}
diff --git a/src/freebsd_getifaddrs.c b/src/freebsd_getifaddrs.c
new file mode 100644
index 00000000..7355fac9
--- /dev/null
+++ b/src/freebsd_getifaddrs.c
@@ -0,0 +1,494 @@
+#include "common.h"
+
+#include <ifaddrs.h>
+
+struct network_interface {
+ char *name;
+ uint32_t hash;
+ size_t len;
+
+ // flags
+ int configured;
+ int enabled;
+ int updated;
+
+ int do_bandwidth;
+ int do_packets;
+ int do_errors;
+ int do_drops;
+ int do_events;
+
+ // charts and dimensions
+
+ RRDSET *st_bandwidth;
+ RRDDIM *rd_bandwidth_in;
+ RRDDIM *rd_bandwidth_out;
+
+ RRDSET *st_packets;
+ RRDDIM *rd_packets_in;
+ RRDDIM *rd_packets_out;
+ RRDDIM *rd_packets_m_in;
+ RRDDIM *rd_packets_m_out;
+
+ RRDSET *st_errors;
+ RRDDIM *rd_errors_in;
+ RRDDIM *rd_errors_out;
+
+ RRDSET *st_drops;
+ RRDDIM *rd_drops_in;
+ RRDDIM *rd_drops_out;
+
+ RRDSET *st_events;
+ RRDDIM *rd_events_coll;
+
+ struct network_interface *next;
+};
+
+static struct network_interface *network_interfaces_root = NULL, *network_interfaces_last_used = NULL;
+
+static size_t network_interfaces_added = 0, network_interfaces_found = 0;
+
+static void network_interface_free(struct network_interface *ifm) {
+ if (likely(ifm->st_bandwidth))
+ rrdset_is_obsolete(ifm->st_bandwidth);
+ if (likely(ifm->st_packets))
+ rrdset_is_obsolete(ifm->st_packets);
+ if (likely(ifm->st_errors))
+ rrdset_is_obsolete(ifm->st_errors);
+ if (likely(ifm->st_drops))
+ rrdset_is_obsolete(ifm->st_drops);
+ if (likely(ifm->st_events))
+ rrdset_is_obsolete(ifm->st_events);
+
+ network_interfaces_added--;
+ freez(ifm->name);
+ freez(ifm);
+}
+
+static void network_interfaces_cleanup() {
+ if (likely(network_interfaces_found == network_interfaces_added)) return;
+
+ struct network_interface *ifm = network_interfaces_root, *last = NULL;
+ while(ifm) {
+ if (unlikely(!ifm->updated)) {
+ // info("Removing network interface '%s', linked after '%s'", ifm->name, last?last->name:"ROOT");
+
+ if (network_interfaces_last_used == ifm)
+ network_interfaces_last_used = last;
+
+ struct network_interface *t = ifm;
+
+ if (ifm == network_interfaces_root || !last)
+ network_interfaces_root = ifm = ifm->next;
+
+ else
+ last->next = ifm = ifm->next;
+
+ t->next = NULL;
+ network_interface_free(t);
+ }
+ else {
+ last = ifm;
+ ifm->updated = 0;
+ ifm = ifm->next;
+ }
+ }
+}
+
+static struct network_interface *get_network_interface(const char *name) {
+ struct network_interface *ifm;
+
+ uint32_t hash = simple_hash(name);
+
+ // search it, from the last position to the end
+ for(ifm = network_interfaces_last_used ; ifm ; ifm = ifm->next) {
+ if (unlikely(hash == ifm->hash && !strcmp(name, ifm->name))) {
+ network_interfaces_last_used = ifm->next;
+ return ifm;
+ }
+ }
+
+ // search it from the beginning to the last position we used
+ for(ifm = network_interfaces_root ; ifm != network_interfaces_last_used ; ifm = ifm->next) {
+ if (unlikely(hash == ifm->hash && !strcmp(name, ifm->name))) {
+ network_interfaces_last_used = ifm->next;
+ return ifm;
+ }
+ }
+
+ // create a new one
+ ifm = callocz(1, sizeof(struct network_interface));
+ ifm->name = strdupz(name);
+ ifm->hash = simple_hash(ifm->name);
+ ifm->len = strlen(ifm->name);
+ network_interfaces_added++;
+
+ // link it to the end
+ if (network_interfaces_root) {
+ struct network_interface *e;
+ for(e = network_interfaces_root; e->next ; e = e->next) ;
+ e->next = ifm;
+ }
+ else
+ network_interfaces_root = ifm;
+
+ return ifm;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// getifaddrs
+
+int do_getifaddrs(int update_every, usec_t dt) {
+ (void)dt;
+
+#define DEFAULT_EXCLUDED_INTERFACES "lo*"
+#define CONFIG_SECTION_GETIFADDRS "plugin:freebsd:getifaddrs"
+
+ static int enable_new_interfaces = -1;
+ static int do_bandwidth_ipv4 = -1, do_bandwidth_ipv6 = -1, do_bandwidth = -1, do_packets = -1,
+ do_errors = -1, do_drops = -1, do_events = -1;
+ static SIMPLE_PATTERN *excluded_interfaces = NULL;
+
+ if (unlikely(enable_new_interfaces == -1)) {
+ enable_new_interfaces = config_get_boolean_ondemand(CONFIG_SECTION_GETIFADDRS,
+ "enable new interfaces detected at runtime",
+ CONFIG_BOOLEAN_AUTO);
+
+ do_bandwidth_ipv4 = config_get_boolean_ondemand(CONFIG_SECTION_GETIFADDRS, "total bandwidth for ipv4 interfaces",
+ CONFIG_BOOLEAN_AUTO);
+ do_bandwidth_ipv6 = config_get_boolean_ondemand(CONFIG_SECTION_GETIFADDRS, "total bandwidth for ipv6 interfaces",
+ CONFIG_BOOLEAN_AUTO);
+ do_bandwidth = config_get_boolean_ondemand(CONFIG_SECTION_GETIFADDRS, "bandwidth for all interfaces",
+ CONFIG_BOOLEAN_AUTO);
+ do_packets = config_get_boolean_ondemand(CONFIG_SECTION_GETIFADDRS, "packets for all interfaces",
+ CONFIG_BOOLEAN_AUTO);
+ do_errors = config_get_boolean_ondemand(CONFIG_SECTION_GETIFADDRS, "errors for all interfaces",
+ CONFIG_BOOLEAN_AUTO);
+ do_drops = config_get_boolean_ondemand(CONFIG_SECTION_GETIFADDRS, "drops for all interfaces",
+ CONFIG_BOOLEAN_AUTO);
+ do_events = config_get_boolean_ondemand(CONFIG_SECTION_GETIFADDRS, "collisions for all interfaces",
+ CONFIG_BOOLEAN_AUTO);
+
+ excluded_interfaces = simple_pattern_create(
+ config_get(CONFIG_SECTION_GETIFADDRS, "disable by default interfaces matching",
+ DEFAULT_EXCLUDED_INTERFACES)
+ , SIMPLE_PATTERN_EXACT
+ );
+ }
+
+ if (likely(do_bandwidth_ipv4 || do_bandwidth_ipv6 || do_bandwidth || do_packets || do_errors ||
+ do_drops || do_events)) {
+ struct ifaddrs *ifap;
+
+ if (unlikely(getifaddrs(&ifap))) {
+ error("FREEBSD: getifaddrs() failed");
+ do_bandwidth_ipv4 = 0;
+ error("DISABLED: system.ipv4 chart");
+ do_bandwidth_ipv6 = 0;
+ error("DISABLED: system.ipv6 chart");
+ do_bandwidth = 0;
+ error("DISABLED: net.* charts");
+ do_packets = 0;
+ error("DISABLED: net_packets.* charts");
+ do_errors = 0;
+ error("DISABLED: net_errors.* charts");
+ do_drops = 0;
+ error("DISABLED: net_drops.* charts");
+ do_events = 0;
+ error("DISABLED: net_events.* charts");
+ error("DISABLED: getifaddrs module");
+ return 1;
+ } else {
+#define IFA_DATA(s) (((struct if_data *)ifa->ifa_data)->ifi_ ## s)
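+// IFA_DATA(field) reads the ifi_<field> statistics member of the current
+// interface's struct if_data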
+ struct ifaddrs *ifa;
+ struct iftot {
+ u_long ift_ibytes;
+ u_long ift_obytes;
+ } iftot = {0, 0};
+
+ // --------------------------------------------------------------------
+
+ if (likely(do_bandwidth_ipv4)) {
+ iftot.ift_ibytes = iftot.ift_obytes = 0;
+ for (ifa = ifap; ifa; ifa = ifa->ifa_next) {
+ if (ifa->ifa_addr->sa_family != AF_INET)
+ continue;
+ iftot.ift_ibytes += IFA_DATA(ibytes);
+ iftot.ift_obytes += IFA_DATA(obytes);
+ }
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_in = NULL, *rd_out = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost("system",
+ "ipv4",
+ NULL,
+ "network",
+ NULL,
+ "IPv4 Bandwidth",
+ "kilobits/s",
+ 500,
+ update_every,
+ RRDSET_TYPE_AREA
+ );
+
+ rd_in = rrddim_add(st, "InOctets", "received", 8, KILO_FACTOR, RRD_ALGORITHM_INCREMENTAL);
+ rd_out = rrddim_add(st, "OutOctets", "sent", -8, KILO_FACTOR, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_in, iftot.ift_ibytes);
+ rrddim_set_by_pointer(st, rd_out, iftot.ift_obytes);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (likely(do_bandwidth_ipv6)) {
+ iftot.ift_ibytes = iftot.ift_obytes = 0;
+ for (ifa = ifap; ifa; ifa = ifa->ifa_next) {
+ if (ifa->ifa_addr->sa_family != AF_INET6)
+ continue;
+ iftot.ift_ibytes += IFA_DATA(ibytes);
+ iftot.ift_obytes += IFA_DATA(obytes);
+ }
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_in = NULL, *rd_out = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost("system",
+ "ipv6",
+ NULL,
+ "network",
+ NULL,
+ "IPv6 Bandwidth",
+ "kilobits/s",
+ 500,
+ update_every,
+ RRDSET_TYPE_AREA
+ );
+
+ rd_in = rrddim_add(st, "received", NULL, 8, KILO_FACTOR, RRD_ALGORITHM_INCREMENTAL);
+ rd_out = rrddim_add(st, "sent", NULL, -8, KILO_FACTOR, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(st);
+
+ rrddim_set_by_pointer(st, rd_in, iftot.ift_ibytes);
+ rrddim_set_by_pointer(st, rd_out, iftot.ift_obytes);
+ rrdset_done(st);
+ }
+
+ // --------------------------------------------------------------------
+
+ network_interfaces_found = 0;
+
+ for (ifa = ifap; ifa; ifa = ifa->ifa_next) {
+ if (ifa->ifa_addr->sa_family != AF_LINK)
+ continue;
+
+ struct network_interface *ifm = get_network_interface(ifa->ifa_name);
+ ifm->updated = 1;
+ network_interfaces_found++;
+
+ if (unlikely(!ifm->configured)) {
+ char var_name[4096 + 1];
+
+ // this is the first time we see this network interface
+
+ // remember we configured it
+ ifm->configured = 1;
+
+ ifm->enabled = enable_new_interfaces;
+
+ if (likely(ifm->enabled))
+ ifm->enabled = !simple_pattern_matches(excluded_interfaces, ifa->ifa_name);
+
+ snprintfz(var_name, 4096, "%s:%s", CONFIG_SECTION_GETIFADDRS, ifa->ifa_name);
+ ifm->enabled = config_get_boolean_ondemand(var_name, "enabled", ifm->enabled);
+
+ if (unlikely(ifm->enabled == CONFIG_BOOLEAN_NO))
+ continue;
+
+ ifm->do_bandwidth = config_get_boolean_ondemand(var_name, "bandwidth", do_bandwidth);
+ ifm->do_packets = config_get_boolean_ondemand(var_name, "packets", do_packets);
+ ifm->do_errors = config_get_boolean_ondemand(var_name, "errors", do_errors);
+ ifm->do_drops = config_get_boolean_ondemand(var_name, "drops", do_drops);
+ ifm->do_events = config_get_boolean_ondemand(var_name, "events", do_events);
+ }
+
+ if (unlikely(!ifm->enabled))
+ continue;
+
+ // --------------------------------------------------------------------
+
+ if (ifm->do_bandwidth == CONFIG_BOOLEAN_YES || (ifm->do_bandwidth == CONFIG_BOOLEAN_AUTO &&
+ (IFA_DATA(ibytes) || IFA_DATA(obytes)))) {
+ if (unlikely(!ifm->st_bandwidth)) {
+ ifm->st_bandwidth = rrdset_create_localhost("net",
+ ifa->ifa_name,
+ NULL,
+ ifa->ifa_name,
+ "net.net",
+ "Bandwidth",
+ "kilobits/s",
+ 7000,
+ update_every,
+ RRDSET_TYPE_AREA
+ );
+
+ ifm->rd_bandwidth_in = rrddim_add(ifm->st_bandwidth, "received", NULL, 8, KILO_FACTOR,
+ RRD_ALGORITHM_INCREMENTAL);
+ ifm->rd_bandwidth_out = rrddim_add(ifm->st_bandwidth, "sent", NULL, -8, KILO_FACTOR,
+ RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(ifm->st_bandwidth);
+
+ rrddim_set_by_pointer(ifm->st_bandwidth, ifm->rd_bandwidth_in, IFA_DATA(ibytes));
+ rrddim_set_by_pointer(ifm->st_bandwidth, ifm->rd_bandwidth_out, IFA_DATA(obytes));
+ rrdset_done(ifm->st_bandwidth);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (ifm->do_packets == CONFIG_BOOLEAN_YES || (ifm->do_packets == CONFIG_BOOLEAN_AUTO &&
+ (IFA_DATA(ipackets) || IFA_DATA(opackets) || IFA_DATA(imcasts) || IFA_DATA(omcasts)))) {
+ if (unlikely(!ifm->st_packets)) {
+ ifm->st_packets = rrdset_create_localhost("net_packets",
+ ifa->ifa_name,
+ NULL,
+ ifa->ifa_name,
+ "net.packets",
+ "Packets",
+ "packets/s",
+ 7001,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(ifm->st_packets, RRDSET_FLAG_DETAIL);
+
+ ifm->rd_packets_in = rrddim_add(ifm->st_packets, "received", NULL, 1, 1,
+ RRD_ALGORITHM_INCREMENTAL);
+ ifm->rd_packets_out = rrddim_add(ifm->st_packets, "sent", NULL, -1, 1,
+ RRD_ALGORITHM_INCREMENTAL);
+ ifm->rd_packets_m_in = rrddim_add(ifm->st_packets, "multicast_received", NULL, 1, 1,
+ RRD_ALGORITHM_INCREMENTAL);
+ ifm->rd_packets_m_out = rrddim_add(ifm->st_packets, "multicast_sent", NULL, -1, 1,
+ RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(ifm->st_packets);
+
+ rrddim_set_by_pointer(ifm->st_packets, ifm->rd_packets_in, IFA_DATA(ipackets));
+ rrddim_set_by_pointer(ifm->st_packets, ifm->rd_packets_out, IFA_DATA(opackets));
+ rrddim_set_by_pointer(ifm->st_packets, ifm->rd_packets_m_in, IFA_DATA(imcasts));
+ rrddim_set_by_pointer(ifm->st_packets, ifm->rd_packets_m_out, IFA_DATA(omcasts));
+ rrdset_done(ifm->st_packets);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (ifm->do_errors == CONFIG_BOOLEAN_YES || (ifm->do_errors == CONFIG_BOOLEAN_AUTO &&
+ (IFA_DATA(ierrors) || IFA_DATA(oerrors)))) {
+ if (unlikely(!ifm->st_errors)) {
+ ifm->st_errors = rrdset_create_localhost("net_errors",
+ ifa->ifa_name,
+ NULL,
+ ifa->ifa_name,
+ "net.errors",
+ "Interface Errors",
+ "errors/s",
+ 7002,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(ifm->st_errors, RRDSET_FLAG_DETAIL);
+
+ ifm->rd_errors_in = rrddim_add(ifm->st_errors, "inbound", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ ifm->rd_errors_out = rrddim_add(ifm->st_errors, "outbound", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(ifm->st_errors);
+
+ rrddim_set_by_pointer(ifm->st_errors, ifm->rd_errors_in, IFA_DATA(ierrors));
+ rrddim_set_by_pointer(ifm->st_errors, ifm->rd_errors_out, IFA_DATA(oerrors));
+ rrdset_done(ifm->st_errors);
+ }
+ // --------------------------------------------------------------------
+
+ if (ifm->do_drops == CONFIG_BOOLEAN_YES || (ifm->do_drops == CONFIG_BOOLEAN_AUTO &&
+ (IFA_DATA(iqdrops)
+#if __FreeBSD__ >= 11
+ || IFA_DATA(oqdrops)
+#endif
+ ))) {
+ if (unlikely(!ifm->st_drops)) {
+ ifm->st_drops = rrdset_create_localhost("net_drops",
+ ifa->ifa_name,
+ NULL,
+ ifa->ifa_name,
+ "net.drops",
+ "Interface Drops",
+ "drops/s",
+ 7003,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(ifm->st_drops, RRDSET_FLAG_DETAIL);
+
+ ifm->rd_drops_in = rrddim_add(ifm->st_drops, "inbound", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+#if __FreeBSD__ >= 11
+ ifm->rd_drops_out = rrddim_add(ifm->st_drops, "outbound", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+#endif
+ } else
+ rrdset_next(ifm->st_drops);
+
+ rrddim_set_by_pointer(ifm->st_drops, ifm->rd_drops_in, IFA_DATA(iqdrops));
+#if __FreeBSD__ >= 11
+ rrddim_set_by_pointer(ifm->st_drops, ifm->rd_drops_out, IFA_DATA(oqdrops));
+#endif
+ rrdset_done(ifm->st_drops);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (ifm->do_events == CONFIG_BOOLEAN_YES || (ifm->do_events == CONFIG_BOOLEAN_AUTO &&
+ IFA_DATA(collisions))) {
+ if (unlikely(!ifm->st_events)) {
+ ifm->st_events = rrdset_create_localhost("net_events",
+ ifa->ifa_name,
+ NULL,
+ ifa->ifa_name,
+ "net.events",
+ "Network Interface Events",
+ "events/s",
+ 7006,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ rrdset_flag_set(ifm->st_events, RRDSET_FLAG_DETAIL);
+
+ ifm->rd_events_coll = rrddim_add(ifm->st_events, "collisions", NULL, -1, 1,
+ RRD_ALGORITHM_INCREMENTAL);
+ } else
+ rrdset_next(ifm->st_events);
+
+ rrddim_set_by_pointer(ifm->st_events, ifm->rd_events_coll, IFA_DATA(collisions));
+ rrdset_done(ifm->st_events);
+ }
+ }
+
+ freeifaddrs(ifap);
+ }
+ } else {
+ error("DISABLED: getifaddrs module");
+ return 1;
+ }
+
+ network_interfaces_cleanup();
+
+ return 0;
+}
diff --git a/src/freebsd_getmntinfo.c b/src/freebsd_getmntinfo.c
new file mode 100644
index 00000000..e7ca56b5
--- /dev/null
+++ b/src/freebsd_getmntinfo.c
@@ -0,0 +1,293 @@
+#include "common.h"
+
+#include <sys/mount.h>
+
+struct mount_point {
+ char *name;
+ uint32_t hash;
+ size_t len;
+
+ // flags
+ int configured;
+ int enabled;
+ int updated;
+
+ int do_space;
+ int do_inodes;
+
+ size_t collected; // the number of times this has been collected
+
+ // charts and dimensions
+
+ RRDSET *st_space;
+ RRDDIM *rd_space_used;
+ RRDDIM *rd_space_avail;
+ RRDDIM *rd_space_reserved;
+
+ RRDSET *st_inodes;
+ RRDDIM *rd_inodes_used;
+ RRDDIM *rd_inodes_avail;
+
+ struct mount_point *next;
+};
+
+static struct mount_point *mount_points_root = NULL, *mount_points_last_used = NULL;
+
+static size_t mount_points_added = 0, mount_points_found = 0;
+
+static void mount_point_free(struct mount_point *m) {
+ if (likely(m->st_space))
+ rrdset_is_obsolete(m->st_space);
+ if (likely(m->st_inodes))
+ rrdset_is_obsolete(m->st_inodes);
+
+ mount_points_added--;
+ freez(m->name);
+ freez(m);
+}
+
+static void mount_points_cleanup() {
+ if (likely(mount_points_found == mount_points_added)) return;
+
+ struct mount_point *m = mount_points_root, *last = NULL;
+ while(m) {
+ if (unlikely(!m->updated)) {
+ // info("Removing mount point '%s', linked after '%s'", m->name, last?last->name:"ROOT");
+
+ if (mount_points_last_used == m)
+ mount_points_last_used = last;
+
+ struct mount_point *t = m;
+
+ if (m == mount_points_root || !last)
+ mount_points_root = m = m->next;
+
+ else
+ last->next = m = m->next;
+
+ t->next = NULL;
+ mount_point_free(t);
+ }
+ else {
+ last = m;
+ m->updated = 0;
+ m = m->next;
+ }
+ }
+}
+
+static struct mount_point *get_mount_point(const char *name) {
+ struct mount_point *m;
+
+ uint32_t hash = simple_hash(name);
+
+ // search it, from the last position to the end
+ for(m = mount_points_last_used ; m ; m = m->next) {
+ if (unlikely(hash == m->hash && !strcmp(name, m->name))) {
+ mount_points_last_used = m->next;
+ return m;
+ }
+ }
+
+ // search it from the beginning to the last position we used
+ for(m = mount_points_root ; m != mount_points_last_used ; m = m->next) {
+ if (unlikely(hash == m->hash && !strcmp(name, m->name))) {
+ mount_points_last_used = m->next;
+ return m;
+ }
+ }
+
+ // create a new one
+ m = callocz(1, sizeof(struct mount_point));
+ m->name = strdupz(name);
+ m->hash = simple_hash(m->name);
+ m->len = strlen(m->name);
+ mount_points_added++;
+
+ // link it to the end
+ if (mount_points_root) {
+ struct mount_point *e;
+ for(e = mount_points_root; e->next ; e = e->next) ;
+ e->next = m;
+ }
+ else
+ mount_points_root = m;
+
+ return m;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// getmntinfo
+
+int do_getmntinfo(int update_every, usec_t dt) {
+ (void)dt;
+
+#define DEFAULT_EXCLUDED_PATHS "/proc/*"
+// taken from gnulib/mountlist.c and shortened to FreeBSD-related fstypes
+#define DEFAULT_EXCLUDED_FILESYSTEMS "autofs procfs subfs devfs none"
+#define CONFIG_SECTION_GETMNTINFO "plugin:freebsd:getmntinfo"
+
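+ // Settings are resolved once (the statics start at -1) and are tri-state:
+ // CONFIG_BOOLEAN_YES, CONFIG_BOOLEAN_NO, or CONFIG_BOOLEAN_AUTO. AUTO
+ // enables a chart only while the collected values look meaningful (see the
+ // f_blocks and f_files checks below).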
+ static int enable_new_mount_points = -1;
+ static int do_space = -1, do_inodes = -1;
+ static SIMPLE_PATTERN *excluded_mountpoints = NULL;
+ static SIMPLE_PATTERN *excluded_filesystems = NULL;
+
+ if (unlikely(enable_new_mount_points == -1)) {
+ enable_new_mount_points = config_get_boolean_ondemand(CONFIG_SECTION_GETMNTINFO,
+ "enable new mount points detected at runtime",
+ CONFIG_BOOLEAN_AUTO);
+
+ do_space = config_get_boolean_ondemand(CONFIG_SECTION_GETMNTINFO, "space usage for all disks", CONFIG_BOOLEAN_AUTO);
+ do_inodes = config_get_boolean_ondemand(CONFIG_SECTION_GETMNTINFO, "inodes usage for all disks", CONFIG_BOOLEAN_AUTO);
+
+ excluded_mountpoints = simple_pattern_create(
+ config_get(CONFIG_SECTION_GETMNTINFO, "exclude space metrics on paths",
+ DEFAULT_EXCLUDED_PATHS),
+ SIMPLE_PATTERN_EXACT
+ );
+
+ excluded_filesystems = simple_pattern_create(
+ config_get(CONFIG_SECTION_GETMNTINFO, "exclude space metrics on filesystems",
+ DEFAULT_EXCLUDED_FILESYSTEMS),
+ SIMPLE_PATTERN_EXACT
+ );
+ }
+
+ if (likely(do_space || do_inodes)) {
+ struct statfs *mntbuf;
+ int mntsize;
+
+ // there is no mount info in sysctl MIBs
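+ // getmntinfo(3) with MNT_NOWAIT returns the cached statfs information
+ // without querying each filesystem, so it cannot block; it returns 0
+ // only on failure.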
+ if (unlikely(!(mntsize = getmntinfo(&mntbuf, MNT_NOWAIT)))) {
+ error("FREEBSD: getmntinfo() failed");
+ do_space = 0;
+ error("DISABLED: disk_space.* charts");
+ do_inodes = 0;
+ error("DISABLED: disk_inodes.* charts");
+ error("DISABLED: getmntinfo module");
+ return 1;
+ } else {
+ int i;
+
+ mount_points_found = 0;
+
+ for (i = 0; i < mntsize; i++) {
+ char title[4096 + 1];
+
+ struct mount_point *m = get_mount_point(mntbuf[i].f_mntonname);
+ m->updated = 1;
+ mount_points_found++;
+
+ if (unlikely(!m->configured)) {
+ char var_name[4096 + 1];
+
+ // this is the first time we see this filesystem
+
+ // remember we configured it
+ m->configured = 1;
+
+ m->enabled = enable_new_mount_points;
+
+ if (likely(m->enabled))
+ m->enabled = !(simple_pattern_matches(excluded_mountpoints, mntbuf[i].f_mntonname)
+ || simple_pattern_matches(excluded_filesystems, mntbuf[i].f_fstypename));
+
+ snprintfz(var_name, 4096, "%s:%s", CONFIG_SECTION_GETMNTINFO, mntbuf[i].f_mntonname);
+ m->enabled = config_get_boolean_ondemand(var_name, "enabled", m->enabled);
+
+ if (unlikely(m->enabled == CONFIG_BOOLEAN_NO))
+ continue;
+
+ m->do_space = config_get_boolean_ondemand(var_name, "space usage", do_space);
+ m->do_inodes = config_get_boolean_ondemand(var_name, "inodes usage", do_inodes);
+ }
+
+ if (unlikely(!m->enabled))
+ continue;
+
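+ // Skip read-only mounts that have never produced data (e.g. mounted
+ // media), so charts are not created for them; once collected at least
+ // once, a mount point keeps updating even if it becomes read-only.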
+ if (unlikely(mntbuf[i].f_flags & MNT_RDONLY && !m->collected))
+ continue;
+
+ // --------------------------------------------------------------------------
+
+ int rendered = 0;
+
+ if (m->do_space == CONFIG_BOOLEAN_YES || (m->do_space == CONFIG_BOOLEAN_AUTO && (mntbuf[i].f_blocks > 2))) {
+ if (unlikely(!m->st_space)) {
+ snprintfz(title, 4096, "Disk Space Usage for %s [%s]",
+ mntbuf[i].f_mntonname, mntbuf[i].f_mntfromname);
+ m->st_space = rrdset_create_localhost("disk_space",
+ mntbuf[i].f_mntonname,
+ NULL,
+ mntbuf[i].f_mntonname,
+ "disk.space",
+ title,
+ "GB",
+ 2023,
+ update_every,
+ RRDSET_TYPE_STACKED
+ );
+
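+ // Values are collected in filesystem blocks; the multiplier/divider
+ // pair (f_bsize / GIGA_FACTOR) makes netdata scale them to GB at
+ // rendering time instead of converting here.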
+ m->rd_space_avail = rrddim_add(m->st_space, "avail", NULL,
+ mntbuf[i].f_bsize, GIGA_FACTOR, RRD_ALGORITHM_ABSOLUTE);
+ m->rd_space_used = rrddim_add(m->st_space, "used", NULL,
+ mntbuf[i].f_bsize, GIGA_FACTOR, RRD_ALGORITHM_ABSOLUTE);
+ m->rd_space_reserved = rrddim_add(m->st_space, "reserved_for_root", "reserved for root",
+ mntbuf[i].f_bsize, GIGA_FACTOR, RRD_ALGORITHM_ABSOLUTE);
+ } else
+ rrdset_next(m->st_space);
+
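+ // statfs(2) arithmetic: f_bfree includes the root-reserved blocks,
+ // f_bavail does not; hence used = f_blocks - f_bfree and
+ // reserved_for_root = f_bfree - f_bavail.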
+ rrddim_set_by_pointer(m->st_space, m->rd_space_avail, (collected_number) mntbuf[i].f_bavail);
+ rrddim_set_by_pointer(m->st_space, m->rd_space_used, (collected_number) (mntbuf[i].f_blocks -
+ mntbuf[i].f_bfree));
+ rrddim_set_by_pointer(m->st_space, m->rd_space_reserved, (collected_number) (mntbuf[i].f_bfree -
+ mntbuf[i].f_bavail));
+ rrdset_done(m->st_space);
+
+ rendered++;
+ }
+
+ // --------------------------------------------------------------------------
+
+ if (m->do_inodes == CONFIG_BOOLEAN_YES || (m->do_inodes == CONFIG_BOOLEAN_AUTO && (mntbuf[i].f_files > 1))) {
+ if (unlikely(!m->st_inodes)) {
+ snprintfz(title, 4096, "Disk Files (inodes) Usage for %s [%s]",
+ mntbuf[i].f_mntonname, mntbuf[i].f_mntfromname);
+ m->st_inodes = rrdset_create_localhost("disk_inodes",
+ mntbuf[i].f_mntonname,
+ NULL,
+ mntbuf[i].f_mntonname,
+ "disk.inodes",
+ title,
+ "Inodes",
+ 2024,
+ update_every,
+ RRDSET_TYPE_STACKED
+ );
+
+ m->rd_inodes_avail = rrddim_add(m->st_inodes, "avail", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ m->rd_inodes_used = rrddim_add(m->st_inodes, "used", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ } else
+ rrdset_next(m->st_inodes);
+
+ rrddim_set_by_pointer(m->st_inodes, m->rd_inodes_avail, (collected_number) mntbuf[i].f_ffree);
+ rrddim_set_by_pointer(m->st_inodes, m->rd_inodes_used, (collected_number) (mntbuf[i].f_files -
+ mntbuf[i].f_ffree));
+ rrdset_done(m->st_inodes);
+
+ rendered++;
+ }
+
+ if (likely(rendered))
+ m->collected++;
+ }
+ }
+ } else {
+ error("DISABLED: getmntinfo module");
+ return 1;
+ }
+
+ mount_points_cleanup();
+
+ return 0;
+}
diff --git a/src/freebsd_ipfw.c b/src/freebsd_ipfw.c
new file mode 100644
index 00000000..b89650a0
--- /dev/null
+++ b/src/freebsd_ipfw.c
@@ -0,0 +1,360 @@
+#include "common.h"
+
+#include <netinet/ip_fw.h>
+
+#define FREE_MEM_THRESHOLD 10000 // number of unused chunks that trigger memory freeing
+
+#define COMMON_IPFW_ERROR() error("DISABLED: ipfw.packets chart"); \
+ error("DISABLED: ipfw.bytes chart"); \
+ error("DISABLED: ipfw.dyn_active chart"); \
+ error("DISABLED: ipfw.dyn_expired chart"); \
+ error("DISABLED: ipfw.mem chart");
+
+// --------------------------------------------------------------------------------------------------------------------
+// ipfw
+
+int do_ipfw(int update_every, usec_t dt) {
+ (void)dt;
+#if __FreeBSD__ >= 11
+
+ static int do_static = -1, do_dynamic = -1, do_mem = -1;
+
+ if (unlikely(do_static == -1)) {
+ do_static = config_get_boolean("plugin:freebsd:ipfw", "counters for static rules", 1);
+ do_dynamic = config_get_boolean("plugin:freebsd:ipfw", "number of dynamic rules", 1);
+ do_mem = config_get_boolean("plugin:freebsd:ipfw", "allocated memory", 1);
+ }
+
+ // variables for getting ipfw configuration
+
+ int error;
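+ // NB: this local does not break the error(...) calls below, because
+ // netdata's error() appears to be a function-like macro, which only
+ // expands when the name is followed by a parenthesis.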
+ static int ipfw_socket = -1;
+ static ipfw_cfg_lheader *cfg = NULL;
+ ip_fw3_opheader *op3 = NULL;
+ static socklen_t *optlen = NULL, cfg_size = 0;
+
+ // variables for static rules handling
+
+ ipfw_obj_ctlv *ctlv = NULL;
+ ipfw_obj_tlv *rbase = NULL;
+ int rcnt = 0;
+
+ int n, seen;
+ struct ip_fw_rule *rule;
+ struct ip_fw_bcounter *cntr;
+ int c = 0;
+
+ char rule_num_str[12];
+
+ // variables for dynamic rules handling
+
+ caddr_t dynbase = NULL;
+ size_t dynsz = 0;
+ size_t readsz = sizeof(*cfg);
+ int ttype = 0;
+ ipfw_obj_tlv *tlv;
+ ipfw_dyn_rule *dyn_rule;
+ uint16_t rulenum, prev_rulenum = IPFW_DEFAULT_RULE;
+ unsigned srn, static_rules_num = 0;
+ static size_t dyn_rules_num_size = 0;
+
+ static struct dyn_rule_num {
+ uint16_t rule_num;
+ uint32_t active_rules;
+ uint32_t expired_rules;
+ } *dyn_rules_num = NULL;
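+ // dyn_rules_num is a flat array with one slot per static rule; the used
+ // portion is terminated by a slot whose rule_num is IPFW_DEFAULT_RULE
+ // (the highest rule number), which acts as a sentinel while counting
+ // active and expired dynamic states per rule below.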
+
+ uint32_t *dyn_rules_counter;
+
+ if (likely(do_static | do_dynamic | do_mem)) {
+
+ // initialize the smallest ipfw_cfg_lheader possible
+
+ if (unlikely((optlen == NULL) || (cfg == NULL))) {
+ optlen = reallocz(optlen, sizeof(socklen_t));
+ *optlen = cfg_size = 32;
+ cfg = reallocz(cfg, *optlen);
+ }
+
+ // get socket descriptor and initialize ipfw_cfg_lheader structure
+
+ if (unlikely(ipfw_socket == -1))
+ ipfw_socket = socket(AF_INET, SOCK_RAW, IPPROTO_RAW);
+ if (unlikely(ipfw_socket == -1)) {
+ error("FREEBSD: can't get socket for ipfw configuration");
+ error("FREEBSD: run netdata as root to get access to ipfw data");
+ COMMON_IPFW_ERROR();
+ return 1;
+ }
+
+ bzero(cfg, 32);
+ cfg->flags = IPFW_CFG_GET_STATIC | IPFW_CFG_GET_COUNTERS | IPFW_CFG_GET_STATES;
+ op3 = &cfg->opheader;
+ op3->opcode = IP_FW_XGET;
+
+ // get the ipfw configuration size, then fetch the configuration
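+ // The request is issued with the current buffer; if the kernel needs
+ // more room it fails with ENOMEM and reports the required size in
+ // cfg->size. The buffer is then grown (or shrunk, when it exceeds the
+ // need by more than FREE_MEM_THRESHOLD state records) and the request
+ // is retried once with the exact size.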
+
+ *optlen = cfg_size;
+ error = getsockopt(ipfw_socket, IPPROTO_IP, IP_FW3, op3, optlen);
+ if (error)
+ if (errno != ENOMEM) {
+ error("FREEBSD: ipfw socket reading error");
+ COMMON_IPFW_ERROR();
+ return 1;
+ }
+ if ((cfg->size > cfg_size) || ((cfg_size - cfg->size) > sizeof(struct dyn_rule_num) * FREE_MEM_THRESHOLD)) {
+ *optlen = cfg_size = cfg->size;
+ cfg = reallocz(cfg, *optlen);
+ bzero(cfg, 32);
+ cfg->flags = IPFW_CFG_GET_STATIC | IPFW_CFG_GET_COUNTERS | IPFW_CFG_GET_STATES;
+ op3 = &cfg->opheader;
+ op3->opcode = IP_FW_XGET;
+ error = getsockopt(ipfw_socket, IPPROTO_IP, IP_FW3, op3, optlen);
+ if (error) {
+ error("FREEBSD: ipfw socket reading error");
+ COMMON_IPFW_ERROR();
+ return 1;
+ }
+ }
+
+ // go through static rules configuration structures
+
+ ctlv = (ipfw_obj_ctlv *) (cfg + 1);
+
+ if (cfg->flags & IPFW_CFG_GET_STATIC) {
+ /* We've requested static rules */
+ if (ctlv->head.type == IPFW_TLV_TBLNAME_LIST) {
+ readsz += ctlv->head.length;
+ ctlv = (ipfw_obj_ctlv *) ((caddr_t) ctlv +
+ ctlv->head.length);
+ }
+
+ if (ctlv->head.type == IPFW_TLV_RULE_LIST) {
+ rbase = (ipfw_obj_tlv *) (ctlv + 1);
+ rcnt = ctlv->count;
+ readsz += ctlv->head.length;
+ ctlv = (ipfw_obj_ctlv *) ((caddr_t) ctlv + ctlv->head.length);
+ }
+ }
+
+ if ((cfg->flags & IPFW_CFG_GET_STATES) && (readsz != *optlen)) {
+ /* We may have some dynamic states */
+ dynsz = *optlen - readsz;
+ /* Skip empty header */
+ if (dynsz != sizeof(ipfw_obj_ctlv))
+ dynbase = (caddr_t) ctlv;
+ else
+ dynsz = 0;
+ }
+
+ // --------------------------------------------------------------------
+
+ if (likely(do_mem)) {
+ static RRDSET *st_mem = NULL;
+ static RRDDIM *rd_dyn_mem = NULL;
+ static RRDDIM *rd_stat_mem = NULL;
+
+ if (unlikely(!st_mem)) {
+ st_mem = rrdset_create_localhost("ipfw",
+ "mem",
+ NULL,
+ "memory allocated",
+ NULL,
+ "Memory allocated by rules",
+ "bytes",
+ 3005,
+ update_every,
+ RRDSET_TYPE_STACKED
+ );
+ rrdset_flag_set(st_mem, RRDSET_FLAG_DETAIL);
+
+ rd_dyn_mem = rrddim_add(st_mem, "dynamic", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ rd_stat_mem = rrddim_add(st_mem, "static", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ } else
+ rrdset_next(st_mem);
+
+ rrddim_set_by_pointer(st_mem, rd_dyn_mem, dynsz);
+ rrddim_set_by_pointer(st_mem, rd_stat_mem, *optlen - dynsz);
+ rrdset_done(st_mem);
+ }
+
+ // --------------------------------------------------------------------
+
+ static RRDSET *st_packets = NULL, *st_bytes = NULL;
+ RRDDIM *rd_packets = NULL, *rd_bytes = NULL;
+
+ if (likely(do_static || do_dynamic)) {
+ if (likely(do_static)) {
+ if (unlikely(!st_packets))
+ st_packets = rrdset_create_localhost("ipfw",
+ "packets",
+ NULL,
+ "static rules",
+ NULL,
+ "Packets",
+ "packets/s",
+ 3001,
+ update_every,
+ RRDSET_TYPE_STACKED
+ );
+ else
+ rrdset_next(st_packets);
+
+ if (unlikely(!st_bytes))
+ st_bytes = rrdset_create_localhost("ipfw",
+ "bytes",
+ NULL,
+ "static rules",
+ NULL,
+ "Bytes",
+ "bytes/s",
+ 3002,
+ update_every,
+ RRDSET_TYPE_STACKED
+ );
+ else
+ rrdset_next(st_bytes);
+ }
+
+ for (n = seen = 0; n < rcnt; n++, rbase = (ipfw_obj_tlv *) ((caddr_t) rbase + rbase->length)) {
+ cntr = (struct ip_fw_bcounter *) (rbase + 1);
+ rule = (struct ip_fw_rule *) ((caddr_t) cntr + cntr->size);
+ if (rule->rulenum != prev_rulenum)
+ static_rules_num++;
+ if (rule->rulenum > IPFW_DEFAULT_RULE)
+ break;
+
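+ // Rules appear and disappear at runtime, so dimensions are looked up
+ // by name ("rulenum_id") on every pass and added lazily the first
+ // time a rule is seen.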
+ if (likely(do_static)) {
+ sprintf(rule_num_str, "%d_%d", rule->rulenum, rule->id);
+
+ rd_packets = rrddim_find(st_packets, rule_num_str);
+ if (unlikely(!rd_packets))
+ rd_packets = rrddim_add(st_packets, rule_num_str, NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_set_by_pointer(st_packets, rd_packets, cntr->pcnt);
+
+ rd_bytes = rrddim_find(st_bytes, rule_num_str);
+ if (unlikely(!rd_bytes))
+ rd_bytes = rrddim_add(st_bytes, rule_num_str, NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_set_by_pointer(st_bytes, rd_bytes, cntr->bcnt);
+ }
+
+ c += rbase->length;
+ seen++;
+ }
+
+ if (likely(do_static)) {
+ rrdset_done(st_packets);
+ rrdset_done(st_bytes);
+ }
+ }
+
+ // --------------------------------------------------------------------
+
+ // go through dynamic rules configuration structures
+
+ if (likely(do_dynamic && (dynsz > 0))) {
+ if ((dyn_rules_num_size < sizeof(struct dyn_rule_num) * static_rules_num) ||
+ ((dyn_rules_num_size - sizeof(struct dyn_rule_num) * static_rules_num) >
+ sizeof(struct dyn_rule_num) * FREE_MEM_THRESHOLD)) {
+ dyn_rules_num_size = sizeof(struct dyn_rule_num) * static_rules_num;
+ dyn_rules_num = reallocz(dyn_rules_num, dyn_rules_num_size);
+ }
+ bzero(dyn_rules_num, sizeof(struct dyn_rule_num) * static_rules_num);
+ dyn_rules_num->rule_num = IPFW_DEFAULT_RULE;
+
+ if (dynsz > 0 && ctlv->head.type == IPFW_TLV_DYNSTATE_LIST) {
+ dynbase += sizeof(*ctlv);
+ dynsz -= sizeof(*ctlv);
+ ttype = IPFW_TLV_DYN_ENT;
+ }
+
+ while (dynsz > 0) {
+ tlv = (ipfw_obj_tlv *) dynbase;
+ if (tlv->type != ttype)
+ break;
+
+ dyn_rule = (ipfw_dyn_rule *) (tlv + 1);
+ bcopy(&dyn_rule->rule, &rulenum, sizeof(rulenum));
+
+ for (srn = 0; srn < (static_rules_num - 1); srn++) {
+ if (dyn_rule->expire > 0)
+ dyn_rules_counter = &dyn_rules_num[srn].active_rules;
+ else
+ dyn_rules_counter = &dyn_rules_num[srn].expired_rules;
+ if (dyn_rules_num[srn].rule_num == rulenum) {
+ (*dyn_rules_counter)++;
+ break;
+ }
+ if (dyn_rules_num[srn].rule_num == IPFW_DEFAULT_RULE) {
+ dyn_rules_num[srn].rule_num = rulenum;
+ dyn_rules_num[srn + 1].rule_num = IPFW_DEFAULT_RULE;
+ (*dyn_rules_counter)++;
+ break;
+ }
+ }
+
+ dynsz -= tlv->length;
+ dynbase += tlv->length;
+ }
+
+ // --------------------------------------------------------------------
+
+ static RRDSET *st_active = NULL, *st_expired = NULL;
+ RRDDIM *rd_active = NULL, *rd_expired = NULL;
+
+ if (unlikely(!st_active))
+ st_active = rrdset_create_localhost("ipfw",
+ "active",
+ NULL,
+ "dynamic_rules",
+ NULL,
+ "Active rules",
+ "rules",
+ 3003,
+ update_every,
+ RRDSET_TYPE_STACKED
+ );
+ else
+ rrdset_next(st_active);
+
+ if (unlikely(!st_expired))
+ st_expired = rrdset_create_localhost("ipfw",
+ "expired",
+ NULL,
+ "dynamic_rules",
+ NULL,
+ "Expired rules",
+ "rules",
+ 3004,
+ update_every,
+ RRDSET_TYPE_STACKED
+ );
+ else
+ rrdset_next(st_expired);
+
+ for (srn = 0; (srn < (static_rules_num - 1)) && (dyn_rules_num[srn].rule_num != IPFW_DEFAULT_RULE); srn++) {
+ sprintf(rule_num_str, "%d", dyn_rules_num[srn].rule_num);
+
+ rd_active = rrddim_find(st_active, rule_num_str);
+ if (unlikely(!rd_active))
+ rd_active = rrddim_add(st_active, rule_num_str, NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_set_by_pointer(st_active, rd_active, dyn_rules_num[srn].active_rules);
+
+ rd_expired = rrddim_find(st_expired, rule_num_str);
+ if (unlikely(!rd_expired))
+ rd_expired = rrddim_add(st_expired, rule_num_str, NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_set_by_pointer(st_expired, rd_expired, dyn_rules_num[srn].expired_rules);
+ }
+
+ rrdset_done(st_active);
+ rrdset_done(st_expired);
+ }
+ }
+
+ return 0;
+#else
+ error("FREEBSD: ipfw charts supported for FreeBSD 11.0 and newer releases only");
+ COMMON_IPFW_ERROR();
+ return 1;
+#endif
+}
diff --git a/src/freebsd_kstat_zfs.c b/src/freebsd_kstat_zfs.c
new file mode 100644
index 00000000..17642994
--- /dev/null
+++ b/src/freebsd_kstat_zfs.c
@@ -0,0 +1,212 @@
+#include "common.h"
+#include "zfs_common.h"
+
+struct arcstats arcstats = { 0 };
+
+// kstat.zfs.misc.arcstats
+int do_kstat_zfs_misc_arcstats(int update_every, usec_t dt) {
+ (void)dt;
+
+ unsigned long long l2_size;
+ size_t uint64_t_size = sizeof(uint64_t);
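+ // One cached MIB buffer (int[5]) per kstat: GETSYSCTL_SIMPLE presumably
+ // resolves each sysctl name to its numeric OID once and reuses the OID on
+ // subsequent iterations, avoiding a name lookup per metric per update.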
+ static struct mibs {
+ int hits[5];
+ int misses[5];
+ int demand_data_hits[5];
+ int demand_data_misses[5];
+ int demand_metadata_hits[5];
+ int demand_metadata_misses[5];
+ int prefetch_data_hits[5];
+ int prefetch_data_misses[5];
+ int prefetch_metadata_hits[5];
+ int prefetch_metadata_misses[5];
+ int mru_hits[5];
+ int mru_ghost_hits[5];
+ int mfu_hits[5];
+ int mfu_ghost_hits[5];
+ int deleted[5];
+ int mutex_miss[5];
+ int evict_skip[5];
+ int evict_not_enough[5];
+ int evict_l2_cached[5];
+ int evict_l2_eligible[5];
+ int evict_l2_ineligible[5];
+ int evict_l2_skip[5];
+ int hash_elements[5];
+ int hash_elements_max[5];
+ int hash_collisions[5];
+ int hash_chains[5];
+ int hash_chain_max[5];
+ int p[5];
+ int c[5];
+ int c_min[5];
+ int c_max[5];
+ int size[5];
+ int hdr_size[5];
+ int data_size[5];
+ int metadata_size[5];
+ int other_size[5];
+ int anon_size[5];
+ int anon_evictable_data[5];
+ int anon_evictable_metadata[5];
+ int mru_size[5];
+ int mru_evictable_data[5];
+ int mru_evictable_metadata[5];
+ int mru_ghost_size[5];
+ int mru_ghost_evictable_data[5];
+ int mru_ghost_evictable_metadata[5];
+ int mfu_size[5];
+ int mfu_evictable_data[5];
+ int mfu_evictable_metadata[5];
+ int mfu_ghost_size[5];
+ int mfu_ghost_evictable_data[5];
+ int mfu_ghost_evictable_metadata[5];
+ int l2_hits[5];
+ int l2_misses[5];
+ int l2_feeds[5];
+ int l2_rw_clash[5];
+ int l2_read_bytes[5];
+ int l2_write_bytes[5];
+ int l2_writes_sent[5];
+ int l2_writes_done[5];
+ int l2_writes_error[5];
+ int l2_writes_lock_retry[5];
+ int l2_evict_lock_retry[5];
+ int l2_evict_reading[5];
+ int l2_evict_l1cached[5];
+ int l2_free_on_write[5];
+ int l2_cdata_free_on_write[5];
+ int l2_abort_lowmem[5];
+ int l2_cksum_bad[5];
+ int l2_io_error[5];
+ int l2_size[5];
+ int l2_asize[5];
+ int l2_hdr_size[5];
+ int l2_compress_successes[5];
+ int l2_compress_zeros[5];
+ int l2_compress_failures[5];
+ int memory_throttle_count[5];
+ int duplicate_buffers[5];
+ int duplicate_buffers_size[5];
+ int duplicate_reads[5];
+ int memory_direct_count[5];
+ int memory_indirect_count[5];
+ int arc_no_grow[5];
+ int arc_tempreserve[5];
+ int arc_loaned_bytes[5];
+ int arc_prune[5];
+ int arc_meta_used[5];
+ int arc_meta_limit[5];
+ int arc_meta_max[5];
+ int arc_meta_min[5];
+ int arc_need_free[5];
+ int arc_sys_free[5];
+ } mibs;
+
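+ // Probe kstat.zfs.misc.arcstats.l2_size first: a non-zero value means an
+ // L2ARC device is present. l2exist (assumed to be declared in zfs_common.h)
+ // tells the shared chart generators whether to render the L2 charts.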
+ l2exist = -1;
+
+ if(unlikely(sysctlbyname("kstat.zfs.misc.arcstats.l2_size", &l2_size, &uint64_t_size, NULL, 0)))
+ return 0;
+
+ if(likely(l2_size))
+ l2exist = 1;
+ else
+ l2exist = 0;
+
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.hits", mibs.hits, arcstats.hits);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.misses", mibs.misses, arcstats.misses);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.demand_data_hits", mibs.demand_data_hits, arcstats.demand_data_hits);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.demand_data_misses", mibs.demand_data_misses, arcstats.demand_data_misses);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.demand_metadata_hits", mibs.demand_metadata_hits, arcstats.demand_metadata_hits);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.demand_metadata_misses", mibs.demand_metadata_misses, arcstats.demand_metadata_misses);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.prefetch_data_hits", mibs.prefetch_data_hits, arcstats.prefetch_data_hits);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.prefetch_data_misses", mibs.prefetch_data_misses, arcstats.prefetch_data_misses);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.prefetch_metadata_hits", mibs.prefetch_metadata_hits, arcstats.prefetch_metadata_hits);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.prefetch_metadata_misses", mibs.prefetch_metadata_misses, arcstats.prefetch_metadata_misses);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mru_hits", mibs.mru_hits, arcstats.mru_hits);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mru_ghost_hits", mibs.mru_ghost_hits, arcstats.mru_ghost_hits);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mfu_hits", mibs.mfu_hits, arcstats.mfu_hits);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mfu_ghost_hits", mibs.mfu_ghost_hits, arcstats.mfu_ghost_hits);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.deleted", mibs.deleted, arcstats.deleted);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mutex_miss", mibs.mutex_miss, arcstats.mutex_miss);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.evict_skip", mibs.evict_skip, arcstats.evict_skip);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.evict_not_enough", mibs.evict_not_enough, arcstats.evict_not_enough);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.evict_l2_cached", mibs.evict_l2_cached, arcstats.evict_l2_cached);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.evict_l2_eligible", mibs.evict_l2_eligible, arcstats.evict_l2_eligible);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.evict_l2_ineligible", mibs.evict_l2_ineligible, arcstats.evict_l2_ineligible);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.evict_l2_skip", mibs.evict_l2_skip, arcstats.evict_l2_skip);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.hash_elements", mibs.hash_elements, arcstats.hash_elements);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.hash_elements_max", mibs.hash_elements_max, arcstats.hash_elements_max);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.hash_collisions", mibs.hash_collisions, arcstats.hash_collisions);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.hash_chains", mibs.hash_chains, arcstats.hash_chains);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.hash_chain_max", mibs.hash_chain_max, arcstats.hash_chain_max);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.p", mibs.p, arcstats.p);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.c", mibs.c, arcstats.c);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.c_min", mibs.c_min, arcstats.c_min);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.c_max", mibs.c_max, arcstats.c_max);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.size", mibs.size, arcstats.size);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.hdr_size", mibs.hdr_size, arcstats.hdr_size);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.data_size", mibs.data_size, arcstats.data_size);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.metadata_size", mibs.metadata_size, arcstats.metadata_size);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.other_size", mibs.other_size, arcstats.other_size);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.anon_size", mibs.anon_size, arcstats.anon_size);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.anon_evictable_data", mibs.anon_evictable_data, arcstats.anon_evictable_data);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.anon_evictable_metadata", mibs.anon_evictable_metadata, arcstats.anon_evictable_metadata);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mru_size", mibs.mru_size, arcstats.mru_size);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mru_evictable_data", mibs.mru_evictable_data, arcstats.mru_evictable_data);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mru_evictable_metadata", mibs.mru_evictable_metadata, arcstats.mru_evictable_metadata);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mru_ghost_size", mibs.mru_ghost_size, arcstats.mru_ghost_size);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mru_ghost_evictable_data", mibs.mru_ghost_evictable_data, arcstats.mru_ghost_evictable_data);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mru_ghost_evictable_metadata", mibs.mru_ghost_evictable_metadata, arcstats.mru_ghost_evictable_metadata);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mfu_size", mibs.mfu_size, arcstats.mfu_size);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mfu_evictable_data", mibs.mfu_evictable_data, arcstats.mfu_evictable_data);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mfu_evictable_metadata", mibs.mfu_evictable_metadata, arcstats.mfu_evictable_metadata);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mfu_ghost_size", mibs.mfu_ghost_size, arcstats.mfu_ghost_size);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mfu_ghost_evictable_data", mibs.mfu_ghost_evictable_data, arcstats.mfu_ghost_evictable_data);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mfu_ghost_evictable_metadata", mibs.mfu_ghost_evictable_metadata, arcstats.mfu_ghost_evictable_metadata);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_hits", mibs.l2_hits, arcstats.l2_hits);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_misses", mibs.l2_misses, arcstats.l2_misses);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_feeds", mibs.l2_feeds, arcstats.l2_feeds);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_rw_clash", mibs.l2_rw_clash, arcstats.l2_rw_clash);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_read_bytes", mibs.l2_read_bytes, arcstats.l2_read_bytes);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_write_bytes", mibs.l2_write_bytes, arcstats.l2_write_bytes);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_writes_sent", mibs.l2_writes_sent, arcstats.l2_writes_sent);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_writes_done", mibs.l2_writes_done, arcstats.l2_writes_done);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_writes_error", mibs.l2_writes_error, arcstats.l2_writes_error);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_writes_lock_retry", mibs.l2_writes_lock_retry, arcstats.l2_writes_lock_retry);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_evict_lock_retry", mibs.l2_evict_lock_retry, arcstats.l2_evict_lock_retry);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_evict_reading", mibs.l2_evict_reading, arcstats.l2_evict_reading);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_evict_l1cached", mibs.l2_evict_l1cached, arcstats.l2_evict_l1cached);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_free_on_write", mibs.l2_free_on_write, arcstats.l2_free_on_write);
+ // missing mib: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_cdata_free_on_write", mibs.l2_cdata_free_on_write, arcstats.l2_cdata_free_on_write);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_abort_lowmem", mibs.l2_abort_lowmem, arcstats.l2_abort_lowmem);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_cksum_bad", mibs.l2_cksum_bad, arcstats.l2_cksum_bad);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_io_error", mibs.l2_io_error, arcstats.l2_io_error);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_size", mibs.l2_size, arcstats.l2_size);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_asize", mibs.l2_asize, arcstats.l2_asize);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_hdr_size", mibs.l2_hdr_size, arcstats.l2_hdr_size);
+ // missing mib: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_compress_successes", mibs.l2_compress_successes, arcstats.l2_compress_successes);
+ // missing mib: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_compress_zeros", mibs.l2_compress_zeros, arcstats.l2_compress_zeros);
+ // missing mib: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_compress_failures", mibs.l2_compress_failures, arcstats.l2_compress_failures);
+ GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.memory_throttle_count", mibs.memory_throttle_count, arcstats.memory_throttle_count);
+ // missing mib: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.duplicate_buffers", mibs.duplicate_buffers, arcstats.duplicate_buffers);
+ // missing mib: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.duplicate_buffers_size", mibs.duplicate_buffers_size, arcstats.duplicate_buffers_size);
+ // missing mib: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.duplicate_reads", mibs.duplicate_reads, arcstats.duplicate_reads);
+ // missing mib: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.memory_direct_count", mibs.memory_direct_count, arcstats.memory_direct_count);
+ // missing mib: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.memory_indirect_count", mibs.memory_indirect_count, arcstats.memory_indirect_count);
+ // missing mib: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.arc_no_grow", mibs.arc_no_grow, arcstats.arc_no_grow);
+ // missing mib: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.arc_tempreserve", mibs.arc_tempreserve, arcstats.arc_tempreserve);
+ // missing mib: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.arc_loaned_bytes", mibs.arc_loaned_bytes, arcstats.arc_loaned_bytes);
+ // missing mib: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.arc_prune", mibs.arc_prune, arcstats.arc_prune);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.arc_meta_used", mibs.arc_meta_used, arcstats.arc_meta_used);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.arc_meta_limit", mibs.arc_meta_limit, arcstats.arc_meta_limit);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.arc_meta_max", mibs.arc_meta_max, arcstats.arc_meta_max);
+ // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.arc_meta_min", mibs.arc_meta_min, arcstats.arc_meta_min);
+ // missing mib: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.arc_need_free", mibs.arc_need_free, arcstats.arc_need_free);
+ // missing mib: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.arc_sys_free", mibs.arc_sys_free, arcstats.arc_sys_free);
+
+ generate_charts_arcstats(update_every);
+ generate_charts_arc_summary(update_every);
+
+ return 0;
+}
\ No newline at end of file
diff --git a/src/freebsd_sysctl.c b/src/freebsd_sysctl.c
index 965c1cbb..d2f0eaa8 100644
--- a/src/freebsd_sysctl.c
+++ b/src/freebsd_sysctl.c
@@ -1,8 +1,6 @@
#include "common.h"
#include <sys/vmmeter.h>
-#include <sys/devicestat.h>
-#include <sys/mount.h>
#include <vm/vm_param.h>
#define _KERNEL
@@ -12,8 +10,6 @@
#undef _KERNEL
#include <net/netisr.h>
-#include <net/if.h>
-#include <ifaddrs.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
@@ -29,12 +25,6 @@
// --------------------------------------------------------------------------------------------------------------------
// common definitions and variables
-#define KILO_FACTOR 1024
-#define MEGA_FACTOR 1048576 // 1024 * 1024
-#define GIGA_FACTOR 1073741824 // 1024 * 1024 * 1024
-
-#define MAX_INT_DIGITS 10 // maximum number of digits for int
-
int system_pagesize = PAGE_SIZE;
int number_of_cpus = 1;
@@ -311,8 +301,10 @@ int do_kern_cp_times(int update_every, usec_t dt) {
static int mib[2] = {0, 0};
long cp_time[CPUSTATES];
static long *pcpu_cp_time = NULL;
+ static int old_number_of_cpus = 0;
- pcpu_cp_time = reallocz(pcpu_cp_time, sizeof(cp_time) * number_of_cpus);
+ if(unlikely(number_of_cpus != old_number_of_cpus))
+ pcpu_cp_time = reallocz(pcpu_cp_time, sizeof(cp_time) * number_of_cpus);
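+ // reallocz() now runs only when the CPU count changes, so the steady
+ // state does no allocator work on every data collection iteration.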
if (unlikely(GETSYSCTL_WSIZE("kern.cp_times", mib, pcpu_cp_time, sizeof(cp_time) * number_of_cpus))) {
error("DISABLED: cpu.cpuXX charts");
error("DISABLED: kern.cp_times module");
@@ -331,12 +323,10 @@ int do_kern_cp_times(int update_every, usec_t dt) {
RRDDIM *rd_interrupt;
RRDDIM *rd_idle;
} *all_cpu_charts = NULL;
- static int old_number_of_cpus = 0;
if(unlikely(number_of_cpus > old_number_of_cpus)) {
all_cpu_charts = reallocz(all_cpu_charts, sizeof(struct cpu_chart) * number_of_cpus);
memset(&all_cpu_charts[old_number_of_cpus], 0, sizeof(struct cpu_chart) * (number_of_cpus - old_number_of_cpus));
- old_number_of_cpus = number_of_cpus;
}
for (i = 0; i < number_of_cpus; i++) {
@@ -375,6 +365,8 @@ int do_kern_cp_times(int update_every, usec_t dt) {
rrdset_done(all_cpu_charts[i].st);
}
}
+
+ old_number_of_cpus = number_of_cpus;
}
return 0;
@@ -386,7 +378,7 @@ int do_kern_cp_times(int update_every, usec_t dt) {
int do_hw_intcnt(int update_every, usec_t dt) {
(void)dt;
static int mib_hw_intrcnt[2] = {0, 0};
- size_t intrcnt_size = sizeof(mib_hw_intrcnt);
+ size_t intrcnt_size = 0;
unsigned long i;
if (unlikely(GETSYSCTL_SIZE("hw.intrcnt", mib_hw_intrcnt, intrcnt_size))) {
@@ -396,11 +388,13 @@ int do_hw_intcnt(int update_every, usec_t dt) {
return 1;
} else {
unsigned long nintr = 0;
+ static unsigned long old_nintr = 0;
static unsigned long *intrcnt = NULL;
unsigned long long totalintr = 0;
nintr = intrcnt_size / sizeof(u_long);
- intrcnt = reallocz(intrcnt, nintr * sizeof(u_long));
+ if (unlikely(nintr != old_nintr))
+ intrcnt = reallocz(intrcnt, nintr * sizeof(u_long));
if (unlikely(GETSYSCTL_WSIZE("hw.intrcnt", mib_hw_intrcnt, intrcnt, nintr * sizeof(u_long)))) {
error("DISABLED: system.intr chart");
error("DISABLED: system.interrupts chart");
@@ -443,7 +437,8 @@ int do_hw_intcnt(int update_every, usec_t dt) {
static char *intrnames = NULL;
size = nintr * (MAXCOMLEN + 1);
- intrnames = reallocz(intrnames, size);
+ if (unlikely(nintr != old_nintr))
+ intrnames = reallocz(intrnames, size);
if (unlikely(GETSYSCTL_WSIZE("hw.intrnames", mib_hw_intrnames, intrnames, size))) {
error("DISABLED: system.intr chart");
error("DISABLED: system.interrupts chart");
@@ -484,6 +479,8 @@ int do_hw_intcnt(int update_every, usec_t dt) {
rrdset_done(st_interrupts);
}
}
+
+ old_nintr = nintr;
}
return 0;
@@ -931,8 +928,12 @@ int do_kern_ipc_sem(int update_every, usec_t dt) {
return 1;
} else {
static struct semid_kernel *ipc_sem_data = NULL;
+ static int old_semmni = 0;
- ipc_sem_data = reallocz(ipc_sem_data, sizeof(struct semid_kernel) * ipc_sem.semmni);
+ if (unlikely(ipc_sem.semmni != old_semmni)) {
+ ipc_sem_data = reallocz(ipc_sem_data, sizeof(struct semid_kernel) * ipc_sem.semmni);
+ old_semmni = ipc_sem.semmni;
+ }
if (unlikely(GETSYSCTL_WSIZE("kern.ipc.sema", mib_sema, ipc_sem_data, sizeof(struct semid_kernel) * ipc_sem.semmni))) {
error("DISABLED: system.ipc_semaphores chart");
error("DISABLED: system.ipc_semaphore_arrays chart");
@@ -1019,8 +1020,12 @@ int do_kern_ipc_shm(int update_every, usec_t dt) {
return 1;
} else {
static struct shmid_kernel *ipc_shm_data = NULL;
+ static u_long old_shmmni = 0;
- ipc_shm_data = reallocz(ipc_shm_data, sizeof(struct shmid_kernel) * ipc_shm.shmmni);
+ if (unlikely(ipc_shm.shmmni != old_shmmni)) {
+ ipc_shm_data = reallocz(ipc_shm_data, sizeof(struct shmid_kernel) * ipc_shm.shmmni);
+ old_shmmni = ipc_shm.shmmni;
+ }
if (unlikely(
GETSYSCTL_WSIZE("kern.ipc.shmsegs", mib_shmsegs, ipc_shm_data, sizeof(struct shmid_kernel) * ipc_shm.shmmni))) {
error("DISABLED: system.ipc_shared_mem_segs chart");
@@ -1111,8 +1116,12 @@ int do_kern_ipc_msq(int update_every, usec_t dt) {
return 1;
} else {
static struct msqid_kernel *ipc_msq_data = NULL;
+ static int old_msgmni = 0;
- ipc_msq_data = reallocz(ipc_msq_data, sizeof(struct msqid_kernel) * ipc_msq.msgmni);
+ if (unlikely(ipc_msq.msgmni != old_msgmni)) {
+ ipc_msq_data = reallocz(ipc_msq_data, sizeof(struct msqid_kernel) * ipc_msq.msgmni);
+ old_msgmni = ipc_msq.msgmni;
+ }
if (unlikely(
GETSYSCTL_WSIZE("kern.ipc.msqids", mib_msqids, ipc_msq_data, sizeof(struct msqid_kernel) * ipc_msq.msgmni))) {
error("DISABLED: system.ipc_msq_queues chart");
@@ -1259,7 +1268,7 @@ int do_net_isr(int update_every, usec_t dt) {
static int mib_workstream[3] = {0, 0, 0}, mib_work[3] = {0, 0, 0};
int common_error = 0;
- size_t netisr_workstream_size = sizeof(mib_workstream), netisr_work_size = sizeof(mib_work);
+ size_t netisr_workstream_size = 0, netisr_work_size = 0;
unsigned long num_netisr_workstreams = 0, num_netisr_works = 0;
static struct sysctl_netisr_workstream *netisr_workstream = NULL;
static struct sysctl_netisr_work *netisr_work = NULL;
@@ -1276,14 +1285,25 @@ int do_net_isr(int update_every, usec_t dt) {
} else if (unlikely(GETSYSCTL_SIZE("net.isr.work", mib_work, netisr_work_size))) {
common_error = 1;
} else {
+ static size_t old_netisr_workstream_size = 0;
+
num_netisr_workstreams = netisr_workstream_size / sizeof(struct sysctl_netisr_workstream);
- netisr_workstream = reallocz(netisr_workstream, num_netisr_workstreams * sizeof(struct sysctl_netisr_workstream));
+ if (unlikely(netisr_workstream_size != old_netisr_workstream_size)) {
+ netisr_workstream = reallocz(netisr_workstream,
+ num_netisr_workstreams * sizeof(struct sysctl_netisr_workstream));
+ old_netisr_workstream_size = netisr_workstream_size;
+ }
if (unlikely(GETSYSCTL_WSIZE("net.isr.workstream", mib_workstream, netisr_workstream,
num_netisr_workstreams * sizeof(struct sysctl_netisr_workstream)))){
common_error = 1;
} else {
+ static size_t old_netisr_work_size = 0;
+
num_netisr_works = netisr_work_size / sizeof(struct sysctl_netisr_work);
- netisr_work = reallocz(netisr_work, num_netisr_works * sizeof(struct sysctl_netisr_work));
+ if (unlikely(netisr_work_size != old_netisr_work_size)) {
+ netisr_work = reallocz(netisr_work, num_netisr_works * sizeof(struct sysctl_netisr_work));
+ old_netisr_work_size = netisr_work_size;
+ }
if (unlikely(GETSYSCTL_WSIZE("net.isr.work", mib_work, netisr_work,
num_netisr_works * sizeof(struct sysctl_netisr_work)))){
common_error = 1;
@@ -1301,8 +1321,12 @@ int do_net_isr(int update_every, usec_t dt) {
} else {
unsigned long i, n;
int j;
+ static int old_number_of_cpus = 0;
- netisr_stats = reallocz(netisr_stats, (number_of_cpus + 1) * sizeof(struct netisr_stats));
+ if (unlikely(number_of_cpus != old_number_of_cpus)) {
+ netisr_stats = reallocz(netisr_stats, (number_of_cpus + 1) * sizeof(struct netisr_stats));
+ old_number_of_cpus = number_of_cpus;
+ }
memset(netisr_stats, 0, (number_of_cpus + 1) * sizeof(struct netisr_stats));
for (i = 0; i < num_netisr_workstreams; i++) {
for (n = 0; n < num_netisr_works; n++) {
@@ -2749,1216 +2773,3 @@ int do_net_inet6_icmp6_stats(int update_every, usec_t dt) {
return 0;
}
-
-// --------------------------------------------------------------------------------------------------------------------
-// getmntinfo
-
-int do_getmntinfo(int update_every, usec_t dt) {
- (void)dt;
-
-#define DELAULT_EXLUDED_PATHS "/proc/*"
-// taken from gnulib/mountlist.c and shortened to FreeBSD related fstypes
-#define DEFAULT_EXCLUDED_FILESYSTEMS "autofs procfs subfs devfs none"
-#define CONFIG_SECTION_GETMNTINFO "plugin:freebsd:getmntinfo"
-
- static int do_space = -1, do_inodes = -1;
-
- if (unlikely(do_space == -1)) {
- do_space = config_get_boolean_ondemand(CONFIG_SECTION_GETMNTINFO, "space usage for all disks", CONFIG_BOOLEAN_AUTO);
- do_inodes = config_get_boolean_ondemand(CONFIG_SECTION_GETMNTINFO, "inodes usage for all disks", CONFIG_BOOLEAN_AUTO);
- }
-
- if (likely(do_space || do_inodes)) {
- struct statfs *mntbuf;
- int mntsize;
-
- // there is no mount info in sysctl MIBs
- if (unlikely(!(mntsize = getmntinfo(&mntbuf, MNT_NOWAIT)))) {
- error("FREEBSD: getmntinfo() failed");
- do_space = 0;
- error("DISABLED: disk_space.* charts");
- do_inodes = 0;
- error("DISABLED: disk_inodes.* charts");
- error("DISABLED: getmntinfo module");
- return 1;
- } else {
- // Data to be stored in DICTIONARY mount_points.
- // This DICTIONARY is used to lookup the settings of the mount point on each iteration.
- struct mount_point_metadata {
- int do_space;
- int do_inodes;
-
- size_t collected; // the number of times this has been collected
-
- // charts and dimensions
-
- RRDSET *st_space;
- RRDDIM *rd_space_used;
- RRDDIM *rd_space_avail;
- RRDDIM *rd_space_reserved;
-
- RRDSET *st_inodes;
- RRDDIM *rd_inodes_used;
- RRDDIM *rd_inodes_avail;
- };
- static DICTIONARY *mount_points = NULL;
- static SIMPLE_PATTERN *excluded_mountpoints = NULL;
- static SIMPLE_PATTERN *excluded_filesystems = NULL;
- int i;
-
- if(unlikely(!mount_points)) {
-
- excluded_mountpoints = simple_pattern_create(
- config_get(CONFIG_SECTION_GETMNTINFO, "exclude space metrics on paths",
- DELAULT_EXLUDED_PATHS),
- SIMPLE_PATTERN_EXACT
- );
-
- excluded_filesystems = simple_pattern_create(
- config_get(CONFIG_SECTION_GETMNTINFO, "exclude space metrics on filesystems",
- DEFAULT_EXCLUDED_FILESYSTEMS),
- SIMPLE_PATTERN_EXACT
- );
-
- mount_points = dictionary_create(DICTIONARY_FLAG_SINGLE_THREADED);
- }
-
- for (i = 0; i < mntsize; i++) {
-
- char title[4096 + 1];
- int def_space, def_inodes, iter_space, iter_inodes;
-
- struct mount_point_metadata *m = dictionary_get(mount_points, mntbuf[i].f_mntonname);
- if(unlikely(!m)) {
- char var_name[4096 + 1];
- snprintfz(var_name, 4096, "%s:%s", CONFIG_SECTION_GETMNTINFO, mntbuf[i].f_mntonname);
-
- def_space = do_space;
- def_inodes = do_space;
-
- if(unlikely(simple_pattern_matches(excluded_mountpoints, mntbuf[i].f_mntonname))) {
- def_space = CONFIG_BOOLEAN_NO;
- def_inodes = CONFIG_BOOLEAN_NO;
- }
-
- if(unlikely(simple_pattern_matches(excluded_filesystems, mntbuf[i].f_fstypename))) {
- def_space = CONFIG_BOOLEAN_NO;
- def_inodes = CONFIG_BOOLEAN_NO;
- }
-
- iter_space = config_get_boolean_ondemand(var_name, "space usage", def_space);
- iter_inodes = config_get_boolean_ondemand(var_name, "inodes usage", def_inodes);
-
- struct mount_point_metadata mp = {
- .do_space = iter_space,
- .do_inodes = iter_inodes,
-
- .collected = 0,
-
- .st_space = NULL,
- .rd_space_avail = NULL,
- .rd_space_used = NULL,
- .rd_space_reserved = NULL,
-
- .st_inodes = NULL,
- .rd_inodes_avail = NULL,
- .rd_inodes_used = NULL,
- };
-
- m = dictionary_set(mount_points, mntbuf[i].f_mntonname, &mp, sizeof(struct mount_point_metadata));
- }
-
- if(unlikely(m->do_space == CONFIG_BOOLEAN_NO && m->do_inodes == CONFIG_BOOLEAN_NO))
- continue;
-
- if(unlikely(mntbuf[i].f_flags & MNT_RDONLY && !m->collected))
- continue;
-
- // --------------------------------------------------------------------------
-
- int rendered = 0;
-
- if (m->do_space == CONFIG_BOOLEAN_YES || (m->do_space == CONFIG_BOOLEAN_AUTO && (mntbuf[i].f_blocks > 2))) {
- if (unlikely(!m->st_space)) {
- snprintfz(title, 4096, "Disk Space Usage for %s [%s]",
- mntbuf[i].f_mntonname, mntbuf[i].f_mntfromname);
- m->st_space = rrdset_create_localhost("disk_space",
- mntbuf[i].f_mntonname,
- NULL,
- mntbuf[i].f_mntonname,
- "disk.space",
- title,
- "GB",
- 2023,
- update_every,
- RRDSET_TYPE_STACKED
- );
-
- m->rd_space_avail = rrddim_add(m->st_space, "avail", NULL,
- mntbuf[i].f_bsize, GIGA_FACTOR, RRD_ALGORITHM_ABSOLUTE);
- m->rd_space_used = rrddim_add(m->st_space, "used", NULL,
- mntbuf[i].f_bsize, GIGA_FACTOR, RRD_ALGORITHM_ABSOLUTE);
- m->rd_space_reserved = rrddim_add(m->st_space, "reserved_for_root", "reserved for root",
- mntbuf[i].f_bsize, GIGA_FACTOR, RRD_ALGORITHM_ABSOLUTE);
- } else
- rrdset_next(m->st_space);
-
- rrddim_set_by_pointer(m->st_space, m->rd_space_avail, (collected_number) mntbuf[i].f_bavail);
- rrddim_set_by_pointer(m->st_space, m->rd_space_used, (collected_number) (mntbuf[i].f_blocks -
- mntbuf[i].f_bfree));
- rrddim_set_by_pointer(m->st_space, m->rd_space_reserved, (collected_number) (mntbuf[i].f_bfree -
- mntbuf[i].f_bavail));
- rrdset_done(m->st_space);
-
- rendered++;
- }
-
- // --------------------------------------------------------------------------
-
- if (m->do_inodes == CONFIG_BOOLEAN_YES || (m->do_inodes == CONFIG_BOOLEAN_AUTO && (mntbuf[i].f_files > 1))) {
- if (unlikely(!m->st_inodes)) {
- snprintfz(title, 4096, "Disk Files (inodes) Usage for %s [%s]",
- mntbuf[i].f_mntonname, mntbuf[i].f_mntfromname);
- m->st_inodes = rrdset_create_localhost("disk_inodes",
- mntbuf[i].f_mntonname,
- NULL,
- mntbuf[i].f_mntonname,
- "disk.inodes",
- title,
- "Inodes",
- 2024,
- update_every,
- RRDSET_TYPE_STACKED
- );
-
- m->rd_inodes_avail = rrddim_add(m->st_inodes, "avail", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- m->rd_inodes_used = rrddim_add(m->st_inodes, "used", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- } else
- rrdset_next(m->st_inodes);
-
- rrddim_set_by_pointer(m->st_inodes, m->rd_inodes_avail, (collected_number) mntbuf[i].f_ffree);
- rrddim_set_by_pointer(m->st_inodes, m->rd_inodes_used, (collected_number) (mntbuf[i].f_files -
- mntbuf[i].f_ffree));
- rrdset_done(m->st_inodes);
-
- rendered++;
- }
-
- if(likely(rendered))
- m->collected++;
- }
- }
- } else {
- error("DISABLED: getmntinfo module");
- return 1;
- }
-
- return 0;
-}
-
-// --------------------------------------------------------------------------------------------------------------------
-// getifaddrs
-
-int do_getifaddrs(int update_every, usec_t dt) {
- (void)dt;
-
-#define DELAULT_EXLUDED_INTERFACES "lo*"
-#define CONFIG_SECTION_GETIFADDRS "plugin:freebsd:getifaddrs"
-
- static int do_bandwidth_ipv4 = -1, do_bandwidth_ipv6 = -1, do_bandwidth = -1, do_packets = -1,
- do_errors = -1, do_drops = -1, do_events = -1;
-
- if (unlikely(do_bandwidth_ipv4 == -1)) {
- do_bandwidth_ipv4 = config_get_boolean_ondemand(CONFIG_SECTION_GETIFADDRS, "total bandwidth for ipv4 interfaces",
- CONFIG_BOOLEAN_AUTO);
- do_bandwidth_ipv6 = config_get_boolean_ondemand(CONFIG_SECTION_GETIFADDRS, "total bandwidth for ipv6 interfaces",
- CONFIG_BOOLEAN_AUTO);
- do_bandwidth = config_get_boolean_ondemand(CONFIG_SECTION_GETIFADDRS, "bandwidth for all interfaces",
- CONFIG_BOOLEAN_AUTO);
- do_packets = config_get_boolean_ondemand(CONFIG_SECTION_GETIFADDRS, "packets for all interfaces",
- CONFIG_BOOLEAN_AUTO);
- do_errors = config_get_boolean_ondemand(CONFIG_SECTION_GETIFADDRS, "errors for all interfaces",
- CONFIG_BOOLEAN_AUTO);
- do_drops = config_get_boolean_ondemand(CONFIG_SECTION_GETIFADDRS, "drops for all interfaces",
- CONFIG_BOOLEAN_AUTO);
- do_events = config_get_boolean_ondemand(CONFIG_SECTION_GETIFADDRS, "collisions for all interfaces",
- CONFIG_BOOLEAN_AUTO);
- }
-
- if (likely(do_bandwidth_ipv4 || do_bandwidth_ipv6 || do_bandwidth || do_packets || do_errors ||
- do_drops || do_events)) {
- struct ifaddrs *ifap;
-
- if (unlikely(getifaddrs(&ifap))) {
- error("FREEBSD: getifaddrs() failed");
- do_bandwidth_ipv4 = 0;
- error("DISABLED: system.ipv4 chart");
- do_bandwidth_ipv6 = 0;
- error("DISABLED: system.ipv6 chart");
- do_bandwidth = 0;
- error("DISABLED: net.* charts");
- do_packets = 0;
- error("DISABLED: net_packets.* charts");
- do_errors = 0;
- error("DISABLED: net_errors.* charts");
- do_drops = 0;
- error("DISABLED: net_drops.* charts");
- do_events = 0;
- error("DISABLED: net_events.* charts");
- error("DISABLED: getifaddrs module");
- return 1;
- } else {
- #define IFA_DATA(s) (((struct if_data *)ifa->ifa_data)->ifi_ ## s)
- struct ifaddrs *ifa;
- struct iftot {
- u_long ift_ibytes;
- u_long ift_obytes;
- } iftot = {0, 0};
-
- // --------------------------------------------------------------------
-
- if (likely(do_bandwidth_ipv4)) {
- iftot.ift_ibytes = iftot.ift_obytes = 0;
- for (ifa = ifap; ifa; ifa = ifa->ifa_next) {
- if (ifa->ifa_addr->sa_family != AF_INET)
- continue;
- iftot.ift_ibytes += IFA_DATA(ibytes);
- iftot.ift_obytes += IFA_DATA(obytes);
- }
-
- static RRDSET *st = NULL;
- static RRDDIM *rd_in = NULL, *rd_out = NULL;
-
- if (unlikely(!st)) {
- st = rrdset_create_localhost("system",
- "ipv4",
- NULL,
- "network",
- NULL,
- "IPv4 Bandwidth",
- "kilobits/s",
- 500,
- update_every,
- RRDSET_TYPE_AREA
- );
-
- rd_in = rrddim_add(st, "InOctets", "received", 8, KILO_FACTOR, RRD_ALGORITHM_INCREMENTAL);
- rd_out = rrddim_add(st, "OutOctets", "sent", -8, KILO_FACTOR, RRD_ALGORITHM_INCREMENTAL);
- } else
- rrdset_next(st);
-
- rrddim_set_by_pointer(st, rd_in, iftot.ift_ibytes);
- rrddim_set_by_pointer(st, rd_out, iftot.ift_obytes);
- rrdset_done(st);
- }
-
- // --------------------------------------------------------------------
-
- if (likely(do_bandwidth_ipv6)) {
- iftot.ift_ibytes = iftot.ift_obytes = 0;
- for (ifa = ifap; ifa; ifa = ifa->ifa_next) {
- if (ifa->ifa_addr->sa_family != AF_INET6)
- continue;
- iftot.ift_ibytes += IFA_DATA(ibytes);
- iftot.ift_obytes += IFA_DATA(obytes);
- }
-
- static RRDSET *st = NULL;
- static RRDDIM *rd_in = NULL, *rd_out = NULL;
-
- if (unlikely(!st)) {
- st = rrdset_create_localhost("system",
- "ipv6",
- NULL,
- "network",
- NULL,
- "IPv6 Bandwidth",
- "kilobits/s",
- 500,
- update_every,
- RRDSET_TYPE_AREA
- );
-
- rd_in = rrddim_add(st, "received", NULL, 8, KILO_FACTOR, RRD_ALGORITHM_INCREMENTAL);
- rd_out = rrddim_add(st, "sent", NULL, -8, KILO_FACTOR, RRD_ALGORITHM_INCREMENTAL);
- } else
- rrdset_next(st);
-
- rrddim_set_by_pointer(st, rd_in, iftot.ift_ibytes);
- rrddim_set_by_pointer(st, rd_out, iftot.ift_obytes);
- rrdset_done(st);
- }
-
- // --------------------------------------------------------------------
-
- // Data to be stored in DICTIONARY interfaces.
- // This DICTIONARY is used to lookup the settings of the interfaces on each iteration.
- struct interfaces_metadata {
- int do_bandwidth;
- int do_packets;
- int do_errors;
- int do_drops;
- int do_events;
-
- // charts and dimensions
-
- RRDSET *st_bandwidth;
- RRDDIM *rd_bandwidth_in;
- RRDDIM *rd_bandwidth_out;
-
- RRDSET *st_packets;
- RRDDIM *rd_packets_in;
- RRDDIM *rd_packets_out;
- RRDDIM *rd_packets_m_in;
- RRDDIM *rd_packets_m_out;
-
- RRDSET *st_errors;
- RRDDIM *rd_errors_in;
- RRDDIM *rd_errors_out;
-
- RRDSET *st_drops;
- RRDDIM *rd_drops_in;
- RRDDIM *rd_drops_out;
-
- RRDSET *st_events;
- RRDDIM *rd_events_coll;
- };
- static DICTIONARY *interfaces = NULL;
- static SIMPLE_PATTERN *excluded_interfaces = NULL;
-
- if(unlikely(!interfaces)) {
-
- excluded_interfaces = simple_pattern_create(
- config_get(CONFIG_SECTION_GETIFADDRS, "disable by default interfaces matching",
- DELAULT_EXLUDED_INTERFACES)
- , SIMPLE_PATTERN_EXACT
- );
-
- interfaces = dictionary_create(DICTIONARY_FLAG_SINGLE_THREADED);
- }
-
- for (ifa = ifap; ifa; ifa = ifa->ifa_next) {
- if (ifa->ifa_addr->sa_family != AF_LINK)
- continue;
-
- int def_bandwidth, def_packets, def_errors, def_drops, def_events,
- iter_bandwidth, iter_packets, iter_errors, iter_drops, iter_events;
-
- struct interfaces_metadata *ifm = dictionary_get(interfaces, ifa->ifa_name);
- if(unlikely(!ifm)) {
- char var_name[4096 + 1];
- snprintfz(var_name, 4096, "%s:%s", CONFIG_SECTION_GETIFADDRS, ifa->ifa_name);
-
- def_bandwidth = do_bandwidth;
- def_packets = do_packets;
- def_errors = do_errors;
- def_drops = do_drops;
- def_events = do_events;
-
- if(unlikely(simple_pattern_matches(excluded_interfaces, ifa->ifa_name))) {
- def_bandwidth = CONFIG_BOOLEAN_NO;
- def_packets = CONFIG_BOOLEAN_NO;
- def_errors = CONFIG_BOOLEAN_NO;
- def_drops = CONFIG_BOOLEAN_NO;
- def_events = CONFIG_BOOLEAN_NO;
- }
-
- iter_bandwidth = config_get_boolean_ondemand(var_name, "bandwidth", def_bandwidth);
- iter_packets = config_get_boolean_ondemand(var_name, "packets", def_packets);
- iter_errors = config_get_boolean_ondemand(var_name, "errors", def_errors);
- iter_drops = config_get_boolean_ondemand(var_name, "drops", def_drops);
- iter_events = config_get_boolean_ondemand(var_name, "events", def_events);
-
- struct interfaces_metadata ifmp = {
- .do_bandwidth = iter_bandwidth,
- .do_packets = iter_packets,
- .do_errors = iter_errors,
- .do_drops = iter_drops,
- .do_events = iter_events,
-
- .st_bandwidth = NULL,
- .rd_bandwidth_in = NULL,
- .rd_bandwidth_out = NULL,
-
- .st_packets = NULL,
- .rd_packets_in = NULL,
- .rd_packets_out = NULL,
- .rd_packets_m_in = NULL,
- .rd_packets_m_out = NULL,
-
- .st_errors = NULL,
- .rd_errors_in = NULL,
- .rd_errors_out = NULL,
-
- .st_drops = NULL,
- .rd_drops_in = NULL,
- .rd_drops_out = NULL,
-
- .st_events = NULL,
- .rd_events_coll = NULL,
- };
-
- ifm = dictionary_set(interfaces, ifa->ifa_name, &ifmp, sizeof(struct interfaces_metadata));
- }
-
- // --------------------------------------------------------------------
-
- if (ifm->do_bandwidth == CONFIG_BOOLEAN_YES || (ifm->do_bandwidth == CONFIG_BOOLEAN_AUTO &&
- (IFA_DATA(ibytes) || IFA_DATA(obytes)))) {
- if (unlikely(!ifm->st_bandwidth)) {
- ifm->st_bandwidth = rrdset_create_localhost("net",
- ifa->ifa_name,
- NULL,
- ifa->ifa_name,
- "net.net",
- "Bandwidth",
- "kilobits/s",
- 7000,
- update_every,
- RRDSET_TYPE_AREA
- );
-
- ifm->rd_bandwidth_in = rrddim_add(ifm->st_bandwidth, "received", NULL, 8, KILO_FACTOR,
- RRD_ALGORITHM_INCREMENTAL);
- ifm->rd_bandwidth_out = rrddim_add(ifm->st_bandwidth, "sent", NULL, -8, KILO_FACTOR,
- RRD_ALGORITHM_INCREMENTAL);
- } else
- rrdset_next(ifm->st_bandwidth);
-
- rrddim_set_by_pointer(ifm->st_bandwidth, ifm->rd_bandwidth_in, IFA_DATA(ibytes));
- rrddim_set_by_pointer(ifm->st_bandwidth, ifm->rd_bandwidth_out, IFA_DATA(obytes));
- rrdset_done(ifm->st_bandwidth);
- }
-
- // --------------------------------------------------------------------
-
- if (ifm->do_packets == CONFIG_BOOLEAN_YES || (ifm->do_packets == CONFIG_BOOLEAN_AUTO &&
- (IFA_DATA(ipackets) || IFA_DATA(opackets) || IFA_DATA(imcasts) || IFA_DATA(omcasts)))) {
- if (unlikely(!ifm->st_packets)) {
- ifm->st_packets = rrdset_create_localhost("net_packets",
- ifa->ifa_name,
- NULL,
- ifa->ifa_name,
- "net.packets",
- "Packets",
- "packets/s",
- 7001,
- update_every,
- RRDSET_TYPE_LINE
- );
-
- rrdset_flag_set(ifm->st_packets, RRDSET_FLAG_DETAIL);
-
- ifm->rd_packets_in = rrddim_add(ifm->st_packets, "received", NULL, 1, 1,
- RRD_ALGORITHM_INCREMENTAL);
- ifm->rd_packets_out = rrddim_add(ifm->st_packets, "sent", NULL, -1, 1,
- RRD_ALGORITHM_INCREMENTAL);
- ifm->rd_packets_m_in = rrddim_add(ifm->st_packets, "multicast_received", NULL, 1, 1,
- RRD_ALGORITHM_INCREMENTAL);
- ifm->rd_packets_m_out = rrddim_add(ifm->st_packets, "multicast_sent", NULL, -1, 1,
- RRD_ALGORITHM_INCREMENTAL);
- } else
- rrdset_next(ifm->st_packets);
-
- rrddim_set_by_pointer(ifm->st_packets, ifm->rd_packets_in, IFA_DATA(ipackets));
- rrddim_set_by_pointer(ifm->st_packets, ifm->rd_packets_out, IFA_DATA(opackets));
- rrddim_set_by_pointer(ifm->st_packets, ifm->rd_packets_m_in, IFA_DATA(imcasts));
- rrddim_set_by_pointer(ifm->st_packets, ifm->rd_packets_m_out, IFA_DATA(omcasts));
- rrdset_done(ifm->st_packets);
- }
-
- // --------------------------------------------------------------------
-
- if (ifm->do_errors == CONFIG_BOOLEAN_YES || (ifm->do_errors == CONFIG_BOOLEAN_AUTO &&
- (IFA_DATA(ierrors) || IFA_DATA(oerrors)))) {
- if (unlikely(!ifm->st_errors)) {
- ifm->st_errors = rrdset_create_localhost("net_errors",
- ifa->ifa_name,
- NULL,
- ifa->ifa_name,
- "net.errors",
- "Interface Errors",
- "errors/s",
- 7002,
- update_every,
- RRDSET_TYPE_LINE
- );
-
- rrdset_flag_set(ifm->st_errors, RRDSET_FLAG_DETAIL);
-
- ifm->rd_errors_in = rrddim_add(ifm->st_errors, "inbound", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- ifm->rd_errors_out = rrddim_add(ifm->st_errors, "outbound", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- } else
- rrdset_next(ifm->st_errors);
-
- rrddim_set_by_pointer(ifm->st_errors, ifm->rd_errors_in, IFA_DATA(ierrors));
- rrddim_set_by_pointer(ifm->st_errors, ifm->rd_errors_out, IFA_DATA(oerrors));
- rrdset_done(ifm->st_errors);
- }
- // --------------------------------------------------------------------
-
- if (ifm->do_drops == CONFIG_BOOLEAN_YES || (ifm->do_drops == CONFIG_BOOLEAN_AUTO &&
- (IFA_DATA(iqdrops) || IFA_DATA(oqdrops)))) {
- if (unlikely(!ifm->st_drops)) {
- ifm->st_drops = rrdset_create_localhost("net_drops",
- ifa->ifa_name,
- NULL,
- ifa->ifa_name,
- "net.drops",
- "Interface Drops",
- "drops/s",
- 7003,
- update_every,
- RRDSET_TYPE_LINE
- );
-
- rrdset_flag_set(ifm->st_drops, RRDSET_FLAG_DETAIL);
-
- ifm->rd_drops_in = rrddim_add(ifm->st_drops, "inbound", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
-#if __FreeBSD__ >= 11
- ifm->rd_drops_out = rrddim_add(ifm->st_drops, "outbound", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
-#endif
- } else
- rrdset_next(ifm->st_drops);
-
- rrddim_set_by_pointer(ifm->st_drops, ifm->rd_drops_in, IFA_DATA(iqdrops));
-#if __FreeBSD__ >= 11
- rrddim_set_by_pointer(ifm->st_drops, ifm->rd_drops_out, IFA_DATA(oqdrops));
-#endif
- rrdset_done(ifm->st_drops);
- }
-
- // --------------------------------------------------------------------
-
- if (ifm->do_events == CONFIG_BOOLEAN_YES || (ifm->do_events == CONFIG_BOOLEAN_AUTO &&
- IFA_DATA(collisions))) {
- if (unlikely(!ifm->st_events)) {
- ifm->st_events = rrdset_create_localhost("net_events",
- ifa->ifa_name,
- NULL,
- ifa->ifa_name,
- "net.events",
- "Network Interface Events",
- "events/s",
- 7006,
- update_every,
- RRDSET_TYPE_LINE
- );
-
- rrdset_flag_set(ifm->st_events, RRDSET_FLAG_DETAIL);
-
- ifm->rd_events_coll = rrddim_add(ifm->st_events, "collisions", NULL, -1, 1,
- RRD_ALGORITHM_INCREMENTAL);
- } else
- rrdset_next(ifm->st_events);
-
- rrddim_set_by_pointer(ifm->st_events, ifm->rd_events_coll, IFA_DATA(collisions));
- rrdset_done(ifm->st_events);
- }
- }
-
- freeifaddrs(ifap);
- }
- } else {
- error("DISABLED: getifaddrs module");
- return 1;
- }
-
- return 0;
-}
-
-// --------------------------------------------------------------------------------------------------------------------
-// kern.devstat
-
-int do_kern_devstat(int update_every, usec_t dt) {
-
-#define DEFAULT_EXCLUDED_DISKS ""
-#define CONFIG_SECTION_KERN_DEVSTAT "plugin:freebsd:kern.devstat"
-#define BINTIME_SCALE 5.42101086242752217003726400434970855712890625e-17 // this is 1000/2^64
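// BINTIME_SCALE converts the 64-bit fractional part of a FreeBSD
// struct bintime into milliseconds: frac / 2^64 is the fraction of a second,
// so frac * (1000 / 2^64) is that same fraction expressed in ms. A small
// sketch of the conversion applied to the devstat durations below
// (struct bintime comes from <sys/time.h>):
static inline collected_number bintime_to_ms(struct bintime bt) {
    return bt.sec * 1000 + bt.frac * BINTIME_SCALE; // truncates to ms, as below
}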
-
- static int enable_pass_devices = -1, do_system_io = -1, do_io = -1, do_ops = -1, do_qops = -1, do_util = -1,
- do_iotime = -1, do_await = -1, do_avagsz = -1, do_svctm = -1;
-
- if (unlikely(enable_pass_devices == -1)) {
- enable_pass_devices = config_get_boolean_ondemand(CONFIG_SECTION_KERN_DEVSTAT,
- "performance metrics for pass devices", CONFIG_BOOLEAN_AUTO);
-
- do_system_io = config_get_boolean_ondemand(CONFIG_SECTION_KERN_DEVSTAT, "total bandwidth for all disks",
- CONFIG_BOOLEAN_YES);
-
- do_io = config_get_boolean_ondemand(CONFIG_SECTION_KERN_DEVSTAT, "bandwidth for all disks",
- CONFIG_BOOLEAN_AUTO);
- do_ops = config_get_boolean_ondemand(CONFIG_SECTION_KERN_DEVSTAT, "operations for all disks",
- CONFIG_BOOLEAN_AUTO);
- do_qops = config_get_boolean_ondemand(CONFIG_SECTION_KERN_DEVSTAT, "queued operations for all disks",
- CONFIG_BOOLEAN_AUTO);
- do_util = config_get_boolean_ondemand(CONFIG_SECTION_KERN_DEVSTAT, "utilization percentage for all disks",
- CONFIG_BOOLEAN_AUTO);
- do_iotime = config_get_boolean_ondemand(CONFIG_SECTION_KERN_DEVSTAT, "i/o time for all disks",
- CONFIG_BOOLEAN_AUTO);
- do_await = config_get_boolean_ondemand(CONFIG_SECTION_KERN_DEVSTAT, "average completed i/o time for all disks",
- CONFIG_BOOLEAN_AUTO);
- do_avagsz = config_get_boolean_ondemand(CONFIG_SECTION_KERN_DEVSTAT, "average completed i/o bandwidth for all disks",
- CONFIG_BOOLEAN_AUTO);
- do_svctm = config_get_boolean_ondemand(CONFIG_SECTION_KERN_DEVSTAT, "average service time for all disks",
- CONFIG_BOOLEAN_AUTO);
- }
-
- if (likely(do_system_io || do_io || do_ops || do_qops || do_util || do_iotime || do_await || do_avagsz || do_svctm)) {
- static int mib_numdevs[3] = {0, 0, 0};
- int numdevs;
- int common_error = 0;
-
- if (unlikely(GETSYSCTL_SIMPLE("kern.devstat.numdevs", mib_numdevs, numdevs))) {
- common_error = 1;
- } else {
- static int mib_devstat[3] = {0, 0, 0};
- static void *devstat_data = NULL;
-
-            devstat_data = reallocz(devstat_data, sizeof(long) + sizeof(struct devstat) * numdevs); // a generation number precedes the devstat structures
- if (unlikely(GETSYSCTL_WSIZE("kern.devstat.all", mib_devstat, devstat_data,
- sizeof(long) + sizeof(struct devstat) * numdevs))) {
- common_error = 1;
- } else {
- struct devstat *dstat;
- int i;
- collected_number total_disk_kbytes_read = 0;
- collected_number total_disk_kbytes_write = 0;
-
-                // Data to be stored in the DICTIONARY "disks".
-                // This DICTIONARY is used to look up each disk's settings on every iteration.
- struct disks_metadata {
- int do_io;
- int do_ops;
- int do_qops;
- int do_util;
- int do_iotime;
- int do_await;
- int do_avagsz;
- int do_svctm;
-
-
- // data for differential charts
-
- struct prev_dstat {
- collected_number bytes_read;
- collected_number bytes_write;
- collected_number operations_read;
- collected_number operations_write;
- collected_number duration_read_ms;
- collected_number duration_write_ms;
- collected_number busy_time_ms;
- } prev_dstat;
-
- // charts and dimensions
-
- RRDSET *st_io;
- RRDDIM *rd_io_in;
- RRDDIM *rd_io_out;
-
- RRDSET *st_ops;
- RRDDIM *rd_ops_in;
- RRDDIM *rd_ops_out;
-
- RRDSET *st_qops;
- RRDDIM *rd_qops;
-
- RRDSET *st_util;
- RRDDIM *rd_util;
-
- RRDSET *st_iotime;
- RRDDIM *rd_iotime_in;
- RRDDIM *rd_iotime_out;
-
- RRDSET *st_await;
- RRDDIM *rd_await_in;
- RRDDIM *rd_await_out;
-
- RRDSET *st_avagsz;
- RRDDIM *rd_avagsz_in;
- RRDDIM *rd_avagsz_out;
-
- RRDSET *st_svctm;
- RRDDIM *rd_svctm;
-
- };
- static DICTIONARY *disks = NULL;
- static SIMPLE_PATTERN *excluded_disks = NULL;
-
- if(unlikely(!disks)) {
-
- excluded_disks = simple_pattern_create(
- config_get(CONFIG_SECTION_KERN_DEVSTAT, "disable by default disks matching",
-                                       DEFAULT_EXCLUDED_DISKS)
- , SIMPLE_PATTERN_EXACT
- );
-
- disks = dictionary_create(DICTIONARY_FLAG_SINGLE_THREADED);
- }
-
- dstat = devstat_data + sizeof(long); // skip generation number
-
- for (i = 0; i < numdevs; i++) {
- if (likely(do_system_io)) {
- if (((dstat[i].device_type & DEVSTAT_TYPE_MASK) == DEVSTAT_TYPE_DIRECT) || ((dstat[i].device_type & DEVSTAT_TYPE_MASK) == DEVSTAT_TYPE_STORARRAY)) {
- total_disk_kbytes_read += dstat[i].bytes[DEVSTAT_READ] / KILO_FACTOR;
- total_disk_kbytes_write += dstat[i].bytes[DEVSTAT_WRITE] / KILO_FACTOR;
- }
- }
-
- if (unlikely(!enable_pass_devices))
- if ((dstat[i].device_type & DEVSTAT_TYPE_PASS) == DEVSTAT_TYPE_PASS)
- continue;
-
- if (((dstat[i].device_type & DEVSTAT_TYPE_MASK) == DEVSTAT_TYPE_DIRECT) || ((dstat[i].device_type & DEVSTAT_TYPE_MASK) == DEVSTAT_TYPE_STORARRAY)) {
- char disk[DEVSTAT_NAME_LEN + MAX_INT_DIGITS + 1];
- int def_io, def_ops, def_qops, def_util, def_iotime, def_await, def_avagsz, def_svctm,
- iter_io, iter_ops, iter_qops, iter_util, iter_iotime, iter_await, iter_avagsz, iter_svctm;
- struct cur_dstat {
- collected_number duration_read_ms;
- collected_number duration_write_ms;
- collected_number busy_time_ms;
- } cur_dstat;
-
- sprintf(disk, "%s%d", dstat[i].device_name, dstat[i].unit_number);
-
- struct disks_metadata *dm = dictionary_get(disks, disk);
- if(unlikely(!dm)) {
- char var_name[4096 + 1];
- snprintfz(var_name, 4096, "%s:%s", CONFIG_SECTION_KERN_DEVSTAT, disk);
-
- def_io = do_io;
- def_ops = do_ops;
- def_qops = do_qops;
- def_util = do_util;
- def_iotime = do_iotime;
- def_await = do_await;
- def_avagsz = do_avagsz;
- def_svctm = do_svctm;
-
- if(unlikely(simple_pattern_matches(excluded_disks, disk))) {
- def_io = CONFIG_BOOLEAN_NO;
- def_ops = CONFIG_BOOLEAN_NO;
- def_qops = CONFIG_BOOLEAN_NO;
- def_util = CONFIG_BOOLEAN_NO;
- def_iotime = CONFIG_BOOLEAN_NO;
- def_await = CONFIG_BOOLEAN_NO;
- def_avagsz = CONFIG_BOOLEAN_NO;
- def_svctm = CONFIG_BOOLEAN_NO;
- }
-
- iter_io = config_get_boolean_ondemand(var_name, "bandwidth", def_io);
- iter_ops = config_get_boolean_ondemand(var_name, "operations", def_ops);
- iter_qops = config_get_boolean_ondemand(var_name, "queued operations", def_qops);
- iter_util = config_get_boolean_ondemand(var_name, "utilization percentage", def_util);
- iter_iotime = config_get_boolean_ondemand(var_name, "i/o time", def_iotime);
- iter_await = config_get_boolean_ondemand(var_name, "average completed i/o time", def_await);
- iter_avagsz = config_get_boolean_ondemand(var_name, "average completed i/o bandwidth",
- def_avagsz);
- iter_svctm = config_get_boolean_ondemand(var_name, "average service time", def_svctm);
-
- struct disks_metadata dmp = {
- .do_io = iter_io,
- .do_ops = iter_ops,
- .do_qops = iter_qops,
- .do_util = iter_util,
- .do_iotime = iter_iotime,
- .do_await = iter_await,
- .do_avagsz = iter_avagsz,
- .do_svctm = iter_svctm,
-
- .st_io = NULL,
- .rd_io_in = NULL,
- .rd_io_out = NULL,
-
- .st_ops = NULL,
- .rd_ops_in = NULL,
- .rd_ops_out = NULL,
-
- .st_qops = NULL,
- .rd_qops = NULL,
-
- .st_util = NULL,
- .rd_util = NULL,
-
- .st_iotime = NULL,
- .rd_iotime_in = NULL,
- .rd_iotime_out = NULL,
-
- .st_await = NULL,
- .rd_await_in = NULL,
- .rd_await_out = NULL,
-
- .st_avagsz = NULL,
- .rd_avagsz_in = NULL,
- .rd_avagsz_out = NULL,
-
- .st_svctm = NULL,
- .rd_svctm = NULL,
- };
-
- // initialise data for differential charts
-
- dmp.prev_dstat.bytes_read = dstat[i].bytes[DEVSTAT_READ];
- dmp.prev_dstat.bytes_write = dstat[i].bytes[DEVSTAT_WRITE];
- dmp.prev_dstat.operations_read = dstat[i].operations[DEVSTAT_READ];
- dmp.prev_dstat.operations_write = dstat[i].operations[DEVSTAT_WRITE];
- dmp.prev_dstat.duration_read_ms = dstat[i].duration[DEVSTAT_READ].sec * 1000
- + dstat[i].duration[DEVSTAT_READ].frac * BINTIME_SCALE;
- dmp.prev_dstat.duration_write_ms = dstat[i].duration[DEVSTAT_WRITE].sec * 1000
-                                                         + dstat[i].duration[DEVSTAT_WRITE].frac * BINTIME_SCALE;
- dmp.prev_dstat.busy_time_ms = dstat[i].busy_time.sec * 1000
- + dstat[i].busy_time.frac * BINTIME_SCALE;
-
- dm = dictionary_set(disks, disk, &dmp, sizeof(struct disks_metadata));
- }
-
- cur_dstat.duration_read_ms = dstat[i].duration[DEVSTAT_READ].sec * 1000
- + dstat[i].duration[DEVSTAT_READ].frac * BINTIME_SCALE;
- cur_dstat.duration_write_ms = dstat[i].duration[DEVSTAT_WRITE].sec * 1000
-                                                + dstat[i].duration[DEVSTAT_WRITE].frac * BINTIME_SCALE;
- cur_dstat.busy_time_ms = dstat[i].busy_time.sec * 1000 + dstat[i].busy_time.frac * BINTIME_SCALE;
-
- // --------------------------------------------------------------------
-
- if(dm->do_io == CONFIG_BOOLEAN_YES || (dm->do_io == CONFIG_BOOLEAN_AUTO &&
- (dstat[i].bytes[DEVSTAT_READ] || dstat[i].bytes[DEVSTAT_WRITE]))) {
- if (unlikely(!dm->st_io)) {
- dm->st_io = rrdset_create_localhost("disk",
- disk,
- NULL,
- disk,
- "disk.io",
- "Disk I/O Bandwidth",
- "kilobytes/s",
- 2000,
- update_every,
- RRDSET_TYPE_AREA
- );
-
- dm->rd_io_in = rrddim_add(dm->st_io, "reads", NULL, 1, KILO_FACTOR,
- RRD_ALGORITHM_INCREMENTAL);
- dm->rd_io_out = rrddim_add(dm->st_io, "writes", NULL, -1, KILO_FACTOR,
- RRD_ALGORITHM_INCREMENTAL);
- } else
- rrdset_next(dm->st_io);
-
- rrddim_set_by_pointer(dm->st_io, dm->rd_io_in, dstat[i].bytes[DEVSTAT_READ]);
- rrddim_set_by_pointer(dm->st_io, dm->rd_io_out, dstat[i].bytes[DEVSTAT_WRITE]);
- rrdset_done(dm->st_io);
- }
-
- // --------------------------------------------------------------------
-
- if(dm->do_ops == CONFIG_BOOLEAN_YES || (dm->do_ops == CONFIG_BOOLEAN_AUTO &&
- (dstat[i].operations[DEVSTAT_READ] || dstat[i].operations[DEVSTAT_WRITE]))) {
- if (unlikely(!dm->st_ops)) {
- dm->st_ops = rrdset_create_localhost("disk_ops",
- disk,
- NULL,
- disk,
- "disk.ops",
- "Disk Completed I/O Operations",
- "operations/s",
- 2001,
- update_every,
- RRDSET_TYPE_LINE
- );
-
- rrdset_flag_set(dm->st_ops, RRDSET_FLAG_DETAIL);
-
- dm->rd_ops_in = rrddim_add(dm->st_ops, "reads", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- dm->rd_ops_out = rrddim_add(dm->st_ops, "writes", NULL, -1, 1,
- RRD_ALGORITHM_INCREMENTAL);
- } else
- rrdset_next(dm->st_ops);
-
- rrddim_set_by_pointer(dm->st_ops, dm->rd_ops_in, dstat[i].operations[DEVSTAT_READ]);
- rrddim_set_by_pointer(dm->st_ops, dm->rd_ops_out, dstat[i].operations[DEVSTAT_WRITE]);
- rrdset_done(dm->st_ops);
- }
-
- // --------------------------------------------------------------------
-
- if(dm->do_qops == CONFIG_BOOLEAN_YES || (dm->do_qops == CONFIG_BOOLEAN_AUTO &&
- (dstat[i].start_count || dstat[i].end_count))) {
- if (unlikely(!dm->st_qops)) {
- dm->st_qops = rrdset_create_localhost("disk_qops",
- disk,
- NULL,
- disk,
- "disk.qops",
- "Disk Current I/O Operations",
- "operations",
- 2002,
- update_every,
- RRDSET_TYPE_LINE
- );
-
- rrdset_flag_set(dm->st_qops, RRDSET_FLAG_DETAIL);
-
- dm->rd_qops = rrddim_add(dm->st_qops, "operations", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- } else
- rrdset_next(dm->st_qops);
-
- rrddim_set_by_pointer(dm->st_qops, dm->rd_qops, dstat[i].start_count - dstat[i].end_count);
- rrdset_done(dm->st_qops);
- }
-
- // --------------------------------------------------------------------
-
- if(dm->do_util == CONFIG_BOOLEAN_YES || (dm->do_util == CONFIG_BOOLEAN_AUTO &&
- cur_dstat.busy_time_ms)) {
- if (unlikely(!dm->st_util)) {
- dm->st_util = rrdset_create_localhost("disk_util",
- disk,
- NULL,
- disk,
- "disk.util",
- "Disk Utilization Time",
- "% of time working",
- 2004,
- update_every,
- RRDSET_TYPE_AREA
- );
-
- rrdset_flag_set(dm->st_util, RRDSET_FLAG_DETAIL);
-
- dm->rd_util = rrddim_add(dm->st_util, "utilization", NULL, 1, 10,
- RRD_ALGORITHM_INCREMENTAL);
- } else
- rrdset_next(dm->st_util);
-
- rrddim_set_by_pointer(dm->st_util, dm->rd_util, cur_dstat.busy_time_ms);
- rrdset_done(dm->st_util);
- }
-
- // --------------------------------------------------------------------
-
- if(dm->do_iotime == CONFIG_BOOLEAN_YES || (dm->do_iotime == CONFIG_BOOLEAN_AUTO &&
- (cur_dstat.duration_read_ms || cur_dstat.duration_write_ms))) {
- if (unlikely(!dm->st_iotime)) {
- dm->st_iotime = rrdset_create_localhost("disk_iotime",
- disk,
- NULL,
- disk,
- "disk.iotime",
- "Disk Total I/O Time",
- "milliseconds/s",
- 2022,
- update_every,
- RRDSET_TYPE_LINE
- );
-
- rrdset_flag_set(dm->st_iotime, RRDSET_FLAG_DETAIL);
-
- dm->rd_iotime_in = rrddim_add(dm->st_iotime, "reads", NULL, 1, 1,
- RRD_ALGORITHM_INCREMENTAL);
- dm->rd_iotime_out = rrddim_add(dm->st_iotime, "writes", NULL, -1, 1,
- RRD_ALGORITHM_INCREMENTAL);
- } else
- rrdset_next(dm->st_iotime);
-
- rrddim_set_by_pointer(dm->st_iotime, dm->rd_iotime_in, cur_dstat.duration_read_ms);
- rrddim_set_by_pointer(dm->st_iotime, dm->rd_iotime_out, cur_dstat.duration_write_ms);
- rrdset_done(dm->st_iotime);
- }
-
- // --------------------------------------------------------------------
- // calculate differential charts
- // only if this is not the first time we run
-
- if (likely(dt)) {
-
- // --------------------------------------------------------------------
-
- if(dm->do_await == CONFIG_BOOLEAN_YES || (dm->do_await == CONFIG_BOOLEAN_AUTO &&
- (dstat[i].operations[DEVSTAT_READ] || dstat[i].operations[DEVSTAT_WRITE]))) {
- if (unlikely(!dm->st_await)) {
- dm->st_await = rrdset_create_localhost("disk_await",
- disk,
- NULL,
- disk,
- "disk.await",
- "Average Completed I/O Operation Time",
- "ms per operation",
- 2005,
- update_every,
- RRDSET_TYPE_LINE
- );
-
- rrdset_flag_set(dm->st_await, RRDSET_FLAG_DETAIL);
-
- dm->rd_await_in = rrddim_add(dm->st_await, "reads", NULL, 1, 1,
- RRD_ALGORITHM_ABSOLUTE);
- dm->rd_await_out = rrddim_add(dm->st_await, "writes", NULL, -1, 1,
- RRD_ALGORITHM_ABSOLUTE);
- } else
- rrdset_next(dm->st_await);
-
- rrddim_set_by_pointer(dm->st_await, dm->rd_await_in,
- (dstat[i].operations[DEVSTAT_READ] -
- dm->prev_dstat.operations_read) ?
- (cur_dstat.duration_read_ms - dm->prev_dstat.duration_read_ms) /
- (dstat[i].operations[DEVSTAT_READ] -
- dm->prev_dstat.operations_read) :
- 0);
- rrddim_set_by_pointer(dm->st_await, dm->rd_await_out,
- (dstat[i].operations[DEVSTAT_WRITE] -
- dm->prev_dstat.operations_write) ?
- (cur_dstat.duration_write_ms - dm->prev_dstat.duration_write_ms) /
- (dstat[i].operations[DEVSTAT_WRITE] -
- dm->prev_dstat.operations_write) :
- 0);
- rrdset_done(dm->st_await);
- }
-
- // --------------------------------------------------------------------
-
- if(dm->do_avagsz == CONFIG_BOOLEAN_YES || (dm->do_avagsz == CONFIG_BOOLEAN_AUTO &&
- (dstat[i].operations[DEVSTAT_READ] || dstat[i].operations[DEVSTAT_WRITE]))) {
- if (unlikely(!dm->st_avagsz)) {
- dm->st_avagsz = rrdset_create_localhost("disk_avgsz",
- disk,
- NULL,
- disk,
- "disk.avgsz",
- "Average Completed I/O Operation Bandwidth",
- "kilobytes per operation",
- 2006,
- update_every,
- RRDSET_TYPE_AREA
- );
-
- rrdset_flag_set(dm->st_avagsz, RRDSET_FLAG_DETAIL);
-
- dm->rd_avagsz_in = rrddim_add(dm->st_avagsz, "reads", NULL, 1, KILO_FACTOR,
- RRD_ALGORITHM_ABSOLUTE);
- dm->rd_avagsz_out = rrddim_add(dm->st_avagsz, "writes", NULL, -1, KILO_FACTOR,
- RRD_ALGORITHM_ABSOLUTE);
- } else
- rrdset_next(dm->st_avagsz);
-
- rrddim_set_by_pointer(dm->st_avagsz, dm->rd_avagsz_in,
- (dstat[i].operations[DEVSTAT_READ] -
- dm->prev_dstat.operations_read) ?
- (dstat[i].bytes[DEVSTAT_READ] - dm->prev_dstat.bytes_read) /
- (dstat[i].operations[DEVSTAT_READ] -
- dm->prev_dstat.operations_read) :
- 0);
- rrddim_set_by_pointer(dm->st_avagsz, dm->rd_avagsz_out,
- (dstat[i].operations[DEVSTAT_WRITE] -
- dm->prev_dstat.operations_write) ?
- (dstat[i].bytes[DEVSTAT_WRITE] - dm->prev_dstat.bytes_write) /
- (dstat[i].operations[DEVSTAT_WRITE] -
- dm->prev_dstat.operations_write) :
- 0);
- rrdset_done(dm->st_avagsz);
- }
-
- // --------------------------------------------------------------------
-
- if(dm->do_svctm == CONFIG_BOOLEAN_YES || (dm->do_svctm == CONFIG_BOOLEAN_AUTO &&
- (dstat[i].operations[DEVSTAT_READ] || dstat[i].operations[DEVSTAT_WRITE]))) {
- if (unlikely(!dm->st_svctm)) {
- dm->st_svctm = rrdset_create_localhost("disk_svctm",
- disk,
- NULL,
- disk,
- "disk.svctm",
- "Average Service Time",
- "ms per operation",
- 2007,
- update_every,
- RRDSET_TYPE_LINE
- );
-
- rrdset_flag_set(dm->st_svctm, RRDSET_FLAG_DETAIL);
-
- dm->rd_svctm = rrddim_add(dm->st_svctm, "svctm", NULL, 1, 1,
- RRD_ALGORITHM_ABSOLUTE);
- } else
- rrdset_next(dm->st_svctm);
-
- rrddim_set_by_pointer(dm->st_svctm, dm->rd_svctm,
- ((dstat[i].operations[DEVSTAT_READ] - dm->prev_dstat.operations_read) +
- (dstat[i].operations[DEVSTAT_WRITE] - dm->prev_dstat.operations_write)) ?
- (cur_dstat.busy_time_ms - dm->prev_dstat.busy_time_ms) /
- ((dstat[i].operations[DEVSTAT_READ] - dm->prev_dstat.operations_read) +
- (dstat[i].operations[DEVSTAT_WRITE] - dm->prev_dstat.operations_write)) :
- 0);
- rrdset_done(dm->st_svctm);
- }
-
- // --------------------------------------------------------------------
-
- dm->prev_dstat.bytes_read = dstat[i].bytes[DEVSTAT_READ];
- dm->prev_dstat.bytes_write = dstat[i].bytes[DEVSTAT_WRITE];
- dm->prev_dstat.operations_read = dstat[i].operations[DEVSTAT_READ];
- dm->prev_dstat.operations_write = dstat[i].operations[DEVSTAT_WRITE];
- dm->prev_dstat.duration_read_ms = cur_dstat.duration_read_ms;
- dm->prev_dstat.duration_write_ms = cur_dstat.duration_write_ms;
- dm->prev_dstat.busy_time_ms = cur_dstat.busy_time_ms;
- }
- }
- }
-
- // --------------------------------------------------------------------
-
- if (likely(do_system_io)) {
- static RRDSET *st = NULL;
- static RRDDIM *rd_in = NULL, *rd_out = NULL;
-
- if (unlikely(!st)) {
- st = rrdset_create_localhost("system",
- "io",
- NULL,
- "disk",
- NULL,
- "Disk I/O",
- "kilobytes/s",
- 150,
- update_every,
- RRDSET_TYPE_AREA
- );
-
- rd_in = rrddim_add(st, "in", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_out = rrddim_add(st, "out", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- } else
- rrdset_next(st);
-
- rrddim_set_by_pointer(st, rd_in, total_disk_kbytes_read);
- rrddim_set_by_pointer(st, rd_out, total_disk_kbytes_write);
- rrdset_done(st);
- }
- }
- }
- if (unlikely(common_error)) {
- do_system_io = 0;
- error("DISABLED: system.io chart");
- do_io = 0;
- error("DISABLED: disk.* charts");
- do_ops = 0;
- error("DISABLED: disk_ops.* charts");
- do_qops = 0;
- error("DISABLED: disk_qops.* charts");
- do_util = 0;
- error("DISABLED: disk_util.* charts");
- do_iotime = 0;
- error("DISABLED: disk_iotime.* charts");
- do_await = 0;
- error("DISABLED: disk_await.* charts");
- do_avagsz = 0;
- error("DISABLED: disk_avgsz.* charts");
- do_svctm = 0;
- error("DISABLED: disk_svctm.* charts");
- error("DISABLED: kern.devstat module");
- return 1;
- }
- } else {
- error("DISABLED: kern.devstat module");
- return 1;
- }
-
- return 0;
-}
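// The per-disk state above lives in a DICTIONARY keyed by the device name,
// so every iteration can cheaply recover a disk's settings, chart pointers
// and previous counters. A reduced sketch of that lookup-or-insert pattern,
// using the same DICTIONARY calls as this file (the struct fields are
// trimmed for brevity):
struct disk_state {
    int do_io; // ... chart pointers and previous counters elided ...
};

static void lookup_or_insert(DICTIONARY *disks, const char *disk) {
    struct disk_state *s = dictionary_get(disks, disk);
    if(unlikely(!s)) {
        struct disk_state init = { .do_io = CONFIG_BOOLEAN_AUTO };
        // dictionary_set() copies init, so the stack object may go away
        s = dictionary_set(disks, disk, &init, sizeof(init));
    }
    // ... use s->do_io and friends for this iteration ...
    (void)s;
}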
diff --git a/src/freeipmi_plugin.c b/src/freeipmi_plugin.c
index 4459de7c..146268a5 100644
--- a/src/freeipmi_plugin.c
+++ b/src/freeipmi_plugin.c
@@ -242,7 +242,7 @@ _get_sensor_type_string (int sensor_type)
static int debug = 0;
-static int netdata_update_every = 5;
+static int netdata_update_every = 5; // this is the minimum update frequency
static int netdata_priority = 90000;
static int netdata_do_sel = 1;
@@ -1403,7 +1403,7 @@ int ipmi_detect_speed_secs(struct ipmi_monitoring_ipmi_config *ipmi_config) {
// we find the average in microseconds
// and we round up to the next whole second
- return (( total * 2 / checks / 1000000 ) + 1);
+ return (int)(( total * 2 / checks / 1000000 ) + 1);
}
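// Worked example of the round-up above, under the same integer arithmetic:
// 10 samples totalling 4,500,000 usec average to 450 ms; doubled and
// truncated to whole seconds that is 9,000,000 / 10 / 1,000,000 == 0,
// and the "+ 1" rounds it up to 1 second:
//
//     (int)((4500000ULL * 2 / 10 / 1000000) + 1) == 1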
int main (int argc, char **argv) {
@@ -1426,15 +1426,14 @@ int main (int argc, char **argv) {
int i, freq = 0;
for(i = 1; i < argc ; i++) {
- if(!freq) {
+ if(isdigit(*argv[i]) && !freq) {
int n = atoi(argv[i]);
- if(n > 0) {
+            if(n > 0 && n < 86400) {
freq = n;
continue;
}
}
-
- if(strcmp("version", argv[i]) == 0 || strcmp("-v", argv[i]) == 0 || strcmp("-V", argv[i]) == 0) {
+ else if(strcmp("version", argv[i]) == 0 || strcmp("-v", argv[i]) == 0 || strcmp("-V", argv[i]) == 0) {
printf("freeipmi.plugin %s\n", VERSION);
exit(0);
}
@@ -1568,7 +1567,7 @@ int main (int argc, char **argv) {
freq = ipmi_detect_speed_secs(&ipmi_config);
if(debug) fprintf(stderr, "freeipmi.plugin: IPMI minimum update frequency was calculated to %d seconds.\n", freq);
- if(netdata_update_every < freq) {
+ if(freq > netdata_update_every) {
info("enforcing minimum data collection frequency, calculated to %d seconds.", freq);
netdata_update_every = freq;
}
diff --git a/src/health.c b/src/health.c
index 46b27db6..cc470f81 100644
--- a/src/health.c
+++ b/src/health.c
@@ -148,7 +148,7 @@ static inline void health_alarm_execute(RRDHOST *host, ALARM_ENTRY *ae) {
snprintfz(command_to_run, ALARM_EXEC_COMMAND_LENGTH, "exec %s '%s' '%s' '%u' '%u' '%u' '%lu' '%s' '%s' '%s' '%s' '%s' '%0.0Lf' '%0.0Lf' '%s' '%u' '%u' '%s' '%s' '%s' '%s'",
exec,
recipient,
- host->hostname,
+ host->registry_hostname,
ae->unique_id,
ae->alarm_id,
ae->alarm_event_id,
@@ -356,9 +356,16 @@ void *health_main(void *ptr) {
// detect whether the realtime delta grew to more than twice the boottime delta,
// in which case we assume the system was just woken from hibernation
- if(unlikely(now - last_now > 2 * (now_boottime - last_now_boottime)))
+ if(unlikely(now - last_now > 2 * (now_boottime - last_now_boottime))) {
apply_hibernation_delay = 1;
+ info("Postponing alarm checks for %ld seconds, due to boottime discrepancy (realtime dt: %ld, boottime dt: %ld)."
+ , hibernation_delay
+ , (long)(now - last_now)
+ , (long)(now_boottime - last_now_boottime)
+ );
+ }
+
last_now = now;
last_now_boottime = now_boottime;
@@ -374,11 +381,9 @@ void *health_main(void *ptr) {
if(unlikely(apply_hibernation_delay)) {
- info("Postponing alarm checks for %ld seconds, on host '%s', due to boottime discrepancy (realtime dt: %ld, boottime dt: %ld)."
+ info("Postponing alarm checks for %ld seconds, on host '%s'."
, hibernation_delay
, host->hostname
- , (long)(now - last_now)
- , (long)(now_boottime - last_now_boottime)
);
host->health_delay_up_to = now + hibernation_delay;
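// The heuristic above: across a suspend or hibernation, realtime keeps
// advancing while boottime (as sampled by this loop) does not, so a realtime
// delta more than twice the boottime delta is treated as a resume, and alarm
// checks are postponed. A reduced sketch, assuming netdata's clock helpers;
// postpone_alarms() is a hypothetical stand-in for the per-host loop below:
static time_t last_now = 0, last_now_boottime = 0;
static void hibernation_check(long hibernation_delay) {
    time_t now = now_realtime_sec();
    time_t now_boottime = now_boottime_sec();

    if(last_now && unlikely(now - last_now > 2 * (now_boottime - last_now_boottime)))
        postpone_alarms(now + hibernation_delay); // hypothetical helper

    last_now = now;
    last_now_boottime = now_boottime;
}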
diff --git a/src/health_config.c b/src/health_config.c
index ad954cbe..b4655dc7 100644
--- a/src/health_config.c
+++ b/src/health_config.c
@@ -32,8 +32,8 @@ static inline int rrdcalc_add_alarm_from_config(RRDHOST *host, RRDCALC *rc) {
return 0;
}
- if(!RRDCALC_HAS_DB_LOOKUP(rc) && !rc->warning && !rc->critical) {
- error("Health configuration for alarm '%s.%s' is useless (no calculation, no warning and no critical evaluation)", rc->chart?rc->chart:"NOCHART", rc->name);
+ if(!RRDCALC_HAS_DB_LOOKUP(rc) && !rc->calculation && !rc->warning && !rc->critical) {
+ error("Health configuration for alarm '%s.%s' is useless (no db lookup, no calculation, no warning and no critical expressions)", rc->chart?rc->chart:"NOCHART", rc->name);
return 0;
}
@@ -136,7 +136,7 @@ static inline int health_parse_duration(char *string, int *result) {
}
char *e = NULL;
- calculated_number n = strtold(string, &e);
+ calculated_number n = str2ld(string, &e);
if(e && *e) {
switch (*e) {
case 'Y':
@@ -241,10 +241,10 @@ static inline int health_parse_delay(
if(!given_max) {
if((*delay_max_duration) < (*delay_up_duration) * (*delay_multiplier))
- *delay_max_duration = (*delay_up_duration) * (*delay_multiplier);
+ *delay_max_duration = (int)((*delay_up_duration) * (*delay_multiplier));
if((*delay_max_duration) < (*delay_down_duration) * (*delay_multiplier))
- *delay_max_duration = (*delay_down_duration) * (*delay_multiplier);
+ *delay_max_duration = (int)((*delay_down_duration) * (*delay_multiplier));
}
return 1;
@@ -381,37 +381,6 @@ static inline int health_parse_db_lookup(
return 1;
}
-static inline char *trim_all_spaces(char *buffer) {
- char *d = buffer, *s = buffer;
-
- // skip spaces
- while(isspace(*s)) s++;
-
- while(*s) {
- // copy the non-space part
- while(*s && !isspace(*s)) *d++ = *s++;
-
- // add a space if we have to
- if(*s && isspace(*s)) {
- *d++ = ' ';
- s++;
- }
-
- // skip spaces
- while(isspace(*s)) s++;
- }
-
- *d = '\0';
-
- if(d > buffer) {
- d--;
- if(isspace(*d)) *d = '\0';
- }
-
- if(!buffer[0]) return NULL;
- return buffer;
-}
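// The removed helper above trimmed both ends and collapsed internal runs of
// whitespace to single spaces; the patch switches to the shared trim_all(),
// which is assumed here to behave the same way. An equivalent standalone
// sketch of that behaviour:
static inline char *collapse_whitespace(char *buffer) {
    char *d = buffer, *s = buffer;

    while(isspace((unsigned char)*s)) s++;                    // skip leading spaces

    while(*s) {
        while(*s && !isspace((unsigned char)*s)) *d++ = *s++; // copy a word
        while(isspace((unsigned char)*s)) s++;                // swallow the run
        if(*s) *d++ = ' ';                                    // single separator
    }

    *d = '\0';
    return buffer[0] ? buffer : NULL; // NULL when nothing is left, as before
}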
-
static inline char *health_source_file(size_t line, const char *path, const char *filename) {
char buffer[FILENAME_MAX + 1];
snprintfz(buffer, FILENAME_MAX, "%zu@%s/%s", line, path, filename);
@@ -485,7 +454,7 @@ int health_readfile(RRDHOST *host, const char *path, const char *filename) {
int stop_appending = !s;
line++;
s = trim(buffer);
- if(!s) continue;
+ if(!s || *s == '#') continue;
append = strlen(s);
if(!stop_appending && s[append - 1] == '\\') {
@@ -509,8 +478,8 @@ int health_readfile(RRDHOST *host, const char *path, const char *filename) {
s++;
char *value = s;
- key = trim_all_spaces(key);
- value = trim_all_spaces(value);
+ key = trim_all(key);
+ value = trim_all(value);
if(!key) {
error("Health configuration has invalid line %zu of file '%s/%s'. Keyword is empty. Ignoring it.", line, path, filename);
@@ -593,7 +562,7 @@ int health_readfile(RRDHOST *host, const char *path, const char *filename) {
}
else if(hash == hash_green && !strcasecmp(key, HEALTH_GREEN_KEY)) {
char *e;
- rc->green = strtold(value, &e);
+ rc->green = str2ld(value, &e);
if(e && *e) {
error("Health configuration at line %zu of file '%s/%s' for alarm '%s' at key '%s' leaves this string unmatched: '%s'.",
line, path, filename, rc->name, key, e);
@@ -601,7 +570,7 @@ int health_readfile(RRDHOST *host, const char *path, const char *filename) {
}
else if(hash == hash_red && !strcasecmp(key, HEALTH_RED_KEY)) {
char *e;
- rc->red = strtold(value, &e);
+ rc->red = str2ld(value, &e);
if(e && *e) {
error("Health configuration at line %zu of file '%s/%s' for alarm '%s' at key '%s' leaves this string unmatched: '%s'.",
line, path, filename, rc->name, key, e);
@@ -717,7 +686,7 @@ int health_readfile(RRDHOST *host, const char *path, const char *filename) {
}
else if(hash == hash_green && !strcasecmp(key, HEALTH_GREEN_KEY)) {
char *e;
- rt->green = strtold(value, &e);
+ rt->green = str2ld(value, &e);
if(e && *e) {
error("Health configuration at line %zu of file '%s/%s' for template '%s' at key '%s' leaves this string unmatched: '%s'.",
line, path, filename, rt->name, key, e);
@@ -725,7 +694,7 @@ int health_readfile(RRDHOST *host, const char *path, const char *filename) {
}
else if(hash == hash_red && !strcasecmp(key, HEALTH_RED_KEY)) {
char *e;
- rt->red = strtold(value, &e);
+ rt->red = str2ld(value, &e);
if(e && *e) {
error("Health configuration at line %zu of file '%s/%s' for template '%s' at key '%s' leaves this string unmatched: '%s'.",
line, path, filename, rt->name, key, e);
diff --git a/src/inlined.h b/src/inlined.h
index 0dc11c95..f1812ba1 100644
--- a/src/inlined.h
+++ b/src/inlined.h
@@ -123,6 +123,103 @@ static inline unsigned long long str2ull(const char *s) {
return n;
}
+static inline long long str2ll(const char *s, char **endptr) {
+ int negative = 0;
+
+ if(unlikely(*s == '-')) {
+ s++;
+ negative = 1;
+ }
+ else if(unlikely(*s == '+'))
+ s++;
+
+ long long n = 0;
+ char c;
+ for(c = *s; c >= '0' && c <= '9' ; c = *(++s)) {
+ n *= 10;
+ n += c - '0';
+ }
+
+ if(unlikely(endptr))
+ *endptr = (char *)s;
+
+ if(unlikely(negative))
+ return -n;
+ else
+ return n;
+}
+
+static inline long double str2ld(const char *s, char **endptr) {
+ int negative = 0;
+ const char *start = s;
+ unsigned long long integer_part = 0;
+ unsigned long decimal_part = 0;
+ size_t decimal_digits = 0;
+
+ switch(*s) {
+ case '-':
+ s++;
+ negative = 1;
+ break;
+
+ case '+':
+ s++;
+ break;
+
+ case 'n':
+ if(s[1] == 'a' && s[2] == 'n') {
+ if(endptr) *endptr = (char *)&s[3];
+ return NAN;
+ }
+ break;
+
+ case 'i':
+ if(s[1] == 'n' && s[2] == 'f') {
+ if(endptr) *endptr = (char *)&s[3];
+ return INFINITY;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ while (*s >= '0' && *s <= '9') {
+ integer_part = (integer_part * 10) + (*s - '0');
+ s++;
+ }
+
+ if(unlikely(*s == '.')) {
+ decimal_part = 0;
+ s++;
+
+ while (*s >= '0' && *s <= '9') {
+ decimal_part = (decimal_part * 10) + (*s - '0');
+ s++;
+ decimal_digits++;
+ }
+ }
+
+ if(unlikely(*s == 'e' || *s == 'E'))
+ return strtold(start, endptr);
+
+ if(unlikely(endptr))
+ *endptr = (char *)s;
+
+ if(unlikely(negative)) {
+ if(unlikely(decimal_digits))
+ return -((long double)integer_part + (long double)decimal_part / powl(10.0, decimal_digits));
+ else
+ return -((long double)integer_part);
+ }
+ else {
+ if(unlikely(decimal_digits))
+ return (long double)integer_part + (long double)decimal_part / powl(10.0, decimal_digits);
+ else
+ return (long double)integer_part;
+ }
+}
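// Quick usage sketch for str2ld() above: it hand-parses the common
// fixed-point forms, recognizes a leading "nan"/"inf", and defers to
// strtold() only when an exponent shows up:
static void str2ld_examples(void) {
    char *e = NULL;
    long double v;

    v = str2ld("3.14 apples", &e); // v ~= 3.14, e -> " apples"
    v = str2ld("inf", &e);         // v == INFINITY, e -> past "inf"
    v = str2ld("1.5e3", &e);       // exponent: falls back to strtold()
    (void)v;
}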
+
#ifdef NETDATA_STRCMP_OVERRIDE
#ifdef strcmp
#undef strcmp
diff --git a/src/log.c b/src/log.c
index 855ecaee..b3dfc73d 100644
--- a/src/log.c
+++ b/src/log.c
@@ -23,6 +23,37 @@ void syslog_init(void) {
}
}
+#define LOG_DATE_LENGTH 26
+
+static inline void log_date(char *buffer, size_t len) {
+ if(unlikely(!buffer || !len))
+ return;
+
+ time_t t;
+ struct tm *tmp, tmbuf;
+
+ t = now_realtime_sec();
+ tmp = localtime_r(&t, &tmbuf);
+
+ if (tmp == NULL) {
+ buffer[0] = '\0';
+ return;
+ }
+
+ if (unlikely(strftime(buffer, len, "%Y-%m-%d %H:%M:%S", tmp) == 0))
+ buffer[0] = '\0';
+
+ buffer[len - 1] = '\0';
+}
+
+static netdata_mutex_t log_mutex = NETDATA_MUTEX_INITIALIZER;
+static inline void log_lock() {
+ netdata_mutex_lock(&log_mutex);
+}
+static inline void log_unlock() {
+ netdata_mutex_unlock(&log_mutex);
+}
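// With log_date() now writing into a caller-supplied buffer and the writes
// serialized by log_mutex, each log line is rendered and emitted atomically
// even under concurrent logging. The pattern every logger below follows, in
// sketch form:
static void log_line_sketch(void) {
    char date[LOG_DATE_LENGTH];
    log_date(date, LOG_DATE_LENGTH);

    log_lock();
    fprintf(stderr, "%s: %s INFO : something happened\n", date, program_name);
    log_unlock();
}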
+
int open_log_file(int fd, FILE **fp, const char *filename, int *enabled_syslog) {
int f;
@@ -136,8 +167,10 @@ int error_log_limit(int reset) {
if(reset) {
if(prevented) {
- log_date(stderr);
- fprintf(stderr, "%s: Resetting logging for process '%s' (prevented %lu logs in the last %ld seconds).\n"
+ char date[LOG_DATE_LENGTH];
+ log_date(date, LOG_DATE_LENGTH);
+ fprintf(stderr, "%s: %s Resetting logging for process '%s' (prevented %lu logs in the last %ld seconds).\n"
+ , date
, program_name
, program_name
, prevented
@@ -155,8 +188,10 @@ int error_log_limit(int reset) {
if(now - start > error_log_throttle_period) {
if(prevented) {
- log_date(stderr);
- fprintf(stderr, "%s: Resuming logging from process '%s' (prevented %lu logs in the last %ld seconds).\n"
+ char date[LOG_DATE_LENGTH];
+ log_date(date, LOG_DATE_LENGTH);
+ fprintf(stderr, "%s: %s Resuming logging from process '%s' (prevented %lu logs in the last %ld seconds).\n"
+ , date
, program_name
, program_name
, prevented
@@ -175,8 +210,10 @@ int error_log_limit(int reset) {
if(counter > error_log_errors_per_period) {
if(!prevented) {
- log_date(stderr);
- fprintf(stderr, "%s: Too many logs (%lu logs in %ld seconds, threshold is set to %lu logs in %ld seconds). Preventing more logs from process '%s' for %ld seconds.\n"
+ char date[LOG_DATE_LENGTH];
+ log_date(date, LOG_DATE_LENGTH);
+ fprintf(stderr, "%s: %s Too many logs (%lu logs in %ld seconds, threshold is set to %lu logs in %ld seconds). Preventing more logs from process '%s' for %ld seconds.\n"
+ , date
, program_name
, counter
, now - start
@@ -200,37 +237,16 @@ int error_log_limit(int reset) {
}
// ----------------------------------------------------------------------------
-// print the date
-
-// FIXME
-// this should print the date into a buffer; the way it
-// is now, logs from multiple threads may be multiplexed
-
-void log_date(FILE *out)
-{
- char outstr[26];
- time_t t;
- struct tm *tmp, tmbuf;
-
- t = now_realtime_sec();
- tmp = localtime_r(&t, &tmbuf);
-
- if (tmp == NULL) return;
- if (unlikely(strftime(outstr, sizeof(outstr), "%Y-%m-%d %H:%M:%S", tmp) == 0)) return;
-
- fprintf(out, "%s: ", outstr);
-}
-
-// ----------------------------------------------------------------------------
// debug log
-void debug_int( const char *file, const char *function, const unsigned long line, const char *fmt, ... )
-{
+void debug_int( const char *file, const char *function, const unsigned long line, const char *fmt, ... ) {
va_list args;
- log_date(stdout);
+ char date[LOG_DATE_LENGTH];
+ log_date(date, LOG_DATE_LENGTH);
+
va_start( args, fmt );
- printf("%s: DEBUG (%04lu@%-10.10s:%-15.15s): ", program_name, line, file, function);
+ printf("%s: %s DEBUG (%04lu@%-10.10s:%-15.15s): ", date, program_name, line, file, function);
vprintf(fmt, args);
va_end( args );
putchar('\n');
@@ -254,21 +270,26 @@ void info_int( const char *file, const char *function, const unsigned long line,
// prevent logging too much
if(error_log_limit(0)) return;
- log_date(stderr);
+ if(error_log_syslog) {
+ va_start( args, fmt );
+ vsyslog(LOG_INFO, fmt, args );
+ va_end( args );
+ }
+
+ char date[LOG_DATE_LENGTH];
+ log_date(date, LOG_DATE_LENGTH);
+
+ log_lock();
va_start( args, fmt );
- if(debug_flags) fprintf(stderr, "%s: INFO : (%04lu@%-10.10s:%-15.15s): ", program_name, line, file, function);
- else fprintf(stderr, "%s: INFO : ", program_name);
+ if(debug_flags) fprintf(stderr, "%s: %s INFO : (%04lu@%-10.10s:%-15.15s): ", date, program_name, line, file, function);
+ else fprintf(stderr, "%s: %s INFO : ", date, program_name);
vfprintf( stderr, fmt, args );
va_end( args );
fputc('\n', stderr);
- if(error_log_syslog) {
- va_start( args, fmt );
- vsyslog(LOG_INFO, fmt, args );
- va_end( args );
- }
+ log_unlock();
}
// ----------------------------------------------------------------------------
@@ -296,56 +317,67 @@ static const char *strerror_result_string(const char *a, const char *b) { (void)
#error "cannot detect the format of function strerror_r()"
#endif
-void error_int( const char *prefix, const char *file, const char *function, const unsigned long line, const char *fmt, ... )
-{
+void error_int( const char *prefix, const char *file, const char *function, const unsigned long line, const char *fmt, ... ) {
+ // save a copy of errno - just in case this function generates a new error
+ int __errno = errno;
+
va_list args;
// prevent logging too much
if(error_log_limit(0)) return;
- log_date(stderr);
+ if(error_log_syslog) {
+ va_start( args, fmt );
+ vsyslog(LOG_ERR, fmt, args );
+ va_end( args );
+ }
+
+ char date[LOG_DATE_LENGTH];
+ log_date(date, LOG_DATE_LENGTH);
+
+ log_lock();
va_start( args, fmt );
- if(debug_flags) fprintf(stderr, "%s: %s: (%04lu@%-10.10s:%-15.15s): ", program_name, prefix, line, file, function);
- else fprintf(stderr, "%s: %s: ", program_name, prefix);
+ if(debug_flags) fprintf(stderr, "%s: %s %s: (%04lu@%-10.10s:%-15.15s): ", date, program_name, prefix, line, file, function);
+ else fprintf(stderr, "%s: %s %s: ", date, program_name, prefix);
vfprintf( stderr, fmt, args );
va_end( args );
- if(errno) {
+ if(__errno) {
char buf[1024];
- fprintf(stderr, " (errno %d, %s)\n", errno, strerror_result(strerror_r(errno, buf, 1023), buf));
+ fprintf(stderr, " (errno %d, %s)\n", __errno, strerror_result(strerror_r(__errno, buf, 1023), buf));
errno = 0;
}
else
fputc('\n', stderr);
+ log_unlock();
+}
+
+void fatal_int( const char *file, const char *function, const unsigned long line, const char *fmt, ... ) {
+ va_list args;
+
if(error_log_syslog) {
va_start( args, fmt );
- vsyslog(LOG_ERR, fmt, args );
+ vsyslog(LOG_CRIT, fmt, args );
va_end( args );
}
-}
-void fatal_int( const char *file, const char *function, const unsigned long line, const char *fmt, ... )
-{
- va_list args;
+ char date[LOG_DATE_LENGTH];
+ log_date(date, LOG_DATE_LENGTH);
- log_date(stderr);
+ log_lock();
va_start( args, fmt );
- if(debug_flags) fprintf(stderr, "%s: FATAL: (%04lu@%-10.10s:%-15.15s): ", program_name, line, file, function);
- else fprintf(stderr, "%s: FATAL: ", program_name);
+ if(debug_flags) fprintf(stderr, "%s: %s FATAL: (%04lu@%-10.10s:%-15.15s): ", date, program_name, line, file, function);
+ else fprintf(stderr, "%s: %s FATAL: ", date, program_name);
vfprintf( stderr, fmt, args );
va_end( args );
perror(" # ");
fputc('\n', stderr);
- if(error_log_syslog) {
- va_start( args, fmt );
- vsyslog(LOG_CRIT, fmt, args );
- va_end( args );
- }
+ log_unlock();
netdata_cleanup_and_exit(1);
}
@@ -353,23 +385,29 @@ void fatal_int( const char *file, const char *function, const unsigned long line
// ----------------------------------------------------------------------------
// access log
-void log_access( const char *fmt, ... )
-{
+void log_access( const char *fmt, ... ) {
va_list args;
+ if(access_log_syslog) {
+ va_start( args, fmt );
+ vsyslog(LOG_INFO, fmt, args );
+ va_end( args );
+ }
+
if(stdaccess) {
- log_date(stdaccess);
+ static netdata_mutex_t access_mutex = NETDATA_MUTEX_INITIALIZER;
+
+ netdata_mutex_lock(&access_mutex);
+
+ char date[LOG_DATE_LENGTH];
+ log_date(date, LOG_DATE_LENGTH);
+ fprintf(stdaccess, "%s: ", date);
va_start( args, fmt );
vfprintf( stdaccess, fmt, args );
va_end( args );
fputc('\n', stdaccess);
- }
- if(access_log_syslog) {
- va_start( args, fmt );
- vsyslog(LOG_INFO, fmt, args );
- va_end( args );
+ netdata_mutex_unlock(&access_mutex);
}
}
-
diff --git a/src/log.h b/src/log.h
index d8ff0654..c0414df8 100644
--- a/src/log.h
+++ b/src/log.h
@@ -28,6 +28,10 @@
#define D_CONNECT_TO 0x0000000001000000
#define D_RRDHOST 0x0000000002000000
#define D_LOCKS 0x0000000004000000
+#define D_BACKEND 0x0000000008000000
+#define D_STATSD 0x0000000010000000
+#define D_POLLFD 0x0000000020000000
+#define D_STREAM 0x0000000040000000
#define D_SYSTEM 0x8000000000000000
//#define DEBUG (D_WEB_CLIENT_ACCESS|D_LISTENER|D_RRD_STATS)
@@ -56,16 +60,22 @@ extern int error_log_limit(int reset);
extern void open_all_log_files();
extern void reopen_all_log_files();
+static inline void debug_dummy(void) {}
+
#define error_log_limit_reset() do { error_log_throttle_period = error_log_throttle_period_backup; error_log_limit(1); } while(0)
#define error_log_limit_unlimited() do { error_log_throttle_period = 0; } while(0)
+#ifdef NETDATA_INTERNAL_CHECKS
#define debug(type, args...) do { if(unlikely(debug_flags & type)) debug_int(__FILE__, __FUNCTION__, __LINE__, ##args); } while(0)
+#else
+#define debug(type, args...) debug_dummy()
+#endif
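// On builds without NETDATA_INTERNAL_CHECKS every debug(...) site now
// compiles to a call of the empty debug_dummy(), so its arguments are never
// evaluated and the tracing cost disappears entirely, e.g.:
//
//     debug(D_SYSTEM, "loaded %d charts", count); // no-op on release builds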
+
#define info(args...) info_int(__FILE__, __FUNCTION__, __LINE__, ##args)
#define infoerr(args...) error_int("INFO", __FILE__, __FUNCTION__, __LINE__, ##args)
#define error(args...) error_int("ERROR", __FILE__, __FUNCTION__, __LINE__, ##args)
#define fatal(args...) fatal_int(__FILE__, __FUNCTION__, __LINE__, ##args)
-extern void log_date(FILE *out);
extern void debug_int( const char *file, const char *function, const unsigned long line, const char *fmt, ... ) PRINTFLIKE(4, 5);
extern void info_int( const char *file, const char *function, const unsigned long line, const char *fmt, ... ) PRINTFLIKE(4, 5);
extern void error_int( const char *prefix, const char *file, const char *function, const unsigned long line, const char *fmt, ... ) PRINTFLIKE(5, 6);
diff --git a/src/main.c b/src/main.c
index a72585e2..bf5d787a 100644
--- a/src/main.c
+++ b/src/main.c
@@ -9,8 +9,8 @@ void netdata_cleanup_and_exit(int ret) {
debug(D_EXIT, "Called: netdata_cleanup_and_exit()");
- // save the database
- rrdhost_save_all();
+ // cleanup the database
+ rrdhost_cleanup_all();
// unlink the pid
if(pidfile[0]) {
@@ -56,6 +56,7 @@ struct netdata_static_thread static_threads[] = {
{"web", NULL, NULL, 1, NULL, NULL, socket_listen_main_multi_threaded},
{"web-single-threaded", NULL, NULL, 0, NULL, NULL, socket_listen_main_single_threaded},
{"push-metrics", NULL, NULL, 0, NULL, NULL, rrdpush_sender_thread},
+ {"statsd", NULL, NULL, 1, NULL, NULL, statsd_main},
{NULL, NULL, NULL, 0, NULL, NULL, NULL}
};
@@ -66,14 +67,16 @@ void web_server_threading_selection(void) {
int single_threaded = (web_server_mode == WEB_SERVER_MODE_SINGLE_THREADED);
int i;
- for(i = 0; static_threads[i].name ; i++) {
- if(static_threads[i].start_routine == socket_listen_main_multi_threaded)
+ for (i = 0; static_threads[i].name; i++) {
+ if (static_threads[i].start_routine == socket_listen_main_multi_threaded)
static_threads[i].enabled = multi_threaded;
- if(static_threads[i].start_routine == socket_listen_main_single_threaded)
+ if (static_threads[i].start_routine == socket_listen_main_single_threaded)
static_threads[i].enabled = single_threaded;
}
+}
+void web_server_config_options(void) {
web_client_timeout = (int) config_get_number(CONFIG_SECTION_WEB, "disconnect idle clients after seconds", DEFAULT_DISCONNECT_IDLE_WEB_CLIENTS_AFTER_SECONDS);
respect_web_browser_do_not_track_policy = config_get_boolean(CONFIG_SECTION_WEB, "respect do not track policy", respect_web_browser_do_not_track_policy);
@@ -299,6 +302,8 @@ void help(int exitcode) {
" -W stacksize=N Set the stacksize (in bytes).\n\n"
" -W debug_flags=N Set runtime tracing to debug.log.\n\n"
" -W unittest Run internal unittests and exit.\n\n"
+            "  -W set section option value\n"
+            "                           Set a netdata.conf option from the command line.\n\n"
" -W simple-pattern pattern string\n"
" Check if string matches pattern and exit.\n\n"
);
@@ -405,6 +410,9 @@ static void backwards_compatible_config() {
config_move(CONFIG_SECTION_GLOBAL, "web files group",
CONFIG_SECTION_WEB, "web files group");
+
+ config_move(CONFIG_SECTION_BACKEND, "opentsdb host tags",
+ CONFIG_SECTION_BACKEND, "host tags");
}
static void get_netdata_configured_variables() {
@@ -420,8 +428,6 @@ static void get_netdata_configured_variables() {
netdata_configured_hostname = config_get(CONFIG_SECTION_GLOBAL, "hostname", buf);
debug(D_OPTIONS, "hostname set to '%s'", netdata_configured_hostname);
- netdata_configured_hostname = config_get(CONFIG_SECTION_GLOBAL, "hostname", CONFIG_DIR);
-
// ------------------------------------------------------------------------
// get default database size
@@ -502,7 +508,9 @@ void set_global_environment() {
// avoid flood calls to stat(/etc/localtime)
// http://stackoverflow.com/questions/4554271/how-to-avoid-excessive-stat-etc-localtime-calls-in-strftime-on-linux
- setenv("TZ", ":/etc/localtime", 0);
+ const char *tz = getenv("TZ");
+ if(!tz || !*tz)
+ setenv("TZ", config_get(CONFIG_SECTION_GLOBAL, "TZ environment variable", ":/etc/localtime"), 0);
// set the path we need
char path[1024 + 1], *p = getenv("PATH");
@@ -625,10 +633,12 @@ int main(int argc, char **argv) {
{
char* stacksize_string = "stacksize=";
char* debug_flags_string = "debug_flags=";
+
if(strcmp(optarg, "unittest") == 0) {
- default_rrd_update_every = 1;
- default_rrd_memory_mode = RRD_MEMORY_MODE_RAM;
- if(!config_loaded) config_load(NULL, 0);
+ if(unit_test_str2ld()) exit(1);
+ //default_rrd_update_every = 1;
+ //default_rrd_memory_mode = RRD_MEMORY_MODE_RAM;
+ //if(!config_loaded) config_load(NULL, 0);
get_netdata_configured_variables();
default_rrd_update_every = 1;
default_rrd_memory_mode = RRD_MEMORY_MODE_RAM;
@@ -691,9 +701,71 @@ int main(int argc, char **argv) {
config_set(CONFIG_SECTION_GLOBAL, "debug flags", optarg);
debug_flags = strtoull(optarg, NULL, 0);
}
+ else if(strcmp(optarg, "set") == 0) {
+ if(optind + 3 > argc) {
+ fprintf(stderr, "%s", "\nUSAGE: -W set 'section' 'key' 'value'\n\n"
+ " Overwrites settings of netdata.conf.\n"
+ "\n"
+ " These options interact with: -c netdata.conf\n"
+                            "  If -c netdata.conf is given before -W set...,\n"
+                            "  the settings in netdata.conf may overwrite the\n"
+                            "  command line parameters.\n"
+                            "  If -c netdata.conf is given after -W set...\n"
+                            "  (or is missing), the command line parameters\n"
+                            "  cannot be overwritten."
+ "\n"
+ );
+ exit(1);
+ }
+ const char *section = argv[optind];
+ const char *key = argv[optind + 1];
+ const char *value = argv[optind + 2];
+ optind += 3;
+
+ // set this one as the default
+ // only if it is not already set in the config file
+ // so the caller can use -c netdata.conf before or
+ // after this parameter to prevent or allow overwriting
+ // variables at netdata.conf
+ config_set_default(section, key, value);
+
+ // fprintf(stderr, "SET section '%s', key '%s', value '%s'\n", section, key, value);
+ }
+ else if(strcmp(optarg, "get") == 0) {
+ if(optind + 3 > argc) {
+                    fprintf(stderr, "%s", "\nUSAGE: -W get 'section' 'key' 'default'\n\n"
+ " Prints settings of netdata.conf.\n"
+ "\n"
+ " These options interact with: -c netdata.conf\n"
+ " -c netdata.conf has to be given before -W get.\n"
+ "\n"
+ );
+ exit(1);
+ }
+
+ if(!config_loaded) {
+                    fprintf(stderr, "warning: no configuration file has been loaded. Use -c CONFIG_FILE before -W get. Using the default config.\n");
+ config_load(NULL, 0);
+ }
+
+ backwards_compatible_config();
+ get_netdata_configured_variables();
+
+ const char *section = argv[optind];
+ const char *key = argv[optind + 1];
+ const char *def = argv[optind + 2];
+ const char *value = config_get(section, key, def);
+ printf("%s\n", value);
+ exit(0);
+ }
+ else {
+ fprintf(stderr, "Unknown -W parameter '%s'\n", optarg);
+ help(1);
+ }
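// Example invocations for the two new switches (paths and option names are
// illustrative):
//
//     # command line wins: -W set comes before -c netdata.conf is loaded
//     netdata -W set global "memory mode" ram -c /etc/netdata/netdata.conf
//
//     # print an option, falling back to the given default; -c must come first
//     netdata -c /etc/netdata/netdata.conf -W get global "history" 3600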
}
break;
default: /* ? */
+ fprintf(stderr, "Unknown parameter '%c'\n", opt);
help(1);
break;
}
@@ -875,8 +947,10 @@ int main(int argc, char **argv) {
// --------------------------------------------------------------------
// create the listening sockets
+ web_server_threading_selection();
+
if(web_server_mode != WEB_SERVER_MODE_NONE)
- create_listen_sockets();
+ api_listen_sockets_setup();
}
// initialize the log files
@@ -928,7 +1002,7 @@ int main(int argc, char **argv) {
// ------------------------------------------------------------------------
// spawn the threads
- web_server_threading_selection();
+ web_server_config_options();
for (i = 0; static_threads[i].name != NULL ; i++) {
struct netdata_static_thread *st = &static_threads[i];
diff --git a/src/plugin_freebsd.c b/src/plugin_freebsd.c
index 31ab6e0c..020fdb41 100644
--- a/src/plugin_freebsd.c
+++ b/src/plugin_freebsd.c
@@ -53,6 +53,12 @@ static struct freebsd_module {
// network interfaces metrics
{ .name = "getifaddrs", .dim = "getifaddrs", .enabled = 1, .func = do_getifaddrs },
+ // ZFS metrics
+ { .name = "kstat.zfs.misc.arcstats", .dim = "arcstats", .enabled = 1, .func = do_kstat_zfs_misc_arcstats },
+
+ // ipfw metrics
+ { .name = "ipfw", .dim = "ipfw", .enabled = 1, .func = do_ipfw },
+
// the terminator of this array
{ .name = NULL, .dim = NULL, .enabled = 0, .func = NULL }
};
diff --git a/src/plugin_freebsd.h b/src/plugin_freebsd.h
index 166c6433..541bf852 100644
--- a/src/plugin_freebsd.h
+++ b/src/plugin_freebsd.h
@@ -3,6 +3,12 @@
#include <sys/sysctl.h>
+#define KILO_FACTOR 1024
+#define MEGA_FACTOR 1048576 // 1024 * 1024
+#define GIGA_FACTOR 1073741824 // 1024 * 1024 * 1024
+
+#define MAX_INT_DIGITS 10 // maximum number of decimal digits of a 32-bit int
+
void *freebsd_main(void *ptr);
extern int freebsd_plugin_init();
@@ -35,6 +41,8 @@ extern int do_net_inet6_icmp6_stats(int update_every, usec_t dt);
extern int do_getifaddrs(int update_every, usec_t dt);
extern int do_getmntinfo(int update_every, usec_t dt);
extern int do_kern_devstat(int update_every, usec_t dt);
+extern int do_kstat_zfs_misc_arcstats(int update_every, usec_t dt);
+extern int do_ipfw(int update_every, usec_t dt);
#define GETSYSCTL_MIB(name, mib) getsysctl_mib(name, mib, sizeof(mib)/sizeof(int))
diff --git a/src/plugin_idlejitter.c b/src/plugin_idlejitter.c
index 2ed78160..89f49023 100644
--- a/src/plugin_idlejitter.c
+++ b/src/plugin_idlejitter.c
@@ -13,41 +13,71 @@ void *cpuidlejitter_main(void *ptr) {
if(pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL) != 0)
error("Cannot set pthread cancel state to ENABLE.");
- int sleep_ms = (int) config_get_number("plugin:idlejitter", "loop time in ms", CPU_IDLEJITTER_SLEEP_TIME_MS);
- if(sleep_ms <= 0) {
+ usec_t sleep_ut = config_get_number("plugin:idlejitter", "loop time in ms", CPU_IDLEJITTER_SLEEP_TIME_MS) * USEC_PER_MS;
+ if(sleep_ut <= 0) {
config_set_number("plugin:idlejitter", "loop time in ms", CPU_IDLEJITTER_SLEEP_TIME_MS);
- sleep_ms = CPU_IDLEJITTER_SLEEP_TIME_MS;
- }
-
- RRDSET *st = rrdset_find_localhost("system.idlejitter");
- if(!st) {
- st = rrdset_create_localhost("system", "idlejitter", NULL, "processes", NULL, "CPU Idle Jitter"
- , "microseconds lost/s", 9999, localhost->rrd_update_every, RRDSET_TYPE_LINE);
- rrddim_add(st, "jitter", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ sleep_ut = CPU_IDLEJITTER_SLEEP_TIME_MS * USEC_PER_MS;
}
+ RRDSET *st = rrdset_create_localhost(
+ "system"
+ , "idlejitter"
+ , NULL
+ , "processes"
+ , NULL
+ , "CPU Idle Jitter"
+ , "microseconds lost/s"
+ , 9999
+ , localhost->rrd_update_every
+ , RRDSET_TYPE_AREA
+ );
+ RRDDIM *rd_min = rrddim_add(st, "min", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ RRDDIM *rd_max = rrddim_add(st, "max", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ RRDDIM *rd_avg = rrddim_add(st, "average", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+
+ usec_t update_every_ut = localhost->rrd_update_every * USEC_PER_SEC;
struct timeval before, after;
unsigned long long counter;
for(counter = 0; 1 ;counter++) {
- usec_t usec = 0, susec = 0;
+ int iterations = 0;
+ usec_t error_total = 0,
+ error_min = 0,
+ error_max = 0,
+ elapsed = 0;
if(netdata_exit) break;
- while(susec < (localhost->rrd_update_every * USEC_PER_SEC)) {
-
+ while(elapsed < update_every_ut) {
now_monotonic_timeval(&before);
- sleep_usec(sleep_ms * 1000);
+ sleep_usec(sleep_ut);
now_monotonic_timeval(&after);
- // calculate the time it took for a full loop
- usec = dt_usec(&after, &before);
- susec += usec;
+ usec_t dt = dt_usec(&after, &before);
+ elapsed += dt;
+
+ usec_t error = dt - sleep_ut;
+ error_total += error;
+
+ if(unlikely(!iterations))
+ error_min = error;
+ else if(error < error_min)
+ error_min = error;
+
+ if(error > error_max)
+ error_max = error;
+
+ iterations++;
}
- usec -= (sleep_ms * 1000);
- if(counter) rrdset_next(st);
- rrddim_set(st, "jitter", usec);
- rrdset_done(st);
+ if(netdata_exit) break;
+
+ if(iterations) {
+ if (likely(counter)) rrdset_next(st);
+ rrddim_set_by_pointer(st, rd_min, error_min);
+ rrddim_set_by_pointer(st, rd_max, error_max);
+ rrddim_set_by_pointer(st, rd_avg, error_total / iterations);
+ rrdset_done(st);
+ }
}
info("IDLEJITTER thread exiting");
diff --git a/src/plugin_proc.c b/src/plugin_proc.c
index 2ca77491..e64f5739 100644
--- a/src/plugin_proc.c
+++ b/src/plugin_proc.c
@@ -32,7 +32,7 @@ static struct proc_module {
// network metrics
{ .name = "/proc/net/dev", .dim = "netdev", .func = do_proc_net_dev },
- { .name = "/proc/net/netstat", .dim = "netstat", .func = do_proc_net_netstat },
+ { .name = "/proc/net/netstat", .dim = "netstat", .func = do_proc_net_netstat }, // this has to be before /proc/net/snmp, because there is a shared metric
{ .name = "/proc/net/snmp", .dim = "snmp", .func = do_proc_net_snmp },
{ .name = "/proc/net/snmp6", .dim = "snmp6", .func = do_proc_net_snmp6 },
{ .name = "/proc/net/softnet_stat", .dim = "softnet", .func = do_proc_net_softnet_stat },
@@ -49,6 +49,9 @@ static struct proc_module {
{ .name = "/proc/net/rpc/nfsd", .dim = "nfsd", .func = do_proc_net_rpc_nfsd },
{ .name = "/proc/net/rpc/nfs", .dim = "nfs", .func = do_proc_net_rpc_nfs },
+ // ZFS metrics
+ { .name = "/proc/spl/kstat/zfs/arcstats", .dim = "zfs_arcstats", .func = do_proc_spl_kstat_zfs_arcstats },
+
// IPC metrics
{ .name = "ipc", .dim = "ipc", .func = do_ipc },
diff --git a/src/plugin_proc.h b/src/plugin_proc.h
index 5dee7853..688b23de 100644
--- a/src/plugin_proc.h
+++ b/src/plugin_proc.h
@@ -25,7 +25,11 @@ extern int do_proc_net_softnet_stat(int update_every, usec_t dt);
extern int do_proc_uptime(int update_every, usec_t dt);
extern int do_proc_sys_devices_system_edac_mc(int update_every, usec_t dt);
extern int do_proc_sys_devices_system_node(int update_every, usec_t dt);
+extern int do_proc_spl_kstat_zfs_arcstats(int update_every, usec_t dt);
extern int get_numa_node_count(void);
+// metrics that need to be shared among data collectors
+extern unsigned long long tcpext_TCPSynRetrans;
+
#endif /* NETDATA_PLUGIN_PROC_H */
diff --git a/src/plugin_proc_diskspace.c b/src/plugin_proc_diskspace.c
index 37133e04..750086a2 100644
--- a/src/plugin_proc_diskspace.c
+++ b/src/plugin_proc_diskspace.c
@@ -45,7 +45,7 @@ struct mount_point_metadata {
static DICTIONARY *dict_mountpoints = NULL;
-#define rrdset_obsolete_and_pointer_null(st) do { if(st) { rrdset_flag_set(st, RRDSET_FLAG_OBSOLETE); st = NULL; } } while(st)
+#define rrdset_obsolete_and_pointer_null(st) do { if(st) { rrdset_is_obsolete(st); st = NULL; } } while(st)
int mount_point_cleanup(void *entry, void *data) {
(void)data;
@@ -125,6 +125,33 @@ static inline void do_disk_space_stats(struct mountinfo *mi, int update_every) {
def_inodes = CONFIG_BOOLEAN_NO;
}
+ // check if the mount point is a directory #2407
+ {
+ struct stat bs;
+ if(stat(mi->mount_point, &bs) == -1) {
+ error("DISKSPACE: Cannot stat() mount point '%s' (disk '%s', filesystem '%s', root '%s')."
+ , mi->mount_point
+ , disk
+ , mi->filesystem?mi->filesystem:""
+ , mi->root?mi->root:""
+ );
+ def_space = CONFIG_BOOLEAN_NO;
+ def_inodes = CONFIG_BOOLEAN_NO;
+ }
+ else {
+ if((bs.st_mode & S_IFMT) != S_IFDIR) {
+ error("DISKSPACE: Mount point '%s' (disk '%s', filesystem '%s', root '%s') is not a directory."
+ , mi->mount_point
+ , disk
+ , mi->filesystem?mi->filesystem:""
+ , mi->root?mi->root:""
+ );
+ def_space = CONFIG_BOOLEAN_NO;
+ def_inodes = CONFIG_BOOLEAN_NO;
+ }
+ }
+ }
+
do_space = config_get_boolean_ondemand(var_name, "space usage", def_space);
do_inodes = config_get_boolean_ondemand(var_name, "inodes usage", def_inodes);
@@ -161,7 +188,7 @@ static inline void do_disk_space_stats(struct mountinfo *mi, int update_every) {
struct statvfs buff_statvfs;
if (statvfs(mi->mount_point, &buff_statvfs) < 0) {
if(!m->shown_error) {
- error("Failed statvfs() for '%s' (disk '%s', filesystem '%s', root '%s')"
+ error("DISKSPACE: failed to statvfs() mount point '%s' (disk '%s', filesystem '%s', root '%s')"
, mi->mount_point
, disk
, mi->filesystem?mi->filesystem:""
@@ -188,7 +215,7 @@ static inline void do_disk_space_stats(struct mountinfo *mi, int update_every) {
#ifdef NETDATA_INTERNAL_CHECKS
if(unlikely(btotal != bavail + breserved_root + bused))
- error("Disk block statistics for '%s' (disk '%s') do not sum up: total = %llu, available = %llu, reserved = %llu, used = %llu", mi->mount_point, disk, (unsigned long long)btotal, (unsigned long long)bavail, (unsigned long long)breserved_root, (unsigned long long)bused);
+ error("DISKSPACE: disk block statistics for '%s' (disk '%s') do not sum up: total = %llu, available = %llu, reserved = %llu, used = %llu", mi->mount_point, disk, (unsigned long long)btotal, (unsigned long long)bavail, (unsigned long long)breserved_root, (unsigned long long)bused);
#endif
// --------------------------------------------------------------------------
@@ -201,7 +228,7 @@ static inline void do_disk_space_stats(struct mountinfo *mi, int update_every) {
#ifdef NETDATA_INTERNAL_CHECKS
if(unlikely(btotal != bavail + breserved_root + bused))
- error("Disk inode statistics for '%s' (disk '%s') do not sum up: total = %llu, available = %llu, reserved = %llu, used = %llu", mi->mount_point, disk, (unsigned long long)ftotal, (unsigned long long)favail, (unsigned long long)freserved_root, (unsigned long long)fused);
+ error("DISKSPACE: disk inode statistics for '%s' (disk '%s') do not sum up: total = %llu, available = %llu, reserved = %llu, used = %llu", mi->mount_point, disk, (unsigned long long)ftotal, (unsigned long long)favail, (unsigned long long)freserved_root, (unsigned long long)fused);
#endif
// --------------------------------------------------------------------------
@@ -294,10 +321,10 @@ void *proc_diskspace_main(void *ptr) {
info("DISKSPACE thread created with task id %d", gettid());
if(pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, NULL) != 0)
- error("Cannot set pthread cancel type to DEFERRED.");
+ error("DISKSPACE: Cannot set pthread cancel type to DEFERRED.");
if(pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL) != 0)
- error("Cannot set pthread cancel state to ENABLE.");
+ error("DISKSPACE: Cannot set pthread cancel state to ENABLE.");
int vdo_cpu_netdata = config_get_boolean("plugin:proc", "netdata server resources", 1);
@@ -334,7 +361,7 @@ void *proc_diskspace_main(void *ptr) {
struct mountinfo *mi;
for(mi = disk_mountinfo_root; mi; mi = mi->next) {
- if(unlikely(mi->flags & (MOUNTINFO_IS_DUMMY | MOUNTINFO_IS_BIND | MOUNTINFO_IS_SAME_DEV | MOUNTINFO_NO_STAT | MOUNTINFO_NO_SIZE)))
+ if(unlikely(mi->flags & (MOUNTINFO_IS_DUMMY | MOUNTINFO_IS_BIND)))
continue;
do_disk_space_stats(mi, update_every);
diff --git a/src/plugin_tc.c b/src/plugin_tc.c
index 7dcfedb3..6bf5782a 100644
--- a/src/plugin_tc.c
+++ b/src/plugin_tc.c
@@ -713,7 +713,7 @@ static inline void tc_device_free_all()
tc_device_free(tc_device_root);
}
-#define MAX_WORDS 20
+#define PLUGINSD_MAX_WORDS 20
static inline int tc_space(char c) {
switch(c) {
@@ -779,7 +779,7 @@ void *tc_main(void *ptr) {
RRDSET *stcpu = NULL, *sttime = NULL;
char buffer[TC_LINE_MAX+1] = "";
- char *words[MAX_WORDS] = { NULL };
+ char *words[PLUGINSD_MAX_WORDS] = { NULL };
uint32_t BEGIN_HASH = simple_hash("BEGIN");
uint32_t END_HASH = simple_hash("END");
@@ -822,7 +822,7 @@ void *tc_main(void *ptr) {
buffer[TC_LINE_MAX] = '\0';
// debug(D_TC_LOOP, "TC: read '%s'", buffer);
- tc_split_words(buffer, words, MAX_WORDS);
+ tc_split_words(buffer, words, PLUGINSD_MAX_WORDS);
if(unlikely(!words[0] || !*words[0])) {
// debug(D_TC_LOOP, "empty line");
diff --git a/src/plugins_d.c b/src/plugins_d.c
index 7fa19eaf..9eb10277 100644
--- a/src/plugins_d.c
+++ b/src/plugins_d.c
@@ -2,8 +2,6 @@
struct plugind *pluginsd_root = NULL;
-#define MAX_WORDS 20
-
static inline int pluginsd_space(char c) {
switch(c) {
case ' ':
@@ -18,7 +16,8 @@ static inline int pluginsd_space(char c) {
}
}
-static int pluginsd_split_words(char *str, char **words, int max_words) {
+// split a line of text into words, respecting quotes
+inline int pluginsd_split_words(char *str, char **words, int max_words) {
char *s = str, quote = 0;
int i = 0, j;
@@ -95,14 +94,13 @@ inline size_t pluginsd_process(RRDHOST *host, struct plugind *cd, FILE *fp, int
char line[PLUGINSD_LINE_MAX + 1];
- char *words[MAX_WORDS] = { NULL };
- /* uint32_t HOST_HASH = simple_hash("HOST"); */
- uint32_t BEGIN_HASH = simple_hash("BEGIN");
- uint32_t END_HASH = simple_hash("END");
- uint32_t FLUSH_HASH = simple_hash("FLUSH");
- uint32_t CHART_HASH = simple_hash("CHART");
- uint32_t DIMENSION_HASH = simple_hash("DIMENSION");
- uint32_t DISABLE_HASH = simple_hash("DISABLE");
+ char *words[PLUGINSD_MAX_WORDS] = { NULL };
+ uint32_t BEGIN_HASH = simple_hash(PLUGINSD_KEYWORD_BEGIN);
+ uint32_t END_HASH = simple_hash(PLUGINSD_KEYWORD_END);
+ uint32_t FLUSH_HASH = simple_hash(PLUGINSD_KEYWORD_FLUSH);
+ uint32_t CHART_HASH = simple_hash(PLUGINSD_KEYWORD_CHART);
+ uint32_t DIMENSION_HASH = simple_hash(PLUGINSD_KEYWORD_DIMENSION);
+ uint32_t DISABLE_HASH = simple_hash(PLUGINSD_KEYWORD_DISABLE);
RRDSET *st = NULL;
uint32_t hash;
@@ -130,7 +128,7 @@ inline size_t pluginsd_process(RRDHOST *host, struct plugind *cd, FILE *fp, int
// debug(D_PLUGINSD, "PLUGINSD: %s: %s", cd->filename, line);
- int w = pluginsd_split_words(line, words, MAX_WORDS);
+ int w = pluginsd_split_words(line, words, PLUGINSD_MAX_WORDS);
char *s = words[0];
if(unlikely(!s || !*s || !w)) {
// debug(D_PLUGINSD, "PLUGINSD: empty line");
@@ -159,9 +157,18 @@ inline size_t pluginsd_process(RRDHOST *host, struct plugind *cd, FILE *fp, int
if(unlikely(rrdset_flag_check(st, RRDSET_FLAG_DEBUG))) debug(D_PLUGINSD, "PLUGINSD: '%s' is setting dimension %s/%s to %s", cd->fullfilename, st->id, dimension, value?value:"<nothing>");
- if(value) rrddim_set(st, dimension, strtoll(value, NULL, 0));
+ if(value) {
+ RRDDIM *rd = rrddim_find(st, dimension);
+ if(unlikely(!rd)) {
+ error("PLUGINSD: '%s' is requesting a SET to dimension with id '%s' on stats '%s' (%s) on host '%s', which does not exist. Disabling it.", cd->fullfilename, dimension, st->name, st->id, st->rrdhost->hostname);
+ enabled = 0;
+ break;
+ }
+ else
+ rrddim_set_by_pointer(st, rd, strtoll(value, NULL, 0));
+ }
}
- else if(likely(hash == BEGIN_HASH && !strcmp(s, "BEGIN"))) {
+ else if(likely(hash == BEGIN_HASH && !strcmp(s, PLUGINSD_KEYWORD_BEGIN))) {
char *id = words[1];
char *microseconds_txt = words[2];
@@ -191,7 +198,7 @@ inline size_t pluginsd_process(RRDHOST *host, struct plugind *cd, FILE *fp, int
else rrdset_next(st);
}
}
- else if(likely(hash == END_HASH && !strcmp(s, "END"))) {
+ else if(likely(hash == END_HASH && !strcmp(s, PLUGINSD_KEYWORD_END))) {
if(unlikely(!st)) {
error("PLUGINSD: '%s' is requesting an END, without a BEGIN on host '%s'. Disabling it.", cd->fullfilename, host->hostname);
enabled = 0;
@@ -205,28 +212,11 @@ inline size_t pluginsd_process(RRDHOST *host, struct plugind *cd, FILE *fp, int
count++;
}
-/* else if(likely(hash == HOST_HASH && !strcmp(s, "HOST"))) {
- char *guid = words[1];
- char *hostname = words[2];
-
- if(unlikely(!guid || !*guid)) {
- error("PLUGINSD: '%s' is requesting HOST with guid '%s' and hostname '%s', without a guid. Disabling it.", cd->fullfilename, guid?guid:"", hostname?hostname:"");
- enabled = 0;
- break;
- }
- if(unlikely(!hostname || !*hostname)) {
- error("PLUGINSD: '%s' is requesting HOST with guid '%s' and hostname '%s', without a hostname. Disabling it.", cd->fullfilename, guid?guid:"", hostname?hostname:"");
- enabled = 0;
- break;
- }
-
- host = rrdhost_find_or_create(hostname, guid);
- } */
- else if(likely(hash == FLUSH_HASH && !strcmp(s, "FLUSH"))) {
+ else if(likely(hash == FLUSH_HASH && !strcmp(s, PLUGINSD_KEYWORD_FLUSH))) {
debug(D_PLUGINSD, "PLUGINSD: '%s' is requesting a FLUSH", cd->fullfilename);
st = NULL;
}
- else if(likely(hash == CHART_HASH && !strcmp(s, "CHART"))) {
+ else if(likely(hash == CHART_HASH && !strcmp(s, PLUGINSD_KEYWORD_CHART))) {
int noname = 0;
st = NULL;
@@ -247,6 +237,7 @@ inline size_t pluginsd_process(RRDHOST *host, struct plugind *cd, FILE *fp, int
char *chart = words[7];
char *priority_s = words[8];
char *update_every_s = words[9];
+ char *options = words[10];
if(unlikely(!type || !*type || !id || !*id)) {
error("PLUGINSD: '%s' is requesting a CHART, without a type.id, on host '%s'. Disabling it.", cd->fullfilename, host->hostname);
@@ -284,8 +275,25 @@ inline size_t pluginsd_process(RRDHOST *host, struct plugind *cd, FILE *fp, int
cd->update_every = update_every;
}
else debug(D_PLUGINSD, "PLUGINSD: Chart '%s' already exists. Not adding it again.", st->id);
+
+ if(options && *options) {
+ if(strstr(options, "obsolete"))
+ rrdset_is_obsolete(st);
+ else
+ rrdset_isnot_obsolete(st);
+
+ if(strstr(options, "detail"))
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+ else
+ rrdset_flag_clear(st, RRDSET_FLAG_DETAIL);
+
+ if(strstr(options, "store_first"))
+ rrdset_flag_set(st, RRDSET_FLAG_STORE_FIRST);
+ else
+ rrdset_flag_clear(st, RRDSET_FLAG_STORE_FIRST);
+ }
}
- else if(likely(hash == DIMENSION_HASH && !strcmp(s, "DIMENSION"))) {
+ else if(likely(hash == DIMENSION_HASH && !strcmp(s, PLUGINSD_KEYWORD_DIMENSION))) {
char *id = words[1];
char *name = words[2];
char *algorithm = words[3];
@@ -326,21 +334,16 @@ inline size_t pluginsd_process(RRDHOST *host, struct plugind *cd, FILE *fp, int
, options?options:""
);
- RRDDIM *rd = rrddim_find(st, id);
- if(unlikely(!rd)) {
- rd = rrddim_add(st, id, name, multiplier, divisor, rrd_algorithm_id(algorithm));
- rrddim_flag_clear(rd, RRDDIM_FLAG_HIDDEN);
- rrddim_flag_clear(rd, RRDDIM_FLAG_DONT_DETECT_RESETS_OR_OVERFLOWS);
- if(options && *options) {
- if(strstr(options, "hidden") != NULL) rrddim_flag_set(rd, RRDDIM_FLAG_HIDDEN);
- if(strstr(options, "noreset") != NULL) rrddim_flag_set(rd, RRDDIM_FLAG_DONT_DETECT_RESETS_OR_OVERFLOWS);
- if(strstr(options, "nooverflow") != NULL) rrddim_flag_set(rd, RRDDIM_FLAG_DONT_DETECT_RESETS_OR_OVERFLOWS);
- }
+ RRDDIM *rd = rrddim_add(st, id, name, multiplier, divisor, rrd_algorithm_id(algorithm));
+ rrddim_flag_clear(rd, RRDDIM_FLAG_HIDDEN);
+ rrddim_flag_clear(rd, RRDDIM_FLAG_DONT_DETECT_RESETS_OR_OVERFLOWS);
+ if(options && *options) {
+ if(strstr(options, "hidden") != NULL) rrddim_flag_set(rd, RRDDIM_FLAG_HIDDEN);
+ if(strstr(options, "noreset") != NULL) rrddim_flag_set(rd, RRDDIM_FLAG_DONT_DETECT_RESETS_OR_OVERFLOWS);
+ if(strstr(options, "nooverflow") != NULL) rrddim_flag_set(rd, RRDDIM_FLAG_DONT_DETECT_RESETS_OR_OVERFLOWS);
}
- else if(unlikely(rrdset_flag_check(st, RRDSET_FLAG_DEBUG)))
- debug(D_PLUGINSD, "PLUGINSD: dimension %s/%s already exists. Not adding it again.", st->id, id);
}
- else if(unlikely(hash == DISABLE_HASH && !strcmp(s, "DISABLE"))) {
+ else if(unlikely(hash == DISABLE_HASH && !strcmp(s, PLUGINSD_KEYWORD_DISABLE))) {
info("PLUGINSD: '%s' called DISABLE. Disabling it.", cd->fullfilename);
enabled = 0;
break;
diff --git a/src/plugins_d.h b/src/plugins_d.h
index d34c4030..595a515c 100644
--- a/src/plugins_d.h
+++ b/src/plugins_d.h
@@ -4,7 +4,16 @@
#define PLUGINSD_FILE_SUFFIX ".plugin"
#define PLUGINSD_FILE_SUFFIX_LEN strlen(PLUGINSD_FILE_SUFFIX)
#define PLUGINSD_CMD_MAX (FILENAME_MAX*2)
+
+#define PLUGINSD_KEYWORD_CHART "CHART"
+#define PLUGINSD_KEYWORD_DIMENSION "DIMENSION"
+#define PLUGINSD_KEYWORD_BEGIN "BEGIN"
+#define PLUGINSD_KEYWORD_END "END"
+#define PLUGINSD_KEYWORD_FLUSH "FLUSH"
+#define PLUGINSD_KEYWORD_DISABLE "DISABLE"
+
#define PLUGINSD_LINE_MAX 1024
+#define PLUGINSD_MAX_WORDS 20
struct plugind {
char id[CONFIG_MAX_NAME+1]; // config node id
@@ -35,5 +44,6 @@ extern struct plugind *pluginsd_root;
extern void *pluginsd_main(void *ptr);
extern size_t pluginsd_process(RRDHOST *host, struct plugind *cd, FILE *fp, int trust_durations);
+extern int pluginsd_split_words(char *str, char **words, int max_words);
#endif /* NETDATA_PLUGINS_D_H */
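The PLUGINSD_KEYWORD_* macros, the new words[10] options on CHART, and the DIMENSION options together describe the line protocol an external plugin writes to its stdout. A minimal sketch of such a stream, with made-up chart and dimension names; quoting works because pluginsd_split_words() respects quotes, and SET follows the documented 'SET id = value' form:

    CHART example.random '' "Random Numbers" value random random line 90000 1 "detail store_first"
    DIMENSION a '' absolute 1 1 hidden
    DIMENSION b '' absolute 1 1
    BEGIN example.random
    SET a = 12
    SET b = 34
    END

The options words are matched with strstr(), so "obsolete", "detail" and "store_first" (for CHART) and "hidden", "noreset", "nooverflow" (for DIMENSION) can be combined inside one quoted word.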
diff --git a/src/proc_diskstats.c b/src/proc_diskstats.c
index a1b4072d..d3fed5a6 100644
--- a/src/proc_diskstats.c
+++ b/src/proc_diskstats.c
@@ -10,7 +10,8 @@
#define DELAULT_EXLUDED_DISKS "loop* ram*"
static struct disk {
- char *disk; // the name of the disk (sda, sdb, etc)
+ char *disk; // the name of the disk (sda, sdb, etc, after being looked up)
+ char *device; // the device of the disk (before being looked up)
unsigned long major;
unsigned long minor;
int sector_size;
@@ -44,12 +45,75 @@ static struct disk {
struct disk *next;
} *disk_root = NULL;
-#define rrdset_obsolete_and_pointer_null(st) do { if(st) { rrdset_flag_set(st, RRDSET_FLAG_OBSOLETE); st = NULL; } } while(st)
+#define rrdset_obsolete_and_pointer_null(st) do { if(st) { rrdset_is_obsolete(st); st = NULL; } } while(st)
+
+static char *path_to_get_hw_sector_size = NULL;
+static char *path_to_get_hw_sector_size_partitions = NULL;
+static char *path_to_find_block_device = NULL;
+static char *path_to_device_mapper = NULL;
+
+static inline char *get_disk_name(unsigned long major, unsigned long minor, char *disk) {
+ static int enabled = 1;
+
+ if(!enabled) goto cleanup;
+
+ char filename[FILENAME_MAX + 1];
+ char link[FILENAME_MAX + 1];
+
+ DIR *dir = opendir(path_to_device_mapper);
+ if (!dir) {
+ error("DEVICE-MAPPER ('%s', %lu:%lu): Cannot open directory '%s'. Disabling device-mapper support.", disk, major, minor, path_to_device_mapper);
+ enabled = 0;
+ goto cleanup;
+ }
+
+ struct dirent *de = NULL;
+ while ((de = readdir(dir))) {
+ if(de->d_type != DT_LNK) continue;
+
+ snprintfz(filename, FILENAME_MAX, "%s/%s", path_to_device_mapper, de->d_name);
+ ssize_t len = readlink(filename, link, FILENAME_MAX);
+ if(len <= 0) {
+ error("DEVICE-MAPPER ('%s', %lu:%lu): Cannot read link '%s'.", disk, major, minor, filename);
+ continue;
+ }
+
+ link[len] = '\0';
+ if(link[0] != '/')
+ snprintfz(filename, FILENAME_MAX, "%s/%s", path_to_device_mapper, link);
+ else
+ strncpyz(filename, link, FILENAME_MAX);
+
+ struct stat sb;
+ if(stat(filename, &sb) == -1) {
+ error("DEVICE-MAPPER ('%s', %lu:%lu): Cannot stat() file '%s'.", disk, major, minor, filename);
+ continue;
+ }
+
+ if((sb.st_mode & S_IFMT) != S_IFBLK) {
+ // info("DEVICE-MAPPER ('%s', %lu:%lu): file '%s' is not a block device.", disk, major, minor, filename);
+ continue;
+ }
+
+ if(major(sb.st_rdev) != major || minor(sb.st_rdev) != minor) {
+ // info("DEVICE-MAPPER ('%s', %lu:%lu): filename '%s' does not match %lu:%lu.", disk, major, minor, filename, (unsigned long)major(sb.st_rdev), (unsigned long)minor(sb.st_rdev));
+ continue;
+ }
+
+ // info("DEVICE-MAPPER ('%s', %lu:%lu): filename '%s' matches.", disk, major, minor, filename);
+
+ strncpy(link, de->d_name, FILENAME_MAX);
+ netdata_fix_chart_name(link);
+ disk = link;
+ break;
+ }
+ closedir(dir);
+
+cleanup:
+ return strdupz(disk);
+}
static struct disk *get_disk(unsigned long major, unsigned long minor, char *disk) {
- static char path_to_get_hw_sector_size[FILENAME_MAX + 1] = "";
- static char path_to_get_hw_sector_size_partitions[FILENAME_MAX + 1] = "";
- static char path_find_block_device[FILENAME_MAX + 1] = "";
static struct mountinfo *disk_mountinfo_root = NULL;
struct disk *d;
@@ -66,7 +130,8 @@ static struct disk *get_disk(unsigned long major, unsigned long minor, char *dis
// create a new disk structure
d = (struct disk *)callocz(1, sizeof(struct disk));
- d->disk = strdupz(disk);
+ d->disk = get_disk_name(major, minor, disk);
+ d->device = strdupz(disk);
d->major = major;
d->minor = minor;
d->type = DISK_TYPE_PHYSICAL; // Default type. Changed later if not correct.
@@ -83,27 +148,17 @@ static struct disk *get_disk(unsigned long major, unsigned long minor, char *dis
last->next = d;
}
- // ------------------------------------------------------------------------
- // find the type of the device
-
- char buffer[FILENAME_MAX + 1];
-
- // get the default path for finding info about the block device
- if(unlikely(!path_find_block_device[0])) {
- snprintfz(buffer, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/dev/block/%lu:%lu/%s");
- snprintfz(path_find_block_device, FILENAME_MAX, "%s", config_get(CONFIG_SECTION_DISKSTATS, "path to get block device infos", buffer));
- }
-
// find if it is a partition
// by checking if /sys/dev/block/MAJOR:MINOR/partition is readable.
- snprintfz(buffer, FILENAME_MAX, path_find_block_device, major, minor, "partition");
+ char buffer[FILENAME_MAX + 1];
+ snprintfz(buffer, FILENAME_MAX, path_to_find_block_device, major, minor, "partition");
if(likely(access(buffer, R_OK) == 0)) {
d->type = DISK_TYPE_PARTITION;
}
else {
// find if it is a container
// by checking if /sys/dev/block/MAJOR:MINOR/slaves has entries
- snprintfz(buffer, FILENAME_MAX, path_find_block_device, major, minor, "slaves/");
+ snprintfz(buffer, FILENAME_MAX, path_to_find_block_device, major, minor, "slaves/");
DIR *dirp = opendir(buffer);
if(likely(dirp != NULL)) {
struct dirent *dp;
@@ -143,18 +198,9 @@ static struct disk *get_disk(unsigned long major, unsigned long minor, char *dis
// ------------------------------------------------------------------------
// find the disk sector size
- if(unlikely(!path_to_get_hw_sector_size[0])) {
- snprintfz(buffer, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/block/%s/queue/hw_sector_size");
- snprintfz(path_to_get_hw_sector_size, FILENAME_MAX, "%s", config_get(CONFIG_SECTION_DISKSTATS, "path to get h/w sector size", buffer));
- }
- if(unlikely(!path_to_get_hw_sector_size_partitions[0])) {
- snprintfz(buffer, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/dev/block/%lu:%lu/subsystem/%s/../queue/hw_sector_size");
- snprintfz(path_to_get_hw_sector_size_partitions, FILENAME_MAX, "%s", config_get(CONFIG_SECTION_DISKSTATS, "path to get h/w sector size for partitions", buffer));
- }
-
{
char tf[FILENAME_MAX + 1], *t;
- strncpyz(tf, d->disk, FILENAME_MAX);
+ strncpyz(tf, d->device, FILENAME_MAX);
// replace all / with !
for(t = tf; *t ;t++)
@@ -173,15 +219,15 @@ static struct disk *get_disk(unsigned long major, unsigned long minor, char *dis
if(likely(tmp)) {
d->sector_size = str2i(tmp);
if(unlikely(d->sector_size <= 0)) {
- error("Invalid sector size %d for device %s in %s. Assuming 512.", d->sector_size, d->disk, buffer);
+ error("Invalid sector size %d for device %s in %s. Assuming 512.", d->sector_size, d->device, buffer);
d->sector_size = 512;
}
}
- else error("Cannot read data for sector size for device %s from %s. Assuming 512.", d->disk, buffer);
+ else error("Cannot read data for sector size for device %s from %s. Assuming 512.", d->device, buffer);
fclose(fpss);
}
- else error("Cannot read sector size for device %s from %s. Assuming 512.", d->disk, buffer);
+ else error("Cannot read sector size for device %s from %s. Assuming 512.", d->device, buffer);
}
return d;
@@ -230,6 +276,8 @@ int do_proc_diskstats(int update_every, usec_t dt) {
globals_initialized = 0;
if(unlikely(!globals_initialized)) {
+ globals_initialized = 1;
+
global_enable_new_disks_detected_at_runtime = config_get_boolean(CONFIG_SECTION_DISKSTATS, "enable new disks detected at runtime", global_enable_new_disks_detected_at_runtime);
global_enable_performance_for_physical_disks = config_get_boolean_ondemand(CONFIG_SECTION_DISKSTATS, "performance metrics for physical disks", global_enable_performance_for_physical_disks);
global_enable_performance_for_virtual_disks = config_get_boolean_ondemand(CONFIG_SECTION_DISKSTATS, "performance metrics for virtual disks", global_enable_performance_for_virtual_disks);
@@ -243,7 +291,19 @@ int do_proc_diskstats(int update_every, usec_t dt) {
global_do_util = config_get_boolean_ondemand(CONFIG_SECTION_DISKSTATS, "utilization percentage for all disks", global_do_util);
global_do_backlog = config_get_boolean_ondemand(CONFIG_SECTION_DISKSTATS, "backlog for all disks", global_do_backlog);
- globals_initialized = 1;
+ char buffer[FILENAME_MAX + 1];
+
+ snprintfz(buffer, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/dev/block/%lu:%lu/%s");
+ path_to_find_block_device = config_get(CONFIG_SECTION_DISKSTATS, "path to get block device infos", buffer);
+
+ snprintfz(buffer, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/block/%s/queue/hw_sector_size");
+ path_to_get_hw_sector_size = config_get(CONFIG_SECTION_DISKSTATS, "path to get h/w sector size", buffer);
+
+ snprintfz(buffer, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/dev/block/%lu:%lu/subsystem/%s/../queue/hw_sector_size");
+ path_to_get_hw_sector_size_partitions = config_get(CONFIG_SECTION_DISKSTATS, "path to get h/w sector size for partitions", buffer);
+
+ snprintfz(buffer, FILENAME_MAX, "%s/dev/mapper", netdata_configured_host_prefix);
+ path_to_device_mapper = config_get(CONFIG_SECTION_DISKSTATS, "path to device mapper", buffer);
}
// --------------------------------------------------------------------------
@@ -339,7 +399,7 @@ int do_proc_diskstats(int update_every, usec_t dt) {
// Set its family based on mount point
char *family = d->mount_point;
- if(!family) family = disk;
+ if(!family) family = d->disk;
// --------------------------------------------------------------------------
@@ -359,11 +419,11 @@ int do_proc_diskstats(int update_every, usec_t dt) {
int def_enable = global_enable_new_disks_detected_at_runtime;
- if(def_enable != CONFIG_BOOLEAN_NO && simple_pattern_matches(excluded_disks, disk))
+ if(def_enable != CONFIG_BOOLEAN_NO && (simple_pattern_matches(excluded_disks, d->device) || simple_pattern_matches(excluded_disks, d->disk)))
def_enable = CONFIG_BOOLEAN_NO;
char var_name[4096 + 1];
- snprintfz(var_name, 4096, "plugin:proc:/proc/diskstats:%s", disk);
+ snprintfz(var_name, 4096, "plugin:proc:/proc/diskstats:%s", d->disk);
def_enable = config_get_boolean_ondemand(var_name, "enable", def_enable);
if(unlikely(def_enable == CONFIG_BOOLEAN_NO)) {
@@ -449,8 +509,8 @@ int do_proc_diskstats(int update_every, usec_t dt) {
if(unlikely(!d->st_io)) {
d->st_io = rrdset_create_localhost(
RRD_TYPE_DISK
- , disk
- , NULL
+ , d->device
+ , d->disk
, family
, "disk.io"
, "Disk I/O Bandwidth"
@@ -478,8 +538,8 @@ int do_proc_diskstats(int update_every, usec_t dt) {
if(unlikely(!d->st_ops)) {
d->st_ops = rrdset_create_localhost(
"disk_ops"
- , disk
- , NULL
+ , d->device
+ , d->disk
, family
, "disk.ops"
, "Disk Completed I/O Operations"
@@ -509,8 +569,8 @@ int do_proc_diskstats(int update_every, usec_t dt) {
if(unlikely(!d->st_qops)) {
d->st_qops = rrdset_create_localhost(
"disk_qops"
- , disk
- , NULL
+ , d->device
+ , d->disk
, family
, "disk.qops"
, "Disk Current I/O Operations"
@@ -538,8 +598,8 @@ int do_proc_diskstats(int update_every, usec_t dt) {
if(unlikely(!d->st_backlog)) {
d->st_backlog = rrdset_create_localhost(
"disk_backlog"
- , disk
- , NULL
+ , d->device
+ , d->disk
, family
, "disk.backlog"
, "Disk Backlog"
@@ -567,8 +627,8 @@ int do_proc_diskstats(int update_every, usec_t dt) {
if(unlikely(!d->st_util)) {
d->st_util = rrdset_create_localhost(
"disk_util"
- , disk
- , NULL
+ , d->device
+ , d->disk
, family
, "disk.util"
, "Disk Utilization Time"
@@ -596,8 +656,8 @@ int do_proc_diskstats(int update_every, usec_t dt) {
if(unlikely(!d->st_mops)) {
d->st_mops = rrdset_create_localhost(
"disk_mops"
- , disk
- , NULL
+ , d->device
+ , d->disk
, family
, "disk.mops"
, "Disk Merged Operations"
@@ -627,8 +687,8 @@ int do_proc_diskstats(int update_every, usec_t dt) {
if(unlikely(!d->st_iotime)) {
d->st_iotime = rrdset_create_localhost(
"disk_iotime"
- , disk
- , NULL
+ , d->device
+ , d->disk
, family
, "disk.iotime"
, "Disk Total I/O Time"
@@ -661,8 +721,8 @@ int do_proc_diskstats(int update_every, usec_t dt) {
if(unlikely(!d->st_await)) {
d->st_await = rrdset_create_localhost(
"disk_await"
- , disk
- , NULL
+ , d->device
+ , d->disk
, family
, "disk.await"
, "Average Completed I/O Operation Time"
@@ -690,8 +750,8 @@ int do_proc_diskstats(int update_every, usec_t dt) {
if(unlikely(!d->st_avgsz)) {
d->st_avgsz = rrdset_create_localhost(
"disk_avgsz"
- , disk
- , NULL
+ , d->device
+ , d->disk
, family
, "disk.avgsz"
, "Average Completed I/O Operation Bandwidth"
@@ -719,8 +779,8 @@ int do_proc_diskstats(int update_every, usec_t dt) {
if(unlikely(!d->st_svctm)) {
d->st_svctm = rrdset_create_localhost(
"disk_svctm"
- , disk
- , NULL
+ , d->device
+ , d->disk
, family
, "disk.svctm"
, "Average Service Time"
@@ -769,6 +829,7 @@ int do_proc_diskstats(int update_every, usec_t dt) {
}
freez(t->disk);
+ freez(t->device);
freez(t->mount_point);
freez(t);
}
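All four sysfs/device-mapper paths used above are now resolved once via config_get() instead of per-disk static buffers. A sketch of the resulting netdata.conf keys, assuming CONFIG_SECTION_DISKSTATS names the usual [plugin:proc:/proc/diskstats] section; the values shown are just the compiled-in defaults with an empty host prefix:

    [plugin:proc:/proc/diskstats]
        path to get block device infos = /sys/dev/block/%lu:%lu/%s
        path to get h/w sector size = /sys/block/%s/queue/hw_sector_size
        path to get h/w sector size for partitions = /sys/dev/block/%lu:%lu/subsystem/%s/../queue/hw_sector_size
        path to device mapper = /dev/mapper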
diff --git a/src/proc_loadavg.c b/src/proc_loadavg.c
index e7863f11..a48801b3 100644
--- a/src/proc_loadavg.c
+++ b/src/proc_loadavg.c
@@ -68,9 +68,10 @@ int do_proc_loadavg(int update_every, usec_t dt) {
rrddim_set(load_chart, "load5", (collected_number) (load5 * 1000));
rrddim_set(load_chart, "load15", (collected_number) (load15 * 1000));
rrdset_done(load_chart);
- }
- next_loadavg_dt = load_chart->update_every * USEC_PER_SEC;
+ next_loadavg_dt = load_chart->update_every * USEC_PER_SEC;
+ }
+ else next_loadavg_dt = MIN_LOADAVG_UPDATE_EVERY * USEC_PER_SEC;
}
else next_loadavg_dt -= dt;
diff --git a/src/proc_net_dev.c b/src/proc_net_dev.c
index 1b00758d..ee758899 100644
--- a/src/proc_net_dev.c
+++ b/src/proc_net_dev.c
@@ -73,13 +73,13 @@ static struct netdev *netdev_root = NULL, *netdev_last_used = NULL;
static size_t netdev_added = 0, netdev_found = 0;
static void netdev_free(struct netdev *d) {
- if(d->st_bandwidth) rrdset_flag_set(d->st_bandwidth, RRDSET_FLAG_OBSOLETE);
- if(d->st_packets) rrdset_flag_set(d->st_packets, RRDSET_FLAG_OBSOLETE);
- if(d->st_errors) rrdset_flag_set(d->st_errors, RRDSET_FLAG_OBSOLETE);
- if(d->st_drops) rrdset_flag_set(d->st_drops, RRDSET_FLAG_OBSOLETE);
- if(d->st_fifo) rrdset_flag_set(d->st_fifo, RRDSET_FLAG_OBSOLETE);
- if(d->st_compressed) rrdset_flag_set(d->st_compressed, RRDSET_FLAG_OBSOLETE);
- if(d->st_events) rrdset_flag_set(d->st_events, RRDSET_FLAG_OBSOLETE);
+ if(d->st_bandwidth) rrdset_is_obsolete(d->st_bandwidth);
+ if(d->st_packets) rrdset_is_obsolete(d->st_packets);
+ if(d->st_errors) rrdset_is_obsolete(d->st_errors);
+ if(d->st_drops) rrdset_is_obsolete(d->st_drops);
+ if(d->st_fifo) rrdset_is_obsolete(d->st_fifo);
+ if(d->st_compressed) rrdset_is_obsolete(d->st_compressed);
+ if(d->st_events) rrdset_is_obsolete(d->st_events);
netdev_added--;
freez(d->name);
diff --git a/src/proc_net_netstat.c b/src/proc_net_netstat.c
index 2677a6c1..322e51d1 100644
--- a/src/proc_net_netstat.c
+++ b/src/proc_net_netstat.c
@@ -1,5 +1,7 @@
#include "common.h"
+unsigned long long tcpext_TCPSynRetrans;
+
static void parse_line_pair(procfile *ff, ARL_BASE *base, size_t header_line, size_t values_line) {
size_t hwords = procfile_linewords(ff, header_line);
size_t vwords = procfile_linewords(ff, values_line);
@@ -94,6 +96,9 @@ int do_proc_net_netstat(int update_every, usec_t dt) {
// IPv4 TCP memory pressures
static unsigned long long tcpext_TCPMemoryPressures = 0;
+ // shared: tcpext_TCPSynRetrans
+
+
if(unlikely(!arl_ipext)) {
hash_ipext = simple_hash("IpExt");
hash_tcpext = simple_hash("TcpExt");
@@ -191,6 +196,9 @@ int do_proc_net_netstat(int update_every, usec_t dt) {
if(do_tcpext_memory != CONFIG_BOOLEAN_NO) {
arl_expect(arl_tcpext, "TCPMemoryPressures", &tcpext_TCPMemoryPressures);
}
+
+ // shared metrics
+ arl_expect(arl_tcpext, "TCPSynRetrans", &tcpext_TCPSynRetrans);
}
if(unlikely(!ff)) {
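tcpext_TCPSynRetrans is deliberately non-static: it is filled here while parsing /proc/net/netstat and then charted by the SNMP collector further down, so both files see the same counter without extra plumbing. The consumer side only needs the declaration (a sketch; where the real declaration lives is not shown in this patch):

    extern unsigned long long tcpext_TCPSynRetrans;  // written by do_proc_net_netstat()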
diff --git a/src/proc_net_snmp.c b/src/proc_net_snmp.c
index ba7b4001..7c0fd9b4 100644
--- a/src/proc_net_snmp.c
+++ b/src/proc_net_snmp.c
@@ -645,24 +645,35 @@ int do_proc_net_snmp(int update_every, usec_t dt) {
if(do_tcp_handshake) {
st = rrdset_find_localhost(RRD_TYPE_NET_SNMP ".tcphandshake");
if(!st) {
- st = rrdset_create_localhost(RRD_TYPE_NET_SNMP, "tcphandshake", NULL, "tcp", NULL
- , "IPv4 TCP Handshake Issues", "events/s", 2900, update_every
- , RRDSET_TYPE_LINE);
+ st = rrdset_create_localhost(
+ RRD_TYPE_NET_SNMP
+ , "tcphandshake"
+ , NULL
+ , "tcp"
+ , NULL
+ , "IPv4 TCP Handshake Issues"
+ , "events/s"
+ , 2900
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
- rrddim_add(st, "EstabResets", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "OutRsts", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "ActiveOpens", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "PassiveOpens", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "AttemptFails", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "EstabResets", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "OutRsts", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "ActiveOpens", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "PassiveOpens", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "AttemptFails", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st, "TCPSynRetrans", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
}
else rrdset_next(st);
- rrddim_set(st, "EstabResets", *tcp_EstabResets);
- rrddim_set(st, "OutRsts", *tcp_OutRsts);
- rrddim_set(st, "ActiveOpens", *tcp_ActiveOpens);
- rrddim_set(st, "PassiveOpens", *tcp_PassiveOpens);
- rrddim_set(st, "AttemptFails", *tcp_AttemptFails);
+ rrddim_set(st, "EstabResets", *tcp_EstabResets);
+ rrddim_set(st, "OutRsts", *tcp_OutRsts);
+ rrddim_set(st, "ActiveOpens", *tcp_ActiveOpens);
+ rrddim_set(st, "PassiveOpens", *tcp_PassiveOpens);
+ rrddim_set(st, "AttemptFails", *tcp_AttemptFails);
+ rrddim_set(st, "TCPSynRetrans", tcpext_TCPSynRetrans);
rrdset_done(st);
}
}
diff --git a/src/proc_net_snmp6.c b/src/proc_net_snmp6.c
index 8c4581c1..aa9ab220 100644
--- a/src/proc_net_snmp6.c
+++ b/src/proc_net_snmp6.c
@@ -311,8 +311,8 @@ int do_proc_net_snmp6(int update_every, usec_t dt) {
rrddim_set(st, "sent", Ip6OutRequests);
rrddim_set(st, "received", Ip6InReceives);
- rrddim_set(st, "forwarded", Ip6InDelivers);
- rrddim_set(st, "delivers", Ip6OutForwDatagrams);
+ rrddim_set(st, "forwarded", Ip6OutForwDatagrams);
+ rrddim_set(st, "delivers", Ip6InDelivers);
rrdset_done(st);
}
diff --git a/src/proc_net_softnet_stat.c b/src/proc_net_softnet_stat.c
index 40946a7a..b03a43c5 100644
--- a/src/proc_net_softnet_stat.c
+++ b/src/proc_net_softnet_stat.c
@@ -79,7 +79,7 @@ int do_proc_net_softnet_stat(int update_every, usec_t dt) {
st = rrdset_find_bytype_localhost("system", "softnet_stat");
if(unlikely(!st)) {
- st = rrdset_create_localhost("system", "softnet_stat", NULL, "softnet_stat", NULL, "System softnet_stat"
+ st = rrdset_create_localhost("system", "softnet_stat", NULL, "softnet_stat", "system.softnet_stat", "System softnet_stat"
, "events/s", 955, update_every, RRDSET_TYPE_LINE);
for(w = 0; w < allocated_columns ;w++)
if(unlikely(softnet_column_name(w)))
@@ -103,7 +103,7 @@ int do_proc_net_softnet_stat(int update_every, usec_t dt) {
char title[100+1];
snprintfz(title, 100, "CPU%zu softnet_stat", l);
- st = rrdset_create_localhost("cpu", id, NULL, "softnet_stat", NULL, title, "events/s", 4101 + l
+ st = rrdset_create_localhost("cpu", id, NULL, "softnet_stat", "cpu.softnet_stat", title, "events/s", 4101 + l
, update_every, RRDSET_TYPE_LINE);
for(w = 0; w < allocated_columns ;w++)
if(unlikely(softnet_column_name(w)))
diff --git a/src/proc_spl_kstat_zfs.c b/src/proc_spl_kstat_zfs.c
new file mode 100644
index 00000000..dee7a6b3
--- /dev/null
+++ b/src/proc_spl_kstat_zfs.c
@@ -0,0 +1,153 @@
+#include "common.h"
+#include "zfs_common.h"
+
+#define ZFS_PROC_ARCSTATS "/proc/spl/kstat/zfs/arcstats"
+
+struct arcstats arcstats = { 0 };
+
+int do_proc_spl_kstat_zfs_arcstats(int update_every, usec_t dt) {
+ (void)dt;
+
+ static procfile *ff = NULL;
+ static ARL_BASE *arl_base = NULL;
+
+ l2exist = -1;
+
+ if(unlikely(!arl_base)) {
+ arl_base = arl_create("arcstats", NULL, 60);
+
+ arl_expect(arl_base, "hits", &arcstats.hits);
+ arl_expect(arl_base, "misses", &arcstats.misses);
+ arl_expect(arl_base, "demand_data_hits", &arcstats.demand_data_hits);
+ arl_expect(arl_base, "demand_data_misses", &arcstats.demand_data_misses);
+ arl_expect(arl_base, "demand_metadata_hits", &arcstats.demand_metadata_hits);
+ arl_expect(arl_base, "demand_metadata_misses", &arcstats.demand_metadata_misses);
+ arl_expect(arl_base, "prefetch_data_hits", &arcstats.prefetch_data_hits);
+ arl_expect(arl_base, "prefetch_data_misses", &arcstats.prefetch_data_misses);
+ arl_expect(arl_base, "prefetch_metadata_hits", &arcstats.prefetch_metadata_hits);
+ arl_expect(arl_base, "prefetch_metadata_misses", &arcstats.prefetch_metadata_misses);
+ arl_expect(arl_base, "mru_hits", &arcstats.mru_hits);
+ arl_expect(arl_base, "mru_ghost_hits", &arcstats.mru_ghost_hits);
+ arl_expect(arl_base, "mfu_hits", &arcstats.mfu_hits);
+ arl_expect(arl_base, "mfu_ghost_hits", &arcstats.mfu_ghost_hits);
+ arl_expect(arl_base, "deleted", &arcstats.deleted);
+ arl_expect(arl_base, "mutex_miss", &arcstats.mutex_miss);
+ arl_expect(arl_base, "evict_skip", &arcstats.evict_skip);
+ arl_expect(arl_base, "evict_not_enough", &arcstats.evict_not_enough);
+ arl_expect(arl_base, "evict_l2_cached", &arcstats.evict_l2_cached);
+ arl_expect(arl_base, "evict_l2_eligible", &arcstats.evict_l2_eligible);
+ arl_expect(arl_base, "evict_l2_ineligible", &arcstats.evict_l2_ineligible);
+ arl_expect(arl_base, "evict_l2_skip", &arcstats.evict_l2_skip);
+ arl_expect(arl_base, "hash_elements", &arcstats.hash_elements);
+ arl_expect(arl_base, "hash_elements_max", &arcstats.hash_elements_max);
+ arl_expect(arl_base, "hash_collisions", &arcstats.hash_collisions);
+ arl_expect(arl_base, "hash_chains", &arcstats.hash_chains);
+ arl_expect(arl_base, "hash_chain_max", &arcstats.hash_chain_max);
+ arl_expect(arl_base, "p", &arcstats.p);
+ arl_expect(arl_base, "c", &arcstats.c);
+ arl_expect(arl_base, "c_min", &arcstats.c_min);
+ arl_expect(arl_base, "c_max", &arcstats.c_max);
+ arl_expect(arl_base, "size", &arcstats.size);
+ arl_expect(arl_base, "hdr_size", &arcstats.hdr_size);
+ arl_expect(arl_base, "data_size", &arcstats.data_size);
+ arl_expect(arl_base, "metadata_size", &arcstats.metadata_size);
+ arl_expect(arl_base, "other_size", &arcstats.other_size);
+ arl_expect(arl_base, "anon_size", &arcstats.anon_size);
+ arl_expect(arl_base, "anon_evictable_data", &arcstats.anon_evictable_data);
+ arl_expect(arl_base, "anon_evictable_metadata", &arcstats.anon_evictable_metadata);
+ arl_expect(arl_base, "mru_size", &arcstats.mru_size);
+ arl_expect(arl_base, "mru_evictable_data", &arcstats.mru_evictable_data);
+ arl_expect(arl_base, "mru_evictable_metadata", &arcstats.mru_evictable_metadata);
+ arl_expect(arl_base, "mru_ghost_size", &arcstats.mru_ghost_size);
+ arl_expect(arl_base, "mru_ghost_evictable_data", &arcstats.mru_ghost_evictable_data);
+ arl_expect(arl_base, "mru_ghost_evictable_metadata", &arcstats.mru_ghost_evictable_metadata);
+ arl_expect(arl_base, "mfu_size", &arcstats.mfu_size);
+ arl_expect(arl_base, "mfu_evictable_data", &arcstats.mfu_evictable_data);
+ arl_expect(arl_base, "mfu_evictable_metadata", &arcstats.mfu_evictable_metadata);
+ arl_expect(arl_base, "mfu_ghost_size", &arcstats.mfu_ghost_size);
+ arl_expect(arl_base, "mfu_ghost_evictable_data", &arcstats.mfu_ghost_evictable_data);
+ arl_expect(arl_base, "mfu_ghost_evictable_metadata", &arcstats.mfu_ghost_evictable_metadata);
+ arl_expect(arl_base, "l2_hits", &arcstats.l2_hits);
+ arl_expect(arl_base, "l2_misses", &arcstats.l2_misses);
+ arl_expect(arl_base, "l2_feeds", &arcstats.l2_feeds);
+ arl_expect(arl_base, "l2_rw_clash", &arcstats.l2_rw_clash);
+ arl_expect(arl_base, "l2_read_bytes", &arcstats.l2_read_bytes);
+ arl_expect(arl_base, "l2_write_bytes", &arcstats.l2_write_bytes);
+ arl_expect(arl_base, "l2_writes_sent", &arcstats.l2_writes_sent);
+ arl_expect(arl_base, "l2_writes_done", &arcstats.l2_writes_done);
+ arl_expect(arl_base, "l2_writes_error", &arcstats.l2_writes_error);
+ arl_expect(arl_base, "l2_writes_lock_retry", &arcstats.l2_writes_lock_retry);
+ arl_expect(arl_base, "l2_evict_lock_retry", &arcstats.l2_evict_lock_retry);
+ arl_expect(arl_base, "l2_evict_reading", &arcstats.l2_evict_reading);
+ arl_expect(arl_base, "l2_evict_l1cached", &arcstats.l2_evict_l1cached);
+ arl_expect(arl_base, "l2_free_on_write", &arcstats.l2_free_on_write);
+ arl_expect(arl_base, "l2_cdata_free_on_write", &arcstats.l2_cdata_free_on_write);
+ arl_expect(arl_base, "l2_abort_lowmem", &arcstats.l2_abort_lowmem);
+ arl_expect(arl_base, "l2_cksum_bad", &arcstats.l2_cksum_bad);
+ arl_expect(arl_base, "l2_io_error", &arcstats.l2_io_error);
+ arl_expect(arl_base, "l2_size", &arcstats.l2_size);
+ arl_expect(arl_base, "l2_asize", &arcstats.l2_asize);
+ arl_expect(arl_base, "l2_hdr_size", &arcstats.l2_hdr_size);
+ arl_expect(arl_base, "l2_compress_successes", &arcstats.l2_compress_successes);
+ arl_expect(arl_base, "l2_compress_zeros", &arcstats.l2_compress_zeros);
+ arl_expect(arl_base, "l2_compress_failures", &arcstats.l2_compress_failures);
+ arl_expect(arl_base, "memory_throttle_count", &arcstats.memory_throttle_count);
+ arl_expect(arl_base, "duplicate_buffers", &arcstats.duplicate_buffers);
+ arl_expect(arl_base, "duplicate_buffers_size", &arcstats.duplicate_buffers_size);
+ arl_expect(arl_base, "duplicate_reads", &arcstats.duplicate_reads);
+ arl_expect(arl_base, "memory_direct_count", &arcstats.memory_direct_count);
+ arl_expect(arl_base, "memory_indirect_count", &arcstats.memory_indirect_count);
+ arl_expect(arl_base, "arc_no_grow", &arcstats.arc_no_grow);
+ arl_expect(arl_base, "arc_tempreserve", &arcstats.arc_tempreserve);
+ arl_expect(arl_base, "arc_loaned_bytes", &arcstats.arc_loaned_bytes);
+ arl_expect(arl_base, "arc_prune", &arcstats.arc_prune);
+ arl_expect(arl_base, "arc_meta_used", &arcstats.arc_meta_used);
+ arl_expect(arl_base, "arc_meta_limit", &arcstats.arc_meta_limit);
+ arl_expect(arl_base, "arc_meta_max", &arcstats.arc_meta_max);
+ arl_expect(arl_base, "arc_meta_min", &arcstats.arc_meta_min);
+ arl_expect(arl_base, "arc_need_free", &arcstats.arc_need_free);
+ arl_expect(arl_base, "arc_sys_free", &arcstats.arc_sys_free);
+ }
+
+ if(unlikely(!ff)) {
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, ZFS_PROC_ARCSTATS);
+ ff = procfile_open(config_get("plugin:proc:" ZFS_PROC_ARCSTATS, "filename to monitor", filename), " \t:", PROCFILE_FLAG_DEFAULT);
+ if(unlikely(!ff))
+ return 1;
+ }
+
+ ff = procfile_readall(ff);
+ if(unlikely(!ff))
+ return 0; // we return 0, so that we will retry to open it next time
+
+ size_t lines = procfile_lines(ff), l;
+
+ arl_begin(arl_base);
+
+ for(l = 0; l < lines ;l++) {
+ size_t words = procfile_linewords(ff, l);
+ if(unlikely(words < 3)) {
+ if(unlikely(words)) error("Cannot read " ZFS_PROC_ARCSTATS " line %zu. Expected 3 params, read %zu.", l, words);
+ continue;
+ }
+
+ const char *key = procfile_lineword(ff, l, 0);
+ const char *value = procfile_lineword(ff, l, 2);
+
+ if(unlikely(l2exist == -1)) {
+ if(key[0] == 'l' && key[1] == '2' && key[2] == '_')
+ l2exist = 1;
+ }
+
+ if(unlikely(arl_check(arl_base, key, value))) break;
+ }
+
+ if(unlikely(l2exist == -1))
+ l2exist = 0;
+
+ generate_charts_arcstats(update_every);
+ generate_charts_arc_summary(update_every);
+
+ return 0;
+}
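The new collector is one long instance of the ARL pattern: bind each expected keyword to the address that should receive its value once, then let arl_check() fill the targets while scanning the file. A minimal sketch of the same flow, using only the calls that appear above; the keys, the variables, and the early-return reading of arl_check()'s return value (non-zero once nothing is left to collect, which is how the loop above uses it) are illustrative:

    #include "common.h"  // netdata-internal: ARL_BASE and the arl_* calls used above

    static unsigned long long my_hits = 0, my_misses = 0;

    void example_arl_pass(void) {
        static ARL_BASE *base = NULL;
        if(unlikely(!base)) {
            base = arl_create("example", NULL, 60); // same signature as the arcstats use
            arl_expect(base, "hits",   &my_hits);   // bind keyword -> destination
            arl_expect(base, "misses", &my_misses);
        }
        arl_begin(base);                            // start a new collection pass
        // key/value normally come from procfile_lineword(); literals are illustrative
        if(arl_check(base, "hits",   "123")) return;
        if(arl_check(base, "misses", "7"))   return;
    }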
diff --git a/src/proc_vmstat.c b/src/proc_vmstat.c
index 84748736..a2416313 100644
--- a/src/proc_vmstat.c
+++ b/src/proc_vmstat.c
@@ -169,11 +169,11 @@ int do_proc_vmstat(int update_every, usec_t dt) {
// The following stats depend on CONFIG_NUMA_BALANCING in the
// kernel.
- rrddim_add(st_numa, "pte updates", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st_numa, "huge pte updates", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st_numa, "hint faults", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st_numa, "hint faults local", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st_numa, "pages migrated", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st_numa, "pte_updates", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st_numa, "huge_pte_updates", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st_numa, "hint_faults", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st_numa, "hint_faults_local", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(st_numa, "pages_migrated", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
}
else rrdset_next(st_numa);
@@ -182,11 +182,11 @@ int do_proc_vmstat(int update_every, usec_t dt) {
rrddim_set(st_numa, "interleave", numa_interleave);
rrddim_set(st_numa, "other", numa_other);
- rrddim_set(st_numa, "pte updates", numa_pte_updates);
- rrddim_set(st_numa, "huge pte updates", numa_huge_pte_updates);
- rrddim_set(st_numa, "hint faults", numa_hint_faults);
- rrddim_set(st_numa, "hint faults local", numa_hint_faults_local);
- rrddim_set(st_numa, "pages migrated", numa_pages_migrated);
+ rrddim_set(st_numa, "pte_updates", numa_pte_updates);
+ rrddim_set(st_numa, "huge_pte_updates", numa_huge_pte_updates);
+ rrddim_set(st_numa, "hint_faults", numa_hint_faults);
+ rrddim_set(st_numa, "hint_faults_local", numa_hint_faults_local);
+ rrddim_set(st_numa, "pages_migrated", numa_pages_migrated);
rrdset_done(st_numa);
}
diff --git a/src/registry.c b/src/registry.c
index ed9be984..76e3fa4d 100644
--- a/src/registry.c
+++ b/src/registry.c
@@ -45,7 +45,7 @@ static inline void registry_json_header(RRDHOST *host, struct web_client *w, con
buffer_flush(w->response.data);
w->response.data->contenttype = CT_APPLICATION_JSON;
buffer_sprintf(w->response.data, "{\n\t\"action\": \"%s\",\n\t\"status\": \"%s\",\n\t\"hostname\": \"%s\",\n\t\"machine_guid\": \"%s\"",
- action, status, (host == localhost)?registry.hostname:host->hostname, host->machine_guid);
+ action, status, host->registry_hostname, host->machine_guid);
}
static inline void registry_json_footer(struct web_client *w) {
diff --git a/src/registry.h b/src/registry.h
index 2c4592b9..9aa24156 100644
--- a/src/registry.h
+++ b/src/registry.h
@@ -70,6 +70,8 @@ extern int registry_request_hello_json(RRDHOST *host, struct web_client *w);
extern void registry_statistics(void);
extern char *registry_get_this_machine_guid(void);
+extern char *registry_get_this_machine_hostname(void);
+
extern int regenerate_guid(const char *guid, char *result);
#endif /* NETDATA_REGISTRY_H */
diff --git a/src/registry_init.c b/src/registry_init.c
index 2a41d36e..654f66d1 100644
--- a/src/registry_init.c
+++ b/src/registry_init.c
@@ -34,7 +34,7 @@ int registry_init(void) {
registry.persons_expiration = config_get_number(CONFIG_SECTION_REGISTRY, "registry expire idle persons days", 365) * 86400;
registry.registry_domain = config_get(CONFIG_SECTION_REGISTRY, "registry domain", "");
registry.registry_to_announce = config_get(CONFIG_SECTION_REGISTRY, "registry to announce", "https://registry.my-netdata.io");
- registry.hostname = config_get(CONFIG_SECTION_REGISTRY, "registry hostname", config_get(CONFIG_SECTION_GLOBAL, "hostname", "localhost"));
+ registry.hostname = config_get(CONFIG_SECTION_REGISTRY, "registry hostname", netdata_configured_hostname);
registry.verify_cookies_redirects = config_get_boolean(CONFIG_SECTION_REGISTRY, "verify browser cookies support", 1);
setenv("NETDATA_REGISTRY_HOSTNAME", registry.hostname, 1);
diff --git a/src/registry_internals.c b/src/registry_internals.c
index 9ec91ba4..fd3c295c 100644
--- a/src/registry_internals.c
+++ b/src/registry_internals.c
@@ -274,6 +274,10 @@ static inline int is_machine_guid_blacklisted(const char *guid) {
return 0;
}
+char *registry_get_this_machine_hostname(void) {
+ return registry.hostname;
+}
+
char *registry_get_this_machine_guid(void) {
static char guid[GUID_LEN + 1] = "";
diff --git a/src/rrd.c b/src/rrd.c
index a9ff6243..85ce93dd 100644
--- a/src/rrd.c
+++ b/src/rrd.c
@@ -31,19 +31,28 @@ inline const char *rrd_memory_mode_name(RRD_MEMORY_MODE id) {
return RRD_MEMORY_MODE_NONE_NAME;
case RRD_MEMORY_MODE_SAVE:
- default:
return RRD_MEMORY_MODE_SAVE_NAME;
+
+ case RRD_MEMORY_MODE_ALLOC:
+ return RRD_MEMORY_MODE_ALLOC_NAME;
}
+
+ return RRD_MEMORY_MODE_SAVE_NAME;
}
RRD_MEMORY_MODE rrd_memory_mode_id(const char *name) {
if(unlikely(!strcmp(name, RRD_MEMORY_MODE_RAM_NAME)))
return RRD_MEMORY_MODE_RAM;
+
else if(unlikely(!strcmp(name, RRD_MEMORY_MODE_MAP_NAME)))
return RRD_MEMORY_MODE_MAP;
+
else if(unlikely(!strcmp(name, RRD_MEMORY_MODE_NONE_NAME)))
return RRD_MEMORY_MODE_NONE;
+ else if(unlikely(!strcmp(name, RRD_MEMORY_MODE_ALLOC_NAME)))
+ return RRD_MEMORY_MODE_ALLOC;
+
return RRD_MEMORY_MODE_SAVE;
}
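With the name mapped in both directions, the new mode becomes selectable wherever a memory mode is read from configuration. A sketch, assuming the usual 'memory mode' key in the [global] section (that key is not part of this patch):

    [global]
        memory mode = alloc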
diff --git a/src/rrd.h b/src/rrd.h
index 2f4f2127..5bc61dcb 100644
--- a/src/rrd.h
+++ b/src/rrd.h
@@ -5,7 +5,7 @@
#define UPDATE_EVERY_MAX 3600
#define RRD_DEFAULT_HISTORY_ENTRIES 3600
-#define RRD_HISTORY_ENTRIES_MAX (86400*10)
+#define RRD_HISTORY_ENTRIES_MAX (86400*365)
extern int default_rrd_update_every;
extern int default_rrd_history_entries;
@@ -42,13 +42,15 @@ typedef enum rrd_memory_mode {
RRD_MEMORY_MODE_NONE = 0,
RRD_MEMORY_MODE_RAM = 1,
RRD_MEMORY_MODE_MAP = 2,
- RRD_MEMORY_MODE_SAVE = 3
+ RRD_MEMORY_MODE_SAVE = 3,
+ RRD_MEMORY_MODE_ALLOC = 4
} RRD_MEMORY_MODE;
#define RRD_MEMORY_MODE_NONE_NAME "none"
#define RRD_MEMORY_MODE_RAM_NAME "ram"
#define RRD_MEMORY_MODE_MAP_NAME "map"
#define RRD_MEMORY_MODE_SAVE_NAME "save"
+#define RRD_MEMORY_MODE_ALLOC_NAME "alloc"
extern RRD_MEMORY_MODE default_rrd_memory_mode;
@@ -101,9 +103,15 @@ typedef enum rrddim_flags {
RRDDIM_FLAG_DONT_DETECT_RESETS_OR_OVERFLOWS = 1 << 1 // do not offer RESET or OVERFLOW info to callers
} RRDDIM_FLAGS;
+#ifdef HAVE_C___ATOMIC
+#define rrddim_flag_check(rd, flag) (__atomic_load_n(&((rd)->flags), __ATOMIC_SEQ_CST) & flag)
+#define rrddim_flag_set(rd, flag) __atomic_or_fetch(&((rd)->flags), flag, __ATOMIC_SEQ_CST)
+#define rrddim_flag_clear(rd, flag) __atomic_and_fetch(&((rd)->flags), ~flag, __ATOMIC_SEQ_CST)
+#else
#define rrddim_flag_check(rd, flag) ((rd)->flags & flag)
#define rrddim_flag_set(rd, flag) (rd)->flags |= flag
#define rrddim_flag_clear(rd, flag) (rd)->flags &= ~flag
+#endif
// ----------------------------------------------------------------------------
@@ -210,16 +218,28 @@ typedef struct rrddim RRDDIM;
// and may lead to missing information.
typedef enum rrdset_flags {
- RRDSET_FLAG_ENABLED = 1 << 0, // enables or disables a chart
- RRDSET_FLAG_DETAIL = 1 << 1, // if set, the data set should be considered as a detail of another
- // (the master data set should be the one that has the same family and is not detail)
- RRDSET_FLAG_DEBUG = 1 << 2, // enables or disables debugging for a chart
- RRDSET_FLAG_OBSOLETE = 1 << 3 // this is marked by the collector/module as obsolete
+ RRDSET_FLAG_ENABLED = 1 << 0, // enables or disables a chart
+ RRDSET_FLAG_DETAIL = 1 << 1, // if set, the data set should be considered as a detail of another
+ // (the master data set should be the one that has the same family and is not detail)
+ RRDSET_FLAG_DEBUG = 1 << 2, // enables or disables debugging for a chart
+ RRDSET_FLAG_OBSOLETE = 1 << 3, // this is marked by the collector/module as obsolete
+ RRDSET_FLAG_BACKEND_SEND = 1 << 4, // if set, this chart should be sent to backends
+ RRDSET_FLAG_BACKEND_IGNORE = 1 << 5, // if set, this chart should not be sent to backends
+ RRDSET_FLAG_EXPOSED_UPSTREAM = 1 << 6, // if set, we have sent this chart to netdata master (streaming)
+ RRDSET_FLAG_STORE_FIRST = 1 << 7, // if set, do not eliminate the first collection during interpolation
+ RRDSET_FLAG_HETEROGENEOUS = 1 << 8, // if set, the chart is not homogeneous (dimensions in it have multiple algorithms, multipliers or dividers)
+ RRDSET_FLAG_HOMEGENEOUS_CHECK= 1 << 9 // if set, the chart should be checked to determine if its dimensions are homogeneous
} RRDSET_FLAGS;
+#ifdef HAVE_C___ATOMIC
+#define rrdset_flag_check(st, flag) (__atomic_load_n(&((st)->flags), __ATOMIC_SEQ_CST) & flag)
+#define rrdset_flag_set(st, flag) __atomic_or_fetch(&((st)->flags), flag, __ATOMIC_SEQ_CST)
+#define rrdset_flag_clear(st, flag) __atomic_and_fetch(&((st)->flags), ~flag, __ATOMIC_SEQ_CST)
+#else
#define rrdset_flag_check(st, flag) ((st)->flags & flag)
#define rrdset_flag_set(st, flag) (st)->flags |= flag
#define rrdset_flag_clear(st, flag) (st)->flags &= ~flag
+#endif
struct rrdset {
// ------------------------------------------------------------------------
@@ -279,7 +299,8 @@ struct rrdset {
size_t counter_done; // the number of times rrdset_done() has been called
time_t last_accessed_time; // the last time this RRDSET has been accessed
- size_t unused[9];
+ time_t upstream_resync_time; // the timestamp up to which we should resync clock upstream
+ size_t unused[8];
uint32_t hash; // a simple hash on the id, to speed up searching
// we first compare hashes, and only if the hashes are equal we do string comparisons
@@ -347,14 +368,28 @@ typedef struct rrdset RRDSET;
// and may lead to missing information.
typedef enum rrdhost_flags {
- RRDHOST_ORPHAN = 1 << 0, // this host is orphan
- RRDHOST_DELETE_OBSOLETE_FILES = 1 << 1, // delete files of obsolete charts
- RRDHOST_DELETE_ORPHAN_FILES = 1 << 2 // delete the entire host when orphan
+ RRDHOST_ORPHAN = 1 << 0, // this host is orphan (not receiving data)
+ RRDHOST_DELETE_OBSOLETE_CHARTS = 1 << 1, // delete files of obsolete charts
+ RRDHOST_DELETE_ORPHAN_HOST = 1 << 2 // delete the entire host when orphan
} RRDHOST_FLAGS;
+#ifdef HAVE_C___ATOMIC
+#define rrdhost_flag_check(host, flag) (__atomic_load_n(&((host)->flags), __ATOMIC_SEQ_CST) & flag)
+#define rrdhost_flag_set(host, flag) __atomic_or_fetch(&((host)->flags), flag, __ATOMIC_SEQ_CST)
+#define rrdhost_flag_clear(host, flag) __atomic_and_fetch(&((host)->flags), ~flag, __ATOMIC_SEQ_CST)
+#else
#define rrdhost_flag_check(host, flag) ((host)->flags & flag)
#define rrdhost_flag_set(host, flag) (host)->flags |= flag
#define rrdhost_flag_clear(host, flag) (host)->flags &= ~flag
+#endif
+
+#ifdef NETDATA_INTERNAL_CHECKS
+#define rrdset_debug(st, fmt, args...) do { if(unlikely(debug_flags & D_RRD_STATS && rrdset_flag_check(st, RRDSET_FLAG_DEBUG))) \
+ debug_int(__FILE__, __FUNCTION__, __LINE__, "%s: " fmt, st->name, ##args); } while(0)
+#else
+#define rrdset_debug(st, fmt, args...) debug_dummy()
+#endif
+
// ----------------------------------------------------------------------------
// RRD HOST
@@ -368,10 +403,13 @@ struct rrdhost {
char *hostname; // the hostname of this host
uint32_t hash_hostname; // the hostname hash
+ char *registry_hostname; // the registry hostname for this host
+
char machine_guid[GUID_LEN + 1]; // the unique ID of this host
uint32_t hash_machine_guid; // the hash of the unique ID
- char *os; // the O/S type of the host
+ const char *os; // the O/S type of the host
+ const char *tags; // tags for this host
uint32_t flags; // flags about this RRDHOST
@@ -488,8 +526,10 @@ extern RRDHOST *rrdhost_find_by_guid(const char *guid, uint32_t hash);
extern RRDHOST *rrdhost_find_or_create(
const char *hostname
+ , const char *registry_hostname
, const char *guid
, const char *os
+ , const char *tags
, int update_every
, long history
, RRD_MEMORY_MODE mode
@@ -528,7 +568,7 @@ extern void __rrd_check_wrlock(const char *file, const char *function, const uns
extern void rrdset_set_name(RRDSET *st, const char *name);
-extern RRDSET *rrdset_create(RRDHOST *host
+extern RRDSET *rrdset_create_custom(RRDHOST *host
, const char *type
, const char *id
, const char *name
@@ -538,18 +578,27 @@ extern RRDSET *rrdset_create(RRDHOST *host
, const char *units
, long priority
, int update_every
- , RRDSET_TYPE chart_type);
+ , RRDSET_TYPE chart_type
+ , RRD_MEMORY_MODE memory_mode
+ , long history_entries);
+
+#define rrdset_create(host, type, id, name, family, context, title, units, priority, update_every, chart_type) \
+ rrdset_create_custom(host, type, id, name, family, context, title, units, priority, update_every, chart_type, (host)->rrd_memory_mode, (host)->rrd_history_entries)
-#define rrdset_create_localhost(type, id, name, family, context, title, units, priority, update_every, chart_type) rrdset_create(localhost, type, id, name, family, context, title, units, priority, update_every, chart_type)
+#define rrdset_create_localhost(type, id, name, family, context, title, units, priority, update_every, chart_type) \
+ rrdset_create(localhost, type, id, name, family, context, title, units, priority, update_every, chart_type)
extern void rrdhost_free_all(void);
extern void rrdhost_save_all(void);
+extern void rrdhost_cleanup_all(void);
-extern void rrdhost_cleanup_orphan(RRDHOST *protected);
+extern void rrdhost_cleanup_orphan_hosts(RRDHOST *protected);
extern void rrdhost_free(RRDHOST *host);
extern void rrdhost_save(RRDHOST *host);
extern void rrdhost_delete(RRDHOST *host);
+extern void rrdset_update_heterogeneous_flag(RRDSET *st);
+
extern RRDSET *rrdset_find(RRDHOST *host, const char *id);
#define rrdset_find_localhost(id) rrdset_find(localhost, id)
@@ -565,8 +614,12 @@ extern void rrdset_next_usec(RRDSET *st, usec_t microseconds);
extern void rrdset_done(RRDSET *st);
+extern void rrdset_is_obsolete(RRDSET *st);
+extern void rrdset_isnot_obsolete(RRDSET *st);
+
// checks if the RRDSET should be offered to viewers
-#define rrdset_is_available_for_viewers(st) (rrdset_flag_check(st, RRDSET_FLAG_ENABLED) && !rrdset_flag_check(st, RRDSET_FLAG_OBSOLETE) && (st)->dimensions)
+#define rrdset_is_available_for_viewers(st) (rrdset_flag_check(st, RRDSET_FLAG_ENABLED) && !rrdset_flag_check(st, RRDSET_FLAG_OBSOLETE) && (st)->dimensions && (st)->rrd_memory_mode != RRD_MEMORY_MODE_NONE)
+#define rrdset_is_available_for_backends(st) (rrdset_flag_check(st, RRDSET_FLAG_ENABLED) && !rrdset_flag_check(st, RRDSET_FLAG_OBSOLETE) && (st)->dimensions)
// get the total duration in seconds of the round robin database
#define rrdset_duration(st) ((time_t)( (((st)->counter >= ((unsigned long)(st)->entries))?(unsigned long)(st)->entries:(st)->counter) * (st)->update_every ))
@@ -604,9 +657,14 @@ extern void rrdset_done(RRDSET *st);
// ----------------------------------------------------------------------------
// RRD DIMENSION functions
-extern RRDDIM *rrddim_add(RRDSET *st, const char *id, const char *name, collected_number multiplier, collected_number divisor, RRD_ALGORITHM algorithm);
+extern RRDDIM *rrddim_add_custom(RRDSET *st, const char *id, const char *name, collected_number multiplier, collected_number divisor, RRD_ALGORITHM algorithm, RRD_MEMORY_MODE memory_mode);
+#define rrddim_add(st, id, name, multiplier, divisor, algorithm) rrddim_add_custom(st, id, name, multiplier, divisor, algorithm, (st)->rrd_memory_mode)
+
+extern int rrddim_set_name(RRDSET *st, RRDDIM *rd, const char *name);
+extern int rrddim_set_algorithm(RRDSET *st, RRDDIM *rd, RRD_ALGORITHM algorithm);
+extern int rrddim_set_multiplier(RRDSET *st, RRDDIM *rd, collected_number multiplier);
+extern int rrddim_set_divisor(RRDSET *st, RRDDIM *rd, collected_number divisor);
-extern void rrddim_set_name(RRDSET *st, RRDDIM *rd, const char *name);
extern RRDDIM *rrddim_find(RRDSET *st, const char *id);
extern int rrddim_hide(RRDSET *st, const char *id);
@@ -647,7 +705,7 @@ extern void rrdset_reset(RRDSET *st);
extern void rrdset_save(RRDSET *st);
extern void rrdset_delete(RRDSET *st);
-extern void rrdhost_cleanup_obsolete(RRDHOST *host);
+extern void rrdhost_cleanup_obsolete_charts(RRDHOST *host);
#endif /* NETDATA_RRD_INTERNALS */
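The three flag-macro families in this header share one pattern: when HAVE_C___ATOMIC is set, check/set/clear compile to GCC atomic builtins, so collector, health and web threads can flip flags concurrently without a lock. A minimal standalone sketch of the same pattern, with an invented struct and invented flag names:

    #include <stdio.h>
    #include <stdint.h>

    typedef enum { EXAMPLE_FLAG_ENABLED = 1 << 0, EXAMPLE_FLAG_OBSOLETE = 1 << 3 } EXAMPLE_FLAGS;

    struct example { uint32_t flags; };

    // same shape as the rrdset/rrddim/rrdhost macros above
    #define example_flag_check(p, flag) (__atomic_load_n(&((p)->flags), __ATOMIC_SEQ_CST) & (flag))
    #define example_flag_set(p, flag)   __atomic_or_fetch(&((p)->flags), flag, __ATOMIC_SEQ_CST)
    #define example_flag_clear(p, flag) __atomic_and_fetch(&((p)->flags), ~(flag), __ATOMIC_SEQ_CST)

    int main(void) {
        struct example e = { 0 };
        example_flag_set(&e, EXAMPLE_FLAG_ENABLED);
        example_flag_set(&e, EXAMPLE_FLAG_OBSOLETE);
        example_flag_clear(&e, EXAMPLE_FLAG_OBSOLETE);
        printf("enabled=%d obsolete=%d\n",
               !!example_flag_check(&e, EXAMPLE_FLAG_ENABLED),
               !!example_flag_check(&e, EXAMPLE_FLAG_OBSOLETE));
        return 0;
    }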
diff --git a/src/rrd2json.c b/src/rrd2json.c
index 4d853930..98080139 100644
--- a/src/rrd2json.c
+++ b/src/rrd2json.c
@@ -11,7 +11,7 @@ void rrd_stats_api_v1_chart_with_data(RRDSET *st, BUFFER *wb, size_t *dimensions
"\t\t\t\"type\": \"%s\",\n"
"\t\t\t\"family\": \"%s\",\n"
"\t\t\t\"context\": \"%s\",\n"
- "\t\t\t\"title\": \"%s\",\n"
+ "\t\t\t\"title\": \"%s (%s)\",\n"
"\t\t\t\"priority\": %ld,\n"
"\t\t\t\"enabled\": %s,\n"
"\t\t\t\"units\": \"%s\",\n"
@@ -27,7 +27,7 @@ void rrd_stats_api_v1_chart_with_data(RRDSET *st, BUFFER *wb, size_t *dimensions
, st->type
, st->family
, st->context
- , st->title
+ , st->title, st->name
, st->priority
, rrdset_flag_check(st, RRDSET_FLAG_ENABLED)?"true":"false"
, st->units
@@ -167,79 +167,6 @@ void rrd_stats_api_v1_charts(RRDHOST *host, BUFFER *wb) {
}
// ----------------------------------------------------------------------------
-// PROMETHEUS
-// /api/v1/allmetrics?format=prometheus
-
-static inline size_t prometheus_name_copy(char *d, const char *s, size_t usable) {
- size_t n;
-
- for(n = 0; *s && n < usable ; d++, s++, n++) {
- register char c = *s;
-
- if(unlikely(!isalnum(c))) *d = '_';
- else *d = c;
- }
- *d = '\0';
-
- return n;
-}
-
-#define PROMETHEUS_ELEMENT_MAX 256
-
-void rrd_stats_api_v1_charts_allmetrics_prometheus(RRDHOST *host, BUFFER *wb) {
- rrdhost_rdlock(host);
-
- char hostname[PROMETHEUS_ELEMENT_MAX + 1];
- prometheus_name_copy(hostname, host->hostname, PROMETHEUS_ELEMENT_MAX);
-
- // for each chart
- RRDSET *st;
- rrdset_foreach_read(st, host) {
- char chart[PROMETHEUS_ELEMENT_MAX + 1];
- prometheus_name_copy(chart, st->id, PROMETHEUS_ELEMENT_MAX);
-
- buffer_strcat(wb, "\n");
- if(rrdset_is_available_for_viewers(st)) {
- rrdset_rdlock(st);
-
- // for each dimension
- RRDDIM *rd;
- rrddim_foreach_read(rd, st) {
- if(rd->collections_counter) {
- char dimension[PROMETHEUS_ELEMENT_MAX + 1];
- prometheus_name_copy(dimension, rd->id, PROMETHEUS_ELEMENT_MAX);
-
- // buffer_sprintf(wb, "# HELP %s.%s %s\n", st->id, rd->id, st->units);
-
- switch(rd->algorithm) {
- case RRD_ALGORITHM_INCREMENTAL:
- case RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL:
- buffer_sprintf(wb, "# TYPE %s_%s counter\n", chart, dimension);
- break;
-
- default:
- buffer_sprintf(wb, "# TYPE %s_%s gauge\n", chart, dimension);
- break;
- }
-
- // calculated_number n = (calculated_number)rd->last_collected_value * (calculated_number)(abs(rd->multiplier)) / (calculated_number)(abs(rd->divisor));
- // buffer_sprintf(wb, "%s.%s " CALCULATED_NUMBER_FORMAT " %llu\n", st->id, rd->id, n, timeval_msec(&rd->last_collected_time));
-
- buffer_sprintf(wb, "%s_%s{instance=\"%s\"} " COLLECTED_NUMBER_FORMAT " %llu\n",
- chart, dimension, hostname, rd->last_collected_value, timeval_msec(&rd->last_collected_time)
- );
-
- }
- }
-
- rrdset_unlock(st);
- }
- }
-
- rrdhost_unlock(host);
-}
-
-// ----------------------------------------------------------------------------
// BASH
// /api/v1/allmetrics?format=bash
@@ -267,7 +194,7 @@ void rrd_stats_api_v1_charts_allmetrics_shell(RRDHOST *host, BUFFER *wb) {
rrdset_foreach_read(st, host) {
calculated_number total = 0.0;
char chart[SHELL_ELEMENT_MAX + 1];
- shell_name_copy(chart, st->id, SHELL_ELEMENT_MAX);
+ shell_name_copy(chart, st->name?st->name:st->id, SHELL_ELEMENT_MAX);
buffer_sprintf(wb, "\n# chart: %s (name: %s)\n", st->id, st->name);
if(rrdset_is_available_for_viewers(st)) {
@@ -278,7 +205,7 @@ void rrd_stats_api_v1_charts_allmetrics_shell(RRDHOST *host, BUFFER *wb) {
rrddim_foreach_read(rd, st) {
if(rd->collections_counter) {
char dimension[SHELL_ELEMENT_MAX + 1];
- shell_name_copy(dimension, rd->id, SHELL_ELEMENT_MAX);
+ shell_name_copy(dimension, rd->name?rd->name:rd->id, SHELL_ELEMENT_MAX);
calculated_number n = rd->last_stored_value;
@@ -306,7 +233,7 @@ void rrd_stats_api_v1_charts_allmetrics_shell(RRDHOST *host, BUFFER *wb) {
if(!rc->rrdset) continue;
char chart[SHELL_ELEMENT_MAX + 1];
- shell_name_copy(chart, rc->rrdset->id, SHELL_ELEMENT_MAX);
+ shell_name_copy(chart, rc->rrdset->name?rc->rrdset->name:rc->rrdset->id, SHELL_ELEMENT_MAX);
char alarm[SHELL_ELEMENT_MAX + 1];
shell_name_copy(alarm, rc->name, SHELL_ELEMENT_MAX);
diff --git a/src/rrd2json.h b/src/rrd2json.h
index f2f03c64..a3b9c950 100644
--- a/src/rrd2json.h
+++ b/src/rrd2json.h
@@ -31,13 +31,15 @@
#define DATASOURCE_FORMAT_SSV_COMMA "ssvcomma"
#define DATASOURCE_FORMAT_CSV_JSON_ARRAY "csvjsonarray"
-#define ALLMETRICS_FORMAT_SHELL "shell"
-#define ALLMETRICS_FORMAT_PROMETHEUS "prometheus"
-#define ALLMETRICS_FORMAT_JSON "json"
+#define ALLMETRICS_FORMAT_SHELL "shell"
+#define ALLMETRICS_FORMAT_PROMETHEUS "prometheus"
+#define ALLMETRICS_FORMAT_PROMETHEUS_ALL_HOSTS "prometheus_all_hosts"
+#define ALLMETRICS_FORMAT_JSON "json"
-#define ALLMETRICS_SHELL 1
-#define ALLMETRICS_PROMETHEUS 2
-#define ALLMETRICS_JSON 3
+#define ALLMETRICS_SHELL 1
+#define ALLMETRICS_PROMETHEUS 2
+#define ALLMETRICS_JSON 3
+#define ALLMETRICS_PROMETHEUS_ALL_HOSTS 4
#define GROUP_UNDEFINED 0
#define GROUP_AVERAGE 1
@@ -65,13 +67,13 @@ extern void rrd_stats_api_v1_charts(RRDHOST *host, BUFFER *wb);
extern void rrd_stats_api_v1_charts_allmetrics_json(RRDHOST *host, BUFFER *wb);
extern void rrd_stats_api_v1_charts_allmetrics_shell(RRDHOST *host, BUFFER *wb);
-extern void rrd_stats_api_v1_charts_allmetrics_prometheus(RRDHOST *host, BUFFER *wb);
extern int rrdset2anything_api_v1(RRDSET *st, BUFFER *out, BUFFER *dimensions, uint32_t format, long points
- , long long after, long long before, int group_method, uint32_t options
- , time_t *latest_timestamp);
+ , long long after, long long before, int group_method, uint32_t options
+ , time_t *latest_timestamp);
+
extern int rrdset2value_api_v1(RRDSET *st, BUFFER *wb, calculated_number *n, const char *dimensions, long points
- , long long after, long long before, int group_method, uint32_t options
- , time_t *db_before, time_t *db_after, int *value_is_null);
+ , long long after, long long before, int group_method, uint32_t options
+ , time_t *db_before, time_t *db_after, int *value_is_null);
#endif /* NETDATA_RRD2JSON_H */
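
Note: the header now defines a fourth allmetrics format, prometheus_all_hosts, which exports the metrics of every known host in a single scrape. A minimal, self-contained sketch of how a web handler might map the ?format= query value onto these constants (the dispatcher itself is not part of this diff):

#include <stdio.h>
#include <string.h>

#define ALLMETRICS_FORMAT_SHELL                 "shell"
#define ALLMETRICS_FORMAT_PROMETHEUS            "prometheus"
#define ALLMETRICS_FORMAT_PROMETHEUS_ALL_HOSTS  "prometheus_all_hosts"
#define ALLMETRICS_FORMAT_JSON                  "json"

#define ALLMETRICS_SHELL                 1
#define ALLMETRICS_PROMETHEUS            2
#define ALLMETRICS_JSON                  3
#define ALLMETRICS_PROMETHEUS_ALL_HOSTS  4

// hypothetical dispatcher: map the query value to an ALLMETRICS_*
// constant; 0 means "unknown format"
static int allmetrics_format_id(const char *value) {
    if(!strcmp(value, ALLMETRICS_FORMAT_SHELL))                return ALLMETRICS_SHELL;
    if(!strcmp(value, ALLMETRICS_FORMAT_PROMETHEUS))           return ALLMETRICS_PROMETHEUS;
    if(!strcmp(value, ALLMETRICS_FORMAT_PROMETHEUS_ALL_HOSTS)) return ALLMETRICS_PROMETHEUS_ALL_HOSTS;
    if(!strcmp(value, ALLMETRICS_FORMAT_JSON))                 return ALLMETRICS_JSON;
    return 0;
}

int main(void) {
    printf("%d\n", allmetrics_format_id("prometheus_all_hosts")); // prints 4
    return 0;
}
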
diff --git a/src/rrd2json_api_old.c b/src/rrd2json_api_old.c
index 6710f31c..003b8626 100644
--- a/src/rrd2json_api_old.c
+++ b/src/rrd2json_api_old.c
@@ -14,7 +14,7 @@ unsigned long rrdset_info2json_api_old(RRDSET *st, char *options, BUFFER *wb) {
"\t\t\t\"type\": \"%s\",\n"
"\t\t\t\"family\": \"%s\",\n"
"\t\t\t\"context\": \"%s\",\n"
- "\t\t\t\"title\": \"%s\",\n"
+ "\t\t\t\"title\": \"%s (%s)\",\n"
"\t\t\t\"priority\": %ld,\n"
"\t\t\t\"enabled\": %d,\n"
"\t\t\t\"units\": \"%s\",\n"
@@ -37,7 +37,7 @@ unsigned long rrdset_info2json_api_old(RRDSET *st, char *options, BUFFER *wb) {
, st->type
, st->family
, st->context
- , st->title
+ , st->title, st->name
, st->priority
, rrdset_flag_check(st, RRDSET_FLAG_ENABLED)?1:0
, st->units
diff --git a/src/rrdcalc.c b/src/rrdcalc.c
index 1f184540..bb90a4c6 100644
--- a/src/rrdcalc.c
+++ b/src/rrdcalc.c
@@ -219,7 +219,7 @@ inline int rrdcalc_exists(RRDHOST *host, const char *chart, const char *name, ui
for(rc = host->alarms; rc ; rc = rc->next) {
if (unlikely(rc->chart && rc->hash == hash_name && rc->hash_chart == hash_chart && !strcmp(name, rc->name) && !strcmp(chart, rc->chart))) {
debug(D_HEALTH, "Health alarm '%s.%s' already exists in host '%s'.", chart, name, host->hostname);
- error("Health alarm '%s.%s' already exists in host '%s'.", chart, name, host->hostname);
+ info("Health alarm '%s.%s' already exists in host '%s'.", chart, name, host->hostname);
return 1;
}
}
diff --git a/src/rrdcalctemplate.c b/src/rrdcalctemplate.c
index 2c5e2bd1..b5d2c7d6 100644
--- a/src/rrdcalctemplate.c
+++ b/src/rrdcalctemplate.c
@@ -12,7 +12,7 @@ void rrdcalctemplate_link_matching(RRDSET *st) {
&& (!rt->family_pattern || simple_pattern_matches(rt->family_pattern, st->family))) {
RRDCALC *rc = rrdcalc_create(st->rrdhost, rt, st->id);
if(unlikely(!rc))
- error("Health tried to create alarm from template '%s', but it failed", rt->name);
+ info("Health tried to create alarm from template '%s' on chart '%s' of host '%s', but it failed", rt->name, st->id, st->rrdhost->hostname);
#ifdef NETDATA_INTERNAL_CHECKS
else if(rc->rrdset != st)
diff --git a/src/rrddim.c b/src/rrddim.c
index 54a17522..e75aa3fd 100644
--- a/src/rrddim.c
+++ b/src/rrddim.c
@@ -35,9 +35,9 @@ inline RRDDIM *rrddim_find(RRDSET *st, const char *id) {
// ----------------------------------------------------------------------------
// RRDDIM rename a dimension
-inline void rrddim_set_name(RRDSET *st, RRDDIM *rd, const char *name) {
- if(unlikely(!strcmp(rd->name, name)))
- return;
+inline int rrddim_set_name(RRDSET *st, RRDDIM *rd, const char *name) {
+ if(unlikely(!name || !*name || !strcmp(rd->name, name)))
+ return 0;
debug(D_RRD_CALLS, "rrddim_set_name() from %s.%s to %s.%s", st->name, rd->name, st->name, name);
@@ -45,18 +45,57 @@ inline void rrddim_set_name(RRDSET *st, RRDDIM *rd, const char *name) {
snprintfz(varname, CONFIG_MAX_NAME, "dim %s name", rd->id);
rd->name = config_set_default(st->config_section, varname, name);
rd->hash_name = simple_hash(rd->name);
-
rrddimvar_rename_all(rd);
+ rd->exposed = 0;
+ return 1;
+}
+
+inline int rrddim_set_algorithm(RRDSET *st, RRDDIM *rd, RRD_ALGORITHM algorithm) {
+ if(unlikely(rd->algorithm == algorithm))
+ return 0;
+
+ debug(D_RRD_CALLS, "Updating algorithm of dimension '%s/%s' from %s to %s", st->id, rd->name, rrd_algorithm_name(rd->algorithm), rrd_algorithm_name(algorithm));
+ rd->algorithm = algorithm;
+ rd->exposed = 0;
+ rrdset_flag_set(st, RRDSET_FLAG_HOMEGENEOUS_CHECK);
+ return 1;
+}
+
+inline int rrddim_set_multiplier(RRDSET *st, RRDDIM *rd, collected_number multiplier) {
+ if(unlikely(rd->multiplier == multiplier))
+ return 0;
+
+ debug(D_RRD_CALLS, "Updating multiplier of dimension '%s/%s' from " COLLECTED_NUMBER_FORMAT " to " COLLECTED_NUMBER_FORMAT, st->id, rd->name, rd->multiplier, multiplier);
+ rd->multiplier = multiplier;
+ rd->exposed = 0;
+ rrdset_flag_set(st, RRDSET_FLAG_HOMEGENEOUS_CHECK);
+ return 1;
}
+inline int rrddim_set_divisor(RRDSET *st, RRDDIM *rd, collected_number divisor) {
+ if(unlikely(rd->divisor == divisor))
+ return 0;
+
+ debug(D_RRD_CALLS, "Updating divisor of dimension '%s/%s' from " COLLECTED_NUMBER_FORMAT " to " COLLECTED_NUMBER_FORMAT, st->id, rd->name, rd->divisor, divisor);
+ rd->divisor = divisor;
+ rd->exposed = 0;
+ rrdset_flag_set(st, RRDSET_FLAG_HOMEGENEOUS_CHECK);
+ return 1;
+}
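
Note: all four setters above follow the same idempotent pattern: bail out early when nothing changed, otherwise apply the new value, clear rd->exposed so the chart definition is re-announced upstream, and report the change to the caller. A standalone sketch of the pattern (simplified types, not netdata's):

#include <stdio.h>

struct dim { long long divisor; int exposed; };

// change-detection setter: return 0 when nothing changed, otherwise
// apply the change, mark the dimension as not yet exposed upstream,
// and return 1
static int dim_set_divisor(struct dim *d, long long divisor) {
    if(d->divisor == divisor)
        return 0;           // no change, nothing to re-announce
    d->divisor = divisor;
    d->exposed = 0;         // force the definition to be re-sent
    return 1;
}

int main(void) {
    struct dim d = { .divisor = 1024, .exposed = 1 };
    printf("%d\n", dim_set_divisor(&d, 1024)); // 0: unchanged
    printf("%d\n", dim_set_divisor(&d, 512));  // 1: changed, exposed reset
    return 0;
}
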
// ----------------------------------------------------------------------------
// RRDDIM create a dimension
-RRDDIM *rrddim_add(RRDSET *st, const char *id, const char *name, collected_number multiplier, collected_number divisor, RRD_ALGORITHM algorithm) {
+RRDDIM *rrddim_add_custom(RRDSET *st, const char *id, const char *name, collected_number multiplier, collected_number divisor, RRD_ALGORITHM algorithm, RRD_MEMORY_MODE memory_mode) {
RRDDIM *rd = rrddim_find(st, id);
if(unlikely(rd)) {
debug(D_RRD_CALLS, "Cannot create rrd dimension '%s/%s', it already exists.", st->id, name?name:"<NONAME>");
+
+ rrddim_set_name(st, rd, name);
+ rrddim_set_algorithm(st, rd, algorithm);
+ rrddim_set_multiplier(st, rd, multiplier);
+ rrddim_set_divisor(st, rd, divisor);
+
return rd;
}
@@ -71,8 +110,14 @@ RRDDIM *rrddim_add(RRDSET *st, const char *id, const char *name, collected_numbe
rrdset_strncpyz_name(filename, id, FILENAME_MAX);
snprintfz(fullfilename, FILENAME_MAX, "%s/%s.db", st->cache_dir, filename);
- if(st->rrd_memory_mode == RRD_MEMORY_MODE_SAVE || st->rrd_memory_mode == RRD_MEMORY_MODE_MAP) {
- rd = (RRDDIM *)mymmap(fullfilename, size, ((st->rrd_memory_mode == RRD_MEMORY_MODE_MAP) ? MAP_SHARED : MAP_PRIVATE), 1);
+ if(memory_mode == RRD_MEMORY_MODE_SAVE || memory_mode == RRD_MEMORY_MODE_MAP || memory_mode == RRD_MEMORY_MODE_RAM) {
+ rd = (RRDDIM *)mymmap(
+ (memory_mode == RRD_MEMORY_MODE_RAM)?NULL:fullfilename
+ , size
+ , ((memory_mode == RRD_MEMORY_MODE_MAP) ? MAP_SHARED : MAP_PRIVATE)
+ , 1
+ );
+
if(likely(rd)) {
// we have a file mapped for rd
@@ -83,56 +128,64 @@ RRDDIM *rrddim_add(RRDSET *st, const char *id, const char *name, collected_numbe
rd->variables = NULL;
rd->next = NULL;
rd->rrdset = NULL;
+ rd->exposed = 0;
struct timeval now;
now_realtime_timeval(&now);
- if(strcmp(rd->magic, RRDDIMENSION_MAGIC) != 0) {
- errno = 0;
- info("Initializing file %s.", fullfilename);
+ if(memory_mode == RRD_MEMORY_MODE_RAM) {
memset(rd, 0, size);
}
- else if(rd->memsize != size) {
- errno = 0;
- error("File %s does not have the desired size. Clearing it.", fullfilename);
- memset(rd, 0, size);
- }
- else if(rd->multiplier != multiplier) {
- errno = 0;
- error("File %s does not have the same multiplier. Clearing it.", fullfilename);
- memset(rd, 0, size);
- }
- else if(rd->divisor != divisor) {
- errno = 0;
- error("File %s does not have the same divisor. Clearing it.", fullfilename);
- memset(rd, 0, size);
- }
- else if(rd->update_every != st->update_every) {
- errno = 0;
- error("File %s does not have the same refresh frequency. Clearing it.", fullfilename);
- memset(rd, 0, size);
- }
- else if(dt_usec(&now, &rd->last_collected_time) > (rd->entries * rd->update_every * USEC_PER_SEC)) {
- errno = 0;
- error("File %s is too old. Clearing it.", fullfilename);
- memset(rd, 0, size);
+ else {
+ int reset = 0;
+
+ if(strcmp(rd->magic, RRDDIMENSION_MAGIC) != 0) {
+ info("Initializing file %s.", fullfilename);
+ memset(rd, 0, size);
+ reset = 1;
+ }
+ else if(rd->memsize != size) {
+ error("File %s does not have the desired size, expected %lu but found %lu. Clearing it.", fullfilename, size, rd->memsize);
+ memset(rd, 0, size);
+ reset = 1;
+ }
+ else if(rd->update_every != st->update_every) {
+ error("File %s does not have the same update frequency, expected %d but found %d. Clearing it.", fullfilename, st->update_every, rd->update_every);
+ memset(rd, 0, size);
+ reset = 1;
+ }
+ else if(dt_usec(&now, &rd->last_collected_time) > (rd->entries * rd->update_every * USEC_PER_SEC)) {
+ error("File %s is too old (last collected %llu seconds ago, but the database is %ld seconds). Clearing it.", fullfilename, dt_usec(&now, &rd->last_collected_time) / USEC_PER_SEC, rd->entries * rd->update_every);
+ memset(rd, 0, size);
+ reset = 1;
+ }
+
+ if(!reset) {
+ if(rd->algorithm != algorithm) {
+ info("File %s does not have the expected algorithm (expected %u '%s', found %u '%s'). Previous values may be wrong.",
+ fullfilename, algorithm, rrd_algorithm_name(algorithm), rd->algorithm, rrd_algorithm_name(rd->algorithm));
+ }
+
+ if(rd->multiplier != multiplier) {
+ info("File %s does not have the expected multiplier (expected " COLLECTED_NUMBER_FORMAT ", found " COLLECTED_NUMBER_FORMAT ". Previous values may be wrong.", fullfilename, multiplier, rd->multiplier);
+ }
+
+ if(rd->divisor != divisor) {
+ info("File %s does not have the expected divisor (expected " COLLECTED_NUMBER_FORMAT ", found " COLLECTED_NUMBER_FORMAT ". Previous values may be wrong.", fullfilename, divisor, rd->divisor);
+ }
+ }
}
- if(rd->algorithm && rd->algorithm != algorithm)
- error("File %s does not have the expected algorithm (expected %u '%s', found %u '%s'). Previous values may be wrong."
- , fullfilename, algorithm, rrd_algorithm_name(algorithm), rd->algorithm,
- rrd_algorithm_name(rd->algorithm));
-
// make sure we have the right memory mode
// even if we cleared the memory
- rd->rrd_memory_mode = st->rrd_memory_mode;
+ rd->rrd_memory_mode = memory_mode;
}
}
if(unlikely(!rd)) {
// if we didn't manage to get a mmap'd dimension, just create one
rd = callocz(1, size);
- rd->rrd_memory_mode = (st->rrd_memory_mode == RRD_MEMORY_MODE_NONE) ? RRD_MEMORY_MODE_NONE : RRD_MEMORY_MODE_RAM;
+ rd->rrd_memory_mode = (memory_mode == RRD_MEMORY_MODE_NONE) ? RRD_MEMORY_MODE_NONE : RRD_MEMORY_MODE_ALLOC;
}
rd->memsize = size;
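
Note: the new RRD_MEMORY_MODE_RAM path passes a NULL filename into mymmap(), which (assuming mymmap() treats NULL as a request for an anonymous mapping, as the ternary above implies) yields page-backed memory with no file behind it, while SAVE and MAP stay file-backed. A standalone sketch of that fallback:

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

// illustrative stand-in for mymmap(): NULL filename means RAM mode,
// i.e. an anonymous, private, zero-filled mapping
static void *map_memory(const char *filename, size_t size, int shared) {
    if(!filename)
        return mmap(NULL, size, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

    // file-backed path omitted: open/ftruncate/mmap of `filename`
    (void)shared;
    return NULL;
}

int main(void) {
    void *p = map_memory(NULL, 4096, 0);
    if(p == MAP_FAILED) { perror("mmap"); return 1; }
    memset(p, 0, 4096);
    puts("anonymous 4 KiB mapping ok");
    munmap(p, 4096);
    return 0;
}
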
@@ -161,8 +214,11 @@ RRDDIM *rrddim_add(RRDSET *st, const char *id, const char *name, collected_numbe
rd->entries = st->entries;
rd->update_every = st->update_every;
- // prevent incremental calculation spikes
- rd->collections_counter = 0;
+ if(rrdset_flag_check(st, RRDSET_FLAG_STORE_FIRST))
+ rd->collections_counter = 1;
+ else
+ rd->collections_counter = 0;
+
rd->updated = 0;
rd->flags = 0x00000000;
@@ -173,7 +229,7 @@ RRDDIM *rrddim_add(RRDSET *st, const char *id, const char *name, collected_numbe
rd->collected_volume = 0;
rd->stored_volume = 0;
rd->last_stored_value = 0;
- rd->values[st->current_entry] = pack_storage_number(0, SN_NOT_EXISTS);
+ rd->values[st->current_entry] = SN_EMPTY_SLOT; // pack_storage_number(0, SN_NOT_EXISTS);
rd->last_collected_time.tv_sec = 0;
rd->last_collected_time.tv_usec = 0;
rd->rrdset = st;
@@ -184,6 +240,23 @@ RRDDIM *rrddim_add(RRDSET *st, const char *id, const char *name, collected_numbe
st->dimensions = rd;
else {
RRDDIM *td = st->dimensions;
+
+ if(td->algorithm != rd->algorithm || llabs(td->multiplier) != llabs(rd->multiplier) || llabs(td->divisor) != llabs(rd->divisor)) {
+ if(!rrdset_flag_check(st, RRDSET_FLAG_HETEROGENEOUS)) {
+ #ifdef NETDATA_INTERNAL_CHECKS
+ info("Dimension '%s' added on chart '%s' of host '%s' is not homogeneous to other dimensions already present (algorithm is '%s' vs '%s', multiplier is " COLLECTED_NUMBER_FORMAT " vs " COLLECTED_NUMBER_FORMAT ", divisor is " COLLECTED_NUMBER_FORMAT " vs " COLLECTED_NUMBER_FORMAT ").",
+ rd->name,
+ st->name,
+ st->rrdhost->hostname,
+ rrd_algorithm_name(rd->algorithm), rrd_algorithm_name(td->algorithm),
+ rd->multiplier, td->multiplier,
+ rd->divisor, td->divisor
+ );
+ #endif
+ rrdset_flag_set(st, RRDSET_FLAG_HETEROGENEOUS);
+ }
+ }
+
for(; td->next; td = td->next) ;
td->next = rd;
}
@@ -202,7 +275,6 @@ RRDDIM *rrddim_add(RRDSET *st, const char *id, const char *name, collected_numbe
return(rd);
}
-
// ----------------------------------------------------------------------------
// RRDDIM remove / free a dimension
@@ -233,19 +305,16 @@ void rrddim_free(RRDSET *st, RRDDIM *rd)
switch(rd->rrd_memory_mode) {
case RRD_MEMORY_MODE_SAVE:
- debug(D_RRD_CALLS, "Saving dimension '%s' to '%s'.", rd->name, rd->cache_filename);
- savememory(rd->cache_filename, rd, rd->memsize);
- // continue to map mode - no break;
-
case RRD_MEMORY_MODE_MAP:
+ case RRD_MEMORY_MODE_RAM:
debug(D_RRD_CALLS, "Unmapping dimension '%s'.", rd->name);
freez((void *)rd->id);
freez(rd->cache_filename);
munmap(rd, rd->memsize);
break;
+ case RRD_MEMORY_MODE_ALLOC:
case RRD_MEMORY_MODE_NONE:
- case RRD_MEMORY_MODE_RAM:
debug(D_RRD_CALLS, "Removing dimension '%s'.", rd->name);
freez((void *)rd->id);
freez(rd->cache_filename);
@@ -263,7 +332,7 @@ int rrddim_hide(RRDSET *st, const char *id) {
RRDDIM *rd = rrddim_find(st, id);
if(unlikely(!rd)) {
- error("Cannot find dimension with id '%s' on stats '%s' (%s).", id, st->name, st->id);
+ error("Cannot find dimension with id '%s' on stats '%s' (%s) on host '%s'.", id, st->name, st->id, st->rrdhost->hostname);
return 1;
}
@@ -276,7 +345,7 @@ int rrddim_unhide(RRDSET *st, const char *id) {
RRDDIM *rd = rrddim_find(st, id);
if(unlikely(!rd)) {
- error("Cannot find dimension with id '%s' on stats '%s' (%s).", id, st->name, st->id);
+ error("Cannot find dimension with id '%s' on stats '%s' (%s) on host '%s'.", id, st->name, st->id, st->rrdhost->hostname);
return 1;
}
@@ -305,7 +374,7 @@ inline collected_number rrddim_set_by_pointer(RRDSET *st, RRDDIM *rd, collected_
collected_number rrddim_set(RRDSET *st, const char *id, collected_number value) {
RRDDIM *rd = rrddim_find(st, id);
if(unlikely(!rd)) {
- error("Cannot find dimension with id '%s' on stats '%s' (%s).", id, st->name, st->id);
+ error("Cannot find dimension with id '%s' on stats '%s' (%s) on host '%s'.", id, st->name, st->id, st->rrdhost->hostname);
return 0;
}
diff --git a/src/rrdhost.c b/src/rrdhost.c
index a2310330..ff8aa561 100644
--- a/src/rrdhost.c
+++ b/src/rrdhost.c
@@ -58,15 +58,32 @@ RRDHOST *rrdhost_find_by_hostname(const char *hostname, uint32_t hash) {
// ----------------------------------------------------------------------------
// RRDHOST - internal helpers
+static inline void rrdhost_init_tags(RRDHOST *host, const char *tags) {
+ if(host->tags && tags && !strcmp(host->tags, tags))
+ return;
+
+ void *old = (void *)host->tags;
+ host->tags = (tags && *tags)?strdupz(tags):NULL;
+ freez(old);
+}
+
static inline void rrdhost_init_hostname(RRDHOST *host, const char *hostname) {
- freez(host->hostname);
- host->hostname = strdupz(hostname);
+ if(host->hostname && hostname && !strcmp(host->hostname, hostname))
+ return;
+
+ void *old = host->hostname;
+ host->hostname = strdupz(hostname?hostname:"localhost");
host->hash_hostname = simple_hash(host->hostname);
+ freez(old);
}
static inline void rrdhost_init_os(RRDHOST *host, const char *os) {
- freez(host->os);
+ if(host->os && os && !strcmp(host->os, os))
+ return;
+
+ void *old = (void *)host->os;
host->os = strdupz(os?os:"unknown");
+ freez(old);
}
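
Note: rrdhost_init_tags(), rrdhost_init_hostname() and rrdhost_init_os() now share the same idempotent replace-then-free shape, so they can safely be re-run when a stream reconnects with updated values: skip the work when nothing changed, and free the old string only after the new one is in place, so the field never points at freed memory. A generic sketch of the pattern (plain strdup/free instead of netdata's strdupz/freez):

#define _POSIX_C_SOURCE 200809L
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

// fallback must be non-NULL here; netdata's tags variant stores NULL instead
static void set_string(char **field, const char *value, const char *fallback) {
    if(*field && value && !strcmp(*field, value))
        return;                   // unchanged, nothing to do

    char *old = *field;
    *field = strdup((value && *value) ? value : fallback);
    free(old);                    // freed only after the replacement is visible
}

int main(void) {
    char *os = NULL;
    set_string(&os, "linux", "unknown");
    set_string(&os, "linux", "unknown"); // no-op, no reallocation
    printf("%s\n", os);
    free(os);
    return 0;
}
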
static inline void rrdhost_init_machine_guid(RRDHOST *host, const char *machine_guid) {
@@ -80,16 +97,18 @@ static inline void rrdhost_init_machine_guid(RRDHOST *host, const char *machine_
// RRDHOST - add a host
RRDHOST *rrdhost_create(const char *hostname,
- const char *guid,
- const char *os,
- int update_every,
- long entries,
- RRD_MEMORY_MODE memory_mode,
- int health_enabled,
- int rrdpush_enabled,
- char *rrdpush_destination,
- char *rrdpush_api_key,
- int is_localhost
+ const char *registry_hostname,
+ const char *guid,
+ const char *os,
+ const char *tags,
+ int update_every,
+ long entries,
+ RRD_MEMORY_MODE memory_mode,
+ int health_enabled,
+ int rrdpush_enabled,
+ char *rrdpush_destination,
+ char *rrdpush_api_key,
+ int is_localhost
) {
debug(D_RRDHOST, "Host '%s': adding with guid '%s'", hostname, guid);
@@ -115,6 +134,8 @@ RRDHOST *rrdhost_create(const char *hostname,
rrdhost_init_hostname(host, hostname);
rrdhost_init_machine_guid(host, guid);
rrdhost_init_os(host, os);
+ rrdhost_init_tags(host, tags);
+ host->registry_hostname = strdupz((registry_hostname && *registry_hostname)?registry_hostname:hostname);
avl_init_lock(&(host->rrdset_root_index), rrdset_compare);
avl_init_lock(&(host->rrdset_root_index_name), rrdset_compare_name);
@@ -122,10 +143,10 @@ RRDHOST *rrdhost_create(const char *hostname,
avl_init_lock(&(host->variables_root_index), rrdvar_compare);
if(config_get_boolean(CONFIG_SECTION_GLOBAL, "delete obsolete charts files", 1))
- rrdhost_flag_set(host, RRDHOST_DELETE_OBSOLETE_FILES);
+ rrdhost_flag_set(host, RRDHOST_DELETE_OBSOLETE_CHARTS);
if(config_get_boolean(CONFIG_SECTION_GLOBAL, "delete orphan hosts files", 1) && !is_localhost)
- rrdhost_flag_set(host, RRDHOST_DELETE_ORPHAN_FILES);
+ rrdhost_flag_set(host, RRDHOST_DELETE_ORPHAN_HOST);
// ------------------------------------------------------------------------
@@ -229,8 +250,9 @@ RRDHOST *rrdhost_create(const char *hostname,
host = NULL;
}
else {
- info("Host '%s' with guid '%s' initialized"
+ info("Host '%s' (at registry as '%s') with guid '%s' initialized"
", os %s"
+ ", tags '%s'"
", update every %d"
", memory mode %s"
", history entries %ld"
@@ -243,8 +265,10 @@ RRDHOST *rrdhost_create(const char *hostname,
", alarms default handler '%s'"
", alarms default recipient '%s'"
, host->hostname
+ , host->registry_hostname
, host->machine_guid
, host->os
+ , (host->tags)?host->tags:""
, host->rrd_update_every
, rrd_memory_mode_name(host->rrd_memory_mode)
, host->rrd_history_entries
@@ -267,8 +291,10 @@ RRDHOST *rrdhost_create(const char *hostname,
RRDHOST *rrdhost_find_or_create(
const char *hostname
+ , const char *registry_hostname
, const char *guid
, const char *os
+ , const char *tags
, int update_every
, long history
, RRD_MEMORY_MODE mode
@@ -284,8 +310,10 @@ RRDHOST *rrdhost_find_or_create(
if(!host) {
host = rrdhost_create(
hostname
+ , registry_hostname
, guid
, os
+ , tags
, update_every
, history
, mode
@@ -307,22 +335,25 @@ RRDHOST *rrdhost_find_or_create(
}
if(host->rrd_update_every != update_every)
- error("Host '%s' has an update frequency of %d seconds, but the wanted one is %d seconds.", host->hostname, host->rrd_update_every, update_every);
+ error("Host '%s' has an update frequency of %d seconds, but the wanted one is %d seconds. Restart netdata here to apply the new settings.", host->hostname, host->rrd_update_every, update_every);
- if(host->rrd_history_entries != history)
- error("Host '%s' has history of %ld entries, but the wanted one is %ld entries.", host->hostname, host->rrd_history_entries, history);
+ if(host->rrd_history_entries < history)
+ error("Host '%s' has history of %ld entries, but the wanted one is %ld entries. Restart netdata here to apply the new settings.", host->hostname, host->rrd_history_entries, history);
if(host->rrd_memory_mode != mode)
- error("Host '%s' has memory mode '%s', but the wanted one is '%s'.", host->hostname, rrd_memory_mode_name(host->rrd_memory_mode), rrd_memory_mode_name(mode));
+ error("Host '%s' has memory mode '%s', but the wanted one is '%s'. Restart netdata here to apply the new settings.", host->hostname, rrd_memory_mode_name(host->rrd_memory_mode), rrd_memory_mode_name(mode));
+
+ // update host tags
+ rrdhost_init_tags(host, tags);
}
rrd_unlock();
- rrdhost_cleanup_orphan(host);
+ rrdhost_cleanup_orphan_hosts(host);
return host;
}
-static inline int rrdhost_should_be_deleted(RRDHOST *host, RRDHOST *protected, time_t now) {
+static inline int rrdhost_should_be_removed(RRDHOST *host, RRDHOST *protected, time_t now) {
if(host != protected
&& host != localhost
&& !host->connected_senders
@@ -333,7 +364,7 @@ static inline int rrdhost_should_be_deleted(RRDHOST *host, RRDHOST *protected, t
return 0;
}
-void rrdhost_cleanup_orphan(RRDHOST *protected) {
+void rrdhost_cleanup_orphan_hosts(RRDHOST *protected) {
time_t now = now_realtime_sec();
rrd_wrlock();
@@ -342,10 +373,10 @@ void rrdhost_cleanup_orphan(RRDHOST *protected) {
restart_after_removal:
rrdhost_foreach_write(host) {
- if(rrdhost_should_be_deleted(host, protected, now)) {
+ if(rrdhost_should_be_removed(host, protected, now)) {
info("Host '%s' with machine guid '%s' is obsolete - cleaning up.", host->hostname, host->machine_guid);
- if(rrdset_flag_check(host, RRDHOST_ORPHAN))
+ if(rrdhost_flag_check(host, RRDHOST_DELETE_ORPHAN_HOST) && rrdhost_flag_check(host, RRDHOST_ORPHAN))
rrdhost_delete(host);
else
rrdhost_save(host);
@@ -372,8 +403,10 @@ void rrd_init(char *hostname) {
rrd_wrlock();
localhost = rrdhost_create(
hostname
+ , registry_get_this_machine_hostname()
, registry_get_this_machine_guid()
, os_type
+ , config_get(CONFIG_SECTION_BACKEND, "host tags", "")
, default_rrd_update_every
, default_rrd_history_entries
, default_rrd_memory_mode
@@ -473,7 +506,8 @@ void rrdhost_free(RRDHOST *host) {
// ------------------------------------------------------------------------
// free it
- freez(host->os);
+ freez((void *)host->tags);
+ freez((void *)host->os);
freez(host->cache_dir);
freez(host->varlib_dir);
freez(host->rrdpush_api_key);
@@ -482,6 +516,7 @@ void rrdhost_free(RRDHOST *host) {
freez(host->health_default_recipient);
freez(host->health_log_filename);
freez(host->hostname);
+ freez(host->registry_hostname);
rrdhost_unlock(host);
netdata_rwlock_destroy(&host->health_log.alarm_log_rwlock);
netdata_rwlock_destroy(&host->rrdhost_rwlock);
@@ -497,12 +532,12 @@ void rrdhost_free_all(void) {
}
// ----------------------------------------------------------------------------
-// RRDHOST - save
+// RRDHOST - save host files
void rrdhost_save(RRDHOST *host) {
if(!host) return;
- info("Saving database of host '%s'...", host->hostname);
+ info("Saving/Closing database of host '%s'...", host->hostname);
RRDSET *st;
@@ -520,7 +555,7 @@ void rrdhost_save(RRDHOST *host) {
}
// ----------------------------------------------------------------------------
-// RRDHOST - delete files
+// RRDHOST - delete host files
void rrdhost_delete(RRDHOST *host) {
if(!host) return;
@@ -539,9 +574,43 @@ void rrdhost_delete(RRDHOST *host) {
rrdset_unlock(st);
}
+ recursively_delete_dir(host->cache_dir, "left-over host");
+
rrdhost_unlock(host);
}
+// ----------------------------------------------------------------------------
+// RRDHOST - cleanup host files
+
+void rrdhost_cleanup(RRDHOST *host) {
+ if(!host) return;
+
+ info("Cleaning up database of host '%s'...", host->hostname);
+
+ RRDSET *st;
+
+ // we get a write lock
+ // to ensure only one thread is saving the database
+ rrdhost_wrlock(host);
+
+ rrdset_foreach_write(st, host) {
+ rrdset_rdlock(st);
+
+ if(rrdset_flag_check(st, RRDSET_FLAG_OBSOLETE) && rrdhost_flag_check(host, RRDHOST_DELETE_OBSOLETE_CHARTS))
+ rrdset_delete(st);
+ else
+ rrdset_save(st);
+
+ rrdset_unlock(st);
+ }
+
+ rrdhost_unlock(host);
+}
+
+
+// ----------------------------------------------------------------------------
+// RRDHOST - save all hosts to disk
+
void rrdhost_save_all(void) {
info("Saving database [%zu hosts(s)]...", rrd_hosts_available);
@@ -554,7 +623,30 @@ void rrdhost_save_all(void) {
rrd_unlock();
}
-void rrdhost_cleanup_obsolete(RRDHOST *host) {
+// ----------------------------------------------------------------------------
+// RRDHOST - save or delete all hosts from disk
+
+void rrdhost_cleanup_all(void) {
+ info("Cleaning up database [%zu hosts(s)]...", rrd_hosts_available);
+
+ rrd_rdlock();
+
+ RRDHOST *host;
+ rrdhost_foreach_read(host) {
+ if(host != localhost && rrdhost_flag_check(host, RRDHOST_DELETE_OBSOLETE_CHARTS) && !host->connected_senders)
+ rrdhost_delete(host);
+ else
+ rrdhost_cleanup(host);
+ }
+
+ rrd_unlock();
+}
+
+
+// ----------------------------------------------------------------------------
+// RRDHOST - save or delete all the host charts from disk
+
+void rrdhost_cleanup_obsolete_charts(RRDHOST *host) {
time_t now = now_realtime_sec();
RRDSET *st;
@@ -569,7 +661,7 @@ restart_after_removal:
rrdset_rdlock(st);
- if(rrdhost_flag_check(host, RRDHOST_DELETE_OBSOLETE_FILES))
+ if(rrdhost_flag_check(host, RRDHOST_DELETE_OBSOLETE_CHARTS))
rrdset_delete(st);
else
rrdset_save(st);
diff --git a/src/rrdpush.c b/src/rrdpush.c
index 72e6d8a7..6def90fe 100644
--- a/src/rrdpush.c
+++ b/src/rrdpush.c
@@ -57,13 +57,16 @@ int rrdpush_init() {
// to its current clock, we send for this many
// iterations a BEGIN line without microseconds
// this is for the first iterations of each chart
-static unsigned int remote_clock_resync_iterations = 60;
+unsigned int remote_clock_resync_iterations = 60;
#define rrdpush_lock(host) netdata_mutex_lock(&((host)->rrdpush_mutex))
#define rrdpush_unlock(host) netdata_mutex_unlock(&((host)->rrdpush_mutex))
// checks if the current chart definition has been sent
static inline int need_to_send_chart_definition(RRDSET *st) {
+ if(unlikely(!(rrdset_flag_check(st, RRDSET_FLAG_EXPOSED_UPSTREAM))))
+ return 1;
+
RRDDIM *rd;
rrddim_foreach_read(rd, st)
if(!rd->exposed)
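
Note: with the new RRDSET_FLAG_EXPOSED_UPSTREAM flag, a definition is pushed again when either the chart as a whole was never announced, or any single dimension lost its exposed bit (which the rrddim_set_*() helpers earlier in this changeset clear on every property change). A self-contained sketch of that decision:

#include <stdio.h>

struct dim { int exposed; struct dim *next; };
struct chart { int exposed_upstream; struct dim *dims; };

// sketch of need_to_send_chart_definition(): resend when the chart was
// never exposed, or when any one dimension needs re-announcing
static int need_to_send_definition(struct chart *st) {
    if(!st->exposed_upstream)
        return 1;
    for(struct dim *rd = st->dims; rd; rd = rd->next)
        if(!rd->exposed)
            return 1;
    return 0;
}

int main(void) {
    struct dim d = { .exposed = 1, .next = NULL };
    struct chart st = { .exposed_upstream = 1, .dims = &d };
    printf("%d\n", need_to_send_definition(&st)); // 0: nothing to resend
    d.exposed = 0;                                // a dimension changed
    printf("%d\n", need_to_send_definition(&st)); // 1: resend definition
    return 0;
}
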
@@ -74,7 +77,9 @@ static inline int need_to_send_chart_definition(RRDSET *st) {
// sends the current chart definition
static inline void send_chart_definition(RRDSET *st) {
- buffer_sprintf(st->rrdhost->rrdpush_buffer, "CHART '%s' '%s' '%s' '%s' '%s' '%s' '%s' %ld %d\n"
+ rrdset_flag_set(st, RRDSET_FLAG_EXPOSED_UPSTREAM);
+
+ buffer_sprintf(st->rrdhost->rrdpush_buffer, "CHART \"%s\" \"%s\" \"%s\" \"%s\" \"%s\" \"%s\" \"%s\" %ld %d \"%s %s %s\"\n"
, st->id
, st->name
, st->title
@@ -84,11 +89,14 @@ static inline void send_chart_definition(RRDSET *st) {
, rrdset_type_name(st->chart_type)
, st->priority
, st->update_every
+ , rrdset_flag_check(st, RRDSET_FLAG_OBSOLETE)?"obsolete":""
+ , rrdset_flag_check(st, RRDSET_FLAG_DETAIL)?"detail":""
+ , rrdset_flag_check(st, RRDSET_FLAG_STORE_FIRST)?"store_first":""
);
RRDDIM *rd;
rrddim_foreach_read(rd, st) {
- buffer_sprintf(st->rrdhost->rrdpush_buffer, "DIMENSION '%s' '%s' '%s' " COLLECTED_NUMBER_FORMAT " " COLLECTED_NUMBER_FORMAT " '%s %s'\n"
+ buffer_sprintf(st->rrdhost->rrdpush_buffer, "DIMENSION \"%s\" \"%s\" \"%s\" " COLLECTED_NUMBER_FORMAT " " COLLECTED_NUMBER_FORMAT " \"%s %s\"\n"
, rd->id
, rd->name
, rrd_algorithm_name(rd->algorithm)
@@ -99,11 +107,13 @@ static inline void send_chart_definition(RRDSET *st) {
);
rd->exposed = 1;
}
+
+ st->upstream_resync_time = st->last_collected_time.tv_sec + (remote_clock_resync_iterations * st->update_every);
}
// sends the current chart dimensions
static inline void send_chart_metrics(RRDSET *st) {
- buffer_sprintf(st->rrdhost->rrdpush_buffer, "BEGIN %s %llu\n", st->id, (st->counter_done > remote_clock_resync_iterations)?st->usec_since_last_update:0);
+ buffer_sprintf(st->rrdhost->rrdpush_buffer, "BEGIN %s %llu\n", st->id, (st->upstream_resync_time > st->last_collected_time.tv_sec)?st->usec_since_last_update:0);
RRDDIM *rd;
rrddim_foreach_read(rd, st) {
@@ -117,7 +127,17 @@ static inline void send_chart_metrics(RRDSET *st) {
buffer_strcat(st->rrdhost->rrdpush_buffer, "END\n");
}
-void rrdpush_sender_thread_spawn(RRDHOST *host);
+static void rrdpush_sender_thread_spawn(RRDHOST *host);
+
+void rrdset_push_chart_definition(RRDSET *st) {
+ RRDHOST *host = st->rrdhost;
+
+ rrdset_rdlock(st);
+ rrdpush_lock(host);
+ send_chart_definition(st);
+ rrdpush_unlock(host);
+ rrdset_unlock(st);
+}
void rrdset_done_push(RRDSET *st) {
RRDHOST *host = st->rrdhost;
@@ -167,9 +187,7 @@ static void rrdpush_sender_thread_reset_all_charts(RRDHOST *host) {
RRDSET *st;
rrdset_foreach_read(st, host) {
- // make it re-align the current time
- // on the remote host
- st->counter_done = 0;
+ st->upstream_resync_time = 0;
rrdset_rdlock(st);
@@ -219,8 +237,6 @@ static void rrdpush_sender_thread_cleanup_locked_all(RRDHOST *host) {
host->rrdpush_buffer = NULL;
host->rrdpush_spawn = 0;
-
- rrdhost_flag_set(host, RRDHOST_ORPHAN);
}
void rrdpush_sender_thread_stop(RRDHOST *host) {
@@ -274,6 +290,7 @@ void *rrdpush_sender_thread(void *ptr) {
.tv_usec = 0
};
+ time_t last_sent_t = 0;
struct pollfd fds[2], *ifd, *ofd;
nfds_t fdmax;
@@ -281,8 +298,16 @@ void *rrdpush_sender_thread(void *ptr) {
ofd = &fds[1];
for(; host->rrdpush_enabled && !netdata_exit ;) {
+ debug(D_STREAM, "STREAM: Checking if we need to timeout the connection...");
+ if(host->rrdpush_socket != -1 && now_monotonic_sec() - last_sent_t > timeout) {
+ error("STREAM %s [send to %s]: could not send metrics for %d seconds - closing connection - we have sent %zu bytes on this connection.", host->hostname, connected_to, timeout, sent_connection);
+ close(host->rrdpush_socket);
+ host->rrdpush_socket = -1;
+ }
if(unlikely(host->rrdpush_socket == -1)) {
+ debug(D_STREAM, "STREAM: Attempting to connect...");
+
// stop appending data into rrdpush_buffer
// they will be lost, so there is no point to do it
host->rrdpush_connected = 0;
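
Note: the sender now tracks the last successful send on the monotonic clock and proactively drops connections that stayed silent longer than the timeout, instead of waiting for the kernel to notice a dead peer. A standalone sketch of the rule (now_monotonic_sec() re-implemented here for illustration, timeout value made up):

#include <stdio.h>
#include <time.h>

// monotonic clock: wall-clock jumps cannot trigger false timeouts
static time_t now_monotonic_sec(void) {
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return ts.tv_sec;
}

int main(void) {
    int timeout = 60;                       // illustrative default
    time_t last_sent_t = now_monotonic_sec();

    // inside the send loop, after every successful send():
    // last_sent_t = now_monotonic_sec();

    if(now_monotonic_sec() - last_sent_t > timeout)
        puts("timeout: close the socket and reconnect");
    else
        puts("connection is healthy");
    return 0;
}
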
@@ -298,16 +323,19 @@ void *rrdpush_sender_thread(void *ptr) {
info("STREAM %s [send to %s]: initializing communication...", host->hostname, connected_to);
- char http[1000 + 1];
- snprintfz(http, 1000,
- "STREAM key=%s&hostname=%s&machine_guid=%s&os=%s&update_every=%d HTTP/1.1\r\n"
+ #define HTTP_HEADER_SIZE 8192
+ char http[HTTP_HEADER_SIZE + 1];
+ snprintfz(http, HTTP_HEADER_SIZE,
+ "STREAM key=%s&hostname=%s&registry_hostname=%s&machine_guid=%s&update_every=%d&os=%s&tags=%s HTTP/1.1\r\n"
"User-Agent: netdata-push-service/%s\r\n"
"Accept: */*\r\n\r\n"
, host->rrdpush_api_key
, host->hostname
+ , host->registry_hostname
, host->machine_guid
- , host->os
, default_rrd_update_every
+ , host->os
+ , (host->tags)?host->tags:""
, program_version
);
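
Note: the handshake buffer grows from 1000 bytes to 8 KiB, and the request line gains registry_hostname and tags. A runnable sketch of the request the sender builds (every field value below is made up for illustration):

#include <stdio.h>

int main(void) {
    char http[8192 + 1];
    snprintf(http, sizeof(http),
        "STREAM key=%s&hostname=%s&registry_hostname=%s&machine_guid=%s"
        "&update_every=%d&os=%s&tags=%s HTTP/1.1\r\n"
        "User-Agent: netdata-push-service/%s\r\n"
        "Accept: */*\r\n\r\n",
        "11111111-2222-3333-4444-555555555555",   // api key (illustrative)
        "web01",                                  // hostname
        "web01.example.com",                      // registry hostname
        "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee",   // machine guid
        1,                                        // update_every
        "linux",                                  // os
        "production",                             // tags
        "1.7.0");                                 // program_version
    fputs(http, stdout);
    return 0;
}
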
@@ -321,7 +349,7 @@ void *rrdpush_sender_thread(void *ptr) {
info("STREAM %s [send to %s]: waiting response from remote netdata...", host->hostname, connected_to);
- if(recv_timeout(host->rrdpush_socket, http, 1000, 0, timeout) == -1) {
+ if(recv_timeout(host->rrdpush_socket, http, HTTP_HEADER_SIZE, 0, timeout) == -1) {
close(host->rrdpush_socket);
host->rrdpush_socket = -1;
error("STREAM %s [send to %s]: failed to initialize communication", host->hostname, connected_to);
@@ -338,15 +366,21 @@ void *rrdpush_sender_thread(void *ptr) {
}
info("STREAM %s [send to %s]: established communication - sending metrics...", host->hostname, connected_to);
+ last_sent_t = now_monotonic_sec();
- if(fcntl(host->rrdpush_socket, F_SETFL, O_NONBLOCK) < 0)
+ if(sock_setnonblock(host->rrdpush_socket) < 0)
error("STREAM %s [send to %s]: cannot set non-blocking mode for socket.", host->hostname, connected_to);
+ if(sock_enlarge_out(host->rrdpush_socket) < 0)
+ error("STREAM %s [send to %s]: cannot enlarge the socket buffer.", host->hostname, connected_to);
+
rrdpush_sender_thread_data_flush(host);
sent_connection = 0;
// allow appending data into rrdpush_buffer
host->rrdpush_connected = 1;
+
+ debug(D_STREAM, "Connected...");
}
ifd->fd = host->rrdpush_pipe[PIPE_READ];
@@ -356,82 +390,113 @@ void *rrdpush_sender_thread(void *ptr) {
ofd->fd = host->rrdpush_socket;
ofd->revents = 0;
if(begin < buffer_strlen(host->rrdpush_buffer)) {
+ debug(D_STREAM, "STREAM: Requesting data output on streaming socket...");
ofd->events = POLLOUT;
fdmax = 2;
}
else {
+ debug(D_STREAM, "STREAM: Not requesting data output on streaming socket (nothing to send now)...");
ofd->events = 0;
fdmax = 1;
}
+ debug(D_STREAM, "STREAM: Waiting for poll() events (current buffer length %zu bytes)...", buffer_strlen(host->rrdpush_buffer));
if(netdata_exit) break;
- int retval = poll(fds, fdmax, timeout * 1000);
+ int retval = poll(fds, fdmax, 1000);
if(netdata_exit) break;
if(unlikely(retval == -1)) {
- if(errno == EAGAIN || errno == EINTR)
+ debug(D_STREAM, "STREAM: poll() failed (current buffer length %zu bytes)...", buffer_strlen(host->rrdpush_buffer));
+
+ if(errno == EAGAIN || errno == EINTR) {
+ debug(D_STREAM, "STREAM: poll() failed with EAGAIN or EINTR...");
continue;
+ }
error("STREAM %s [send to %s]: failed to poll().", host->hostname, connected_to);
close(host->rrdpush_socket);
host->rrdpush_socket = -1;
break;
}
- else if(unlikely(!retval)) {
- // timeout
- continue;
- }
-
- if(ifd->revents & POLLIN) {
- char buffer[1000 + 1];
- if(read(host->rrdpush_pipe[PIPE_READ], buffer, 1000) == -1)
- error("STREAM %s [send to %s]: cannot read from internal pipe.", host->hostname, connected_to);
- }
-
- if(ofd->revents & POLLOUT && begin < buffer_strlen(host->rrdpush_buffer)) {
+ else if(likely(retval)) {
+ if (ifd->revents & POLLIN) {
+ debug(D_STREAM, "STREAM: Data added to send buffer (current buffer length %zu bytes)...", buffer_strlen(host->rrdpush_buffer));
- // BEGIN RRDPUSH LOCKED SESSION
-
- // during this session, data collectors
- // will not be able to append data to our buffer
- // but the socket is in non-blocking mode
- // so, we will not block at send()
-
- if(pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, NULL) != 0)
- error("STREAM %s [send]: cannot set pthread cancel state to DISABLE.", host->hostname);
-
- rrdpush_lock(host);
+ char buffer[1000 + 1];
+ if (read(host->rrdpush_pipe[PIPE_READ], buffer, 1000) == -1)
+ error("STREAM %s [send to %s]: cannot read from internal pipe.", host->hostname, connected_to);
+ }
- ssize_t ret = send(host->rrdpush_socket, &host->rrdpush_buffer->buffer[begin], buffer_strlen(host->rrdpush_buffer) - begin, MSG_DONTWAIT);
- if(ret == -1) {
- if(errno != EAGAIN && errno != EINTR) {
- error("STREAM %s [send to %s]: failed to send metrics - closing connection - we have sent %zu bytes on this connection.", host->hostname, connected_to, sent_connection);
+ if (ofd->revents & POLLOUT && begin < buffer_strlen(host->rrdpush_buffer)) {
+ debug(D_STREAM, "STREAM: Sending data (current buffer length %zu bytes)...", buffer_strlen(host->rrdpush_buffer));
+
+ // BEGIN RRDPUSH LOCKED SESSION
+
+ // during this session, data collectors
+ // will not be able to append data to our buffer
+ // but the socket is in non-blocking mode
+ // so, we will not block at send()
+
+ if (pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, NULL) != 0)
+ error("STREAM %s [send]: cannot set pthread cancel state to DISABLE.", host->hostname);
+
+ debug(D_STREAM, "STREAM: Getting exclusive lock on host...");
+ rrdpush_lock(host);
+
+ debug(D_STREAM, "STREAM: Sending data, starting from %zu, size %zu...", begin, buffer_strlen(host->rrdpush_buffer));
+ ssize_t ret = send(host->rrdpush_socket, &host->rrdpush_buffer->buffer[begin], buffer_strlen(host->rrdpush_buffer) - begin, MSG_DONTWAIT);
+ if (unlikely(ret == -1)) {
+ if (errno != EAGAIN && errno != EINTR && errno != EWOULDBLOCK) {
+ debug(D_STREAM, "STREAM: Send failed - closing socket...");
+ error("STREAM %s [send to %s]: failed to send metrics - closing connection - we have sent %zu bytes on this connection.", host->hostname, connected_to, sent_connection);
+ close(host->rrdpush_socket);
+ host->rrdpush_socket = -1;
+ }
+ else {
+ debug(D_STREAM, "STREAM: Send failed - will retry...");
+ }
+ }
+ else if(likely(ret > 0)) {
+ sent_connection += ret;
+ sent_bytes += ret;
+ begin += ret;
+
+ if (begin == buffer_strlen(host->rrdpush_buffer)) {
+ // we sent it all
+
+ debug(D_STREAM, "STREAM: Sent %zd bytes (the whole buffer)...", ret);
+ buffer_flush(host->rrdpush_buffer);
+ begin = 0;
+ }
+ else {
+ debug(D_STREAM, "STREAM: Sent %zd bytes (part of the data buffer)...", ret);
+ }
+
+ last_sent_t = now_monotonic_sec();
+ }
+ else {
+ debug(D_STREAM, "STREAM: send() returned %zd - closing the socket...", ret);
+ error("STREAM %s [send to %s]: failed to send metrics (send() returned %zd) - closing connection - we have sent %zu bytes on this connection.", host->hostname, connected_to, ret, sent_connection);
close(host->rrdpush_socket);
host->rrdpush_socket = -1;
}
- }
- else {
- sent_connection += ret;
- sent_bytes += ret;
- begin += ret;
- if(begin == buffer_strlen(host->rrdpush_buffer)) {
- // we send it all
-
- buffer_flush(host->rrdpush_buffer);
- begin = 0;
- }
- }
- rrdpush_unlock(host);
+ debug(D_STREAM, "STREAM: Releasing exclusive lock on host...");
+ rrdpush_unlock(host);
- if(pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL) != 0)
- error("STREAM %s [send]: cannot set pthread cancel state to ENABLE.", host->hostname);
+ if (pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL) != 0)
+ error("STREAM %s [send]: cannot set pthread cancel state to ENABLE.", host->hostname);
- // END RRDPUSH LOCKED SESSION
+ // END RRDPUSH LOCKED SESSION
+ }
+ }
+ else {
+ debug(D_STREAM, "STREAM: poll() timed out.");
}
// protection from overflow
- if(host->rrdpush_buffer->len > max_size) {
+ if(buffer_strlen(host->rrdpush_buffer) > max_size) {
+ debug(D_STREAM, "STREAM: Buffer is too big (%zu bytes), bigger than the max (%zu) - flushing it...", buffer_strlen(host->rrdpush_buffer), max_size);
errno = 0;
error("STREAM %s [send to %s]: too many data pending - buffer is %zu bytes long, %zu unsent - we have sent %zu bytes in total, %zu on this connection. Closing connection to flush the data.", host->hostname, connected_to, host->rrdpush_buffer->len, host->rrdpush_buffer->len - begin, sent_bytes, sent_connection);
if(host->rrdpush_socket != -1) {
@@ -464,7 +529,7 @@ cleanup:
// ----------------------------------------------------------------------------
// rrdpush receiver thread
-int rrdpush_receive(int fd, const char *key, const char *hostname, const char *machine_guid, const char *os, int update_every, char *client_ip, char *client_port) {
+static int rrdpush_receive(int fd, const char *key, const char *hostname, const char *registry_hostname, const char *machine_guid, const char *os, const char *tags, int update_every, char *client_ip, char *client_port) {
RRDHOST *host;
int history = default_rrd_history_entries;
RRD_MEMORY_MODE mode = default_rrd_memory_mode;
@@ -499,13 +564,18 @@ int rrdpush_receive(int fd, const char *key, const char *hostname, const char *m
rrdpush_api_key = appconfig_get(&stream_config, key, "default proxy api key", rrdpush_api_key);
rrdpush_api_key = appconfig_get(&stream_config, machine_guid, "proxy api key", rrdpush_api_key);
+ tags = appconfig_set_default(&stream_config, machine_guid, "host tags", (tags)?tags:"");
+ if(tags && !*tags) tags = NULL;
+
if(!strcmp(machine_guid, "localhost"))
host = localhost;
else
host = rrdhost_find_or_create(
hostname
+ , registry_hostname
, machine_guid
, os
+ , tags
, update_every
, history
, mode
@@ -522,7 +592,7 @@ int rrdpush_receive(int fd, const char *key, const char *hostname, const char *m
}
#ifdef NETDATA_INTERNAL_CHECKS
- info("STREAM %s [receive from [%s]:%s]: client willing to stream metrics for host '%s' with machine_guid '%s': update every = %d, history = %ld, memory mode = %s, health %s"
+ info("STREAM %s [receive from [%s]:%s]: client willing to stream metrics for host '%s' with machine_guid '%s': update every = %d, history = %ld, memory mode = %s, health %s, tags '%s'"
, hostname
, client_ip
, client_port
@@ -532,6 +602,7 @@ int rrdpush_receive(int fd, const char *key, const char *hostname, const char *m
, host->rrd_history_entries
, rrd_memory_mode_name(host->rrd_memory_mode)
, (health_enabled == CONFIG_BOOLEAN_NO)?"disabled":((health_enabled == CONFIG_BOOLEAN_YES)?"enabled":"auto")
+ , (host->tags)?host->tags:""
);
#endif // NETDATA_INTERNAL_CHECKS
@@ -560,7 +631,7 @@ int rrdpush_receive(int fd, const char *key, const char *hostname, const char *m
}
// remove the non-blocking flag from the socket
- if(fcntl(fd, F_SETFL, fcntl(fd, F_GETFL, 0) & ~O_NONBLOCK) == -1)
+ if(sock_delnonblock(fd) < 0)
error("STREAM %s [receive from [%s]:%s]: cannot remove the non-blocking flag from socket %d", host->hostname, client_ip, client_port, fd);
// convert the socket to a FILE *
@@ -572,7 +643,11 @@ int rrdpush_receive(int fd, const char *key, const char *hostname, const char *m
}
rrdhost_wrlock(host);
+ if(host->connected_senders > 0)
+ info("STREAM %s [receive from [%s]:%s]: multiple streaming connections for the same host detected. If multiple netdata are pushing metrics for the same charts, at the same time, the result is unexpected.", host->hostname, client_ip, client_port);
+
host->connected_senders++;
+ rrdhost_flag_clear(host, RRDHOST_ORPHAN);
if(health_enabled != CONFIG_BOOLEAN_NO)
host->health_delay_up_to = now_realtime_sec() + alarms_delay;
rrdhost_unlock(host);
@@ -586,6 +661,7 @@ int rrdpush_receive(int fd, const char *key, const char *hostname, const char *m
host->senders_disconnected_time = now_realtime_sec();
host->connected_senders--;
if(!host->connected_senders) {
+ rrdhost_flag_set(host, RRDHOST_ORPHAN);
if(health_enabled == CONFIG_BOOLEAN_AUTO)
host->health_enabled = 0;
}
@@ -603,14 +679,16 @@ struct rrdpush_thread {
int fd;
char *key;
char *hostname;
+ char *registry_hostname;
char *machine_guid;
char *os;
+ char *tags;
char *client_ip;
char *client_port;
int update_every;
};
-void *rrdpush_receiver_thread(void *ptr) {
+static void *rrdpush_receiver_thread(void *ptr) {
struct rrdpush_thread *rpt = (struct rrdpush_thread *)ptr;
if (pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, NULL) != 0)
@@ -621,13 +699,15 @@ void *rrdpush_receiver_thread(void *ptr) {
info("STREAM %s [%s]:%s: receive thread created (task id %d)", rpt->hostname, rpt->client_ip, rpt->client_port, gettid());
- rrdpush_receive(rpt->fd, rpt->key, rpt->hostname, rpt->machine_guid, rpt->os, rpt->update_every, rpt->client_ip, rpt->client_port);
+ rrdpush_receive(rpt->fd, rpt->key, rpt->hostname, rpt->registry_hostname, rpt->machine_guid, rpt->os, rpt->tags, rpt->update_every, rpt->client_ip, rpt->client_port);
info("STREAM %s [receive from [%s]:%s]: receive thread ended (task id %d)", rpt->hostname, rpt->client_ip, rpt->client_port, gettid());
freez(rpt->key);
freez(rpt->hostname);
+ freez(rpt->registry_hostname);
freez(rpt->machine_guid);
freez(rpt->os);
+ freez(rpt->tags);
freez(rpt->client_ip);
freez(rpt->client_port);
freez(rpt);
@@ -636,7 +716,7 @@ void *rrdpush_receiver_thread(void *ptr) {
return NULL;
}
-void rrdpush_sender_thread_spawn(RRDHOST *host) {
+static void rrdpush_sender_thread_spawn(RRDHOST *host) {
rrdhost_wrlock(host);
if(!host->rrdpush_spawn) {
@@ -646,7 +726,6 @@ void rrdpush_sender_thread_spawn(RRDHOST *host) {
else if(pthread_detach(host->rrdpush_thread))
error("STREAM %s [send]: cannot request detach newly created thread.", host->hostname);
- rrdhost_flag_clear(host, RRDHOST_ORPHAN);
host->rrdpush_spawn = 1;
}
@@ -658,7 +737,7 @@ int rrdpush_receiver_thread_spawn(RRDHOST *host, struct web_client *w, char *url
info("STREAM [receive from [%s]:%s]: new client connection.", w->client_ip, w->client_port);
- char *key = NULL, *hostname = NULL, *machine_guid = NULL, *os = "unknown";
+ char *key = NULL, *hostname = NULL, *registry_hostname = NULL, *machine_guid = NULL, *os = "unknown", *tags = NULL;
int update_every = default_rrd_update_every;
char buf[GUID_LEN + 1];
@@ -674,12 +753,18 @@ int rrdpush_receiver_thread_spawn(RRDHOST *host, struct web_client *w, char *url
key = value;
else if(!strcmp(name, "hostname"))
hostname = value;
+ else if(!strcmp(name, "registry_hostname"))
+ registry_hostname = value;
else if(!strcmp(name, "machine_guid"))
machine_guid = value;
else if(!strcmp(name, "update_every"))
update_every = (int)strtoul(value, NULL, 0);
else if(!strcmp(name, "os"))
os = value;
+ else if(!strcmp(name, "tags"))
+ tags = value;
+ else
+ info("STREAM [receive from [%s]:%s]: request has parameter '%s' = '%s', which is not used.", w->client_ip, w->client_port, key, value);
}
if(!key || !*key) {
@@ -704,21 +789,21 @@ int rrdpush_receiver_thread_spawn(RRDHOST *host, struct web_client *w, char *url
}
if(regenerate_guid(key, buf) == -1) {
- error("STREAM [receive from [%s]:%s]: API key '%s' is not valid GUID. Forbidding access.", w->client_ip, w->client_port, key);
+ error("STREAM [receive from [%s]:%s]: API key '%s' is not valid GUID (use the command uuidgen to generate one). Forbidding access.", w->client_ip, w->client_port, key);
buffer_flush(w->response.data);
buffer_sprintf(w->response.data, "Your API key is invalid.");
return 401;
}
if(regenerate_guid(machine_guid, buf) == -1) {
- error("STREAM [receive from [%s]:%s]: machine GUID '%s' is not GUID. Forbidding access.", w->client_ip, w->client_port, key);
+ error("STREAM [receive from [%s]:%s]: machine GUID '%s' is not GUID. Forbidding access.", w->client_ip, w->client_port, machine_guid);
buffer_flush(w->response.data);
buffer_sprintf(w->response.data, "Your machine GUID is invalid.");
return 404;
}
if(!appconfig_get_boolean(&stream_config, key, "enabled", 0)) {
- error("STREAM [receive from [%s]:%s]: API key '%s' is not allowed. Forbidding access.", w->client_ip, w->client_port, machine_guid);
+ error("STREAM [receive from [%s]:%s]: API key '%s' is not allowed. Forbidding access.", w->client_ip, w->client_port, key);
buffer_flush(w->response.data);
buffer_sprintf(w->response.data, "Your API key is not permitted access.");
return 401;
@@ -732,14 +817,16 @@ int rrdpush_receiver_thread_spawn(RRDHOST *host, struct web_client *w, char *url
}
struct rrdpush_thread *rpt = mallocz(sizeof(struct rrdpush_thread));
- rpt->fd = w->ifd;
- rpt->key = strdupz(key);
- rpt->hostname = strdupz(hostname);
- rpt->machine_guid = strdupz(machine_guid);
- rpt->os = strdupz(os);
- rpt->client_ip = strdupz(w->client_ip);
- rpt->client_port = strdupz(w->client_port);
- rpt->update_every = update_every;
+ rpt->fd = w->ifd;
+ rpt->key = strdupz(key);
+ rpt->hostname = strdupz(hostname);
+ rpt->registry_hostname = strdupz((registry_hostname && *registry_hostname)?registry_hostname:hostname);
+ rpt->machine_guid = strdupz(machine_guid);
+ rpt->os = strdupz(os);
+ rpt->tags = (tags)?strdupz(tags):NULL;
+ rpt->client_ip = strdupz(w->client_ip);
+ rpt->client_port = strdupz(w->client_port);
+ rpt->update_every = update_every;
pthread_t thread;
debug(D_SYSTEM, "STREAM [receive from [%s]:%s]: starting receiving thread.", w->client_ip, w->client_port);
diff --git a/src/rrdpush.h b/src/rrdpush.h
index dddbe758..c3c7f4a5 100644
--- a/src/rrdpush.h
+++ b/src/rrdpush.h
@@ -4,9 +4,11 @@
extern int default_rrdpush_enabled;
extern char *default_rrdpush_destination;
extern char *default_rrdpush_api_key;
+extern unsigned int remote_clock_resync_iterations;
extern int rrdpush_init();
extern void rrdset_done_push(RRDSET *st);
+extern void rrdset_push_chart_definition(RRDSET *st);
extern void *rrdpush_sender_thread(void *ptr);
extern int rrdpush_receiver_thread_spawn(RRDHOST *host, struct web_client *w, char *url);
diff --git a/src/rrdset.c b/src/rrdset.c
index c847b969..caa427ff 100644
--- a/src/rrdset.c
+++ b/src/rrdset.c
@@ -168,6 +168,58 @@ void rrdset_set_name(RRDSET *st, const char *name) {
error("RRDSET: INTERNAL ERROR: attempted to index duplicate chart name '%s'", st->name);
}
+inline void rrdset_is_obsolete(RRDSET *st) {
+ if(unlikely(!(rrdset_flag_check(st, RRDSET_FLAG_OBSOLETE)))) {
+ rrdset_flag_set(st, RRDSET_FLAG_OBSOLETE);
+ rrdset_flag_clear(st, RRDSET_FLAG_EXPOSED_UPSTREAM);
+
+ // the chart will not get more updates (data collection)
+ // so, we have to push its definition now
+ if(unlikely(st->rrdhost->rrdpush_enabled))
+ rrdset_push_chart_definition(st);
+ }
+}
+
+inline void rrdset_isnot_obsolete(RRDSET *st) {
+ if(unlikely((rrdset_flag_check(st, RRDSET_FLAG_OBSOLETE)))) {
+ rrdset_flag_clear(st, RRDSET_FLAG_OBSOLETE);
+ rrdset_flag_clear(st, RRDSET_FLAG_EXPOSED_UPSTREAM);
+
+ // the chart will be pushed upstream automatically
+ // due to data collection
+ }
+}
+
+inline void rrdset_update_heterogeneous_flag(RRDSET *st) {
+ RRDDIM *rd;
+
+ rrdset_flag_clear(st, RRDSET_FLAG_HOMEGENEOUS_CHECK);
+
+ RRD_ALGORITHM algorithm = st->dimensions->algorithm;
+ collected_number multiplier = llabs(st->dimensions->multiplier);
+ collected_number divisor = llabs(st->dimensions->divisor);
+
+ rrddim_foreach_read(rd, st) {
+ if(algorithm != rd->algorithm || multiplier != llabs(rd->multiplier) || divisor != llabs(rd->divisor)) {
+ if(!rrdset_flag_check(st, RRDSET_FLAG_HETEROGENEOUS)) {
+ #ifdef NETDATA_INTERNAL_CHECKS
+ info("Dimension '%s' added on chart '%s' of host '%s' is not homogeneous to other dimensions already present (algorithm is '%s' vs '%s', multiplier is " COLLECTED_NUMBER_FORMAT " vs " COLLECTED_NUMBER_FORMAT ", divisor is " COLLECTED_NUMBER_FORMAT " vs " COLLECTED_NUMBER_FORMAT ").",
+ rd->name,
+ st->name,
+ st->rrdhost->hostname,
+ rrd_algorithm_name(rd->algorithm), rrd_algorithm_name(algorithm),
+ rd->multiplier, multiplier,
+ rd->divisor, divisor
+ );
+ #endif
+ rrdset_flag_set(st, RRDSET_FLAG_HETEROGENEOUS);
+ }
+ return;
+ }
+ }
+
+ rrdset_flag_clear(st, RRDSET_FLAG_HETEROGENEOUS);
+}
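
Note: both here and in rrddim_add_custom() the homogeneity test compares the algorithm plus the absolute multiplier and divisor, since a sign flip only negates the rendered value and does not change how samples are stored. A standalone sketch of the predicate:

#include <stdio.h>
#include <stdlib.h>

struct dim { int algorithm; long long multiplier, divisor; };

// two dimensions are homogeneous when they share an algorithm and the
// same absolute scaling factors
static int homogeneous(const struct dim *a, const struct dim *b) {
    return a->algorithm == b->algorithm
        && llabs(a->multiplier) == llabs(b->multiplier)
        && llabs(a->divisor)    == llabs(b->divisor);
}

int main(void) {
    struct dim in  = { 1,  8, 1 };  // e.g. incremental, scaled by 8
    struct dim out = { 1, -8, 1 };  // same scaling, negated for display
    printf("%s\n", homogeneous(&in, &out) ? "homogeneous" : "heterogeneous");
    return 0;
}
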
// ----------------------------------------------------------------------------
// RRDSET - reset a chart
@@ -188,7 +240,7 @@ void rrdset_reset(RRDSET *st) {
rd->last_collected_time.tv_sec = 0;
rd->last_collected_time.tv_usec = 0;
rd->collections_counter = 0;
- memset(rd->values, 0, rd->entries * sizeof(storage_number));
+ // memset(rd->values, 0, rd->entries * sizeof(storage_number));
}
}
@@ -199,7 +251,7 @@ inline long align_entries_to_pagesize(RRD_MEMORY_MODE mode, long entries) {
if(unlikely(entries < 5)) entries = 5;
if(unlikely(entries > RRD_HISTORY_ENTRIES_MAX)) entries = RRD_HISTORY_ENTRIES_MAX;
- if(unlikely(mode == RRD_MEMORY_MODE_NONE || mode == RRD_MEMORY_MODE_RAM))
+ if(unlikely(mode == RRD_MEMORY_MODE_NONE || mode == RRD_MEMORY_MODE_ALLOC))
return entries;
long page = (size_t)sysconf(_SC_PAGESIZE);
@@ -215,14 +267,18 @@ inline long align_entries_to_pagesize(RRD_MEMORY_MODE mode, long entries) {
return entries;
}
-static inline void last_collected_time_align(struct timeval *tv, int update_every) {
- tv->tv_sec -= tv->tv_sec % update_every;
- tv->tv_usec = 500000;
+static inline void last_collected_time_align(RRDSET *st) {
+ st->last_collected_time.tv_sec -= st->last_collected_time.tv_sec % st->update_every;
+
+ if(unlikely(rrdset_flag_check(st, RRDSET_FLAG_STORE_FIRST)))
+ st->last_collected_time.tv_usec = 0;
+ else
+ st->last_collected_time.tv_usec = 500000;
}
-static inline void last_updated_time_align(struct timeval *tv, int update_every) {
- tv->tv_sec -= tv->tv_sec % update_every;
- tv->tv_usec = 0;
+static inline void last_updated_time_align(RRDSET *st) {
+ st->last_updated.tv_sec -= st->last_updated.tv_sec % st->update_every;
+ st->last_updated.tv_usec = 0;
}
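
Note: the alignment helpers now operate on the chart itself, and STORE_FIRST charts anchor collection times at .0 seconds instead of the usual .5 (presumably so the first sample can be stored verbatim rather than interpolated). A small sketch of the arithmetic:

#include <stdio.h>
#include <time.h>

int main(void) {
    int update_every = 10;                  // illustrative chart step
    time_t t = 1234567894;                  // raw collection time
    time_t aligned = t - (t % update_every);// snap to the update_every grid

    printf("raw      %ld\n", (long)t);
    printf("aligned  %ld.%06d (store_first)\n", (long)aligned, 0);
    printf("aligned  %ld.%06d (default)\n", (long)aligned, 500000);
    return 0;
}
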
// ----------------------------------------------------------------------------
@@ -279,30 +335,36 @@ void rrdset_free(RRDSET *st) {
// free directly allocated members
freez(st->config_section);
- if(st->rrd_memory_mode == RRD_MEMORY_MODE_SAVE || st->rrd_memory_mode == RRD_MEMORY_MODE_MAP) {
- debug(D_RRD_CALLS, "Unmapping stats '%s'.", st->name);
- munmap(st, st->memsize);
+ switch(st->rrd_memory_mode) {
+ case RRD_MEMORY_MODE_SAVE:
+ case RRD_MEMORY_MODE_MAP:
+ case RRD_MEMORY_MODE_RAM:
+ debug(D_RRD_CALLS, "Unmapping stats '%s'.", st->name);
+ munmap(st, st->memsize);
+ break;
+
+ case RRD_MEMORY_MODE_ALLOC:
+ case RRD_MEMORY_MODE_NONE:
+ freez(st);
+ break;
}
- else
- freez(st);
}
void rrdset_save(RRDSET *st) {
- RRDDIM *rd;
-
rrdset_check_rdlock(st);
// info("Saving chart '%s' ('%s')", st->id, st->name);
if(st->rrd_memory_mode == RRD_MEMORY_MODE_SAVE) {
debug(D_RRD_STATS, "Saving stats '%s' to '%s'.", st->name, st->cache_filename);
- savememory(st->cache_filename, st, st->memsize);
+ memory_file_save(st->cache_filename, st, st->memsize);
}
+ RRDDIM *rd;
rrddim_foreach_read(rd, st) {
if(likely(rd->rrd_memory_mode == RRD_MEMORY_MODE_SAVE)) {
debug(D_RRD_STATS, "Saving dimension '%s' to '%s'.", rd->name, rd->cache_filename);
- savememory(rd->cache_filename, rd, rd->memsize);
+ memory_file_save(rd->cache_filename, rd, rd->memsize);
}
}
}
@@ -312,19 +374,23 @@ void rrdset_delete(RRDSET *st) {
rrdset_check_rdlock(st);
- // info("Deleting chart '%s' ('%s')", st->id, st->name);
+ info("Deleting chart '%s' ('%s') from disk...", st->id, st->name);
- if(st->rrd_memory_mode == RRD_MEMORY_MODE_SAVE) {
- debug(D_RRD_STATS, "Deleting stats '%s' to '%s'.", st->name, st->cache_filename);
- unlink(st->cache_filename);
+ if(st->rrd_memory_mode == RRD_MEMORY_MODE_SAVE || st->rrd_memory_mode == RRD_MEMORY_MODE_MAP) {
+ info("Deleting chart header file '%s'.", st->cache_filename);
+ if(unlikely(unlink(st->cache_filename) == -1))
+ error("Cannot delete chart header file '%s'", st->cache_filename);
}
rrddim_foreach_read(rd, st) {
- if(likely(rd->rrd_memory_mode == RRD_MEMORY_MODE_SAVE)) {
- debug(D_RRD_STATS, "Deleting dimension '%s' to '%s'.", rd->name, rd->cache_filename);
- unlink(rd->cache_filename);
+ if(likely(rd->rrd_memory_mode == RRD_MEMORY_MODE_SAVE || rd->rrd_memory_mode == RRD_MEMORY_MODE_MAP)) {
+ info("Deleting dimension file '%s'.", rd->cache_filename);
+ if(unlikely(unlink(rd->cache_filename) == -1))
+ error("Cannot delete dimension file '%s'", rd->cache_filename);
}
}
+
+ recursively_delete_dir(st->cache_dir, "left-over chart");
}
// ----------------------------------------------------------------------------
@@ -333,7 +399,7 @@ void rrdset_delete(RRDSET *st) {
static inline RRDSET *rrdset_find_on_create(RRDHOST *host, const char *fullid) {
RRDSET *st = rrdset_find(host, fullid);
if(unlikely(st)) {
- rrdset_flag_clear(st, RRDSET_FLAG_OBSOLETE);
+ rrdset_isnot_obsolete(st);
debug(D_RRD_CALLS, "RRDSET '%s', already exists.", fullid);
return st;
}
@@ -341,7 +407,7 @@ static inline RRDSET *rrdset_find_on_create(RRDHOST *host, const char *fullid) {
return NULL;
}
-RRDSET *rrdset_create(
+RRDSET *rrdset_create_custom(
RRDHOST *host
, const char *type
, const char *id
@@ -353,6 +419,8 @@ RRDSET *rrdset_create(
, long priority
, int update_every
, RRDSET_TYPE chart_type
+ , RRD_MEMORY_MODE memory_mode
+ , long history_entries
) {
if(!type || !type[0]) {
fatal("Cannot create rrd stats without a type.");
@@ -395,11 +463,11 @@ RRDSET *rrdset_create(
// ------------------------------------------------------------------------
// get the options from the config, we need to create it
- long rentries = config_get_number(config_section, "history", host->rrd_history_entries);
- long entries = align_entries_to_pagesize(host->rrd_memory_mode, rentries);
+ long rentries = config_get_number(config_section, "history", history_entries);
+ long entries = align_entries_to_pagesize(memory_mode, rentries);
if(entries != rentries) entries = config_set_number(config_section, "history", entries);
- if(host->rrd_memory_mode == RRD_MEMORY_MODE_NONE && entries != rentries)
+ if(memory_mode == RRD_MEMORY_MODE_NONE && entries != rentries)
entries = config_set_number(config_section, "history", 10);
int enabled = config_get_boolean(config_section, "enabled", 1);
@@ -416,8 +484,14 @@ RRDSET *rrdset_create(
debug(D_RRD_CALLS, "Creating RRD_STATS for '%s.%s'.", type, id);
snprintfz(fullfilename, FILENAME_MAX, "%s/main.db", cache_dir);
- if(host->rrd_memory_mode == RRD_MEMORY_MODE_SAVE || host->rrd_memory_mode == RRD_MEMORY_MODE_MAP) {
- st = (RRDSET *) mymmap(fullfilename, size, ((host->rrd_memory_mode == RRD_MEMORY_MODE_MAP) ? MAP_SHARED : MAP_PRIVATE), 0);
+ if(memory_mode == RRD_MEMORY_MODE_SAVE || memory_mode == RRD_MEMORY_MODE_MAP || memory_mode == RRD_MEMORY_MODE_RAM) {
+ st = (RRDSET *) mymmap(
+ (memory_mode == RRD_MEMORY_MODE_RAM)?NULL:fullfilename
+ , size
+ , ((memory_mode == RRD_MEMORY_MODE_MAP) ? MAP_SHARED : MAP_PRIVATE)
+ , 0
+ );
+
if(st) {
memset(&st->avl, 0, sizeof(avl));
memset(&st->avlname, 0, sizeof(avl));
@@ -426,64 +500,68 @@ RRDSET *rrdset_create(
memset(&st->rrdset_rwlock, 0, sizeof(netdata_rwlock_t));
st->name = NULL;
+ st->config_section = NULL;
st->type = NULL;
st->family = NULL;
- st->context = NULL;
st->title = NULL;
st->units = NULL;
+ st->context = NULL;
+ st->cache_dir = NULL;
st->dimensions = NULL;
+ st->rrdfamily = NULL;
+ st->rrdhost = NULL;
st->next = NULL;
st->variables = NULL;
st->alarms = NULL;
st->flags = 0x00000000;
- if(strcmp(st->magic, RRDSET_MAGIC) != 0) {
- errno = 0;
- info("Initializing file %s.", fullfilename);
- memset(st, 0, size);
- }
- else if(strcmp(st->id, fullid) != 0) {
- errno = 0;
- error("File %s contents are not for chart %s. Clearing it.", fullfilename, fullid);
- // munmap(st, size);
- // st = NULL;
+ if(memory_mode == RRD_MEMORY_MODE_RAM) {
memset(st, 0, size);
}
- else if(st->memsize != size || st->entries != entries) {
- errno = 0;
- error("File %s does not have the desired size. Clearing it.", fullfilename);
- memset(st, 0, size);
- }
- else if(st->update_every != update_every) {
- errno = 0;
- error("File %s does not have the desired update frequency. Clearing it.", fullfilename);
- memset(st, 0, size);
- }
- else if((now - st->last_updated.tv_sec) > update_every * entries) {
- errno = 0;
- error("File %s is too old. Clearing it.", fullfilename);
- memset(st, 0, size);
- }
- else if(st->last_updated.tv_sec > now + update_every) {
- errno = 0;
- error("File %s refers to the future. Clearing it.", fullfilename);
- memset(st, 0, size);
- }
-
- // make sure the database is aligned
- if(st->last_updated.tv_sec)
- last_updated_time_align(&st->last_updated, update_every);
+ else {
+ if(strcmp(st->magic, RRDSET_MAGIC) != 0) {
+ info("Initializing file %s.", fullfilename);
+ memset(st, 0, size);
+ }
+ else if(strcmp(st->id, fullid) != 0) {
+ error("File %s contents are not for chart %s. Clearing it.", fullfilename, fullid);
+ // munmap(st, size);
+ // st = NULL;
+ memset(st, 0, size);
+ }
+ else if(st->memsize != size || st->entries != entries) {
+ error("File %s does not have the desired size. Clearing it.", fullfilename);
+ memset(st, 0, size);
+ }
+ else if(st->update_every != update_every) {
+ error("File %s does not have the desired update frequency. Clearing it.", fullfilename);
+ memset(st, 0, size);
+ }
+ else if((now - st->last_updated.tv_sec) > update_every * entries) {
+ error("File %s is too old. Clearing it.", fullfilename);
+ memset(st, 0, size);
+ }
+ else if(st->last_updated.tv_sec > now + update_every) {
+ error("File %s refers to the future. Clearing it.", fullfilename);
+ memset(st, 0, size);
+ }
+ // make sure the database is aligned
+ if(st->last_updated.tv_sec) {
+ st->update_every = update_every;
+ last_updated_time_align(st);
+ }
+ }
// make sure we have the right memory mode
// even if we cleared the memory
- st->rrd_memory_mode = host->rrd_memory_mode;
+ st->rrd_memory_mode = memory_mode;
}
}
if(unlikely(!st)) {
st = callocz(1, size);
- st->rrd_memory_mode = (host->rrd_memory_mode == RRD_MEMORY_MODE_NONE) ? RRD_MEMORY_MODE_NONE : RRD_MEMORY_MODE_RAM;
+ st->rrd_memory_mode = (memory_mode == RRD_MEMORY_MODE_NONE) ? RRD_MEMORY_MODE_NONE : RRD_MEMORY_MODE_ALLOC;
}
st->config_section = strdup(config_section);
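
Reviewer note: the memory-mode branches above reduce to a small decision table. A standalone sketch of just that selection (names here are illustrative, not netdata APIs; it assumes mymmap() treats a NULL filename as an anonymous mapping, which is what the RAM branch above relies on):

    #include <stddef.h>
    #include <sys/mman.h>

    typedef enum { MODE_NONE, MODE_RAM, MODE_MAP, MODE_SAVE } rrd_mode_example;

    // Simplified view of the choice made in rrdset_create_custom():
    // RAM maps anonymously, MAP shares the backing file (write-through),
    // SAVE maps it privately (loaded at start, saved on exit); any other
    // mode falls through to the callocz() path with RRD_MEMORY_MODE_ALLOC.
    const char *mapping_for(rrd_mode_example mode, const char *filename, int *flags) {
        switch(mode) {
            case MODE_RAM:  *flags = MAP_PRIVATE; return NULL;
            case MODE_MAP:  *flags = MAP_SHARED;  return filename;
            case MODE_SAVE: *flags = MAP_PRIVATE; return filename;
            default:        *flags = 0;           return NULL;
        }
    }
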
@@ -519,6 +597,7 @@ RRDSET *rrdset_create(
rrdset_flag_clear(st, RRDSET_FLAG_DETAIL);
rrdset_flag_clear(st, RRDSET_FLAG_DEBUG);
rrdset_flag_clear(st, RRDSET_FLAG_OBSOLETE);
+ rrdset_flag_clear(st, RRDSET_FLAG_EXPOSED_UPSTREAM);
// if(!strcmp(st->id, "disk_util.dm-0")) {
// st->debug = 1;
@@ -536,6 +615,9 @@ RRDSET *rrdset_create(
st->gap_when_lost_iterations_above = (int) (
config_get_number(st->config_section, "gap when lost iterations above", RRD_DEFAULT_GAP_INTERPOLATIONS) + 2);
+ st->last_accessed_time = 0;
+ st->upstream_resync_time = 0;
+
avl_init_lock(&st->dimensions_index, rrddim_compare);
avl_init_lock(&st->variables_root_index, rrdvar_compare);
@@ -544,13 +626,8 @@ RRDSET *rrdset_create(
if(name && *name) rrdset_set_name(st, name);
else rrdset_set_name(st, id);
- {
- char varvalue[CONFIG_MAX_VALUE + 1];
- char varvalue2[CONFIG_MAX_VALUE + 1];
- snprintfz(varvalue, CONFIG_MAX_VALUE, "%s (%s)", title?title:"", st->name);
- json_escape_string(varvalue2, varvalue, sizeof(varvalue2));
- st->title = config_get(st->config_section, "title", varvalue2);
- }
+ st->title = config_get(st->config_section, "title", title);
+ json_fix_string(st->title);
st->rrdfamily = rrdfamily_create(host, st->family);
@@ -571,7 +648,7 @@ RRDSET *rrdset_create(
rrdsetcalc_link_matching(st);
rrdcalctemplate_link_matching(st);
- rrdhost_cleanup_obsolete(host);
+ rrdhost_cleanup_obsolete_charts(host);
rrdhost_unlock(host);
@@ -583,16 +660,10 @@ RRDSET *rrdset_create(
// RRDSET - data collection iteration control
inline void rrdset_next_usec_unfiltered(RRDSET *st, usec_t microseconds) {
-
- if(unlikely(!st->last_collected_time.tv_sec)) {
- // the first entry
- microseconds = st->update_every * USEC_PER_SEC;
- }
- else if(unlikely(!microseconds)) {
- // no dt given by the plugin
- struct timeval now;
- now_realtime_timeval(&now);
- microseconds = dt_usec(&now, &st->last_collected_time);
+ if(unlikely(!st->last_collected_time.tv_sec || !microseconds || (st->counter % remote_clock_resync_iterations) == 0)) {
+ // call the full next_usec() function
+ rrdset_next_usec(st, microseconds);
+ return;
}
st->usec_since_last_update = microseconds;
@@ -612,52 +683,34 @@ inline void rrdset_next_usec(RRDSET *st, usec_t microseconds) {
}
else {
// microseconds has the time since the last collection
-//#ifdef NETDATA_INTERNAL_CHECKS
-// usec_t now_usec = timeval_usec(&now);
-// usec_t last_usec = timeval_usec(&st->last_collected_time);
-//#endif
susec_t since_last_usec = dt_usec_signed(&now, &st->last_collected_time);
if(unlikely(since_last_usec < 0)) {
// oops! the database is in the future
- error("Database for chart '%s' on host '%s' is %lld microseconds in the future. Adjusting it to current time.", st->id, st->rrdhost->hostname, -since_last_usec);
+ info("RRD database for chart '%s' on host '%s' is %0.5Lf secs in the future. Adjusting it to current time.", st->id, st->rrdhost->hostname, (long double)-since_last_usec / USEC_PER_SEC);
st->last_collected_time.tv_sec = now.tv_sec - st->update_every;
st->last_collected_time.tv_usec = now.tv_usec;
- last_collected_time_align(&st->last_collected_time, st->update_every);
+ last_collected_time_align(st);
st->last_updated.tv_sec = now.tv_sec - st->update_every;
st->last_updated.tv_usec = now.tv_usec;
- last_updated_time_align(&st->last_updated, st->update_every);
+ last_updated_time_align(st);
microseconds = st->update_every * USEC_PER_SEC;
- since_last_usec = st->update_every * USEC_PER_SEC;
- }
-
- // verify the microseconds given is good
- if(unlikely(microseconds > (usec_t)since_last_usec)) {
- debug(D_RRD_CALLS, "dt %llu usec given is too big - it leads %llu usec to the future, for chart '%s' (%s).", microseconds, microseconds - (usec_t)since_last_usec, st->name, st->id);
-
-//#ifdef NETDATA_INTERNAL_CHECKS
-// if(unlikely(last_usec + microseconds > now_usec + 1000))
-// error("dt %llu usec given is too big - it leads %llu usec to the future, for chart '%s' (%s).", microseconds, microseconds - (usec_t)since_last_usec, st->name, st->id);
-//#endif
-
- microseconds = (usec_t)since_last_usec;
}
- else if(unlikely(microseconds < (usec_t)since_last_usec * 0.8)) {
- debug(D_RRD_CALLS, "dt %llu usec given is too small - expected %llu usec up to -20%%, for chart '%s' (%s).", microseconds, (usec_t)since_last_usec, st->name, st->id);
+ else if(unlikely((usec_t)since_last_usec > (usec_t)(st->update_every * 10 * USEC_PER_SEC))) {
+ // oops! the database is too far behind
+ info("RRD database for chart '%s' on host '%s' is %0.5Lf secs in the past. Adjusting it to current time.", st->id, st->rrdhost->hostname, (long double)since_last_usec / USEC_PER_SEC);
-//#ifdef NETDATA_INTERNAL_CHECKS
-// error("dt %llu usec given is too small - expected %llu usec up to -20%%, for chart '%s' (%s).", microseconds, (usec_t)since_last_usec, st->name, st->id);
-//#endif
microseconds = (usec_t)since_last_usec;
}
}
- debug(D_RRD_CALLS, "rrdset_next_usec() for chart %s with microseconds %llu", st->name, microseconds);
- if(unlikely(rrdset_flag_check(st, RRDSET_FLAG_DEBUG)))
- debug(D_RRD_STATS, "%s: NEXT: %llu microseconds", st->name, microseconds);
+ #ifdef NETDATA_INTERNAL_CHECKS
+ debug(D_RRD_CALLS, "rrdset_next_usec() for chart %s with microseconds %llu", st->name, microseconds);
+ rrdset_debug(st, "NEXT: %llu microseconds", microseconds);
+ #endif
st->usec_since_last_update = microseconds;
}
@@ -666,9 +719,17 @@ inline void rrdset_next_usec(RRDSET *st, usec_t microseconds) {
// ----------------------------------------------------------------------------
// RRDSET - process the collected values for all dimensions of a chart
-static inline void rrdset_init_last_collected_time(RRDSET *st) {
+static inline usec_t rrdset_init_last_collected_time(RRDSET *st) {
now_realtime_timeval(&st->last_collected_time);
- last_collected_time_align(&st->last_collected_time, st->update_every);
+ last_collected_time_align(st);
+
+ usec_t last_collect_ut = st->last_collected_time.tv_sec * USEC_PER_SEC + st->last_collected_time.tv_usec;
+
+ #ifdef NETDATA_INTERNAL_CHECKS
+ rrdset_debug(st, "initialized last collected time to %0.3Lf", (long double)last_collect_ut / USEC_PER_SEC);
+ #endif
+
+ return last_collect_ut;
}
static inline usec_t rrdset_update_last_collected_time(RRDSET *st) {
@@ -676,17 +737,42 @@ static inline usec_t rrdset_update_last_collected_time(RRDSET *st) {
usec_t ut = last_collect_ut + st->usec_since_last_update;
st->last_collected_time.tv_sec = (time_t) (ut / USEC_PER_SEC);
st->last_collected_time.tv_usec = (suseconds_t) (ut % USEC_PER_SEC);
+
+ #ifdef NETDATA_INTERNAL_CHECKS
+ rrdset_debug(st, "updated last collected time to %0.3Lf", (long double)last_collect_ut / USEC_PER_SEC);
+ #endif
+
return last_collect_ut;
}
-static inline void rrdset_init_last_updated_time(RRDSET *st) {
+static inline usec_t rrdset_init_last_updated_time(RRDSET *st) {
// copy the last collected time to last updated time
st->last_updated.tv_sec = st->last_collected_time.tv_sec;
st->last_updated.tv_usec = st->last_collected_time.tv_usec;
- last_updated_time_align(&st->last_updated, st->update_every);
+
+ if(rrdset_flag_check(st, RRDSET_FLAG_STORE_FIRST))
+ st->last_updated.tv_sec -= st->update_every;
+
+ last_updated_time_align(st);
+
+ usec_t last_updated_ut = st->last_updated.tv_sec * USEC_PER_SEC + st->last_updated.tv_usec;
+
+ #ifdef NETDATA_INTERNAL_CHECKS
+ rrdset_debug(st, "initialized last updated time to %0.3Lf", (long double)last_updated_ut / USEC_PER_SEC);
+ #endif
+
+ return last_updated_ut;
}
static inline void rrdset_done_push_exclusive(RRDSET *st) {
+// usec_t update_every_ut = st->update_every * USEC_PER_SEC; // st->update_every in microseconds
+//
+// if(unlikely(st->usec_since_last_update > update_every_ut * remote_clock_resync_iterations)) {
+// error("Chart '%s' was last collected %llu usec before. Resetting it.", st->id, st->usec_since_last_update);
+// rrdset_reset(st);
+// st->usec_since_last_update = update_every_ut;
+// }
+
if(unlikely(!st->last_collected_time.tv_sec)) {
// it is the first entry
// set the last_collected_time to now
@@ -705,6 +791,235 @@ static inline void rrdset_done_push_exclusive(RRDSET *st) {
rrdset_unlock(st);
}
+
+static inline size_t rrdset_done_interpolate(
+ RRDSET *st
+ , usec_t update_every_ut
+ , usec_t last_stored_ut
+ , usec_t next_store_ut
+ , usec_t last_collect_ut
+ , usec_t now_collect_ut
+ , char store_this_entry
+ , uint32_t storage_flags
+) {
+ RRDDIM *rd;
+
+ size_t stored_entries = 0; // the number of entries we have stored in the db, during this call to rrdset_done()
+
+ usec_t first_ut = last_stored_ut, last_ut = 0;
+ ssize_t iterations = (ssize_t)((now_collect_ut - last_stored_ut) / (update_every_ut));
+ if((now_collect_ut % (update_every_ut)) == 0) iterations++;
+
+ size_t counter = st->counter;
+ long current_entry = st->current_entry;
+
+ for( ; next_store_ut <= now_collect_ut ; last_collect_ut = next_store_ut, next_store_ut += update_every_ut, iterations-- ) {
+
+ #ifdef NETDATA_INTERNAL_CHECKS
+ if(iterations < 0) { error("INTERNAL CHECK: %s: iterations calculation wrapped! first_ut = %llu, last_stored_ut = %llu, next_store_ut = %llu, now_collect_ut = %llu", st->name, first_ut, last_stored_ut, next_store_ut, now_collect_ut); }
+ rrdset_debug(st, "last_stored_ut = %0.3Lf (last updated time)", (long double)last_stored_ut/USEC_PER_SEC);
+ rrdset_debug(st, "next_store_ut = %0.3Lf (next interpolation point)", (long double)next_store_ut/USEC_PER_SEC);
+ #endif
+
+ last_ut = next_store_ut;
+
+ rrddim_foreach_read(rd, st) {
+ calculated_number new_value;
+
+ switch(rd->algorithm) {
+ case RRD_ALGORITHM_INCREMENTAL:
+ new_value = (calculated_number)
+ ( rd->calculated_value
+ * (calculated_number)(next_store_ut - last_collect_ut)
+ / (calculated_number)(now_collect_ut - last_collect_ut)
+ );
+
+ #ifdef NETDATA_INTERNAL_CHECKS
+ rrdset_debug(st, "%s: CALC2 INC "
+ CALCULATED_NUMBER_FORMAT " = "
+ CALCULATED_NUMBER_FORMAT
+ " * (%llu - %llu)"
+ " / (%llu - %llu)"
+ , rd->name
+ , new_value
+ , rd->calculated_value
+ , next_store_ut, last_collect_ut
+ , now_collect_ut, last_collect_ut
+ );
+ #endif
+
+ rd->calculated_value -= new_value;
+ new_value += rd->last_calculated_value;
+ rd->last_calculated_value = 0;
+ new_value /= (calculated_number)st->update_every;
+
+ if(unlikely(next_store_ut - last_stored_ut < update_every_ut)) {
+
+ #ifdef NETDATA_INTERNAL_CHECKS
+ rrdset_debug(st, "%s: COLLECTION POINT IS SHORT " CALCULATED_NUMBER_FORMAT " - EXTRAPOLATING",
+ rd->name
+ , (calculated_number)(next_store_ut - last_stored_ut)
+ );
+ #endif
+
+ new_value = new_value * (calculated_number)(st->update_every * USEC_PER_SEC) / (calculated_number)(next_store_ut - last_stored_ut);
+ }
+ break;
+
+ case RRD_ALGORITHM_ABSOLUTE:
+ case RRD_ALGORITHM_PCENT_OVER_ROW_TOTAL:
+ case RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL:
+ default:
+ if(iterations == 1) {
+ // this is the last iteration
+ // do not interpolate
+ // just show the calculated value
+
+ new_value = rd->calculated_value;
+ }
+ else {
+ // we have missed an update
+ // interpolate in the middle values
+
+ new_value = (calculated_number)
+ ( ( (rd->calculated_value - rd->last_calculated_value)
+ * (calculated_number)(next_store_ut - last_collect_ut)
+ / (calculated_number)(now_collect_ut - last_collect_ut)
+ )
+ + rd->last_calculated_value
+ );
+
+ #ifdef NETDATA_INTERNAL_CHECKS
+ rrdset_debug(st, "%s: CALC2 DEF "
+ CALCULATED_NUMBER_FORMAT " = ((("
+ "(" CALCULATED_NUMBER_FORMAT " - " CALCULATED_NUMBER_FORMAT ")"
+ " * %llu"
+ " / %llu) + " CALCULATED_NUMBER_FORMAT
+ , rd->name
+ , new_value
+ , rd->calculated_value, rd->last_calculated_value
+ , (next_store_ut - first_ut)
+ , (now_collect_ut - first_ut), rd->last_calculated_value
+ );
+ #endif
+ }
+ break;
+ }
+
+ if(unlikely(!store_this_entry)) {
+ rd->values[current_entry] = SN_EMPTY_SLOT; //pack_storage_number(0, SN_NOT_EXISTS);
+ continue;
+ }
+
+ if(likely(rd->updated && rd->collections_counter > 1 && iterations < st->gap_when_lost_iterations_above)) {
+ rd->values[current_entry] = pack_storage_number(new_value, storage_flags );
+ rd->last_stored_value = new_value;
+
+ #ifdef NETDATA_INTERNAL_CHECKS
+ rrdset_debug(st, "%s: STORE[%ld] "
+ CALCULATED_NUMBER_FORMAT " = " CALCULATED_NUMBER_FORMAT
+ , rd->name
+ , current_entry
+ , unpack_storage_number(rd->values[current_entry]), new_value
+ );
+ #endif
+
+ }
+ else {
+
+ #ifdef NETDATA_INTERNAL_CHECKS
+ rrdset_debug(st, "%s: STORE[%ld] = NON EXISTING "
+ , rd->name
+ , current_entry
+ );
+ #endif
+
+ rd->values[current_entry] = SN_EMPTY_SLOT; // pack_storage_number(0, SN_NOT_EXISTS);
+ rd->last_stored_value = NAN;
+ }
+
+ stored_entries++;
+
+ #ifdef NETDATA_INTERNAL_CHECKS
+ if(unlikely(rrdset_flag_check(st, RRDSET_FLAG_DEBUG))) {
+ calculated_number t1 = new_value * (calculated_number)rd->multiplier / (calculated_number)rd->divisor;
+ calculated_number t2 = unpack_storage_number(rd->values[current_entry]);
+
+ calculated_number accuracy = accuracy_loss(t1, t2);
+ debug(D_RRD_STATS, "%s/%s: UNPACK[%ld] = " CALCULATED_NUMBER_FORMAT " FLAGS=0x%08x (original = " CALCULATED_NUMBER_FORMAT ", accuracy loss = " CALCULATED_NUMBER_FORMAT "%%%s)"
+ , st->id, rd->name
+ , current_entry
+ , t2
+ , get_storage_number_flags(rd->values[current_entry])
+ , t1
+ , accuracy
+ , (accuracy > ACCURACY_LOSS) ? " **TOO BIG** " : ""
+ );
+
+ rd->collected_volume += t1;
+ rd->stored_volume += t2;
+
+ accuracy = accuracy_loss(rd->collected_volume, rd->stored_volume);
+ debug(D_RRD_STATS, "%s/%s: VOLUME[%ld] = " CALCULATED_NUMBER_FORMAT ", calculated = " CALCULATED_NUMBER_FORMAT ", accuracy loss = " CALCULATED_NUMBER_FORMAT "%%%s"
+ , st->id, rd->name
+ , current_entry
+ , rd->stored_volume
+ , rd->collected_volume
+ , accuracy
+ , (accuracy > ACCURACY_LOSS) ? " **TOO BIG** " : ""
+ );
+ }
+ #endif
+ }
+ // reset the storage flags for the next point, if any
+ storage_flags = SN_EXISTS;
+
+ counter++;
+ current_entry = ((current_entry + 1) >= st->entries) ? 0 : current_entry + 1;
+ last_stored_ut = next_store_ut;
+ }
+
+ st->counter = counter;
+ st->current_entry = current_entry;
+
+ if(likely(last_ut)) {
+ st->last_updated.tv_sec = (time_t) (last_ut / USEC_PER_SEC);
+ st->last_updated.tv_usec = 0;
+ }
+
+ return stored_entries;
+}
+
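Reviewer note: rrdset_done_interpolate() is easier to follow on a toy timeline. A minimal model of the incremental branch, using plain doubles and microsecond timestamps (all names here are illustrative, not netdata APIs; the real code additionally divides by update_every and extrapolates short collection points):

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t usec_t;
    #define USEC_PER_SEC 1000000ULL

    // One incremental dimension: the delta collected over
    // (last_collect_ut, now_collect_ut] is split proportionally across
    // every interpolation point crossed; the remainder is carried forward.
    int main(void) {
        usec_t update_every_ut = 1 * USEC_PER_SEC;
        usec_t last_collect_ut = 1000000;  // previous collection at t=1.0s
        usec_t now_collect_ut  = 3500000;  // this collection at t=3.5s
        usec_t next_store_ut   = 2000000;  // next interpolation point, t=2.0s

        double calculated_value = 250.0;   // delta accumulated since t=1.0s
        double last_calculated_value = 0.0;

        for(; next_store_ut <= now_collect_ut; last_collect_ut = next_store_ut, next_store_ut += update_every_ut) {
            double new_value = calculated_value
                    * (double)(next_store_ut - last_collect_ut)
                    / (double)(now_collect_ut - last_collect_ut);

            calculated_value -= new_value;       // keep the rest for later points
            new_value += last_calculated_value;  // add any carried remainder
            last_calculated_value = 0.0;

            printf("store at t=%.1fs: %.3f\n", (double)next_store_ut / USEC_PER_SEC, new_value);
        }
        // calculated_value still holds 50.0 here, carried to the next rrdset_done()
        return 0;
    }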
+static inline void rrdset_done_fill_the_gap(RRDSET *st) {
+ usec_t update_every_ut = st->update_every * USEC_PER_SEC;
+ usec_t now_collect_ut = st->last_collected_time.tv_sec * USEC_PER_SEC + st->last_collected_time.tv_usec;
+
+ long c = 0, entries = st->entries;
+ RRDDIM *rd;
+ rrddim_foreach_read(rd, st) {
+ usec_t next_store_ut = (st->last_updated.tv_sec + st->update_every) * USEC_PER_SEC;
+ long current_entry = st->current_entry;
+
+ for(c = 0; c < entries && next_store_ut <= now_collect_ut ; next_store_ut += update_every_ut, c++) {
+ rd->values[current_entry] = SN_EMPTY_SLOT;
+ current_entry = ((current_entry + 1) >= entries) ? 0 : current_entry + 1;
+
+ #ifdef NETDATA_INTERNAL_CHECKS
+ rrdset_debug(st, "%s: STORE[%ld] = NON EXISTING (FILLED THE GAP)", rd->name, current_entry);
+ #endif
+ }
+ }
+
+ if(c > 0) {
+ c--;
+ st->last_updated.tv_sec += c * st->update_every;
+
+ st->current_entry += c;
+ if(st->current_entry >= st->entries)
+ st->current_entry -= st->entries;
+ }
+}
+
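Reviewer note: a self-contained model of rrdset_done_fill_the_gap() helps with the off-by-one at the end: the last empty slot is left for the value being collected right now, so the chart state advances by c - 1 intervals. A sketch under simplified types (illustrative only):

    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    typedef uint64_t usec_t;
    #define USEC_PER_SEC 1000000ULL

    int main(void) {
        int update_every = 1;         // seconds per slot
        long entries = 3600;          // ring size (st->entries)
        long current_entry = 10;      // write position (st->current_entry)

        time_t last_updated = 1000;   // restored from the on-disk file
        time_t now = 1007;            // first collection after the restart

        usec_t update_every_ut = (usec_t)update_every * USEC_PER_SEC;
        usec_t now_collect_ut = (usec_t)now * USEC_PER_SEC;
        usec_t next_store_ut = (usec_t)(last_updated + update_every) * USEC_PER_SEC;

        long c = 0, slot = current_entry;
        for(; c < entries && next_store_ut <= now_collect_ut; next_store_ut += update_every_ut, c++) {
            printf("slot %ld = empty\n", slot);
            slot = ((slot + 1) >= entries) ? 0 : slot + 1;  // ring wraparound
        }

        if(c > 0) {
            c--;                                  // last slot is for the new value
            last_updated += c * update_every;
            current_entry += c;
            if(current_entry >= entries)
                current_entry -= entries;
        }

        printf("last_updated = %ld, current_entry = %ld\n", (long)last_updated, current_entry);
        return 0;
    }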
void rrdset_done(RRDSET *st) {
if(unlikely(netdata_exit)) return;
@@ -726,9 +1041,6 @@ void rrdset_done(RRDSET *st) {
store_this_entry = 1, // boolean: 1 = store this entry, 0 = don't store this entry
first_entry = 0; // boolean: 1 = this is the first entry seen for this chart, 0 = all other entries
- unsigned int
- stored_entries = 0; // the number of entries we have stored in the db, during this call to rrdset_done()
-
usec_t
last_collect_ut, // the timestamp in microseconds, of the last collected value
now_collect_ut, // the timestamp in microseconds, of this collected value (this is NOW)
@@ -742,42 +1054,33 @@ void rrdset_done(RRDSET *st) {
// a read lock is OK here
rrdset_rdlock(st);
-/*
- // enable the chart, if it was disabled
- if(unlikely(rrd_delete_unupdated_dimensions) && !st->enabled)
- st->enabled = 1;
-*/
-
if(unlikely(rrdset_flag_check(st, RRDSET_FLAG_OBSOLETE))) {
error("Chart '%s' has the OBSOLETE flag set, but it is collected.", st->id);
- rrdset_flag_clear(st, RRDSET_FLAG_OBSOLETE);
+ rrdset_isnot_obsolete(st);
}
// check if the chart has a long time to be updated
if(unlikely(st->usec_since_last_update > st->entries * update_every_ut)) {
- info("%s: took too long to be updated (%0.3Lf secs). Resetting it.", st->name, (long double)(st->usec_since_last_update / 1000000.0));
+ info("host '%s', chart %s: took too long to be updated (%0.3Lf secs). Resetting it.", st->rrdhost->hostname, st->name, (long double)st->usec_since_last_update / USEC_PER_SEC);
rrdset_reset(st);
st->usec_since_last_update = update_every_ut;
+ store_this_entry = 0;
first_entry = 1;
}
- if(unlikely(rrdset_flag_check(st, RRDSET_FLAG_DEBUG)))
- debug(D_RRD_STATS, "%s: microseconds since last update: %llu", st->name, st->usec_since_last_update);
+ #ifdef NETDATA_INTERNAL_CHECKS
+ rrdset_debug(st, "microseconds since last update: %llu", st->usec_since_last_update);
+ #endif
// set last_collected_time
if(unlikely(!st->last_collected_time.tv_sec)) {
// it is the first entry
// set the last_collected_time to now
- rrdset_init_last_collected_time(st);
-
- last_collect_ut = st->last_collected_time.tv_sec * USEC_PER_SEC + st->last_collected_time.tv_usec - update_every_ut;
+ last_collect_ut = rrdset_init_last_collected_time(st) - update_every_ut;
// the first entry should not be stored
store_this_entry = 0;
first_entry = 1;
-
- if(unlikely(rrdset_flag_check(st, RRDSET_FLAG_DEBUG)))
- debug(D_RRD_STATS, "%s: has not set last_collected_time. Setting it now. Will not store the next entry.", st->name);
}
else {
// it is not the first entry
@@ -795,9 +1098,6 @@ void rrdset_done(RRDSET *st) {
// the first entry should not be stored
store_this_entry = 0;
first_entry = 1;
-
- if(unlikely(rrdset_flag_check(st, RRDSET_FLAG_DEBUG)))
- debug(D_RRD_STATS, "%s: initializing last_updated to last_collected_time - %llu microseconds. Will not store the next entry.", st->name, st->usec_since_last_update);
}
// check if we will re-write the entire data set
@@ -817,27 +1117,48 @@ void rrdset_done(RRDSET *st) {
// last_stored_ut = the last time we added a value to the storage
// now_collect_ut = the time the current value has been collected
// next_store_ut = the time of the next interpolation point
- last_stored_ut = st->last_updated.tv_sec * USEC_PER_SEC + st->last_updated.tv_usec;
now_collect_ut = st->last_collected_time.tv_sec * USEC_PER_SEC + st->last_collected_time.tv_usec;
+ last_stored_ut = st->last_updated.tv_sec * USEC_PER_SEC + st->last_updated.tv_usec;
next_store_ut = (st->last_updated.tv_sec + st->update_every) * USEC_PER_SEC;
- if(unlikely(rrdset_flag_check(st, RRDSET_FLAG_DEBUG))) {
- debug(D_RRD_STATS, "%s: last_collect_ut = %0.3Lf (last collection time)", st->name, (long double)last_collect_ut/1000000.0);
- debug(D_RRD_STATS, "%s: now_collect_ut = %0.3Lf (current collection time)", st->name, (long double)now_collect_ut/1000000.0);
- debug(D_RRD_STATS, "%s: last_stored_ut = %0.3Lf (last updated time)", st->name, (long double)last_stored_ut/1000000.0);
- debug(D_RRD_STATS, "%s: next_store_ut = %0.3Lf (next interpolation point)", st->name, (long double)next_store_ut/1000000.0);
- }
-
if(unlikely(!st->counter_done)) {
- store_this_entry = 0;
- if(unlikely(rrdset_flag_check(st, RRDSET_FLAG_DEBUG)))
- debug(D_RRD_STATS, "%s: Will not store the next entry.", st->name);
+ // if we have not collected metrics this session (st->counter_done == 0)
+ // and we have collected metrics for this chart in the past (st->counter != 0)
+ // fill the gap (the chart has been just loaded from disk)
+ if(unlikely(st->counter)) {
+ rrdset_done_fill_the_gap(st);
+ last_stored_ut = st->last_updated.tv_sec * USEC_PER_SEC + st->last_updated.tv_usec;
+ next_store_ut = (st->last_updated.tv_sec + st->update_every) * USEC_PER_SEC;
+ }
+
+ if(unlikely(rrdset_flag_check(st, RRDSET_FLAG_STORE_FIRST))) {
+ store_this_entry = 1;
+ last_collect_ut = next_store_ut - update_every_ut;
+
+ #ifdef NETDATA_INTERNAL_CHECKS
+ rrdset_debug(st, "Fixed first entry.");
+ #endif
+ }
+ else {
+ store_this_entry = 0;
+
+ #ifdef NETDATA_INTERNAL_CHECKS
+ rrdset_debug(st, "Will not store the next entry.");
+ #endif
+ }
}
st->counter_done++;
if(unlikely(st->rrdhost->rrdpush_enabled))
rrdset_done_push(st);
+ #ifdef NETDATA_INTERNAL_CHECKS
+ rrdset_debug(st, "last_collect_ut = %0.3Lf (last collection time)", (long double)last_collect_ut/USEC_PER_SEC);
+ rrdset_debug(st, "now_collect_ut = %0.3Lf (current collection time)", (long double)now_collect_ut/USEC_PER_SEC);
+ rrdset_debug(st, "last_stored_ut = %0.3Lf (last updated time)", (long double)last_stored_ut/USEC_PER_SEC);
+ rrdset_debug(st, "next_store_ut = %0.3Lf (next interpolation point)", (long double)next_store_ut/USEC_PER_SEC);
+ #endif
+
// calculate totals and count the dimensions
int dimensions = 0;
st->collected_total = 0;
@@ -859,18 +1180,19 @@ void rrdset_done(RRDSET *st) {
continue;
}
- if(unlikely(rrdset_flag_check(st, RRDSET_FLAG_DEBUG)))
- debug(D_RRD_STATS, "%s/%s: START "
- " last_collected_value = " COLLECTED_NUMBER_FORMAT
- " collected_value = " COLLECTED_NUMBER_FORMAT
- " last_calculated_value = " CALCULATED_NUMBER_FORMAT
- " calculated_value = " CALCULATED_NUMBER_FORMAT
- , st->id, rd->name
+ #ifdef NETDATA_INTERNAL_CHECKS
+ rrdset_debug(st, "%s: START "
+ " last_collected_value = " COLLECTED_NUMBER_FORMAT
+ " collected_value = " COLLECTED_NUMBER_FORMAT
+ " last_calculated_value = " CALCULATED_NUMBER_FORMAT
+ " calculated_value = " CALCULATED_NUMBER_FORMAT
+ , rd->name
, rd->last_collected_value
, rd->collected_value
, rd->last_calculated_value
, rd->calculated_value
- );
+ );
+ #endif
switch(rd->algorithm) {
case RRD_ALGORITHM_ABSOLUTE:
@@ -878,18 +1200,20 @@ void rrdset_done(RRDSET *st) {
* (calculated_number)rd->multiplier
/ (calculated_number)rd->divisor;
- if(unlikely(rrdset_flag_check(st, RRDSET_FLAG_DEBUG)))
- debug(D_RRD_STATS, "%s/%s: CALC ABS/ABS-NO-IN "
+ #ifdef NETDATA_INTERNAL_CHECKS
+ rrdset_debug(st, "%s: CALC ABS/ABS-NO-IN "
CALCULATED_NUMBER_FORMAT " = "
COLLECTED_NUMBER_FORMAT
" * " CALCULATED_NUMBER_FORMAT
" / " CALCULATED_NUMBER_FORMAT
- , st->id, rd->name
+ , rd->name
, rd->calculated_value
, rd->collected_value
, (calculated_number)rd->multiplier
, (calculated_number)rd->divisor
- );
+ );
+ #endif
+
break;
case RRD_ALGORITHM_PCENT_OVER_ROW_TOTAL:
@@ -903,16 +1227,18 @@ void rrdset_done(RRDSET *st) {
* (calculated_number)rd->collected_value
/ (calculated_number)st->collected_total;
- if(unlikely(rrdset_flag_check(st, RRDSET_FLAG_DEBUG)))
- debug(D_RRD_STATS, "%s/%s: CALC PCENT-ROW "
+ #ifdef NETDATA_INTERNAL_CHECKS
+ rrdset_debug(st, "%s: CALC PCENT-ROW "
CALCULATED_NUMBER_FORMAT " = 100"
- " * " COLLECTED_NUMBER_FORMAT
+ " * " COLLECTED_NUMBER_FORMAT
" / " COLLECTED_NUMBER_FORMAT
- , st->id, rd->name
+ , rd->name
, rd->calculated_value
, rd->collected_value
, st->collected_total
- );
+ );
+ #endif
+
break;
case RRD_ALGORITHM_INCREMENTAL:
@@ -940,19 +1266,21 @@ void rrdset_done(RRDSET *st) {
* (calculated_number)rd->multiplier
/ (calculated_number)rd->divisor;
- if(unlikely(rrdset_flag_check(st, RRDSET_FLAG_DEBUG)))
- debug(D_RRD_STATS, "%s/%s: CALC INC PRE "
+ #ifdef NETDATA_INTERNAL_CHECKS
+ rrdset_debug(st, "%s: CALC INC PRE "
CALCULATED_NUMBER_FORMAT " = ("
COLLECTED_NUMBER_FORMAT " - " COLLECTED_NUMBER_FORMAT
")"
" * " CALCULATED_NUMBER_FORMAT
" / " CALCULATED_NUMBER_FORMAT
- , st->id, rd->name
+ , rd->name
, rd->calculated_value
, rd->collected_value, rd->last_collected_value
, (calculated_number)rd->multiplier
, (calculated_number)rd->divisor
- );
+ );
+ #endif
+
break;
case RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL:
@@ -967,7 +1295,8 @@ void rrdset_done(RRDSET *st) {
debug(D_RRD_STATS, "%s.%s: RESET or OVERFLOW. Last collected value = " COLLECTED_NUMBER_FORMAT ", current = " COLLECTED_NUMBER_FORMAT
, st->name, rd->name
, rd->last_collected_value
- , rd->collected_value);
+ , rd->collected_value
+ );
if(!(rrddim_flag_check(rd, RRDDIM_FLAG_DONT_DETECT_RESETS_OR_OVERFLOWS)))
storage_flags = SN_EXISTS_RESET;
@@ -985,16 +1314,18 @@ void rrdset_done(RRDSET *st) {
* (calculated_number)(rd->collected_value - rd->last_collected_value)
/ (calculated_number)(st->collected_total - st->last_collected_total);
- if(unlikely(rrdset_flag_check(st, RRDSET_FLAG_DEBUG)))
- debug(D_RRD_STATS, "%s/%s: CALC PCENT-DIFF "
+ #ifdef NETDATA_INTERNAL_CHECKS
+ rrdset_debug(st, "%s: CALC PCENT-DIFF "
CALCULATED_NUMBER_FORMAT " = 100"
- " * (" COLLECTED_NUMBER_FORMAT " - " COLLECTED_NUMBER_FORMAT ")"
- " / (" COLLECTED_NUMBER_FORMAT " - " COLLECTED_NUMBER_FORMAT ")"
- , st->id, rd->name
+ " * (" COLLECTED_NUMBER_FORMAT " - " COLLECTED_NUMBER_FORMAT ")"
+ " / (" COLLECTED_NUMBER_FORMAT " - " COLLECTED_NUMBER_FORMAT ")"
+ , rd->name
, rd->calculated_value
, rd->collected_value, rd->last_collected_value
, st->collected_total, st->last_collected_total
- );
+ );
+ #endif
+
break;
default:
@@ -1002,203 +1333,53 @@ void rrdset_done(RRDSET *st) {
// it gets noticed when we add new types
rd->calculated_value = 0;
- if(unlikely(rrdset_flag_check(st, RRDSET_FLAG_DEBUG)))
- debug(D_RRD_STATS, "%s/%s: CALC "
+ #ifdef NETDATA_INTERNAL_CHECKS
+ rrdset_debug(st, "%s: CALC "
CALCULATED_NUMBER_FORMAT " = 0"
- , st->id, rd->name
+ , rd->name
, rd->calculated_value
- );
+ );
+ #endif
+
break;
}
- if(unlikely(rrdset_flag_check(st, RRDSET_FLAG_DEBUG)))
- debug(D_RRD_STATS, "%s/%s: PHASE2 "
+ #ifdef NETDATA_INTERNAL_CHECKS
+ rrdset_debug(st, "%s: PHASE2 "
" last_collected_value = " COLLECTED_NUMBER_FORMAT
" collected_value = " COLLECTED_NUMBER_FORMAT
" last_calculated_value = " CALCULATED_NUMBER_FORMAT
" calculated_value = " CALCULATED_NUMBER_FORMAT
- , st->id, rd->name
+ , rd->name
, rd->last_collected_value
, rd->collected_value
, rd->last_calculated_value
, rd->calculated_value
- );
+ );
+ #endif
}
// at this point we have all the calculated values ready
// it is now time to interpolate values on a second boundary
+#ifdef NETDATA_INTERNAL_CHECKS
if(unlikely(now_collect_ut < next_store_ut)) {
// this is collected in the same interpolation point
-
- if(unlikely(rrdset_flag_check(st, RRDSET_FLAG_DEBUG)))
- debug(D_RRD_STATS, "%s: THIS IS IN THE SAME INTERPOLATION POINT", st->name);
-
-//#ifdef NETDATA_INTERNAL_CHECKS
-// info("%s is collected in the same interpolation point: short by %llu microseconds", st->name, next_store_ut - now_collect_ut);
-//#endif
- }
-
- usec_t first_ut = last_stored_ut;
- long long iterations = (now_collect_ut - last_stored_ut) / (update_every_ut);
- if((now_collect_ut % (update_every_ut)) == 0) iterations++;
-
- for( ; next_store_ut <= now_collect_ut ; last_collect_ut = next_store_ut, next_store_ut += update_every_ut, iterations-- ) {
-//#ifdef NETDATA_INTERNAL_CHECKS
-// if(iterations < 0) { error("%s: iterations calculation wrapped! first_ut = %llu, last_stored_ut = %llu, next_store_ut = %llu, now_collect_ut = %llu", st->name, first_ut, last_stored_ut, next_store_ut, now_collect_ut); }
-//#endif
-
- if(unlikely(rrdset_flag_check(st, RRDSET_FLAG_DEBUG))) {
- debug(D_RRD_STATS, "%s: last_stored_ut = %0.3Lf (last updated time)", st->name, (long double)last_stored_ut/1000000.0);
- debug(D_RRD_STATS, "%s: next_store_ut = %0.3Lf (next interpolation point)", st->name, (long double)next_store_ut/1000000.0);
- }
-
- st->last_updated.tv_sec = (time_t) (next_store_ut / USEC_PER_SEC);
- st->last_updated.tv_usec = 0;
-
- rrddim_foreach_read(rd, st) {
- calculated_number new_value;
-
- switch(rd->algorithm) {
- case RRD_ALGORITHM_INCREMENTAL:
- new_value = (calculated_number)
- ( rd->calculated_value
- * (calculated_number)(next_store_ut - last_collect_ut)
- / (calculated_number)(now_collect_ut - last_collect_ut)
- );
-
- if(unlikely(rrdset_flag_check(st, RRDSET_FLAG_DEBUG)))
- debug(D_RRD_STATS, "%s/%s: CALC2 INC "
- CALCULATED_NUMBER_FORMAT " = "
- CALCULATED_NUMBER_FORMAT
- " * %llu"
- " / %llu"
- , st->id, rd->name
- , new_value
- , rd->calculated_value
- , (next_store_ut - last_stored_ut)
- , (now_collect_ut - last_stored_ut)
- );
-
- rd->calculated_value -= new_value;
- new_value += rd->last_calculated_value;
- rd->last_calculated_value = 0;
- new_value /= (calculated_number)st->update_every;
-
- if(unlikely(next_store_ut - last_stored_ut < update_every_ut)) {
- if(unlikely(rrdset_flag_check(st, RRDSET_FLAG_DEBUG)))
- debug(D_RRD_STATS, "%s/%s: COLLECTION POINT IS SHORT " CALCULATED_NUMBER_FORMAT " - EXTRAPOLATING",
- st->id, rd->name
- , (calculated_number)(next_store_ut - last_stored_ut)
- );
- new_value = new_value * (calculated_number)(st->update_every * 1000000) / (calculated_number)(next_store_ut - last_stored_ut);
- }
- break;
-
- case RRD_ALGORITHM_ABSOLUTE:
- case RRD_ALGORITHM_PCENT_OVER_ROW_TOTAL:
- case RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL:
- default:
- if(iterations == 1) {
- // this is the last iteration
- // do not interpolate
- // just show the calculated value
-
- new_value = rd->calculated_value;
- }
- else {
- // we have missed an update
- // interpolate in the middle values
-
- new_value = (calculated_number)
- ( ( (rd->calculated_value - rd->last_calculated_value)
- * (calculated_number)(next_store_ut - last_collect_ut)
- / (calculated_number)(now_collect_ut - last_collect_ut)
- )
- + rd->last_calculated_value
- );
-
- if(unlikely(rrdset_flag_check(st, RRDSET_FLAG_DEBUG)))
- debug(D_RRD_STATS, "%s/%s: CALC2 DEF "
- CALCULATED_NUMBER_FORMAT " = ((("
- "(" CALCULATED_NUMBER_FORMAT " - " CALCULATED_NUMBER_FORMAT ")"
- " * %llu"
- " / %llu) + " CALCULATED_NUMBER_FORMAT
- , st->id, rd->name
- , new_value
- , rd->calculated_value, rd->last_calculated_value
- , (next_store_ut - first_ut)
- , (now_collect_ut - first_ut), rd->last_calculated_value
- );
- }
- break;
- }
-
- if(unlikely(!store_this_entry)) {
- rd->values[st->current_entry] = pack_storage_number(0, SN_NOT_EXISTS);
- continue;
- }
-
- if(likely(rd->updated && rd->collections_counter > 1 && iterations < st->gap_when_lost_iterations_above)) {
- rd->values[st->current_entry] = pack_storage_number(new_value, storage_flags );
- rd->last_stored_value = new_value;
-
- if(unlikely(rrdset_flag_check(st, RRDSET_FLAG_DEBUG)))
- debug(D_RRD_STATS, "%s/%s: STORE[%ld] "
- CALCULATED_NUMBER_FORMAT " = " CALCULATED_NUMBER_FORMAT
- , st->id, rd->name
- , st->current_entry
- , unpack_storage_number(rd->values[st->current_entry]), new_value
- );
- }
- else {
- if(unlikely(rrdset_flag_check(st, RRDSET_FLAG_DEBUG)))
- debug(D_RRD_STATS, "%s/%s: STORE[%ld] = NON EXISTING "
- , st->id, rd->name
- , st->current_entry
- );
- rd->values[st->current_entry] = pack_storage_number(0, SN_NOT_EXISTS);
- rd->last_stored_value = NAN;
- }
-
- stored_entries++;
-
- if(unlikely(rrdset_flag_check(st, RRDSET_FLAG_DEBUG))) {
- calculated_number t1 = new_value * (calculated_number)rd->multiplier / (calculated_number)rd->divisor;
- calculated_number t2 = unpack_storage_number(rd->values[st->current_entry]);
- calculated_number accuracy = accuracy_loss(t1, t2);
- debug(D_RRD_STATS, "%s/%s: UNPACK[%ld] = " CALCULATED_NUMBER_FORMAT " FLAGS=0x%08x (original = " CALCULATED_NUMBER_FORMAT ", accuracy loss = " CALCULATED_NUMBER_FORMAT "%%%s)"
- , st->id, rd->name
- , st->current_entry
- , t2
- , get_storage_number_flags(rd->values[st->current_entry])
- , t1
- , accuracy
- , (accuracy > ACCURACY_LOSS) ? " **TOO BIG** " : ""
- );
-
- rd->collected_volume += t1;
- rd->stored_volume += t2;
- accuracy = accuracy_loss(rd->collected_volume, rd->stored_volume);
- debug(D_RRD_STATS, "%s/%s: VOLUME[%ld] = " CALCULATED_NUMBER_FORMAT ", calculated = " CALCULATED_NUMBER_FORMAT ", accuracy loss = " CALCULATED_NUMBER_FORMAT "%%%s"
- , st->id, rd->name
- , st->current_entry
- , rd->stored_volume
- , rd->collected_volume
- , accuracy
- , (accuracy > ACCURACY_LOSS) ? " **TOO BIG** " : ""
- );
-
- }
- }
- // reset the storage flags for the next point, if any;
- storage_flags = SN_EXISTS;
-
- st->counter++;
- st->current_entry = ((st->current_entry + 1) >= st->entries) ? 0 : st->current_entry + 1;
- last_stored_ut = next_store_ut;
+ rrdset_debug(st, "THIS IS IN THE SAME INTERPOLATION POINT");
+ info("INTERNAL CHECK: host '%s', chart '%s' is collected in the same interpolation point: short by %llu microseconds", st->rrdhost->hostname, st->name, next_store_ut - now_collect_ut);
}
+#endif
+
+ rrdset_done_interpolate(st
+ , update_every_ut
+ , last_stored_ut
+ , next_store_ut
+ , last_collect_ut
+ , now_collect_ut
+ , store_this_entry
+ , storage_flags
+ );
st->last_collected_total = st->collected_total;
@@ -1206,29 +1387,35 @@ void rrdset_done(RRDSET *st) {
if(unlikely(!rd->updated))
continue;
- if(unlikely(rrdset_flag_check(st, RRDSET_FLAG_DEBUG)))
- debug(D_RRD_STATS, "%s/%s: setting last_collected_value (old: " COLLECTED_NUMBER_FORMAT ") to last_collected_value (new: " COLLECTED_NUMBER_FORMAT ")", st->id, rd->name, rd->last_collected_value, rd->collected_value);
+ #ifdef NETDATA_INTERNAL_CHECKS
+ rrdset_debug(st, "%s: setting last_collected_value (old: " COLLECTED_NUMBER_FORMAT ") to last_collected_value (new: " COLLECTED_NUMBER_FORMAT ")", rd->name, rd->last_collected_value, rd->collected_value);
+ #endif
rd->last_collected_value = rd->collected_value;
switch(rd->algorithm) {
case RRD_ALGORITHM_INCREMENTAL:
if(unlikely(!first_entry)) {
- if(unlikely(rrdset_flag_check(st, RRDSET_FLAG_DEBUG)))
- debug(D_RRD_STATS, "%s/%s: setting last_calculated_value (old: " CALCULATED_NUMBER_FORMAT ") to last_calculated_value (new: " CALCULATED_NUMBER_FORMAT ")", st->id, rd->name, rd->last_calculated_value + rd->calculated_value, rd->calculated_value);
+ #ifdef NETDATA_INTERNAL_CHECKS
+ rrdset_debug(st, "%s: setting last_calculated_value (old: " CALCULATED_NUMBER_FORMAT ") to last_calculated_value (new: " CALCULATED_NUMBER_FORMAT ")", rd->name, rd->last_calculated_value + rd->calculated_value, rd->calculated_value);
+ #endif
+
rd->last_calculated_value += rd->calculated_value;
}
else {
- if(unlikely(rrdset_flag_check(st, RRDSET_FLAG_DEBUG)))
- debug(D_RRD_STATS, "%s: THIS IS THE FIRST POINT", st->name);
+ #ifdef NETDATA_INTERNAL_CHECKS
+ rrdset_debug(st, "THIS IS THE FIRST POINT");
+ #endif
}
break;
case RRD_ALGORITHM_ABSOLUTE:
case RRD_ALGORITHM_PCENT_OVER_ROW_TOTAL:
case RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL:
- if(unlikely(rrdset_flag_check(st, RRDSET_FLAG_DEBUG)))
- debug(D_RRD_STATS, "%s/%s: setting last_calculated_value (old: " CALCULATED_NUMBER_FORMAT ") to last_calculated_value (new: " CALCULATED_NUMBER_FORMAT ")", st->id, rd->name, rd->last_calculated_value, rd->calculated_value);
+ #ifdef NETDATA_INTERNAL_CHECKS
+ rrdset_debug(st, "%s: setting last_calculated_value (old: " CALCULATED_NUMBER_FORMAT ") to last_calculated_value (new: " CALCULATED_NUMBER_FORMAT ")", rd->name, rd->last_calculated_value, rd->calculated_value);
+ #endif
+
rd->last_calculated_value = rd->calculated_value;
break;
}
@@ -1237,18 +1424,20 @@ void rrdset_done(RRDSET *st) {
rd->collected_value = 0;
rd->updated = 0;
- if(unlikely(rrdset_flag_check(st, RRDSET_FLAG_DEBUG)))
- debug(D_RRD_STATS, "%s/%s: END "
+ #ifdef NETDATA_INTERNAL_CHECKS
+ rrdset_debug(st, "%s: END "
" last_collected_value = " COLLECTED_NUMBER_FORMAT
" collected_value = " COLLECTED_NUMBER_FORMAT
" last_calculated_value = " CALCULATED_NUMBER_FORMAT
" calculated_value = " CALCULATED_NUMBER_FORMAT
- , st->id, rd->name
+ , rd->name
, rd->last_collected_value
, rd->collected_value
, rd->last_calculated_value
, rd->calculated_value
- );
+ );
+ #endif
+
}
// ALL DONE ABOUT THE DATA UPDATE
diff --git a/src/socket.c b/src/socket.c
index 400c1ef4..2b382119 100644
--- a/src/socket.c
+++ b/src/socket.c
@@ -1,44 +1,267 @@
#include "common.h"
-// connect_to()
-//
-// definition format:
-//
-// [PROTOCOL:]IP[%INTERFACE][:PORT]
-//
-// PROTOCOL = tcp or udp
-// IP = IPv4 or IPv6 IP or hostname, optionally enclosed in [] (required for IPv6)
-// INTERFACE = for IPv6 only, the network interface to use
-// PORT = port number or service name
+// --------------------------------------------------------------------------------------------------------------------
+// various library calls
-int connect_to(const char *definition, int default_port, struct timeval *timeout) {
+#ifdef __gnu_linux__
+#define LARGE_SOCK_SIZE 33554431 // 32MiB - 1; taken from the brubeck source - simply a large buffer size
+#else
+#define LARGE_SOCK_SIZE 4096
+#endif
+
+int sock_setnonblock(int fd) {
+ int flags;
+
+ flags = fcntl(fd, F_GETFL);
+ flags |= O_NONBLOCK;
+
+ int ret = fcntl(fd, F_SETFL, flags);
+ if(ret < 0)
+ error("Failed to set O_NONBLOCK on socket %d", fd);
+
+ return ret;
+}
+
+int sock_delnonblock(int fd) {
+ int flags;
+
+ flags = fcntl(fd, F_GETFL);
+ flags &= ~O_NONBLOCK;
+
+ int ret = fcntl(fd, F_SETFL, flags);
+ if(ret < 0)
+ error("Failed to remove O_NONBLOCK on socket %d", fd);
+
+ return ret;
+}
+
+int sock_setreuse(int fd, int reuse) {
+ int ret = setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &reuse, sizeof(reuse));
+
+ if(ret == -1)
+ error("Failed to set SO_REUSEADDR on socket %d", fd);
+
+ return ret;
+}
+
+int sock_setreuse_port(int fd, int reuse) {
+ int ret = -1;
+#ifdef SO_REUSEPORT
+ ret = setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &reuse, sizeof(reuse));
+ if(ret == -1)
+ error("failed to set SO_REUSEPORT on socket %d", fd);
+#endif
+
+ return ret;
+}
+
+int sock_enlarge_in(int fd) {
+ int ret, bs = LARGE_SOCK_SIZE;
+
+ ret = setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &bs, sizeof(bs));
+
+ if(ret == -1)
+ error("Failed to set SO_RCVBUF on socket %d", fd);
+
+ return ret;
+}
+
+int sock_enlarge_out(int fd) {
+ int ret, bs = LARGE_SOCK_SIZE;
+ ret = setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &bs, sizeof(bs));
+
+ if(ret == -1)
+ error("Failed to set SO_SNDBUF on socket %d", fd);
+
+ return ret;
+}
+
+
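Reviewer note: sock_enlarge_in()/sock_enlarge_out() silently accept whatever the kernel grants. A quick standalone check of the effective size (not netdata code; on Linux the kernel doubles the requested SO_RCVBUF value and caps it at net.core.rmem_max, so reading the option back shows what was really allocated):

    #include <netinet/in.h>
    #include <stdio.h>
    #include <sys/socket.h>
    #include <unistd.h>

    int main(void) {
        int fd = socket(AF_INET, SOCK_STREAM, 0);
        if(fd < 0) { perror("socket"); return 1; }

        int requested = 33554431;  // LARGE_SOCK_SIZE
        if(setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &requested, sizeof(requested)) == -1)
            perror("setsockopt(SO_RCVBUF)");

        int effective = 0;
        socklen_t len = sizeof(effective);
        if(getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &effective, &len) == -1)
            perror("getsockopt(SO_RCVBUF)");

        printf("requested %d bytes, effective %d bytes\n", requested, effective);
        close(fd);
        return 0;
    }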
+// --------------------------------------------------------------------------------------------------------------------
+// listening sockets
+
+int create_listen_socket4(int socktype, const char *ip, int port, int listen_backlog) {
+ int sock;
+
+ debug(D_LISTENER, "LISTENER: IPv4 creating new listening socket on ip '%s' port %d, socktype %d", ip, port, socktype);
+
+ sock = socket(AF_INET, socktype, 0);
+ if(sock < 0) {
+ error("LISTENER: IPv4 socket() on ip '%s' port %d, socktype %d failed.", ip, port, socktype);
+ return -1;
+ }
+
+ sock_setreuse(sock, 1);
+ sock_setreuse_port(sock, 1);
+ sock_setnonblock(sock);
+ sock_enlarge_in(sock);
+
+ struct sockaddr_in name;
+ memset(&name, 0, sizeof(struct sockaddr_in));
+ name.sin_family = AF_INET;
+ name.sin_port = htons((uint16_t)port);
+
+ int ret = inet_pton(AF_INET, ip, (void *)&name.sin_addr.s_addr);
+ if(ret != 1) {
+ error("LISTENER: Failed to convert IP '%s' to a valid IPv4 address.", ip);
+ close(sock);
+ return -1;
+ }
+
+ if(bind (sock, (struct sockaddr *) &name, sizeof (name)) < 0) {
+ close(sock);
+ error("LISTENER: IPv4 bind() on ip '%s' port %d, socktype %d failed.", ip, port, socktype);
+ return -1;
+ }
+
+ if(socktype == SOCK_STREAM && listen(sock, listen_backlog) < 0) {
+ close(sock);
+ error("LISTENER: IPv4 listen() on ip '%s' port %d, socktype %d failed.", ip, port, socktype);
+ return -1;
+ }
+
+ debug(D_LISTENER, "LISTENER: Listening on IPv4 ip '%s' port %d, socktype %d", ip, port, socktype);
+ return sock;
+}
+
+int create_listen_socket6(int socktype, uint32_t scope_id, const char *ip, int port, int listen_backlog) {
+ int sock;
+ int ipv6only = 1;
+
+ debug(D_LISTENER, "LISTENER: IPv6 creating new listening socket on ip '%s' port %d, socktype %d", ip, port, socktype);
+
+ sock = socket(AF_INET6, socktype, 0);
+ if (sock < 0) {
+ error("LISTENER: IPv6 socket() on ip '%s' port %d, socktype %d, failed.", ip, port, socktype);
+ return -1;
+ }
+
+ sock_setreuse(sock, 1);
+ sock_setreuse_port(sock, 1);
+ sock_setnonblock(sock);
+ sock_enlarge_in(sock);
+
+ /* IPv6 only */
+ if(setsockopt(sock, IPPROTO_IPV6, IPV6_V6ONLY, (void*)&ipv6only, sizeof(ipv6only)) != 0)
+ error("LISTENER: Cannot set IPV6_V6ONLY on ip '%s' port %d, socktype %d.", ip, port, socktype);
+
+ struct sockaddr_in6 name;
+ memset(&name, 0, sizeof(struct sockaddr_in6));
+ name.sin6_family = AF_INET6;
+ name.sin6_port = htons ((uint16_t) port);
+ name.sin6_scope_id = scope_id;
+
+ int ret = inet_pton(AF_INET6, ip, (void *)&name.sin6_addr.s6_addr);
+ if(ret != 1) {
+ error("LISTENER: Failed to convert IP '%s' to a valid IPv6 address.", ip);
+ close(sock);
+ return -1;
+ }
+
+
+ if (bind (sock, (struct sockaddr *) &name, sizeof (name)) < 0) {
+ close(sock);
+ error("LISTENER: IPv6 bind() on ip '%s' port %d, socktype %d failed.", ip, port, socktype);
+ return -1;
+ }
+
+ if (socktype == SOCK_STREAM && listen(sock, listen_backlog) < 0) {
+ close(sock);
+ error("LISTENER: IPv6 listen() on ip '%s' port %d, socktype %d failed.", ip, port, socktype);
+ return -1;
+ }
+
+ debug(D_LISTENER, "LISTENER: Listening on IPv6 ip '%s' port %d, socktype %d", ip, port, socktype);
+ return sock;
+}
+
+static inline int listen_sockets_add(LISTEN_SOCKETS *sockets, int fd, int socktype, const char *protocol, const char *ip, int port) {
+ if(sockets->opened >= MAX_LISTEN_FDS) {
+ error("LISTENER: Too many listening sockets. Failed to add listening %s socket at ip '%s' port %d, protocol %s, socktype %d", protocol, ip, port, protocol, socktype);
+ close(fd);
+ return -1;
+ }
+
+ sockets->fds[sockets->opened] = fd;
+
+ char buffer[100 + 1];
+ snprintfz(buffer, 100, "%s:[%s]:%d", protocol, ip, port);
+ sockets->fds_names[sockets->opened] = strdupz(buffer);
+ sockets->fds_types[sockets->opened] = socktype;
+
+ sockets->opened++;
+ return 0;
+}
+
+int listen_sockets_check_is_member(LISTEN_SOCKETS *sockets, int fd) {
+ size_t i;
+ for(i = 0; i < sockets->opened ;i++)
+ if(sockets->fds[i] == fd) return 1;
+
+ return 0;
+}
+
+static inline void listen_sockets_init(LISTEN_SOCKETS *sockets) {
+ size_t i;
+ for(i = 0; i < MAX_LISTEN_FDS ;i++) {
+ sockets->fds[i] = -1;
+ sockets->fds_names[i] = NULL;
+ sockets->fds_types[i] = -1;
+ }
+
+ sockets->opened = 0;
+ sockets->failed = 0;
+}
+
+void listen_sockets_close(LISTEN_SOCKETS *sockets) {
+ size_t i;
+ for(i = 0; i < sockets->opened ;i++) {
+ close(sockets->fds[i]);
+ sockets->fds[i] = -1;
+
+ freez(sockets->fds_names[i]);
+ sockets->fds_names[i] = NULL;
+
+ sockets->fds_types[i] = -1;
+ }
+
+ sockets->opened = 0;
+ sockets->failed = 0;
+}
+
+static inline int bind_to_one(LISTEN_SOCKETS *sockets, const char *definition, int default_port, int listen_backlog) {
+ int added = 0;
struct addrinfo hints;
- struct addrinfo *ai_head = NULL, *ai = NULL;
+ struct addrinfo *result = NULL, *rp = NULL;
char buffer[strlen(definition) + 1];
strcpy(buffer, definition);
- char default_service[10 + 1];
- snprintfz(default_service, 10, "%d", default_port);
+ char buffer2[10 + 1];
+ snprintfz(buffer2, 10, "%d", default_port);
+
+ char *ip = buffer, *port = buffer2, *interface = "";
- char *host = buffer, *service = default_service, *interface = "";
int protocol = IPPROTO_TCP, socktype = SOCK_STREAM;
- uint32_t scope_id = 0;
+ const char *protocol_str = "tcp";
- if(strncmp(host, "tcp:", 4) == 0) {
- host += 4;
+ if(strncmp(ip, "tcp:", 4) == 0) {
+ ip += 4;
protocol = IPPROTO_TCP;
socktype = SOCK_STREAM;
+ protocol_str = "tcp";
}
- else if(strncmp(host, "udp:", 4) == 0) {
- host += 4;
+ else if(strncmp(ip, "udp:", 4) == 0) {
+ ip += 4;
protocol = IPPROTO_UDP;
socktype = SOCK_DGRAM;
+ protocol_str = "udp";
}
- char *e = host;
+ char *e = ip;
if(*e == '[') {
- e = ++host;
+ e = ++ip;
while(*e && *e != ']') e++;
if(*e == ']') {
*e = '\0';
@@ -57,26 +280,142 @@ int connect_to(const char *definition, int default_port, struct timeval *timeout
}
if(*e == ':') {
+ port = e + 1;
*e = '\0';
- e++;
- service = e;
}
- debug(D_CONNECT_TO, "Attempting connection to host = '%s', service = '%s', interface = '%s', protocol = %d (tcp = %d, udp = %d)", host, service, interface, protocol, IPPROTO_TCP, IPPROTO_UDP);
+ uint32_t scope_id = 0;
+ if(*interface) {
+ scope_id = if_nametoindex(interface);
+ if(!scope_id)
+ error("LISTENER: Cannot find a network interface named '%s'. Continuing with limiting the network interface", interface);
+ }
- if(!*host) {
- error("Definition '%s' does not specify a host.", definition);
+ if(!*ip || *ip == '*' || !strcmp(ip, "any") || !strcmp(ip, "all"))
+ ip = NULL;
+
+ if(!*port)
+ port = buffer2;
+
+ memset(&hints, 0, sizeof(hints));
+ hints.ai_family = AF_UNSPEC; /* Allow IPv4 or IPv6 */
+ hints.ai_socktype = socktype;
+ hints.ai_flags = AI_PASSIVE; /* For wildcard IP address */
+ hints.ai_protocol = protocol;
+ hints.ai_canonname = NULL;
+ hints.ai_addr = NULL;
+ hints.ai_next = NULL;
+
+ int r = getaddrinfo(ip, port, &hints, &result);
+ if (r != 0) {
+ error("LISTENER: getaddrinfo('%s', '%s'): %s\n", ip, port, gai_strerror(r));
return -1;
}
- if(*interface) {
- scope_id = if_nametoindex(interface);
- if(!scope_id)
- error("Cannot find a network interface named '%s'. Continuing with limiting the network interface", interface);
+ for (rp = result; rp != NULL; rp = rp->ai_next) {
+ int fd = -1;
+
+ char rip[INET_ADDRSTRLEN + INET6_ADDRSTRLEN] = "INVALID";
+ int rport = default_port;
+
+ switch (rp->ai_addr->sa_family) {
+ case AF_INET: {
+ struct sockaddr_in *sin = (struct sockaddr_in *) rp->ai_addr;
+ inet_ntop(AF_INET, &sin->sin_addr, rip, INET_ADDRSTRLEN);
+ rport = ntohs(sin->sin_port);
+ // info("Attempting to listen on IPv4 '%s' ('%s'), port %d ('%s'), socktype %d", rip, ip, rport, port, socktype);
+ fd = create_listen_socket4(socktype, rip, rport, listen_backlog);
+ break;
+ }
+
+ case AF_INET6: {
+ struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) rp->ai_addr;
+ inet_ntop(AF_INET6, &sin6->sin6_addr, rip, INET6_ADDRSTRLEN);
+ rport = ntohs(sin6->sin6_port);
+ // info("Attempting to listen on IPv6 '%s' ('%s'), port %d ('%s'), socktype %d", rip, ip, rport, port, socktype);
+ fd = create_listen_socket6(socktype, scope_id, rip, rport, listen_backlog);
+ break;
+ }
+
+ default:
+ debug(D_LISTENER, "LISTENER: Unknown socket family %d", rp->ai_addr->sa_family);
+ break;
+ }
+
+ if (fd == -1) {
+ error("LISTENER: Cannot bind to ip '%s', port %d", rip, rport);
+ sockets->failed++;
+ }
+ else {
+ listen_sockets_add(sockets, fd, socktype, protocol_str, rip, rport);
+ added++;
+ }
}
- if(!*service)
- service = default_service;
+ freeaddrinfo(result);
+
+ return added;
+}
+
+int listen_sockets_setup(LISTEN_SOCKETS *sockets) {
+ listen_sockets_init(sockets);
+
+ sockets->backlog = (int) config_get_number(sockets->config_section, "listen backlog", sockets->backlog);
+
+ int old_port = sockets->default_port;
+ sockets->default_port = (int) config_get_number(sockets->config_section, "default port", sockets->default_port);
+ if(sockets->default_port < 1 || sockets->default_port > 65535) {
+ error("LISTENER: Invalid listen port %d given. Defaulting to %d.", sockets->default_port, old_port);
+ sockets->default_port = (int) config_set_number(sockets->config_section, "default port", old_port);
+ }
+ debug(D_OPTIONS, "LISTENER: Default listen port set to %d.", sockets->default_port);
+
+ char *s = config_get(sockets->config_section, "bind to", sockets->default_bind_to);
+ while(*s) {
+ char *e = s;
+
+ // skip separators, moving both s(tart) and e(nd)
+ while(isspace(*e) || *e == ',') s = ++e;
+
+ // move e(nd) to the first separator
+ while(*e && !isspace(*e) && *e != ',') e++;
+
+ // is there anything?
+ if(!*s || s == e) break;
+
+ char buf[e - s + 1];
+ strncpyz(buf, s, e - s);
+ bind_to_one(sockets, buf, sockets->default_port, sockets->backlog);
+
+ s = e;
+ }
+
+ if(sockets->failed) {
+ size_t i;
+ for(i = 0; i < sockets->opened ;i++)
+ info("LISTENER: Listen socket %s opened successfully.", sockets->fds_names[i]);
+ }
+
+ return (int)sockets->opened;
+}
+
+
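Reviewer note: the "bind to" parser in listen_sockets_setup() accepts a space- and/or comma-separated list of definitions. A standalone copy of that tokenizer with example input (the sample value is illustrative; in the function above each token is then handed to bind_to_one()):

    #include <ctype.h>
    #include <stdio.h>

    int main(void) {
        char value[] = "localhost,127.0.0.1:19999 tcp:[::1]%lo:19998 udp:*:8125";
        char *s = value;

        while(*s) {
            char *e = s;

            // skip separators, moving both s(tart) and e(nd)
            while(isspace((unsigned char)*e) || *e == ',') s = ++e;

            // move e(nd) to the first separator
            while(*e && !isspace((unsigned char)*e) && *e != ',') e++;

            // is there anything?
            if(!*s || s == e) break;

            printf("definition: '%.*s'\n", (int)(e - s), s);
            s = e;
        }
        return 0;
    }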
+// --------------------------------------------------------------------------------------------------------------------
+// connect to another host/port
+
+// _connect_to()
+// protocol IPPROTO_TCP, IPPROTO_UDP
+// socktype SOCK_STREAM, SOCK_DGRAM
+// host the destination hostname or IP address (IPv4 or IPv6) to connect to
+// if it resolves to many IPs, all are tried (IPv4 and IPv6)
+// scope_id the if_index id of the interface to use for connecting (0 = any)
+// (used only under IPv6)
+// service the service name or port to connect to
+// timeout the timeout for establishing a connection
+
+static inline int _connect_to(int protocol, int socktype, const char *host, uint32_t scope_id, const char *service, struct timeval *timeout) {
+ struct addrinfo hints;
+ struct addrinfo *ai_head = NULL, *ai = NULL;
memset(&hints, 0, sizeof(hints));
hints.ai_family = PF_UNSPEC; /* Allow IPv4 or IPv6 */
@@ -103,52 +442,52 @@ int connect_to(const char *definition, int default_port, struct timeval *timeout
char servBfr[NI_MAXSERV + 1];
getnameinfo(ai->ai_addr,
- ai->ai_addrlen,
- hostBfr,
- sizeof(hostBfr),
- servBfr,
- sizeof(servBfr),
- NI_NUMERICHOST | NI_NUMERICSERV);
+ ai->ai_addrlen,
+ hostBfr,
+ sizeof(hostBfr),
+ servBfr,
+ sizeof(servBfr),
+ NI_NUMERICHOST | NI_NUMERICSERV);
debug(D_CONNECT_TO, "Address info: host = '%s', service = '%s', ai_flags = 0x%02X, ai_family = %d (PF_INET = %d, PF_INET6 = %d), ai_socktype = %d (SOCK_STREAM = %d, SOCK_DGRAM = %d), ai_protocol = %d (IPPROTO_TCP = %d, IPPROTO_UDP = %d), ai_addrlen = %lu (sockaddr_in = %lu, sockaddr_in6 = %lu)",
- hostBfr,
- servBfr,
- (unsigned int)ai->ai_flags,
- ai->ai_family,
- PF_INET,
- PF_INET6,
- ai->ai_socktype,
- SOCK_STREAM,
- SOCK_DGRAM,
- ai->ai_protocol,
- IPPROTO_TCP,
- IPPROTO_UDP,
- (unsigned long)ai->ai_addrlen,
- (unsigned long)sizeof(struct sockaddr_in),
- (unsigned long)sizeof(struct sockaddr_in6));
+ hostBfr,
+ servBfr,
+ (unsigned int)ai->ai_flags,
+ ai->ai_family,
+ PF_INET,
+ PF_INET6,
+ ai->ai_socktype,
+ SOCK_STREAM,
+ SOCK_DGRAM,
+ ai->ai_protocol,
+ IPPROTO_TCP,
+ IPPROTO_UDP,
+ (unsigned long)ai->ai_addrlen,
+ (unsigned long)sizeof(struct sockaddr_in),
+ (unsigned long)sizeof(struct sockaddr_in6));
switch (ai->ai_addr->sa_family) {
case PF_INET: {
struct sockaddr_in *pSadrIn = (struct sockaddr_in *)ai->ai_addr;
debug(D_CONNECT_TO, "ai_addr = sin_family: %d (AF_INET = %d, AF_INET6 = %d), sin_addr: '%s', sin_port: '%s'",
- pSadrIn->sin_family,
- AF_INET,
- AF_INET6,
- hostBfr,
- servBfr);
+ pSadrIn->sin_family,
+ AF_INET,
+ AF_INET6,
+ hostBfr,
+ servBfr);
break;
}
case PF_INET6: {
struct sockaddr_in6 *pSadrIn6 = (struct sockaddr_in6 *) ai->ai_addr;
debug(D_CONNECT_TO,"ai_addr = sin6_family: %d (AF_INET = %d, AF_INET6 = %d), sin6_addr: '%s', sin6_port: '%s', sin6_flowinfo: %u, sin6_scope_id: %u",
- pSadrIn6->sin6_family,
- AF_INET,
- AF_INET6,
- hostBfr,
- servBfr,
- pSadrIn6->sin6_flowinfo,
- pSadrIn6->sin6_scope_id);
+ pSadrIn6->sin6_family,
+ AF_INET,
+ AF_INET6,
+ hostBfr,
+ servBfr,
+ pSadrIn6->sin6_flowinfo,
+ pSadrIn6->sin6_scope_id);
break;
}
@@ -180,6 +519,85 @@ int connect_to(const char *definition, int default_port, struct timeval *timeout
return fd;
}
+// connect_to()
+//
+// definition format:
+//
+// [PROTOCOL:]IP[%INTERFACE][:PORT]
+//
+// PROTOCOL = tcp or udp
+// IP = IPv4 or IPv6 IP or hostname, optionally enclosed in [] (required for IPv6)
+// INTERFACE = for IPv6 only, the network interface to use
+// PORT = port number or service name
+
+int connect_to(const char *definition, int default_port, struct timeval *timeout) {
+ char buffer[strlen(definition) + 1];
+ strcpy(buffer, definition);
+
+ char default_service[10 + 1];
+ snprintfz(default_service, 10, "%d", default_port);
+
+ char *host = buffer, *service = default_service, *interface = "";
+ int protocol = IPPROTO_TCP, socktype = SOCK_STREAM;
+ uint32_t scope_id = 0;
+
+ if(strncmp(host, "tcp:", 4) == 0) {
+ host += 4;
+ protocol = IPPROTO_TCP;
+ socktype = SOCK_STREAM;
+ }
+ else if(strncmp(host, "udp:", 4) == 0) {
+ host += 4;
+ protocol = IPPROTO_UDP;
+ socktype = SOCK_DGRAM;
+ }
+
+ char *e = host;
+ if(*e == '[') {
+ e = ++host;
+ while(*e && *e != ']') e++;
+ if(*e == ']') {
+ *e = '\0';
+ e++;
+ }
+ }
+ else {
+ while(*e && *e != ':' && *e != '%') e++;
+ }
+
+ if(*e == '%') {
+ *e = '\0';
+ e++;
+ interface = e;
+ while(*e && *e != ':') e++;
+ }
+
+ if(*e == ':') {
+ *e = '\0';
+ e++;
+ service = e;
+ }
+
+ debug(D_CONNECT_TO, "Attempting connection to host = '%s', service = '%s', interface = '%s', protocol = %d (tcp = %d, udp = %d)", host, service, interface, protocol, IPPROTO_TCP, IPPROTO_UDP);
+
+ if(!*host) {
+ error("Definition '%s' does not specify a host.", definition);
+ return -1;
+ }
+
+ if(*interface) {
+ scope_id = if_nametoindex(interface);
+ if(!scope_id)
+ error("Cannot find a network interface named '%s'. Continuing with limiting the network interface", interface);
+ }
+
+ if(!*service)
+ service = default_service;
+
+ return _connect_to(protocol, socktype, host, scope_id, service, timeout);
+}
+
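Reviewer note: the grammar documented above is easiest to grasp from concrete calls. A hedged usage sketch (the destinations, ports and timeout are examples only; the service name 'statsd' resolves only if /etc/services defines it):

    #include <sys/time.h>

    extern int connect_to(const char *definition, int default_port, struct timeval *timeout);

    void connect_examples(void) {
        struct timeval timeout = { .tv_sec = 5, .tv_usec = 0 };

        // plain IPv4, port taken from the definition
        int fd1 = connect_to("10.11.12.1:19999", 19999, &timeout);

        // IPv6 in brackets, scoped to an interface, default port applies
        int fd2 = connect_to("tcp:[fe80::1]%eth0", 19999, &timeout);

        // UDP, with a service name instead of a numeric port
        int fd3 = connect_to("udp:collector.example.com:statsd", 8125, &timeout);

        (void)fd1; (void)fd2; (void)fd3;
    }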
int connect_to_one_of(const char *destination, int default_port, struct timeval *timeout, size_t *reconnects_counter, char *connected_to, size_t connected_to_size) {
int sock = -1;
@@ -213,6 +631,10 @@ int connect_to_one_of(const char *destination, int default_port, struct timeval
return sock;
}
+
+// --------------------------------------------------------------------------------------------------------------------
+// helpers to send/receive data in one call, in blocking mode, with a timeout
+
ssize_t recv_timeout(int sockfd, void *buf, size_t len, int flags, int timeout) {
for(;;) {
struct pollfd fd = {
@@ -274,3 +696,455 @@ ssize_t send_timeout(int sockfd, void *buf, size_t len, int flags, int timeout)
return send(sockfd, buf, len, flags);
}
+
+
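Reviewer note: a short usage sketch for recv_timeout()/send_timeout() (the bodies elided above poll the socket first, so each call blocks for at most the given timeout; the function and buffer names below are illustrative):

    #include <string.h>
    #include <sys/types.h>

    extern ssize_t recv_timeout(int sockfd, void *buf, size_t len, int flags, int timeout);
    extern ssize_t send_timeout(int sockfd, void *buf, size_t len, int flags, int timeout);

    // Hypothetical request/response exchange, 60 second limit per direction.
    ssize_t exchange(int sockfd, char *request, char *response, size_t size) {
        if(send_timeout(sockfd, request, strlen(request), 0, 60) == -1)
            return -1;

        return recv_timeout(sockfd, response, size, 0, 60);
    }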
+// --------------------------------------------------------------------------------------------------------------------
+// accept4() replacement for systems that do not have one
+
+#ifndef HAVE_ACCEPT4
+int accept4(int sock, struct sockaddr *addr, socklen_t *addrlen, int flags) {
+ int fd = accept(sock, addr, addrlen);
+ int newflags = 0;
+
+ if (fd < 0) return fd;
+
+ if (flags & SOCK_NONBLOCK) {
+ newflags |= O_NONBLOCK;
+ flags &= ~SOCK_NONBLOCK;
+ }
+
+#ifdef SOCK_CLOEXEC
+#ifdef O_CLOEXEC
+ if (flags & SOCK_CLOEXEC) {
+ newflags |= O_CLOEXEC;
+ flags &= ~SOCK_CLOEXEC;
+ }
+#endif
+#endif
+
+ if (flags) {
+ errno = EINVAL; // errno takes positive error codes
+ return -1;
+ }
+
+ if (fcntl(fd, F_SETFL, newflags) < 0) {
+ int saved_errno = errno;
+ close(fd);
+ errno = saved_errno;
+ return -1;
+ }
+
+ return fd;
+}
+#endif
+
+
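Reviewer note: with the fallback in place, callers can use accept4() unconditionally. A minimal sketch of the typical call (flags as the listener code uses them further down; _GNU_SOURCE is needed for the glibc prototype):

    #define _GNU_SOURCE
    #include <sys/socket.h>

    // Accept a client and make it non-blocking in one call, whether the OS
    // provides accept4() natively or the replacement above is compiled in.
    int accept_nonblocking(int listener) {
        struct sockaddr_storage addr;
        socklen_t addrlen = sizeof(addr);

        return accept4(listener, (struct sockaddr *)&addr, &addrlen, SOCK_NONBLOCK);
    }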
+// --------------------------------------------------------------------------------------------------------------------
+// accept_socket() - accept a socket and store client IP and port
+
+int accept_socket(int fd, int flags, char *client_ip, size_t ipsize, char *client_port, size_t portsize) {
+ struct sockaddr_storage sadr;
+ socklen_t addrlen = sizeof(sadr);
+
+ int nfd = accept4(fd, (struct sockaddr *)&sadr, &addrlen, flags);
+ if (nfd >= 0) {
+ if (getnameinfo((struct sockaddr *)&sadr, addrlen, client_ip, (socklen_t)ipsize, client_port, (socklen_t)portsize, NI_NUMERICHOST | NI_NUMERICSERV) != 0) {
+ error("LISTENER: cannot getnameinfo() on received client connection.");
+ strncpyz(client_ip, "UNKNOWN", ipsize - 1);
+ strncpyz(client_port, "UNKNOWN", portsize - 1);
+ }
+
+ client_ip[ipsize - 1] = '\0';
+ client_port[portsize - 1] = '\0';
+
+ switch (((struct sockaddr *)&sadr)->sa_family) {
+ case AF_INET:
+ debug(D_LISTENER, "New IPv4 web client from %s port %s on socket %d.", client_ip, client_port, fd);
+ break;
+
+ case AF_INET6:
+ if (strncmp(client_ip, "::ffff:", 7) == 0) {
+ memmove(client_ip, &client_ip[7], strlen(&client_ip[7]) + 1);
+ debug(D_LISTENER, "New IPv4 web client from %s port %s on socket %d.", client_ip, client_port, fd);
+ } else
+ debug(D_LISTENER, "New IPv6 web client from %s port %s on socket %d.", client_ip, client_port, fd);
+ break;
+
+ default:
+ debug(D_LISTENER, "New UNKNOWN web client from %s port %s on socket %d.", client_ip, client_port, fd);
+ break;
+ }
+ }
+
+ return nfd;
+}
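+
+/*
+ * accept_socket() usage sketch -- 'listener' is a placeholder for an
+ * already listening TCP socket:
+ *
+ *   char ip[NI_MAXHOST + 1], port[NI_MAXSERV + 1];
+ *   int client = accept_socket(listener, SOCK_NONBLOCK, ip, sizeof(ip), port, sizeof(port));
+ *   if(client >= 0)
+ *       info("client [%s]:%s connected", ip, port);
+ */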
+
+
+// --------------------------------------------------------------------------------------------------------------------
+// poll() based listener
+// this should be the fastest possible listener for up to 100 sockets
+// above 100, an epoll() interface is needed on Linux
+
+#define POLL_FDS_INCREASE_STEP 10
+
+#define POLLINFO_FLAG_SERVER_SOCKET 0x00000001
+#define POLLINFO_FLAG_CLIENT_SOCKET 0x00000002
+
+struct pollinfo {
+ size_t slot;
+ char *client;
+ struct pollinfo *next;
+ uint32_t flags;
+ int socktype;
+
+ void *data;
+};
+
+struct poll {
+ size_t slots;
+ size_t used;
+ size_t min;
+ size_t max;
+ struct pollfd *fds;
+ struct pollinfo *inf;
+ struct pollinfo *first_free;
+
+ void *(*add_callback)(int fd, short int *events);
+ void (*del_callback)(int fd, void *data);
+ int (*rcv_callback)(int fd, int socktype, void *data, short int *events);
+ int (*snd_callback)(int fd, int socktype, void *data, short int *events);
+};
+
+static inline struct pollinfo *poll_add_fd(struct poll *p, int fd, int socktype, short int events, uint32_t flags) {
+ debug(D_POLLFD, "POLLFD: ADD: request to add fd %d, slots = %zu, used = %zu, min = %zu, max = %zu, next free = %zd", fd, p->slots, p->used, p->min, p->max, p->first_free?(ssize_t)p->first_free->slot:(ssize_t)-1);
+
+ if(unlikely(fd < 0)) return NULL;
+
+ if(unlikely(!p->first_free)) {
+ size_t new_slots = p->slots + POLL_FDS_INCREASE_STEP;
+ debug(D_POLLFD, "POLLFD: ADD: increasing size (current = %zu, new = %zu, used = %zu, min = %zu, max = %zu)", p->slots, new_slots, p->used, p->min, p->max);
+
+ p->fds = reallocz(p->fds, sizeof(struct pollfd) * new_slots);
+ p->inf = reallocz(p->inf, sizeof(struct pollinfo) * new_slots);
+
+ ssize_t i;
+ for(i = new_slots - 1; i >= (ssize_t)p->slots ; i--) {
+ debug(D_POLLFD, "POLLFD: ADD: resetting new slot %zd", i);
+ p->fds[i].fd = -1;
+ p->fds[i].events = 0;
+ p->fds[i].revents = 0;
+
+ p->inf[i].slot = (size_t)i;
+ p->inf[i].flags = 0;
+ p->inf[i].socktype = -1;
+ p->inf[i].client = NULL;
+ p->inf[i].data = NULL;
+ p->inf[i].next = p->first_free;
+ p->first_free = &p->inf[i];
+ }
+
+ p->slots = new_slots;
+ }
+
+ struct pollinfo *pi = p->first_free;
+ p->first_free = p->first_free->next;
+
+ debug(D_POLLFD, "POLLFD: ADD: selected slot %zu, next free is %zd", pi->slot, p->first_free?(ssize_t)p->first_free->slot:(ssize_t)-1);
+
+ struct pollfd *pf = &p->fds[pi->slot];
+ pf->fd = fd;
+ pf->events = events;
+ pf->revents = 0;
+
+ pi->socktype = socktype;
+ pi->flags = flags;
+ pi->next = NULL;
+
+ p->used++;
+ if(unlikely(pi->slot > p->max))
+ p->max = pi->slot;
+
+ if(pi->flags & POLLINFO_FLAG_CLIENT_SOCKET) {
+ pi->data = p->add_callback(fd, &pf->events);
+ }
+
+ if(pi->flags & POLLINFO_FLAG_SERVER_SOCKET) {
+ p->min = pi->slot;
+ }
+
+ debug(D_POLLFD, "POLLFD: ADD: completed, slots = %zu, used = %zu, min = %zu, max = %zu, next free = %zd", p->slots, p->used, p->min, p->max, p->first_free?(ssize_t)p->first_free->slot:(ssize_t)-1);
+
+ return pi;
+}
+
+static inline void poll_close_fd(struct poll *p, struct pollinfo *pi) {
+ struct pollfd *pf = &p->fds[pi->slot];
+ debug(D_POLLFD, "POLLFD: DEL: request to clear slot %zu (fd %d), old next free was %zd", pi->slot, pf->fd, p->first_free?(ssize_t)p->first_free->slot:(ssize_t)-1);
+
+ if(unlikely(pf->fd == -1)) return;
+
+ if(pi->flags & POLLINFO_FLAG_CLIENT_SOCKET) {
+ p->del_callback(pf->fd, pi->data);
+ }
+
+ close(pf->fd);
+ pf->fd = -1;
+ pf->events = 0;
+ pf->revents = 0;
+
+ pi->socktype = -1;
+ pi->flags = 0;
+ pi->data = NULL;
+
+ freez(pi->client);
+ pi->client = NULL;
+
+ pi->next = p->first_free;
+ p->first_free = pi;
+
+ p->used--;
+ if(p->max == pi->slot) {
+ p->max = p->min;
+ ssize_t i;
+ for(i = (ssize_t)pi->slot; i > (ssize_t)p->min ;i--) {
+ if (unlikely(p->fds[i].fd != -1)) {
+ p->max = (size_t)i;
+ break;
+ }
+ }
+ }
+
+ debug(D_POLLFD, "POLLFD: DEL: completed, slots = %zu, used = %zu, min = %zu, max = %zu, next free = %zd", p->slots, p->used, p->min, p->max, p->first_free?(ssize_t)p->first_free->slot:(ssize_t)-1);
+}
+
+static void *add_callback_default(int fd, short int *events) {
+ (void)fd;
+ (void)events;
+
+ return NULL;
+}
+static void del_callback_default(int fd, void *data) {
+ (void)fd;
+ (void)data;
+
+ if(data)
+ error("POLLFD: internal error: del_callback_default() called with data pointer - possible memory leak");
+}
+
+static int rcv_callback_default(int fd, int socktype, void *data, short int *events) {
+ (void)socktype;
+ (void)data;
+ (void)events;
+
+ char buffer[1024 + 1];
+
+ ssize_t rc;
+ do {
+ rc = recv(fd, buffer, 1024, MSG_DONTWAIT);
+ if (rc < 0) {
+ // read failed
+ if (errno != EWOULDBLOCK && errno != EAGAIN) {
+ error("POLLFD: recv() failed.");
+ return -1;
+ }
+ } else if (rc) {
+ // data received
+ info("POLLFD: internal error: discarding %zd bytes received on socket %d", rc, fd);
+ }
+ } while (rc != -1);
+
+ return 0;
+}
+
+static int snd_callback_default(int fd, int socktype, void *data, short int *events) {
+ (void)socktype;
+ (void)data;
+ (void)events;
+
+ *events &= ~POLLOUT;
+
+ info("POLLFD: internal error: nothing to send on socket %d", fd);
+ return 0;
+}
+
+void poll_events_cleanup(void *data) {
+ struct poll *p = (struct poll *)data;
+
+ size_t i;
+ for(i = 0 ; i <= p->max ; i++) {
+ struct pollinfo *pi = &p->inf[i];
+ poll_close_fd(p, pi);
+ }
+
+ freez(p->fds);
+ freez(p->inf);
+}
+
+void poll_events(LISTEN_SOCKETS *sockets
+ , void *(*add_callback)(int fd, short int *events)
+ , void (*del_callback)(int fd, void *data)
+ , int (*rcv_callback)(int fd, int socktype, void *data, short int *events)
+ , int (*snd_callback)(int fd, int socktype, void *data, short int *events)
+ , void *data
+) {
+ int retval;
+
+ struct poll p = {
+ .slots = 0,
+ .used = 0,
+ .max = 0,
+ .fds = NULL,
+ .inf = NULL,
+ .first_free = NULL,
+
+ .add_callback = add_callback?add_callback:add_callback_default,
+ .del_callback = del_callback?del_callback:del_callback_default,
+ .rcv_callback = rcv_callback?rcv_callback:rcv_callback_default,
+ .snd_callback = snd_callback?snd_callback:snd_callback_default
+ };
+
+ size_t i;
+ for(i = 0; i < sockets->opened ;i++) {
+ struct pollinfo *pi = poll_add_fd(&p, sockets->fds[i], sockets->fds_types[i], POLLIN, POLLINFO_FLAG_SERVER_SOCKET);
+ pi->data = data;
+ info("POLLFD: LISTENER: listening on '%s'", (sockets->fds_names[i])?sockets->fds_names[i]:"UNKNOWN");
+ }
+
+ int timeout = -1; // wait forever
+
+ pthread_cleanup_push(poll_events_cleanup, &p);
+
+ for(;;) {
+ if(unlikely(netdata_exit)) break;
+
+ debug(D_POLLFD, "POLLFD: LISTENER: Waiting on %zu sockets...", p.max + 1);
+ retval = poll(p.fds, p.max + 1, timeout);
+
+ if(unlikely(retval == -1)) {
+ error("POLLFD: LISTENER: poll() failed.");
+ continue;
+ }
+ else if(unlikely(!retval)) {
+ debug(D_POLLFD, "POLLFD: LISTENER: poll() timeout.");
+ continue;
+ }
+
+ if(unlikely(netdata_exit)) break;
+
+ for(i = 0 ; i <= p.max ; i++) {
+ struct pollfd *pf = &p.fds[i];
+ struct pollinfo *pi = &p.inf[i];
+ int fd = pf->fd;
+ short int revents = pf->revents;
+ pf->revents = 0;
+
+ if(unlikely(fd == -1)) {
+ debug(D_POLLFD, "POLLFD: LISTENER: ignoring slot %zu, it does not have an fd", i);
+ continue;
+ }
+
+ debug(D_POLLFD, "POLLFD: LISTENER: processing events for slot %zu (events = %d, revents = %d)", i, pf->events, revents);
+
+ if(revents & POLLIN || revents & POLLPRI) {
+ // receiving data
+
+ if(likely(pi->flags & POLLINFO_FLAG_SERVER_SOCKET)) {
+ // new connection
+ // debug(D_POLLFD, "POLLFD: LISTENER: accepting connections from slot %zu (fd %d)", i, fd);
+
+ switch(pi->socktype) {
+ case SOCK_STREAM: {
+ // a TCP socket
+ // we accept the connection
+
+ int nfd;
+ do {
+ char client_ip[NI_MAXHOST + 1];
+ char client_port[NI_MAXSERV + 1];
+
+ debug(D_POLLFD, "POLLFD: LISTENER: calling accept4() slot %zu (fd %d)", i, fd);
+ nfd = accept_socket(fd, SOCK_NONBLOCK, client_ip, NI_MAXHOST + 1, client_port, NI_MAXSERV + 1);
+ if (nfd < 0) {
+ // accept failed
+
+ debug(D_POLLFD, "POLLFD: LISTENER: accept4() slot %zu (fd %d) failed.", i, fd);
+
+ if(errno != EWOULDBLOCK && errno != EAGAIN)
+ error("POLLFD: LISTENER: accept() failed.");
+
+ break;
+ }
+ else {
+ // accept ok
+ info("POLLFD: LISTENER: client '[%s]:%s' connected to '%s'", client_ip, client_port, sockets->fds_names[i]);
+ poll_add_fd(&p, nfd, SOCK_STREAM, POLLIN, POLLINFO_FLAG_CLIENT_SOCKET);
+
+ // it may have realloced them, so refresh our pointers
+ pf = &p.fds[i];
+ pi = &p.inf[i];
+ }
+ } while (nfd != -1);
+ break;
+ }
+
+ case SOCK_DGRAM: {
+ // a UDP socket
+ // we read data from the server socket
+
+ debug(D_POLLFD, "POLLFD: LISTENER: reading data from UDP slot %zu (fd %d)", i, fd);
+
+ p.rcv_callback(fd, pi->socktype, pi->data, &pf->events);
+ break;
+ }
+
+ default: {
+ error("POLLFD: LISTENER: Unknown socktype %d on slot %zu", pi->socktype, pi->slot);
+ break;
+ }
+ }
+ }
+
+ if(likely(pi->flags & POLLINFO_FLAG_CLIENT_SOCKET)) {
+ // read data from client TCP socket
+ debug(D_POLLFD, "POLLFD: LISTENER: reading data from TCP client slot %zu (fd %d)", i, fd);
+
+ if (p.rcv_callback(fd, pi->socktype, pi->data, &pf->events) == -1) {
+ poll_close_fd(&p, pi);
+ continue;
+ }
+ }
+ }
+
+ if(unlikely(revents & POLLOUT)) {
+ // sending data
+ debug(D_POLLFD, "POLLFD: LISTENER: sending data to socket on slot %zu (fd %d)", i, fd);
+
+ if (p.snd_callback(fd, pi->socktype, pi->data, &pf->events) == -1) {
+ poll_close_fd(&p, pi);
+ continue;
+ }
+ }
+
+ if(unlikely(revents & POLLERR)) {
+ error("POLLFD: LISTENER: processing POLLERR events for slot %zu (events = %d, revents = %d)", i, pf->events, revents);
+ poll_close_fd(&p, pi);
+ continue;
+ }
+
+ if(unlikely(revents & POLLHUP)) {
+ error("POLLFD: LISTENER: processing POLLHUP events for slot %zu (events = %d, revents = %d)", i, pf->events, pf->revents);
+ poll_close_fd(&p, pi);
+ continue;
+ }
+
+ if(unlikely(revents & POLLNVAL)) {
+ error("POLLFD: LISTENER: processing POLLNVAP events for slot %zu (events = %d, revents = %d)", i, pf->events, revents);
+ poll_close_fd(&p, pi);
+ continue;
+ }
+ }
+ }
+
+ pthread_cleanup_pop(1);
+ debug(D_POLLFD, "POLLFD: LISTENER: cleanup completed");
+}
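+
+/*
+ * poll_events() usage sketch: NULL callbacks select the defaults above
+ * (connections are accepted, drained and their data discarded), so a
+ * minimal listener -- assuming 'sockets' was prepared for
+ * listen_sockets_setup() -- is:
+ *
+ *   listen_sockets_setup(&sockets);
+ *   if(sockets.opened)
+ *       poll_events(&sockets, NULL, NULL, NULL, NULL, NULL);
+ */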
diff --git a/src/socket.h b/src/socket.h
index 89c154a6..bb95347a 100644
--- a/src/socket.h
+++ b/src/socket.h
@@ -1,14 +1,61 @@
-//
-// Created by costa on 24/12/2016.
-//
-
#ifndef NETDATA_SOCKET_H
#define NETDATA_SOCKET_H
+#ifndef MAX_LISTEN_FDS
+#define MAX_LISTEN_FDS 50
+#endif
+
+typedef struct listen_sockets {
+ const char *config_section; // the netdata configuration section to read settings from
+ const char *default_bind_to; // the default bind to configuration string
+ int default_port; // the default port to use
+ int backlog; // the default listen backlog to use
+
+ size_t opened; // the number of sockets opened
+ size_t failed; // the number of sockets attempted to open, but failed
+ int fds[MAX_LISTEN_FDS]; // the open sockets
+ char *fds_names[MAX_LISTEN_FDS]; // descriptions for the open sockets
+ int fds_types[MAX_LISTEN_FDS]; // the socktype for the open sockets
+} LISTEN_SOCKETS;
+
+extern int listen_sockets_setup(LISTEN_SOCKETS *sockets);
+extern void listen_sockets_close(LISTEN_SOCKETS *sockets);
+
extern int connect_to(const char *definition, int default_port, struct timeval *timeout);
extern int connect_to_one_of(const char *destination, int default_port, struct timeval *timeout, size_t *reconnects_counter, char *connected_to, size_t connected_to_size);
extern ssize_t recv_timeout(int sockfd, void *buf, size_t len, int flags, int timeout);
extern ssize_t send_timeout(int sockfd, void *buf, size_t len, int flags, int timeout);
+extern int sock_setnonblock(int fd);
+extern int sock_delnonblock(int fd);
+extern int sock_setreuse(int fd, int reuse);
+extern int sock_setreuse_port(int fd, int reuse);
+extern int sock_enlarge_in(int fd);
+extern int sock_enlarge_out(int fd);
+
+extern int accept_socket(int fd, int flags, char *client_ip, size_t ipsize, char *client_port, size_t portsize);
+
+#ifndef HAVE_ACCEPT4
+extern int accept4(int sock, struct sockaddr *addr, socklen_t *addrlen, int flags);
+
+#ifndef SOCK_NONBLOCK
+#define SOCK_NONBLOCK 00004000
+#endif /* #ifndef SOCK_NONBLOCK */
+
+#ifndef SOCK_CLOEXEC
+#define SOCK_CLOEXEC 02000000
+#endif /* #ifndef SOCK_CLOEXEC */
+
+#endif /* #ifndef HAVE_ACCEPT4 */
+
+
+extern void poll_events(LISTEN_SOCKETS *sockets
+ , void *(*add_callback)(int fd, short int *events)
+ , void (*del_callback)(int fd, void *data)
+ , int (*rcv_callback)(int fd, int socktype, void *data, short int *events)
+ , int (*snd_callback)(int fd, int socktype, void *data, short int *events)
+ , void *data
+);
+
#endif //NETDATA_SOCKET_H
diff --git a/src/statistical.c b/src/statistical.c
new file mode 100644
index 00000000..807bc25e
--- /dev/null
+++ b/src/statistical.c
@@ -0,0 +1,459 @@
+#include "common.h"
+
+// --------------------------------------------------------------------------------------------------------------------
+
+inline long double sum_and_count(long double *series, size_t entries, size_t *count) {
+ if(unlikely(entries == 0)) {
+ if(likely(count))
+ *count = 0;
+
+ return NAN;
+ }
+
+ if(unlikely(entries == 1)) {
+ if(likely(count))
+ *count = (isnan(series[0])?0:1);
+
+ return series[0];
+ }
+
+ size_t i, c = 0;
+ long double sum = 0;
+
+ for(i = 0; i < entries ; i++) {
+ long double value = series[i];
+ if(unlikely(isnan(value) || isinf(value))) continue;
+ c++;
+ sum += value;
+ }
+
+ if(likely(count))
+ *count = c;
+
+ if(unlikely(c == 0))
+ return NAN;
+
+ return sum;
+}
+
+inline long double sum(long double *series, size_t entries) {
+ return sum_and_count(series, entries, NULL);
+}
+
+inline long double average(long double *series, size_t entries) {
+ size_t count = 0;
+ long double sum = sum_and_count(series, entries, &count);
+
+ if(unlikely(count == 0))
+ return NAN;
+
+ return sum / (long double)count;
+}
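+
+/*
+ * Example: NAN and INF entries are skipped by sum_and_count(), so the
+ * average below is (1 + 2 + 3) / 3 = 2:
+ *
+ *   long double series[] = { 1.0, NAN, 2.0, 3.0 };
+ *   long double avg = average(series, 4); // 2.0
+ */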
+
+// --------------------------------------------------------------------------------------------------------------------
+
+long double moving_average(long double *series, size_t entries, size_t period) {
+ if(unlikely(period <= 0))
+ return 0.0;
+
+ size_t i, count;
+ long double sum = 0, avg = 0;
+ long double p[period];
+
+ for(count = 0; count < period ; count++)
+ p[count] = 0.0;
+
+ for(i = 0, count = 0; i < entries; i++) {
+ long double value = series[i];
+ if(unlikely(isnan(value) || isinf(value))) continue;
+
+ if(unlikely(count < period)) {
+ sum += value;
+ avg = (count == period - 1) ? sum / (long double)period : 0;
+ }
+ else {
+ sum = sum - p[count % period] + value;
+ avg = sum / (long double)period;
+ }
+
+ p[count % period] = value;
+ count++;
+ }
+
+ return avg;
+}
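+
+/*
+ * Example: the return value is the average over the last full window
+ * of 'period' valid samples:
+ *
+ *   long double series[] = { 1.0, 3.0, 5.0 };
+ *   moving_average(series, 3, 2); // (3 + 5) / 2 = 4
+ */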
+
+// --------------------------------------------------------------------------------------------------------------------
+
+static int qsort_compare(const void *a, const void *b) {
+ long double *p1 = (long double *)a, *p2 = (long double *)b;
+ long double n1 = *p1, n2 = *p2;
+
+ if(unlikely(isnan(n1) || isnan(n2))) {
+ if(isnan(n1) && !isnan(n2)) return -1;
+ if(!isnan(n1) && isnan(n2)) return 1;
+ return 0;
+ }
+ if(unlikely(isinf(n1) || isinf(n2))) {
+ if(!isinf(n1) && isinf(n2)) return -1;
+ if(isinf(n1) && !isinf(n2)) return 1;
+ return 0;
+ }
+
+ if(unlikely(n1 < n2)) return -1;
+ if(unlikely(n1 > n2)) return 1;
+ return 0;
+}
+
+inline void sort_series(long double *series, size_t entries) {
+ qsort(series, entries, sizeof(long double), qsort_compare);
+}
+
+inline long double *copy_series(long double *series, size_t entries) {
+ long double *copy = mallocz(sizeof(long double) * entries);
+ memcpy(copy, series, sizeof(long double) * entries);
+ return copy;
+}
+
+long double median_on_sorted_series(long double *series, size_t entries) {
+ if(unlikely(entries == 0))
+ return NAN;
+
+ if(unlikely(entries == 1))
+ return series[0];
+
+ if(unlikely(entries == 2))
+ return (series[0] + series[1]) / 2;
+
+ long double avg;
+ if(entries % 2 == 0) {
+ size_t m = entries / 2;
+ avg = (series[m - 1] + series[m]) / 2;
+ }
+ else {
+ avg = series[entries / 2];
+ }
+
+ return avg;
+}
+
+long double median(long double *series, size_t entries) {
+ if(unlikely(entries == 0))
+ return NAN;
+
+ if(unlikely(entries == 1))
+ return series[0];
+
+ if(unlikely(entries == 2))
+ return (series[0] + series[1]) / 2;
+
+ long double *copy = copy_series(series, entries);
+ sort_series(copy, entries);
+
+ long double avg = median_on_sorted_series(copy, entries);
+
+ freez(copy);
+ return avg;
+}
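+
+/*
+ * Example: median() sorts a private copy, leaving the caller's array
+ * untouched:
+ *
+ *   long double series[] = { 9.0, 1.0, 5.0 };
+ *   long double m = median(series, 3); // 5.0
+ */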
+
+// --------------------------------------------------------------------------------------------------------------------
+
+long double moving_median(long double *series, size_t entries, size_t period) {
+ if(entries <= period)
+ return median(series, entries);
+
+ long double *data = copy_series(series, entries);
+
+ size_t i;
+ for(i = period; i < entries; i++) {
+ data[i - period] = median(&series[i - period], period);
+ }
+
+ long double avg = median(data, entries - period);
+ freez(data);
+ return avg;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+
+// http://stackoverflow.com/a/15150143/4525767
+long double running_median_estimate(long double *series, size_t entries) {
+ long double median = 0.0f;
+ long double average = 0.0f;
+ size_t i;
+
+ for(i = 0; i < entries ; i++) {
+ long double value = series[i];
+ if(unlikely(isnan(value) || isinf(value))) continue;
+
+ average += ( value - average ) * 0.1f; // rough running average.
+ median += copysignl( average * 0.01, value - median );
+ }
+
+ return median;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+
+long double standard_deviation(long double *series, size_t entries) {
+ if(unlikely(entries < 1))
+ return NAN;
+
+ if(unlikely(entries == 1))
+ return series[0];
+
+ size_t i, count = 0;
+ long double sum = 0;
+
+ for(i = 0; i < entries ; i++) {
+ long double value = series[i];
+ if(unlikely(isnan(value) || isinf(value))) continue;
+
+ count++;
+ sum += value;
+ }
+
+ if(unlikely(count == 0))
+ return NAN;
+
+ if(unlikely(count == 1))
+ return sum;
+
+ long double average = sum / (long double)count;
+
+ for(i = 0, count = 0, sum = 0; i < entries ; i++) {
+ long double value = series[i];
+ if(unlikely(isnan(value) || isinf(value))) continue;
+
+ count++;
+ sum += powl(value - average, 2);
+ }
+
+ if(unlikely(count == 0))
+ return NAN;
+
+ if(unlikely(count == 1))
+ return average;
+
+ long double variance = sum / (long double)(count - 1); // sample variance; drop the -1 for the population variance
+
+ long double stddev = sqrtl(variance);
+ return stddev;
+}
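+
+/*
+ * Example: this is the sample standard deviation (divisor count - 1):
+ *
+ *   long double series[] = { 2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0 };
+ *   standard_deviation(series, 8); // sqrtl(32.0 / 7.0), about 2.14
+ */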
+
+// --------------------------------------------------------------------------------------------------------------------
+
+long double single_exponential_smoothing(long double *series, size_t entries, long double alpha) {
+ size_t i, count = 0;
+ long double level = 0, sum = 0;
+
+ if(unlikely(isnan(alpha)))
+ alpha = 0.3;
+
+ for(i = 0; i < entries ; i++) {
+ long double value = series[i];
+ if(unlikely(isnan(value) || isinf(value))) continue;
+ count++;
+
+ sum += value;
+
+ long double last_level = level;
+ level = alpha * value + (1.0 - alpha) * last_level;
+ }
+
+ return level;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+
+// http://grisha.org/blog/2016/02/16/triple-exponential-smoothing-forecasting-part-ii/
+long double double_exponential_smoothing(long double *series, size_t entries, long double alpha, long double beta, long double *forecast) {
+ size_t i, count = 0;
+ long double level = series[0], trend, sum;
+
+ if(unlikely(isnan(alpha)))
+ alpha = 0.3;
+
+ if(unlikely(isnan(beta)))
+ beta = 0.05;
+
+ if(likely(entries > 1))
+ trend = series[1] - series[0];
+ else
+ trend = 0;
+
+ sum = series[0];
+
+ for(i = 1; i < entries ; i++) {
+ long double value = series[i];
+ if(unlikely(isnan(value) || isinf(value))) continue;
+ count++;
+
+ sum += value;
+
+ long double last_level = level;
+
+ level = alpha * value + (1.0 - alpha) * (level + trend);
+ trend = beta * (level - last_level) + (1.0 - beta) * trend;
+ }
+
+ if(forecast)
+ *forecast = level + trend;
+
+ return level;
+}
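+
+/*
+ * Usage sketch (given a long double series[] of 'entries' values):
+ * passing NAN for alpha/beta selects the defaults (0.3 / 0.05), and the
+ * one-step-ahead forecast (level + trend) is returned through the
+ * optional 'forecast' pointer:
+ *
+ *   long double forecast = 0;
+ *   long double level = double_exponential_smoothing(series, entries, NAN, NAN, &forecast);
+ */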
+
+// --------------------------------------------------------------------------------------------------------------------
+
+/*
+ * Based on the R implementation
+ *
+ * a: level component
+ * b: trend component
+ * s: seasonal component
+ *
+ * Additive:
+ *
+ * Yhat[t+h] = a[t] + h * b[t] + s[t + 1 + (h - 1) mod p],
+ * a[t] = α (Y[t] - s[t-p]) + (1-α) (a[t-1] + b[t-1])
+ * b[t] = β (a[t] - a[t-1]) + (1-β) b[t-1]
+ * s[t] = γ (Y[t] - a[t]) + (1-γ) s[t-p]
+ *
+ * Multiplicative:
+ *
+ * Yhat[t+h] = (a[t] + h * b[t]) * s[t + 1 + (h - 1) mod p],
+ * a[t] = α (Y[t] / s[t-p]) + (1-α) (a[t-1] + b[t-1])
+ * b[t] = β (a[t] - a[t-1]) + (1-β) b[t-1]
+ * s[t] = γ (Y[t] / a[t]) + (1-γ) s[t-p]
+ */
+static int __HoltWinters(
+ long double *series,
+ int entries, // start_time + h
+
+ long double alpha, // alpha parameter of Holt-Winters Filter.
+ long double beta, // beta parameter of Holt-Winters Filter. If set to 0, the function will do exponential smoothing.
+ long double gamma, // gamma parameter used for the seasonal component. If set to 0, a non-seasonal model is fitted.
+
+ int *seasonal,
+ int *period,
+ long double *a, // Start value for level (a[0]).
+ long double *b, // Start value for trend (b[0]).
+ long double *s, // Vector of start values for the seasonal component (s_1[0] ... s_p[0])
+
+ /* return values */
+ long double *SSE, // The final sum of squared errors achieved in optimizing
+ long double *level, // Estimated values for the level component (size entries - t + 2)
+ long double *trend, // Estimated values for the trend component (size entries - t + 2)
+ long double *season // Estimated values for the seasonal component (size entries - t + 2)
+)
+{
+ if(unlikely(entries < 4))
+ return 0;
+
+ int start_time = 2;
+
+ long double res = 0, xhat = 0, stmp = 0;
+ int i, i0, s0;
+
+ /* copy start values to the beginning of the vectors */
+ level[0] = *a;
+ if(beta > 0) trend[0] = *b;
+ if(gamma > 0) memcpy(season, s, *period * sizeof(long double));
+
+ for(i = start_time - 1; i < entries; i++) {
+ /* indices for period i */
+ i0 = i - start_time + 2;
+ s0 = i0 + *period - 1;
+
+ /* forecast *for* period i */
+ xhat = level[i0 - 1] + (beta > 0 ? trend[i0 - 1] : 0);
+ stmp = gamma > 0 ? season[s0 - *period] : (*seasonal != 1);
+ if (*seasonal == 1)
+ xhat += stmp;
+ else
+ xhat *= stmp;
+
+ /* Sum of Squared Errors */
+ res = series[i] - xhat;
+ *SSE += res * res;
+
+ /* estimate of level *in* period i */
+ if (*seasonal == 1)
+ level[i0] = alpha * (series[i] - stmp)
+ + (1 - alpha) * (level[i0 - 1] + trend[i0 - 1]);
+ else
+ level[i0] = alpha * (series[i] / stmp)
+ + (1 - alpha) * (level[i0 - 1] + trend[i0 - 1]);
+
+ /* estimate of trend *in* period i */
+ if (beta > 0)
+ trend[i0] = beta * (level[i0] - level[i0 - 1])
+ + (1 - beta) * trend[i0 - 1];
+
+ /* estimate of seasonal component *in* period i */
+ if (gamma > 0) {
+ if (*seasonal == 1)
+ season[s0] = gamma * (series[i] - level[i0])
+ + (1 - gamma) * stmp;
+ else
+ season[s0] = gamma * (series[i] / level[i0])
+ + (1 - gamma) * stmp;
+ }
+ }
+
+ return 1;
+}
+
+long double holtwinters(long double *series, size_t entries, long double alpha, long double beta, long double gamma, long double *forecast) {
+ if(unlikely(isnan(alpha)))
+ alpha = 0.3;
+
+ if(unlikely(isnan(beta)))
+ beta = 0.05;
+
+ if(unlikely(isnan(gamma)))
+ gamma = 0;
+
+ int seasonal = 0;
+ int period = 0;
+ long double a0 = series[0];
+ long double b0 = 0;
+ long double s[1] = { 0 }; // no seasonal start values (gamma defaults to 0, i.e. non-seasonal)
+
+ long double errors = 0.0;
+ size_t nb_computations = entries;
+ long double *estimated_level = callocz(nb_computations, sizeof(long double));
+ long double *estimated_trend = callocz(nb_computations, sizeof(long double));
+ long double *estimated_season = callocz(nb_computations, sizeof(long double));
+
+ int ret = __HoltWinters(
+ series,
+ (int)entries,
+ alpha,
+ beta,
+ gamma,
+ &seasonal,
+ &period,
+ &a0,
+ &b0,
+ s,
+ &errors,
+ estimated_level,
+ estimated_trend,
+ estimated_season
+ );
+
+ long double value = estimated_level[nb_computations - 1];
+
+ if(forecast)
+ *forecast = 0.0;
+
+ freez(estimated_level);
+ freez(estimated_trend);
+ freez(estimated_season);
+
+ if(!ret)
+ return 0.0;
+
+ return value;
+}
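+
+/*
+ * Usage sketch (given a long double series[] of 'entries' values):
+ * NAN arguments select the defaults alpha = 0.3, beta = 0.05 and
+ * gamma = 0, i.e. a non-seasonal level + trend fit:
+ *
+ *   long double forecast = 0;
+ *   long double fitted = holtwinters(series, entries, NAN, NAN, NAN, &forecast);
+ */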
diff --git a/src/statistical.h b/src/statistical.h
new file mode 100644
index 00000000..844e579b
--- /dev/null
+++ b/src/statistical.h
@@ -0,0 +1,19 @@
+#ifndef NETDATA_STATISTICAL_H
+#define NETDATA_STATISTICAL_H
+
+extern long double average(long double *series, size_t entries);
+extern long double moving_average(long double *series, size_t entries, size_t period);
+extern long double median(long double *series, size_t entries);
+extern long double moving_median(long double *series, size_t entries, size_t period);
+extern long double running_median_estimate(long double *series, size_t entries);
+extern long double standard_deviation(long double *series, size_t entries);
+extern long double single_exponential_smoothing(long double *series, size_t entries, long double alpha);
+extern long double double_exponential_smoothing(long double *series, size_t entries, long double alpha, long double beta, long double *forecast);
+extern long double holtwinters(long double *series, size_t entries, long double alpha, long double beta, long double gamma, long double *forecast);
+extern long double sum_and_count(long double *series, size_t entries, size_t *count);
+extern long double sum(long double *series, size_t entries);
+extern long double median_on_sorted_series(long double *series, size_t entries);
+extern long double *copy_series(long double *series, size_t entries);
+extern void sort_series(long double *series, size_t entries);
+
+#endif //NETDATA_STATISTICAL_H
diff --git a/src/statsd.c b/src/statsd.c
new file mode 100644
index 00000000..4dd04757
--- /dev/null
+++ b/src/statsd.c
@@ -0,0 +1,2041 @@
+#include "common.h"
+
+#define STATSD_CHART_PREFIX "statsd"
+#define STATSD_CHART_PRIORITY 90000
+
+// --------------------------------------------------------------------------------------
+
+// #define STATSD_MULTITHREADED 1
+
+#ifdef STATSD_MULTITHREADED
+// DO NOT ENABLE MULTITHREADING - IT IS NOT WELL TESTED
+#define STATSD_AVL_TREE avl_tree_lock
+#define STATSD_AVL_INSERT avl_insert_lock
+#define STATSD_AVL_SEARCH avl_search_lock
+#define STATSD_AVL_INDEX_INIT { .avl_tree = { NULL, statsd_metric_compare }, .rwlock = AVL_LOCK_INITIALIZER }
+#define STATSD_FIRST_PTR_MUTEX netdata_mutex_t first_mutex
+#define STATSD_FIRST_PTR_MUTEX_INIT .first_mutex = NETDATA_MUTEX_INITIALIZER
+#define STATSD_FIRST_PTR_MUTEX_LOCK(index) netdata_mutex_lock(&((index)->first_mutex))
+#define STATSD_FIRST_PTR_MUTEX_UNLOCK(index) netdata_mutex_unlock(&((index)->first_mutex))
+#define STATSD_DICTIONARY_OPTIONS DICTIONARY_FLAG_DEFAULT
+#else
+#define STATSD_AVL_TREE avl_tree
+#define STATSD_AVL_INSERT avl_insert
+#define STATSD_AVL_SEARCH avl_search
+#define STATSD_AVL_INDEX_INIT { .root = NULL, .compar = statsd_metric_compare }
+#define STATSD_FIRST_PTR_MUTEX
+#define STATSD_FIRST_PTR_MUTEX_INIT
+#define STATSD_FIRST_PTR_MUTEX_LOCK(index)
+#define STATSD_FIRST_PTR_MUTEX_UNLOCK(index)
+#define STATSD_DICTIONARY_OPTIONS DICTIONARY_FLAG_SINGLE_THREADED
+#endif
+
+#define STATSD_DECIMAL_DETAIL 1000 // floating point values are multiplied by this when collected (and the chart divisor is set to the same value)
+
+// --------------------------------------------------------------------------------------------------------------------
+// data specific to each metric type
+
+typedef struct statsd_metric_gauge {
+ long double value;
+} STATSD_METRIC_GAUGE;
+
+typedef struct statsd_metric_counter { // counter and meter
+ long long value;
+} STATSD_METRIC_COUNTER;
+
+typedef struct statsd_histogram_extensions {
+ netdata_mutex_t mutex;
+
+ // average is stored in metric->last
+ collected_number last_min;
+ collected_number last_max;
+ collected_number last_percentile;
+ collected_number last_median;
+ collected_number last_stddev;
+ collected_number last_sum;
+
+ RRDDIM *rd_min;
+ RRDDIM *rd_max;
+ RRDDIM *rd_percentile;
+ RRDDIM *rd_median;
+ RRDDIM *rd_stddev;
+ RRDDIM *rd_sum;
+
+ size_t size;
+ size_t used;
+ long double *values; // dynamic array of values collected
+} STATSD_METRIC_HISTOGRAM_EXTENSIONS;
+
+typedef struct statsd_metric_histogram { // histogram and timer
+ STATSD_METRIC_HISTOGRAM_EXTENSIONS *ext;
+} STATSD_METRIC_HISTOGRAM;
+
+typedef struct statsd_metric_set {
+ DICTIONARY *dict;
+ size_t unique;
+} STATSD_METRIC_SET;
+
+
+// --------------------------------------------------------------------------------------------------------------------
+// this is a metric - for all types of metrics
+
+typedef enum statsd_metric_options {
+ STATSD_METRIC_OPTION_NONE = 0x00000000, // no options set
+ STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED = 0x00000001, // do not update the chart dimension, when this metric is not collected
+ STATSD_METRIC_OPTION_PRIVATE_CHART_ENABLED = 0x00000002, // render a private chart for this metric
+ STATSD_METRIC_OPTION_PRIVATE_CHART_CHECKED = 0x00000004, // the metric has been checked if it should get private chart or not
+ STATSD_METRIC_OPTION_CHART_DIMENSION_COUNT = 0x00000008, // show the count of events for this private chart
+ STATSD_METRIC_OPTION_CHECKED_IN_APPS = 0x00000010, // set when this metric has been checked against apps
+} STATS_METRIC_OPTIONS;
+
+typedef enum statsd_metric_type {
+ STATSD_METRIC_TYPE_GAUGE,
+ STATSD_METRIC_TYPE_COUNTER,
+ STATSD_METRIC_TYPE_METER,
+ STATSD_METRIC_TYPE_TIMER,
+ STATSD_METRIC_TYPE_HISTOGRAM,
+ STATSD_METRIC_TYPE_SET
+} STATSD_METRIC_TYPE;
+
+
+typedef struct statsd_metric {
+ avl avl; // indexing
+
+ const char *name; // the name of the metric
+ uint32_t hash; // hash of the name
+
+ STATSD_METRIC_TYPE type;
+
+ // metadata about data collection
+ collected_number events; // the number of times this metric has been collected (never resets)
+ size_t count; // the number of times this metric has been collected since the last flush
+
+ // the actual collected data
+ union {
+ STATSD_METRIC_GAUGE gauge;
+ STATSD_METRIC_COUNTER counter;
+ STATSD_METRIC_HISTOGRAM histogram;
+ STATSD_METRIC_SET set;
+ };
+
+ // chart related members
+ STATS_METRIC_OPTIONS options; // STATSD_METRIC_OPTION_* (bitfield)
+ char reset; // set to 1 to reset this metric to zero
+ collected_number last; // the last value sent to netdata
+ RRDSET *st; // the chart of this metric
+ RRDDIM *rd_value; // the dimension of this metric value
+ RRDDIM *rd_count; // the dimension for the number of events received
+
+ // linking, used for walking through all metrics
+ struct statsd_metric *next;
+} STATSD_METRIC;
+
+
+// --------------------------------------------------------------------------------------------------------------------
+// each type of metric has its own index
+
+typedef struct statsd_index {
+ char *name; // the name of the index of metrics
+ size_t events; // the number of events processed for this index
+ size_t metrics; // the number of metrics in this index
+
+ STATSD_AVL_TREE index; // the AVL tree
+
+ STATSD_METRIC *first; // the linked list of metrics (new metrics are added in front)
+ STATSD_FIRST_PTR_MUTEX; // when multi-threading is enabled, a lock to protect the linked list
+
+ STATS_METRIC_OPTIONS default_options; // default options for all metrics in this index
+} STATSD_INDEX;
+
+static int statsd_metric_compare(void* a, void* b);
+
+// --------------------------------------------------------------------------------------------------------------------
+// synthetic charts
+
+typedef enum statsd_app_chart_dimension_value_type {
+ STATSD_APP_CHART_DIM_VALUE_TYPE_EVENTS,
+ STATSD_APP_CHART_DIM_VALUE_TYPE_LAST,
+ STATSD_APP_CHART_DIM_VALUE_TYPE_AVERAGE,
+ STATSD_APP_CHART_DIM_VALUE_TYPE_SUM,
+ STATSD_APP_CHART_DIM_VALUE_TYPE_MIN,
+ STATSD_APP_CHART_DIM_VALUE_TYPE_MAX,
+ STATSD_APP_CHART_DIM_VALUE_TYPE_PERCENTILE,
+ STATSD_APP_CHART_DIM_VALUE_TYPE_MEDIAN,
+ STATSD_APP_CHART_DIM_VALUE_TYPE_STDDEV
+} STATSD_APP_CHART_DIM_VALUE_TYPE;
+
+typedef struct statsd_app_chart_dimension {
+ const char *name;
+ const char *metric;
+ uint32_t metric_hash;
+ collected_number multiplier;
+ collected_number divisor;
+ STATSD_APP_CHART_DIM_VALUE_TYPE value_type;
+
+ RRDDIM *rd;
+ collected_number *value_ptr;
+ RRD_ALGORITHM algorithm;
+
+ struct statsd_app_chart_dimension *next;
+} STATSD_APP_CHART_DIM;
+
+typedef struct statsd_app_chart {
+ const char *source;
+ const char *id;
+ const char *name;
+ const char *title;
+ const char *family;
+ const char *context;
+ const char *units;
+ long priority;
+ RRDSET_TYPE chart_type;
+ STATSD_APP_CHART_DIM *dimensions;
+ size_t dimensions_count;
+ size_t dimensions_linked_count;
+
+ RRDSET *st;
+ struct statsd_app_chart *next;
+} STATSD_APP_CHART;
+
+typedef struct statsd_app {
+ const char *name;
+ SIMPLE_PATTERN *metrics;
+ STATS_METRIC_OPTIONS default_options;
+ RRD_MEMORY_MODE rrd_memory_mode;
+ long rrd_history_entries;
+
+ const char *source;
+ STATSD_APP_CHART *charts;
+ struct statsd_app *next;
+} STATSD_APP;
+
+// --------------------------------------------------------------------------------------------------------------------
+// global statsd data
+
+static struct statsd {
+ STATSD_INDEX gauges;
+ STATSD_INDEX counters;
+ STATSD_INDEX timers;
+ STATSD_INDEX histograms;
+ STATSD_INDEX meters;
+ STATSD_INDEX sets;
+ size_t unknown_types;
+ size_t socket_errors;
+ size_t tcp_socket_reads;
+ size_t tcp_packets_received;
+ size_t tcp_bytes_read;
+ size_t udp_socket_reads;
+ size_t udp_packets_received;
+ size_t udp_bytes_read;
+
+ int enabled;
+ int update_every;
+ SIMPLE_PATTERN *charts_for;
+
+ size_t private_charts;
+ size_t max_private_charts;
+ size_t max_private_charts_hard;
+ RRD_MEMORY_MODE private_charts_memory_mode;
+ long private_charts_rrd_history_entries;
+
+ STATSD_APP *apps;
+ size_t recvmmsg_size;
+ size_t histogram_increase_step;
+ double histogram_percentile;
+ char *histogram_percentile_str;
+ int threads;
+ LISTEN_SOCKETS sockets;
+} statsd = {
+ .enabled = 1,
+ .max_private_charts = 200,
+ .max_private_charts_hard = 1000,
+ .recvmmsg_size = 10,
+
+ .gauges = {
+ .name = "gauge",
+ .events = 0,
+ .metrics = 0,
+ .index = STATSD_AVL_INDEX_INIT,
+ .default_options = STATSD_METRIC_OPTION_NONE,
+ .first = NULL,
+ STATSD_FIRST_PTR_MUTEX_INIT
+ },
+ .counters = {
+ .name = "counter",
+ .events = 0,
+ .metrics = 0,
+ .index = STATSD_AVL_INDEX_INIT,
+ .default_options = STATSD_METRIC_OPTION_NONE,
+ .first = NULL,
+ STATSD_FIRST_PTR_MUTEX_INIT
+ },
+ .timers = {
+ .name = "timer",
+ .events = 0,
+ .metrics = 0,
+ .index = STATSD_AVL_INDEX_INIT,
+ .default_options = STATSD_METRIC_OPTION_NONE,
+ .first = NULL,
+ STATSD_FIRST_PTR_MUTEX_INIT
+ },
+ .histograms = {
+ .name = "histogram",
+ .events = 0,
+ .metrics = 0,
+ .index = STATSD_AVL_INDEX_INIT,
+ .default_options = STATSD_METRIC_OPTION_NONE,
+ .first = NULL,
+ STATSD_FIRST_PTR_MUTEX_INIT
+ },
+ .meters = {
+ .name = "meter",
+ .events = 0,
+ .metrics = 0,
+ .index = STATSD_AVL_INDEX_INIT,
+ .default_options = STATSD_METRIC_OPTION_NONE,
+ .first = NULL,
+ STATSD_FIRST_PTR_MUTEX_INIT
+ },
+ .sets = {
+ .name = "set",
+ .events = 0,
+ .metrics = 0,
+ .index = STATSD_AVL_INDEX_INIT,
+ .default_options = STATSD_METRIC_OPTION_NONE,
+ .first = NULL,
+ STATSD_FIRST_PTR_MUTEX_INIT
+ },
+
+ .apps = NULL,
+ .histogram_percentile = 95.0,
+ .histogram_increase_step = 10,
+ .threads = 0,
+ .sockets = {
+ .config_section = CONFIG_SECTION_STATSD,
+ .default_bind_to = "udp:localhost tcp:localhost",
+ .default_port = STATSD_LISTEN_PORT,
+ .backlog = STATSD_LISTEN_BACKLOG
+ },
+};
+
+
+// --------------------------------------------------------------------------------------------------------------------
+// statsd index management - add/find metrics
+
+static int statsd_metric_compare(void* a, void* b) {
+ if(((STATSD_METRIC *)a)->hash < ((STATSD_METRIC *)b)->hash) return -1;
+ else if(((STATSD_METRIC *)a)->hash > ((STATSD_METRIC *)b)->hash) return 1;
+ else return strcmp(((STATSD_METRIC *)a)->name, ((STATSD_METRIC *)b)->name);
+}
+
+static inline STATSD_METRIC *stasd_metric_index_find(STATSD_INDEX *index, const char *name, uint32_t hash) {
+ STATSD_METRIC tmp;
+ tmp.name = name;
+ tmp.hash = (hash)?hash:simple_hash(tmp.name);
+
+ return (STATSD_METRIC *)STATSD_AVL_SEARCH(&index->index, (avl *)&tmp);
+}
+
+static inline STATSD_METRIC *statsd_find_or_add_metric(STATSD_INDEX *index, const char *name, STATSD_METRIC_TYPE type) {
+ debug(D_STATSD, "searching for metric '%s' under '%s'", name, index->name);
+
+ uint32_t hash = simple_hash(name);
+
+ STATSD_METRIC *m = stasd_metric_index_find(index, name, hash);
+ if(unlikely(!m)) {
+ debug(D_STATSD, "Creating new %s metric '%s'", index->name, name);
+
+ m = (STATSD_METRIC *)callocz(sizeof(STATSD_METRIC), 1);
+ m->name = strdupz(name);
+ m->hash = hash;
+ m->type = type;
+ m->options = index->default_options;
+
+ if(type == STATSD_METRIC_TYPE_HISTOGRAM || type == STATSD_METRIC_TYPE_TIMER) {
+ m->histogram.ext = callocz(sizeof(STATSD_METRIC_HISTOGRAM_EXTENSIONS), 1);
+ netdata_mutex_init(&m->histogram.ext->mutex);
+ }
+ STATSD_METRIC *n = (STATSD_METRIC *)STATSD_AVL_INSERT(&index->index, (avl *)m);
+ if(unlikely(n != m)) {
+ freez((void *)m->histogram.ext);
+ freez((void *)m->name);
+ freez((void *)m);
+ m = n;
+ }
+ else {
+ STATSD_FIRST_PTR_MUTEX_LOCK(index);
+ index->metrics++;
+ m->next = index->first;
+ index->first = m;
+ STATSD_FIRST_PTR_MUTEX_UNLOCK(index);
+ }
+ }
+
+ index->events++;
+ return m;
+}
+
+
+// --------------------------------------------------------------------------------------------------------------------
+// statsd parsing numbers
+
+static inline long double statsd_parse_float(const char *v, long double def) {
+ long double value;
+
+ if(likely(v && *v)) {
+ char *e = NULL;
+ value = str2ld(v, &e);
+ if(unlikely(e && *e))
+ error("STATSD: excess data '%s' after value '%s'", e, v);
+ }
+ else
+ value = def;
+
+ return value;
+}
+
+static inline long long statsd_parse_int(const char *v, long long def) {
+ long long value;
+
+ if(likely(v && *v)) {
+ char *e = NULL;
+ value = str2ll(v, &e);
+ if(unlikely(e && *e))
+ error("STATSD: excess data '%s' after value '%s'", e, v);
+ }
+ else
+ value = def;
+
+ return value;
+}
+
+
+// --------------------------------------------------------------------------------------------------------------------
+// statsd processors per metric type
+
+static inline void statsd_reset_metric(STATSD_METRIC *m) {
+ m->reset = 0;
+ m->count = 0;
+}
+
+static inline void statsd_process_gauge(STATSD_METRIC *m, const char *value, const char *sampling) {
+ if(unlikely(!value || !*value)) {
+ error("STATSD: metric '%s' of type gauge, with empty value is ignored.", m->name);
+ return;
+ }
+
+ if(unlikely(m->reset)) {
+ // no need to reset anything specific for gauges
+ statsd_reset_metric(m);
+ }
+
+ if(unlikely(*value == '+' || *value == '-'))
+ m->gauge.value += statsd_parse_float(value, 1.0) / statsd_parse_float(sampling, 1.0);
+ else
+ m->gauge.value = statsd_parse_float(value, 1.0) / statsd_parse_float(sampling, 1.0);
+
+ m->events++;
+ m->count++;
+}
+
+static inline void statsd_process_counter(STATSD_METRIC *m, const char *value, const char *sampling) {
+ // we accept empty values for counters
+
+ if(unlikely(m->reset)) statsd_reset_metric(m);
+
+ m->counter.value += roundl((long double)statsd_parse_int(value, 1) / statsd_parse_float(sampling, 1.0));
+
+ m->events++;
+ m->count++;
+}
+
+static inline void statsd_process_meter(STATSD_METRIC *m, const char *value, const char *sampling) {
+ // this is the same with the counter
+ statsd_process_counter(m, value, sampling);
+}
+
+static inline void statsd_process_histogram(STATSD_METRIC *m, const char *value, const char *sampling) {
+ if(unlikely(!value || !*value)) {
+ error("STATSD: metric '%s' of type histogram, with empty value is ignored.", m->name);
+ return;
+ }
+
+ if(unlikely(m->reset)) {
+ m->histogram.ext->used = 0;
+ statsd_reset_metric(m);
+ }
+
+ if(unlikely(m->histogram.ext->used == m->histogram.ext->size)) {
+ netdata_mutex_lock(&m->histogram.ext->mutex);
+ m->histogram.ext->size += statsd.histogram_increase_step;
+ m->histogram.ext->values = reallocz(m->histogram.ext->values, sizeof(long double) * m->histogram.ext->size);
+ netdata_mutex_unlock(&m->histogram.ext->mutex);
+ }
+
+ m->histogram.ext->values[m->histogram.ext->used++] = statsd_parse_float(value, 1.0) / statsd_parse_float(sampling, 1.0);
+
+ m->events++;
+ m->count++;
+}
+
+static inline void statsd_process_timer(STATSD_METRIC *m, const char *value, const char *sampling) {
+ if(unlikely(!value || !*value)) {
+ error("STATSD: metric of type set, with empty value is ignored.");
+ return;
+ }
+
+ // timers are a use case of histogram
+ statsd_process_histogram(m, value, sampling);
+}
+
+static inline void statsd_process_set(STATSD_METRIC *m, const char *value) {
+ if(unlikely(!value || !*value)) {
+ error("STATSD: metric of type set, with empty value is ignored.");
+ return;
+ }
+
+ if(unlikely(m->reset)) {
+ if(likely(m->set.dict)) {
+ dictionary_destroy(m->set.dict);
+ m->set.dict = NULL;
+ }
+ statsd_reset_metric(m);
+ }
+
+ if(unlikely(!m->set.dict)) {
+ m->set.dict = dictionary_create(STATSD_DICTIONARY_OPTIONS|DICTIONARY_FLAG_VALUE_LINK_DONT_CLONE);
+ m->set.unique = 0;
+ }
+
+ void *t = dictionary_get(m->set.dict, value);
+ if(unlikely(!t)) {
+ dictionary_set(m->set.dict, value, NULL, 1);
+ m->set.unique++;
+ }
+
+ m->events++;
+ m->count++;
+}
+
+
+// --------------------------------------------------------------------------------------------------------------------
+// statsd parsing
+
+static void statsd_process_metric(const char *name, const char *value, const char *type, const char *sampling) {
+ debug(D_STATSD, "STATSD: raw metric '%s', value '%s', type '%s', rate '%s'", name?name:"(null)", value?value:"(null)", type?type:"(null)", sampling?sampling:"(null)");
+
+ if(unlikely(!name || !*name)) return;
+ if(unlikely(!type || !*type)) type = "m";
+
+ char t0 = type[0], t1 = type[1];
+
+ if(unlikely(t0 == 'g' && t1 == '\0')) {
+ statsd_process_gauge(
+ statsd_find_or_add_metric(&statsd.gauges, name, STATSD_METRIC_TYPE_GAUGE),
+ value, sampling);
+ }
+ else if(unlikely((t0 == 'c' || t0 == 'C') && t1 == '\0')) {
+ // etsy/statsd uses 'c'
+ // brubeck uses 'C'
+ statsd_process_counter(
+ statsd_find_or_add_metric(&statsd.counters, name, STATSD_METRIC_TYPE_COUNTER),
+ value, sampling);
+ }
+ else if(unlikely(t0 == 'm' && t1 == '\0')) {
+ statsd_process_meter(
+ statsd_find_or_add_metric(&statsd.meters, name, STATSD_METRIC_TYPE_METER),
+ value, sampling);
+ }
+ else if(unlikely(t0 == 'h' && t1 == '\0')) {
+ statsd_process_histogram(
+ statsd_find_or_add_metric(&statsd.histograms, name, STATSD_METRIC_TYPE_HISTOGRAM),
+ value, sampling);
+ }
+ else if(unlikely(t0 == 's' && t1 == '\0')) {
+ statsd_process_set(
+ statsd_find_or_add_metric(&statsd.sets, name, STATSD_METRIC_TYPE_SET),
+ value);
+ }
+ else if(unlikely(t0 == 'm' && t1 == 's' && type[2] == '\0')) {
+ statsd_process_timer(
+ statsd_find_or_add_metric(&statsd.timers, name, STATSD_METRIC_TYPE_TIMER),
+ value, sampling);
+ }
+ else {
+ statsd.unknown_types++;
+ error("STATSD: metric '%s' with value '%s' is sent with unknown metric type '%s'", name, value?value:"", type);
+ }
+}
+
+static inline const char *statsd_parse_skip_up_to(const char *s, char d1, char d2) {
+ char c;
+
+ for(c = *s; c && c != d1 && c != d2 && c != '\r' && c != '\n'; c = *++s) ;
+
+ return s;
+}
+
+const char *statsd_parse_skip_spaces(const char *s) {
+ char c;
+
+ for(c = *s; c && ( c == ' ' || c == '\t' || c == '\r' || c == '\n' ); c = *++s) ;
+
+ return s;
+}
+
+static inline const char *statsd_parse_field_trim(const char *start, char *end) {
+ if(unlikely(!start)) {
+ start = end;
+ return start;
+ }
+
+ while(start <= end && (*start == ' ' || *start == '\t'))
+ start++;
+
+ *end = '\0';
+ end--;
+ while(end >= start && (*end == ' ' || *end == '\t'))
+ *end-- = '\0';
+
+ return start;
+}
+
+static inline size_t statsd_process(char *buffer, size_t size, int require_newlines) {
+ buffer[size] = '\0';
+ debug(D_STATSD, "RECEIVED: %zu bytes: '%s'", size, buffer);
+
+ const char *s = buffer;
+ while(*s) {
+ const char *name = NULL, *value = NULL, *type = NULL, *sampling = NULL;
+ char *name_end = NULL, *value_end = NULL, *type_end = NULL, *sampling_end = NULL;
+
+ s = name_end = (char *)statsd_parse_skip_up_to(name = s, ':', '|');
+ if(name == name_end) {
+ s = statsd_parse_skip_spaces(s);
+ continue;
+ }
+
+ if(likely(*s == ':'))
+ s = value_end = (char *) statsd_parse_skip_up_to(value = ++s, '|', '|');
+
+ if(likely(*s == '|'))
+ s = type_end = (char *) statsd_parse_skip_up_to(type = ++s, '|', '@');
+
+ if(likely(*s == '|' || *s == '@')) {
+ s = sampling_end = (char *) statsd_parse_skip_up_to(sampling = ++s, '\r', '\n');
+ if(*sampling == '@') sampling++;
+ }
+
+ // skip everything until the end of the line
+ while(*s && *s != '\n') s++;
+
+ if(unlikely(require_newlines && *s != '\n' && s > buffer)) {
+ // move the remaining data to the beginning
+ size -= (name - buffer);
+ memmove(buffer, name, size);
+ return size;
+ }
+ else
+ s = statsd_parse_skip_spaces(s);
+
+ statsd_process_metric(
+ statsd_parse_field_trim(name, name_end)
+ , statsd_parse_field_trim(value, value_end)
+ , statsd_parse_field_trim(type, type_end)
+ , statsd_parse_field_trim(sampling, sampling_end)
+ );
+ }
+
+ return 0;
+}
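+
+/*
+ * Example payload, as parsed above -- one metric per line, with the
+ * optional '@'-prefixed sampling rate as the last field:
+ *
+ *   gauge1:10|g
+ *   counter1:1|c|@0.1
+ *   timer1:3.25|ms
+ *   set1:user42|s
+ */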
+
+
+// --------------------------------------------------------------------------------------------------------------------
+// statsd pollfd interface
+
+#define STATSD_TCP_BUFFER_SIZE 65536 // minimize tcp reads
+#define STATSD_UDP_BUFFER_SIZE 9000 // this should be at most the network MTU
+
+typedef enum {
+ STATSD_SOCKET_DATA_TYPE_TCP,
+ STATSD_SOCKET_DATA_TYPE_UDP
+} STATSD_SOCKET_DATA_TYPE;
+
+struct statsd_tcp {
+ STATSD_SOCKET_DATA_TYPE type;
+ size_t size;
+ size_t len;
+ char buffer[];
+};
+
+#ifdef HAVE_RECVMMSG
+struct statsd_udp {
+ STATSD_SOCKET_DATA_TYPE type;
+ size_t size;
+ struct iovec *iovecs;
+ struct mmsghdr *msgs;
+};
+#else
+struct statsd_udp {
+ STATSD_SOCKET_DATA_TYPE type;
+ char buffer[STATSD_UDP_BUFFER_SIZE];
+};
+#endif
+
+// new TCP client connected
+static void *statsd_add_callback(int fd, short int *events) {
+ (void)fd;
+ *events = POLLIN;
+
+ struct statsd_tcp *data = (struct statsd_tcp *)callocz(sizeof(struct statsd_tcp) + STATSD_TCP_BUFFER_SIZE, 1);
+ data->type = STATSD_SOCKET_DATA_TYPE_TCP;
+ data->size = STATSD_TCP_BUFFER_SIZE - 1;
+
+ return data;
+}
+
+// TCP client disconnected
+static void statsd_del_callback(int fd, void *data) {
+ (void)fd;
+
+ if(data) {
+ struct statsd_tcp *t = data;
+ if(t->type == STATSD_SOCKET_DATA_TYPE_TCP) {
+ if(t->len != 0) {
+ statsd.socket_errors++;
+ error("STATSD: client is probably sending unterminated metrics. Closed socket left with '%s'. Trying to process it.", t->buffer);
+ statsd_process(t->buffer, t->len, 0);
+ }
+ }
+ else
+ error("STATSD: internal error: received socket data type is %d, but expected %d", (int)t->type, (int)STATSD_SOCKET_DATA_TYPE_TCP);
+
+ freez(data);
+ }
+
+ return;
+}
+
+// Receive data
+static int statsd_rcv_callback(int fd, int socktype, void *data, short int *events) {
+ *events = POLLIN;
+
+ switch(socktype) {
+ case SOCK_STREAM: {
+ struct statsd_tcp *d = (struct statsd_tcp *)data;
+ if(unlikely(!d)) {
+ error("STATSD: internal error: expected TCP data pointer is NULL");
+ statsd.socket_errors++;
+ return -1;
+ }
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ if(unlikely(d->type != STATSD_SOCKET_DATA_TYPE_TCP)) {
+ error("STATSD: internal error: socket data type should be %d, but it is %d", (int)STATSD_SOCKET_DATA_TYPE_TCP, (int)d->type);
+ statsd.socket_errors++;
+ return -1;
+ }
+#endif
+
+ int ret = 0;
+ ssize_t rc;
+ do {
+ rc = recv(fd, &d->buffer[d->len], d->size - d->len, MSG_DONTWAIT);
+ if (rc < 0) {
+ // read failed
+ if (errno != EWOULDBLOCK && errno != EAGAIN && errno != EINTR) {
+ error("STATSD: recv() on TCP socket %d failed.", fd);
+ statsd.socket_errors++;
+ ret = -1;
+ }
+ }
+ else if (!rc) {
+ // connection closed
+ debug(D_STATSD, "STATSD: client disconnected.");
+ ret = -1;
+ }
+ else {
+ // data received
+ d->len += rc;
+ statsd.tcp_socket_reads++;
+ statsd.tcp_bytes_read += rc;
+ }
+
+ if(likely(d->len > 0)) {
+ statsd.tcp_packets_received++;
+ d->len = statsd_process(d->buffer, d->len, 1);
+ }
+
+ if(unlikely(ret == -1))
+ return -1;
+
+ } while (rc != -1);
+ break;
+ }
+
+ case SOCK_DGRAM: {
+ struct statsd_udp *d = (struct statsd_udp *)data;
+ if(unlikely(!d)) {
+ error("STATSD: internal error: expected UDP data pointer is NULL");
+ statsd.socket_errors++;
+ return -1;
+ }
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ if(unlikely(d->type != STATSD_SOCKET_DATA_TYPE_UDP)) {
+ error("STATSD: internal error: socket data should be %d, but it is %d", (int)d->type, (int)STATSD_SOCKET_DATA_TYPE_UDP);
+ statsd.socket_errors++;
+ return -1;
+ }
+#endif
+
+#ifdef HAVE_RECVMMSG
+ ssize_t rc;
+ do {
+ rc = recvmmsg(fd, d->msgs, (unsigned int)d->size, MSG_DONTWAIT, NULL);
+ if (rc < 0) {
+ // read failed
+ if (errno != EWOULDBLOCK && errno != EAGAIN && errno != EINTR) {
+ error("STATSD: recvmmsg() on UDP socket %d failed.", fd);
+ statsd.socket_errors++;
+ return -1;
+ }
+ } else if (rc) {
+ // data received
+ statsd.udp_socket_reads++;
+ statsd.udp_packets_received += rc;
+
+ size_t i;
+ for (i = 0; i < (size_t)rc; ++i) {
+ size_t len = (size_t)d->msgs[i].msg_len;
+ statsd.udp_bytes_read += len;
+ statsd_process(d->msgs[i].msg_hdr.msg_iov->iov_base, len, 0);
+ }
+ }
+ } while (rc != -1);
+
+#else // !HAVE_RECVMMSG
+ ssize_t rc;
+ do {
+ rc = recv(fd, d->buffer, STATSD_UDP_BUFFER_SIZE - 1, MSG_DONTWAIT);
+ if (rc < 0) {
+ // read failed
+ if (errno != EWOULDBLOCK && errno != EAGAIN && errno != EINTR) {
+ error("STATSD: recv() on UDP socket %d failed.", fd);
+ statsd.socket_errors++;
+ return -1;
+ }
+ } else if (rc) {
+ // data received
+ statsd.udp_socket_reads++;
+ statsd.udp_packets_received++;
+ statsd.udp_bytes_read += rc;
+ statsd_process(d->buffer, (size_t) rc, 0);
+ }
+ } while (rc != -1);
+#endif
+
+ break;
+ }
+
+ default: {
+ error("STATSD: internal error: unknown socktype %d on socket %d", socktype, fd);
+ statsd.socket_errors++;
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static int statsd_snd_callback(int fd, int socktype, void *data, short int *events) {
+ (void)fd;
+ (void)socktype;
+ (void)data;
+ (void)events;
+
+ error("STATSD: snd_callback() called, but we never requested to send data to statsd clients.");
+ return -1;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// statsd child thread to collect metrics from network
+
+void statsd_collector_thread_cleanup(void *data) {
+ struct statsd_udp *d = data;
+
+#ifdef HAVE_RECVMMSG
+ size_t i;
+ for (i = 0; i < d->size; i++)
+ freez(d->iovecs[i].iov_base);
+
+ freez(d->iovecs);
+ freez(d->msgs);
+#endif
+
+ freez(d);
+}
+
+void *statsd_collector_thread(void *ptr) {
+ int id = *((int *)ptr);
+
+ info("STATSD collector thread No %d created with task id %d", id + 1, gettid());
+
+ if(pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, NULL) != 0)
+ error("Cannot set pthread cancel type to DEFERRED.");
+
+ if(pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL) != 0)
+ error("Cannot set pthread cancel state to ENABLE.");
+
+ struct statsd_udp *d = callocz(sizeof(struct statsd_udp), 1);
+ pthread_cleanup_push(statsd_collector_thread_cleanup, d);
+
+#ifdef HAVE_RECVMMSG
+ d->type = STATSD_SOCKET_DATA_TYPE_UDP;
+ d->size = statsd.recvmmsg_size;
+ d->iovecs = callocz(sizeof(struct iovec), d->size);
+ d->msgs = callocz(sizeof(struct mmsghdr), d->size);
+
+ size_t i;
+ for (i = 0; i < d->size; i++) {
+ d->iovecs[i].iov_base = mallocz(STATSD_UDP_BUFFER_SIZE);
+ d->iovecs[i].iov_len = STATSD_UDP_BUFFER_SIZE - 1;
+ d->msgs[i].msg_hdr.msg_iov = &d->iovecs[i];
+ d->msgs[i].msg_hdr.msg_iovlen = 1;
+ }
+#endif
+
+ poll_events(&statsd.sockets
+ , statsd_add_callback
+ , statsd_del_callback
+ , statsd_rcv_callback
+ , statsd_snd_callback
+ , (void *)d
+ );
+
+ pthread_cleanup_pop(1);
+
+ debug(D_WEB_CLIENT, "STATSD: exit!");
+ pthread_exit(NULL);
+ return NULL;
+}
+
+
+// --------------------------------------------------------------------------------------------------------------------
+// statsd applications configuration files parsing
+
+#define STATSD_CONF_LINE_MAX 8192
+
+int statsd_readfile(const char *path, const char *filename) {
+ debug(D_STATSD, "STATSD configuration reading file '%s/%s'", path, filename);
+
+ char buffer[STATSD_CONF_LINE_MAX + 1];
+
+ FILE *fp = NULL;
+ snprintfz(buffer, STATSD_CONF_LINE_MAX, "%s/%s", path, filename);
+ fp = fopen(buffer, "r");
+ if(!fp) {
+ error("STATSD: cannot open file '%s'.", buffer);
+ return -1;
+ }
+
+ STATSD_APP *app = NULL;
+ STATSD_APP_CHART *chart = NULL;
+
+ size_t line = 0;
+ char *s;
+ while(fgets(buffer, STATSD_CONF_LINE_MAX, fp) != NULL) {
+ buffer[STATSD_CONF_LINE_MAX] = '\0';
+ line++;
+
+ s = trim(buffer);
+ if (!s || *s == '#') {
+ debug(D_STATSD, "STATSD: ignoring line %zu of file '%s/%s', it is empty.", line, path, filename);
+ continue;
+ }
+ debug(D_STATSD, "STATSD: processing line %zu of file '%s/%s': %s", line, path, filename, buffer);
+
+ int len = (int) strlen(s);
+ if (*s == '[' && s[len - 1] == ']') {
+ // new section
+ s[len - 1] = '\0';
+ s++;
+
+ if (!strcmp(s, "app")) {
+ // a new app
+ app = callocz(sizeof(STATSD_APP), 1);
+ app->name = strdupz("unnamed");
+ app->rrd_memory_mode = localhost->rrd_memory_mode;
+ app->rrd_history_entries = localhost->rrd_history_entries;
+
+ app->next = statsd.apps;
+ statsd.apps = app;
+ chart = NULL;
+ }
+ else if(app) {
+ // a new chart
+ chart = callocz(sizeof(STATSD_APP_CHART), 1);
+ netdata_fix_chart_id(s);
+ chart->id = strdupz(s);
+ chart->name = strdupz(s);
+ chart->title = strdupz("Statsd chart");
+ chart->context = strdupz(s);
+ chart->family = strdupz("overview");
+ chart->units = strdupz("value");
+ chart->priority = STATSD_CHART_PRIORITY;
+ chart->chart_type = RRDSET_TYPE_LINE;
+
+ chart->next = app->charts;
+ app->charts = chart;
+ }
+ else
+ error("STATSD: ignoring line %zu ('%s') of file '%s/%s', [app] is not defined.", line, s, path, filename);
+
+ continue;
+ }
+
+ if(!app) {
+ error("STATSD: ignoring line %zu ('%s') of file '%s/%s', it is outside all sections.", line, s, path, filename);
+ continue;
+ }
+
+ char *name = s;
+ char *value = strchr(s, '=');
+ if(!value) {
+ error("STATSD: ignoring line %zu ('%s') of file '%s/%s', there is no = in it.", line, s, path, filename);
+ continue;
+ }
+ *value = '\0';
+ value++;
+
+ name = trim(name);
+ value = trim(value);
+
+ if(!name || *name == '#') {
+ error("STATSD: ignoring line %zu of file '%s/%s', name is empty.", line, path, filename);
+ continue;
+ }
+ if(!value) {
+ debug(D_CONFIG, "STATSD: ignoring line %zu of file '%s/%s', value is empty.", line, path, filename);
+ continue;
+ }
+
+ if(!chart) {
+ if(!strcmp(name, "name")) {
+ freez((void *)app->name);
+ netdata_fix_chart_name(value);
+ app->name = strdupz(value);
+ }
+ else if (!strcmp(name, "metrics")) {
+ simple_pattern_free(app->metrics);
+ app->metrics = simple_pattern_create(value, SIMPLE_PATTERN_EXACT);
+ }
+ else if (!strcmp(name, "private charts")) {
+ if (!strcmp(value, "yes") || !strcmp(value, "on"))
+ app->default_options |= STATSD_METRIC_OPTION_PRIVATE_CHART_ENABLED;
+ else
+ app->default_options &= ~STATSD_METRIC_OPTION_PRIVATE_CHART_ENABLED;
+ }
+ else if (!strcmp(name, "gaps when not collected")) {
+ if (!strcmp(value, "yes") || !strcmp(value, "on"))
+ app->default_options |= STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED;
+ }
+ else if (!strcmp(name, "memory mode")) {
+ app->rrd_memory_mode = rrd_memory_mode_id(value);
+ }
+ else if (!strcmp(name, "history")) {
+ app->rrd_history_entries = atol(value);
+ if (app->rrd_history_entries < 5)
+ app->rrd_history_entries = 5;
+ }
+ else {
+ error("STATSD: ignoring line %zu ('%s') of file '%s/%s'. Unknown keyword for the [app] section.", line, name, path, filename);
+ continue;
+ }
+ }
+ else {
+ if(!strcmp(name, "name")) {
+ freez((void *)chart->name);
+ netdata_fix_chart_id(value);
+ chart->name = strdupz(value);
+ }
+ else if(!strcmp(name, "title")) {
+ freez((void *)chart->title);
+ chart->title = strdupz(value);
+ }
+ else if (!strcmp(name, "family")) {
+ freez((void *)chart->family);
+ chart->family = strdupz(value);
+ }
+ else if (!strcmp(name, "context")) {
+ freez((void *)chart->context);
+ netdata_fix_chart_id(value);
+ chart->context = strdupz(value);
+ }
+ else if (!strcmp(name, "units")) {
+ freez((void *)chart->units);
+ chart->units = strdupz(value);
+ }
+ else if (!strcmp(name, "priority")) {
+ chart->priority = atol(value);
+ }
+ else if (!strcmp(name, "type")) {
+ chart->chart_type = rrdset_type_id(value);
+ }
+ else if (!strcmp(name, "dimension")) {
+ // metric [name [type [multiplier [divisor]]]]
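+ // e.g. "dimension = myapp.requests requests events 1 1" (illustrative names)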
+ char *words[5];
+ pluginsd_split_words(value, words, 5);
+
+ char *metric_name = words[0];
+ char *dim_name = words[1];
+ char *type = words[2];
+ char *multiplier = words[3];
+ char *divisor = words[4];
+
+ STATSD_APP_CHART_DIM *dim = callocz(sizeof(STATSD_APP_CHART_DIM), 1);
+
+ dim->metric = strdupz(metric_name);
+ dim->metric_hash = simple_hash(dim->metric);
+
+ dim->name = strdupz((dim_name && *dim_name)?dim_name:metric_name);
+ dim->multiplier = (multiplier && *multiplier)?str2l(multiplier):1;
+ dim->divisor = (divisor && *divisor)?str2l(divisor):1;
+
+ if(!type || !*type) type = "last";
+ if(!strcmp(type, "events")) dim->value_type = STATSD_APP_CHART_DIM_VALUE_TYPE_EVENTS;
+ else if(!strcmp(type, "last")) dim->value_type = STATSD_APP_CHART_DIM_VALUE_TYPE_LAST;
+ else if(!strcmp(type, "min")) dim->value_type = STATSD_APP_CHART_DIM_VALUE_TYPE_MIN;
+ else if(!strcmp(type, "max")) dim->value_type = STATSD_APP_CHART_DIM_VALUE_TYPE_MAX;
+ else if(!strcmp(type, "sum")) dim->value_type = STATSD_APP_CHART_DIM_VALUE_TYPE_SUM;
+ else if(!strcmp(type, "average")) dim->value_type = STATSD_APP_CHART_DIM_VALUE_TYPE_AVERAGE;
+ else if(!strcmp(type, "median")) dim->value_type = STATSD_APP_CHART_DIM_VALUE_TYPE_MEDIAN;
+ else if(!strcmp(type, "stddev")) dim->value_type = STATSD_APP_CHART_DIM_VALUE_TYPE_STDDEV;
+ else if(!strcmp(type, "percentile")) dim->value_type = STATSD_APP_CHART_DIM_VALUE_TYPE_PERCENTILE;
+ else {
+ error("STATSD: invalid type '%s' at line %zu of file '%s/%s'. Using 'last'.", type, line, path, filename);
+ dim->value_type = STATSD_APP_CHART_DIM_VALUE_TYPE_LAST;
+ }
+
+ if(!dim->multiplier) {
+ error("STATSD: invalid multiplier value '%s' at line %zu of file '%s/%s'. Using 1.", multipler, line, path, filename);
+ dim->multiplier = 1;
+ }
+ if(!dim->divisor) {
+ error("STATSD: invalid divisor value '%s' at line %zu of file '%s/%s'. Using 1.", divisor, line, path, filename);
+ dim->divisor = 1;
+ }
+
+ // append it to the chart's list of dimensions
+ STATSD_APP_CHART_DIM *tdim;
+ for(tdim = chart->dimensions; tdim && tdim->next ; tdim = tdim->next) ;
+ if(!tdim) {
+ dim->next = chart->dimensions;
+ chart->dimensions = dim;
+ }
+ else {
+ dim->next = tdim->next;
+ tdim->next = dim;
+ }
+ chart->dimensions_count++;
+
+ debug(D_STATSD, "Added dimension '%s' to chart '%s' of app '%s', for metric '%s', with type %u, multiplier " COLLECTED_NUMBER_FORMAT ", divisor " COLLECTED_NUMBER_FORMAT,
+ dim->name, chart->id, app->name, dim->metric, dim->value_type, dim->multiplier, dim->divisor);
+ }
+ else {
+ error("STATSD: ignoring line %zu ('%s') of file '%s/%s'. Unknown keyword for the [%s] section.", line, name, path, filename, chart->id);
+ continue;
+ }
+ }
+ }
+
+ fclose(fp);
+ return 0;
+}
+
+static void statsd_readdir(const char *path) {
+ size_t pathlen = strlen(path);
+
+ debug(D_STATSD, "STATSD configuration reading directory '%s'", path);
+
+ DIR *dir = opendir(path);
+ if (!dir) {
+ error("STATSD configuration cannot open directory '%s'.", path);
+ return;
+ }
+
+ struct dirent *de = NULL;
+ while ((de = readdir(dir))) {
+ size_t len = strlen(de->d_name);
+
+ if(de->d_type == DT_DIR
+ && (
+ (de->d_name[0] == '.' && de->d_name[1] == '\0')
+ || (de->d_name[0] == '.' && de->d_name[1] == '.' && de->d_name[2] == '\0')
+ )) {
+ debug(D_STATSD, "STATSD: ignoring directory '%s'", de->d_name);
+ continue;
+ }
+
+ else if(de->d_type == DT_DIR) {
+ char *s = mallocz(pathlen + strlen(de->d_name) + 2);
+ strcpy(s, path);
+ strcat(s, "/");
+ strcat(s, de->d_name);
+ statsd_readdir(s);
+ freez(s);
+ continue;
+ }
+
+ else if((de->d_type == DT_LNK || de->d_type == DT_REG || de->d_type == DT_UNKNOWN) &&
+ len > 5 && !strcmp(&de->d_name[len - 5], ".conf")) {
+ statsd_readfile(path, de->d_name);
+ }
+
+ else debug(D_STATSD, "STATSD: ignoring file '%s'", de->d_name);
+ }
+
+ closedir(dir);
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// send metrics to netdata - in private charts - called from the main thread
+
+// extract chart type and chart id from metric name
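+// e.g. a metric "web.requests" with defid "gauge" is split at its first dot:
+// type becomes "<STATSD_CHART_PREFIX>_gauge_web" and id becomes "requests";
+// a metric without a dot keeps the whole prefixed string as type and defid as id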
+static inline void statsd_get_metric_type_and_id(STATSD_METRIC *m, char *type, char *id, const char *defid, size_t len) {
+ char *s;
+
+ snprintfz(type, len, "%s_%s_%s", STATSD_CHART_PREFIX, defid, m->name);
+ for(s = type; *s ;s++)
+ if(unlikely(*s == '.')) break;
+
+ if(*s == '.') {
+ *s++ = '\0';
+ strncpyz(id, s, len);
+ }
+ else {
+ strncpyz(id, defid, len);
+ }
+
+ netdata_fix_chart_id(type);
+ netdata_fix_chart_id(id);
+}
+
+static inline RRDSET *statsd_private_rrdset_create(
+ STATSD_METRIC *m
+ , const char *type
+ , const char *id
+ , const char *name
+ , const char *family
+ , const char *context
+ , const char *title
+ , const char *units
+ , long priority
+ , int update_every
+ , RRDSET_TYPE chart_type
+) {
+ RRD_MEMORY_MODE memory_mode = statsd.private_charts_memory_mode;
+ long history = statsd.private_charts_rrd_history_entries;
+
+ if(unlikely(statsd.private_charts >= statsd.max_private_charts)) {
+ debug(D_STATSD, "STATSD: metric '%s' will be charted with memory mode = none, because the maximum number of charts has been reached.", m->name);
+ info("STATSD: metric '%s' will be charted with memory mode = none, because the maximum number of charts (%zu) has been reached. Increase the number of charts by editing netdata.conf, [statsd] section.", m->name, statsd.max_private_charts);
+ memory_mode = RRD_MEMORY_MODE_NONE;
+ history = 5;
+ }
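+ // note: the chart is still created; with memory mode 'none' it is updated but keeps no history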
+
+ statsd.private_charts++;
+ RRDSET *st = rrdset_create_custom(
+ localhost
+ , type
+ , id
+ , name
+ , family
+ , context
+ , title
+ , units
+ , priority
+ , update_every
+ , chart_type
+ , memory_mode
+ , history
+ );
+ rrdset_flag_set(st, RRDSET_FLAG_STORE_FIRST);
+ // rrdset_flag_set(st, RRDSET_FLAG_DEBUG);
+ return st;
+}
+
+static inline void statsd_private_chart_gauge(STATSD_METRIC *m) {
+ debug(D_STATSD, "updating private chart for gauge metric '%s'", m->name);
+
+ if(unlikely(!m->st)) {
+ char type[RRD_ID_LENGTH_MAX + 1], id[RRD_ID_LENGTH_MAX + 1];
+ statsd_get_metric_type_and_id(m, type, id, "gauge", RRD_ID_LENGTH_MAX);
+
+ m->st = statsd_private_rrdset_create(
+ m
+ , type
+ , id
+ , NULL // name
+ , "gauges" // family (submenu)
+ , m->name // context
+ , m->name // title
+ , "value" // units
+ , STATSD_CHART_PRIORITY
+ , statsd.update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ m->rd_value = rrddim_add(m->st, "gauge", NULL, 1, STATSD_DECIMAL_DETAIL, RRD_ALGORITHM_ABSOLUTE);
+
+ if(m->options & STATSD_METRIC_OPTION_CHART_DIMENSION_COUNT)
+ m->rd_count = rrddim_add(m->st, "events", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(m->st);
+
+ rrddim_set_by_pointer(m->st, m->rd_value, m->last);
+
+ if(m->rd_count)
+ rrddim_set_by_pointer(m->st, m->rd_count, m->events);
+
+ rrdset_done(m->st);
+}
+
+static inline void statsd_private_chart_counter_or_meter(STATSD_METRIC *m, const char *dim, const char *family) {
+ debug(D_STATSD, "updating private chart for %s metric '%s'", dim, m->name);
+
+ if(unlikely(!m->st)) {
+ char type[RRD_ID_LENGTH_MAX + 1], id[RRD_ID_LENGTH_MAX + 1];
+ statsd_get_metric_type_and_id(m, type, id, dim, RRD_ID_LENGTH_MAX);
+
+ m->st = statsd_private_rrdset_create(
+ m
+ , type
+ , id
+ , NULL // name
+ , family // family (submenu)
+ , m->name // context
+ , m->name // title
+ , "events/s" // units
+ , STATSD_CHART_PRIORITY
+ , statsd.update_every
+ , RRDSET_TYPE_AREA
+ );
+
+ m->rd_value = rrddim_add(m->st, dim, NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ if(m->options & STATSD_METRIC_OPTION_CHART_DIMENSION_COUNT)
+ m->rd_count = rrddim_add(m->st, "events", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(m->st);
+
+ rrddim_set_by_pointer(m->st, m->rd_value, m->last);
+
+ if(m->rd_count)
+ rrddim_set_by_pointer(m->st, m->rd_count, m->events);
+
+ rrdset_done(m->st);
+}
+
+static inline void statsd_private_chart_set(STATSD_METRIC *m) {
+ debug(D_STATSD, "updating private chart for set metric '%s'", m->name);
+
+ if(unlikely(!m->st)) {
+ char type[RRD_ID_LENGTH_MAX + 1], id[RRD_ID_LENGTH_MAX + 1];
+ statsd_get_metric_type_and_id(m, type, id, "set", RRD_ID_LENGTH_MAX);
+
+ m->st = statsd_private_rrdset_create(
+ m
+ , type
+ , id
+ , NULL // name
+ , "sets" // family (submenu)
+ , m->name // context
+ , m->name // title
+ , "entries" // units
+ , STATSD_CHART_PRIORITY
+ , statsd.update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ m->rd_value = rrddim_add(m->st, "set", "set size", 1, 1, RRD_ALGORITHM_ABSOLUTE);
+
+ if(m->options & STATSD_METRIC_OPTION_CHART_DIMENSION_COUNT)
+ m->rd_count = rrddim_add(m->st, "events", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(m->st);
+
+ rrddim_set_by_pointer(m->st, m->rd_value, m->last);
+
+ if(m->rd_count)
+ rrddim_set_by_pointer(m->st, m->rd_count, m->events);
+
+ rrdset_done(m->st);
+}
+
+static inline void statsd_private_chart_timer_or_histogram(STATSD_METRIC *m, const char *dim, const char *family, const char *units) {
+ debug(D_STATSD, "updating private chart for %s metric '%s'", dim, m->name);
+
+ if(unlikely(!m->st)) {
+ char type[RRD_ID_LENGTH_MAX + 1], id[RRD_ID_LENGTH_MAX + 1];
+ statsd_get_metric_type_and_id(m, type, id, dim, RRD_ID_LENGTH_MAX);
+
+ m->st = statsd_private_rrdset_create(
+ m
+ , type
+ , id
+ , NULL // name
+ , family // family (submenu)
+ , m->name // context
+ , m->name // title
+ , units // units
+ , STATSD_CHART_PRIORITY
+ , statsd.update_every
+ , RRDSET_TYPE_AREA
+ );
+
+ m->histogram.ext->rd_min = rrddim_add(m->st, "min", NULL, 1, STATSD_DECIMAL_DETAIL, RRD_ALGORITHM_ABSOLUTE);
+ m->histogram.ext->rd_max = rrddim_add(m->st, "max", NULL, 1, STATSD_DECIMAL_DETAIL, RRD_ALGORITHM_ABSOLUTE);
+ m->rd_value = rrddim_add(m->st, "average", NULL, 1, STATSD_DECIMAL_DETAIL, RRD_ALGORITHM_ABSOLUTE);
+ m->histogram.ext->rd_percentile = rrddim_add(m->st, statsd.histogram_percentile_str, NULL, 1, STATSD_DECIMAL_DETAIL, RRD_ALGORITHM_ABSOLUTE);
+ m->histogram.ext->rd_median = rrddim_add(m->st, "median", NULL, 1, STATSD_DECIMAL_DETAIL, RRD_ALGORITHM_ABSOLUTE);
+ m->histogram.ext->rd_stddev = rrddim_add(m->st, "stddev", NULL, 1, STATSD_DECIMAL_DETAIL, RRD_ALGORITHM_ABSOLUTE);
+ m->histogram.ext->rd_sum = rrddim_add(m->st, "sum", NULL, 1, STATSD_DECIMAL_DETAIL, RRD_ALGORITHM_ABSOLUTE);
+
+ if(m->options & STATSD_METRIC_OPTION_CHART_DIMENSION_COUNT)
+ m->rd_count = rrddim_add(m->st, "events", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else rrdset_next(m->st);
+
+ rrddim_set_by_pointer(m->st, m->histogram.ext->rd_min, m->histogram.ext->last_min);
+ rrddim_set_by_pointer(m->st, m->histogram.ext->rd_max, m->histogram.ext->last_max);
+ rrddim_set_by_pointer(m->st, m->histogram.ext->rd_percentile, m->histogram.ext->last_percentile);
+ rrddim_set_by_pointer(m->st, m->histogram.ext->rd_median, m->histogram.ext->last_median);
+ rrddim_set_by_pointer(m->st, m->histogram.ext->rd_stddev, m->histogram.ext->last_stddev);
+ rrddim_set_by_pointer(m->st, m->histogram.ext->rd_sum, m->histogram.ext->last_sum);
+ rrddim_set_by_pointer(m->st, m->rd_value, m->last);
+
+ if(m->rd_count)
+ rrddim_set_by_pointer(m->st, m->rd_count, m->events);
+
+ rrdset_done(m->st);
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// statsd flush metrics
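+// values that carry decimals (gauges, timers, histograms) are stored scaled by
+// STATSD_DECIMAL_DETAIL; their private chart dimensions use the same constant as
+// divisor to scale them back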
+
+static inline void statsd_flush_gauge(STATSD_METRIC *m) {
+ debug(D_STATSD, "flushing gauge metric '%s'", m->name);
+
+ int updated = 0;
+ if(m->count && !m->reset) {
+ m->last = (collected_number) (m->gauge.value * STATSD_DECIMAL_DETAIL);
+
+ m->reset = 1;
+ updated = 1;
+ }
+
+ if(m->options & STATSD_METRIC_OPTION_PRIVATE_CHART_ENABLED && (updated || !(m->options & STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED)))
+ statsd_private_chart_gauge(m);
+}
+
+static inline void statsd_flush_counter_or_meter(STATSD_METRIC *m, const char *dim, const char *family) {
+ debug(D_STATSD, "flushing %s metric '%s'", dim, m->name);
+
+ int updated = 0;
+ if(m->count && !m->reset) {
+ m->last = m->counter.value;
+
+ m->reset = 1;
+ updated = 1;
+ }
+
+ if(m->options & STATSD_METRIC_OPTION_PRIVATE_CHART_ENABLED && (updated || !(m->options & STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED)))
+ statsd_private_chart_counter_or_meter(m, dim, family);
+}
+
+static inline void statsd_flush_counter(STATSD_METRIC *m) {
+ statsd_flush_counter_or_meter(m, "counter", "counters");
+}
+
+static inline void statsd_flush_meter(STATSD_METRIC *m) {
+ statsd_flush_counter_or_meter(m, "meter", "meters");
+}
+
+static inline void statsd_flush_set(STATSD_METRIC *m) {
+ debug(D_STATSD, "flushing set metric '%s'", m->name);
+
+ int updated = 0;
+ if(m->count && !m->reset) {
+ m->last = (collected_number)m->set.unique;
+
+ m->reset = 1;
+ updated = 1;
+ }
+
+ if(m->options & STATSD_METRIC_OPTION_PRIVATE_CHART_ENABLED && (updated || !(m->options & STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED)))
+ statsd_private_chart_set(m);
+}
+
+static inline void statsd_flush_timer_or_histogram(STATSD_METRIC *m, const char *dim, const char *family, const char *units) {
+ debug(D_STATSD, "flushing %s metric '%s'", dim, m->name);
+
+ netdata_mutex_lock(&m->histogram.ext->mutex);
+
+ int updated = 0;
+ if(m->count && !m->reset && m->histogram.ext->used > 0) {
+ size_t len = m->histogram.ext->used;
+ long double *series = m->histogram.ext->values;
+ sort_series(series, len);
+
+ m->histogram.ext->last_min = (collected_number)roundl(series[0] * STATSD_DECIMAL_DETAIL);
+ m->histogram.ext->last_max = (collected_number)roundl(series[len - 1] * STATSD_DECIMAL_DETAIL);
+ m->last = (collected_number)roundl(average(series, len) * STATSD_DECIMAL_DETAIL);
+ m->histogram.ext->last_median = (collected_number)roundl(median_on_sorted_series(series, len) * STATSD_DECIMAL_DETAIL);
+ m->histogram.ext->last_stddev = (collected_number)roundl(standard_deviation(series, len) * STATSD_DECIMAL_DETAIL);
+ m->histogram.ext->last_sum = (collected_number)roundl(sum(series, len) * STATSD_DECIMAL_DETAIL);
+
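+ // the percentile reported is the average of the lowest pct_len sorted values,
+ // e.g. 100 samples with percentile 95.0 give the average of the 95 smallest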
+ size_t pct_len = (size_t)floor((double)len * statsd.histogram_percentile / 100.0);
+ if(pct_len < 1)
+ m->histogram.ext->last_percentile = (collected_number)(series[0] * STATSD_DECIMAL_DETAIL);
+ else
+ m->histogram.ext->last_percentile = (collected_number)roundl(average(series, pct_len) * STATSD_DECIMAL_DETAIL);
+
+ debug(D_STATSD, "STATSD %s metric %s: min " COLLECTED_NUMBER_FORMAT ", max " COLLECTED_NUMBER_FORMAT ", last " COLLECTED_NUMBER_FORMAT ", pcent " COLLECTED_NUMBER_FORMAT ", median " COLLECTED_NUMBER_FORMAT ", stddev " COLLECTED_NUMBER_FORMAT ", sum " COLLECTED_NUMBER_FORMAT,
+ dim, m->name, m->histogram.ext->last_min, m->histogram.ext->last_max, m->last, m->histogram.ext->last_percentile, m->histogram.ext->last_median, m->histogram.ext->last_stddev, m->histogram.ext->last_sum);
+
+ m->reset = 1;
+ updated = 1;
+ }
+
+
+ if(m->options & STATSD_METRIC_OPTION_PRIVATE_CHART_ENABLED && (updated || !(m->options & STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED)))
+ statsd_private_chart_timer_or_histogram(m, dim, family, units);
+
+ netdata_mutex_unlock(&m->histogram.ext->mutex);
+}
+
+static inline void statsd_flush_timer(STATSD_METRIC *m) {
+ statsd_flush_timer_or_histogram(m, "timer", "timers", "milliseconds");
+}
+
+static inline void statsd_flush_histogram(STATSD_METRIC *m) {
+ statsd_flush_timer_or_histogram(m, "histogram", "histograms", "value");
+}
+
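+// gauges, sets, timers and histograms chart the value computed at flush time,
+// while counters and meters keep accumulating, so the incremental algorithm
+// lets the charting engine derive their per-second rate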
+static inline RRD_ALGORITHM statsd_algorithm_for_metric(STATSD_METRIC *m) {
+ switch(m->type) {
+ default:
+ case STATSD_METRIC_TYPE_GAUGE:
+ case STATSD_METRIC_TYPE_SET:
+ case STATSD_METRIC_TYPE_TIMER:
+ case STATSD_METRIC_TYPE_HISTOGRAM:
+ return RRD_ALGORITHM_ABSOLUTE;
+
+ case STATSD_METRIC_TYPE_METER:
+ case STATSD_METRIC_TYPE_COUNTER:
+ return RRD_ALGORITHM_INCREMENTAL;
+ }
+}
+
+static inline void check_if_metric_is_for_app(STATSD_INDEX *index, STATSD_METRIC *m) {
+ (void)index;
+
+ STATSD_APP *app;
+ for(app = statsd.apps; app ;app = app->next) {
+ if(unlikely(simple_pattern_matches(app->metrics, m->name))) {
+ debug(D_STATSD, "metric '%s' matches app '%s'", m->name, app->name);
+
+ // the metric should get the options from the app
+
+ if(app->default_options & STATSD_METRIC_OPTION_PRIVATE_CHART_ENABLED)
+ m->options |= STATSD_METRIC_OPTION_PRIVATE_CHART_ENABLED;
+ else
+ m->options &= ~STATSD_METRIC_OPTION_PRIVATE_CHART_ENABLED;
+
+ if(app->default_options & STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED)
+ m->options |= STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED;
+ else
+ m->options &= ~STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED;
+
+ m->options |= STATSD_METRIC_OPTION_PRIVATE_CHART_CHECKED;
+
+ // check if any chart of this app wants this metric
+ STATSD_APP_CHART *chart;
+ for(chart = app->charts; chart; chart = chart->next) {
+ STATSD_APP_CHART_DIM *dim;
+ for(dim = chart->dimensions; dim ; dim = dim->next) {
+ if(!dim->value_ptr && dim->metric_hash == m->hash && !strcmp(dim->metric, m->name)) {
+ // we have a match - this metric should be linked to this dimension
+
+ if(dim->value_type == STATSD_APP_CHART_DIM_VALUE_TYPE_EVENTS) {
+ dim->value_ptr = &m->events;
+ dim->algorithm = RRD_ALGORITHM_INCREMENTAL;
+ }
+ else if(m->type == STATSD_METRIC_TYPE_HISTOGRAM || m->type == STATSD_METRIC_TYPE_TIMER) {
+ dim->algorithm = RRD_ALGORITHM_ABSOLUTE;
+ dim->divisor *= STATSD_DECIMAL_DETAIL;
+
+ switch(dim->value_type) {
+ case STATSD_APP_CHART_DIM_VALUE_TYPE_EVENTS:
+ // will never match - added to avoid warning
+ break;
+
+ case STATSD_APP_CHART_DIM_VALUE_TYPE_LAST:
+ case STATSD_APP_CHART_DIM_VALUE_TYPE_AVERAGE:
+ dim->value_ptr = &m->last;
+ break;
+
+ case STATSD_APP_CHART_DIM_VALUE_TYPE_SUM:
+ dim->value_ptr = &m->histogram.ext->last_sum;
+ break;
+
+ case STATSD_APP_CHART_DIM_VALUE_TYPE_MIN:
+ dim->value_ptr = &m->histogram.ext->last_min;
+ break;
+
+ case STATSD_APP_CHART_DIM_VALUE_TYPE_MAX:
+ dim->value_ptr = &m->histogram.ext->last_max;
+ break;
+
+ case STATSD_APP_CHART_DIM_VALUE_TYPE_MEDIAN:
+ dim->value_ptr = &m->histogram.ext->last_median;
+ break;
+
+ case STATSD_APP_CHART_DIM_VALUE_TYPE_PERCENTILE:
+ dim->value_ptr = &m->histogram.ext->last_percentile;
+ break;
+
+ case STATSD_APP_CHART_DIM_VALUE_TYPE_STDDEV:
+ dim->value_ptr = &m->histogram.ext->last_stddev;
+ break;
+ }
+ }
+ else {
+ if (dim->value_type != STATSD_APP_CHART_DIM_VALUE_TYPE_LAST)
+ error("STATSD: unsupported value type for dimension '%s' of chart '%s' of app '%s' on metric '%s'", dim->name, chart->id, app->name, m->name);
+
+ dim->value_ptr = &m->last;
+ dim->algorithm = statsd_algorithm_for_metric(m);
+
+ if(m->type == STATSD_METRIC_TYPE_GAUGE)
+ dim->divisor *= STATSD_DECIMAL_DETAIL;
+ }
+
+ if(unlikely(chart->st && dim->rd)) {
+ rrddim_set_algorithm(chart->st, dim->rd, dim->algorithm);
+ rrddim_set_multiplier(chart->st, dim->rd, dim->multiplier);
+ rrddim_set_divisor(chart->st, dim->rd, dim->divisor);
+ }
+
+ chart->dimensions_linked_count++;
+ debug(D_STATSD, "metric '%s' of type %u linked with app '%s', chart '%s', dimension '%s', algorithm '%s'", m->name, m->type, app->name, chart->id, dim->name, rrd_algorithm_name(dim->algorithm));
+ }
+ }
+ }
+ }
+ }
+}
+
+static inline void statsd_update_app_chart(STATSD_APP *app, STATSD_APP_CHART *chart) {
+ debug(D_STATSD, "updating chart '%s' for app '%s'", chart->id, app->name);
+
+ if(!chart->st) {
+ chart->st = rrdset_create_custom(
+ localhost
+ , app->name
+ , chart->id
+ , chart->name
+ , chart->family
+ , chart->context
+ , chart->title
+ , chart->units
+ , chart->priority
+ , statsd.update_every
+ , chart->chart_type
+ , app->rrd_memory_mode
+ , app->rrd_history_entries
+ );
+
+ rrdset_flag_set(chart->st, RRDSET_FLAG_STORE_FIRST);
+ // rrdset_flag_set(chart->st, RRDSET_FLAG_DEBUG);
+ }
+ else rrdset_next(chart->st);
+
+ STATSD_APP_CHART_DIM *dim;
+ for(dim = chart->dimensions; dim ;dim = dim->next) {
+ if(unlikely(!dim->rd))
+ dim->rd = rrddim_add(chart->st, dim->name, NULL, dim->multiplier, dim->divisor, dim->algorithm);
+
+ if(unlikely(dim->value_ptr)) {
+ debug(D_STATSD, "updating dimension '%s' (%s) of chart '%s' (%s) for app '%s' with value " COLLECTED_NUMBER_FORMAT, dim->name, dim->rd->id, chart->id, chart->st->id, app->name, *dim->value_ptr);
+ rrddim_set_by_pointer(chart->st, dim->rd, *dim->value_ptr);
+ }
+ }
+
+ rrdset_done(chart->st);
+ debug(D_STATSD, "completed update of chart '%s' for app '%s'", chart->id, app->name);
+}
+
+static inline void statsd_update_all_app_charts(void) {
+ // debug(D_STATSD, "updating app charts");
+
+ STATSD_APP *app;
+ for(app = statsd.apps; app ;app = app->next) {
+ // debug(D_STATSD, "updating charts for app '%s'", app->name);
+
+ STATSD_APP_CHART *chart;
+ for(chart = app->charts; chart ;chart = chart->next) {
+ if(unlikely(chart->dimensions_linked_count)) {
+ statsd_update_app_chart(app, chart);
+ }
+ }
+ }
+
+ // debug(D_STATSD, "completed update of app charts");
+}
+
+static inline void statsd_flush_index_metrics(STATSD_INDEX *index, void (*flush_metric)(STATSD_METRIC *)) {
+ STATSD_METRIC *m;
+ for(m = index->first; m ; m = m->next) {
+ if(unlikely(!(m->options & STATSD_METRIC_OPTION_CHECKED_IN_APPS))) {
+ check_if_metric_is_for_app(index, m);
+ m->options |= STATSD_METRIC_OPTION_CHECKED_IN_APPS;
+ }
+
+ if(unlikely(!(m->options & STATSD_METRIC_OPTION_PRIVATE_CHART_CHECKED))) {
+ if(statsd.private_charts >= statsd.max_private_charts_hard) {
+ debug(D_STATSD, "STATSD: metric '%s' will not be charted, because the hard limit of the maximum number of charts has been reached.", m->name);
+ info("STATSD: metric '%s' will not be charted, because the hard limit of the maximum number of charts (%zu) has been reached. Increase the number of charts by editing netdata.conf, [statsd] section.", m->name, statsd.max_private_charts);
+ m->options &= ~STATSD_METRIC_OPTION_PRIVATE_CHART_ENABLED;
+ }
+ else {
+ if (simple_pattern_matches(statsd.charts_for, m->name)) {
+ debug(D_STATSD, "STATSD: metric '%s' will be charted.", m->name);
+ m->options |= STATSD_METRIC_OPTION_PRIVATE_CHART_ENABLED;
+ } else {
+ debug(D_STATSD, "STATSD: metric '%s' will not be charted.", m->name);
+ m->options &= ~STATSD_METRIC_OPTION_PRIVATE_CHART_ENABLED;
+ }
+ }
+
+ m->options |= STATSD_METRIC_OPTION_PRIVATE_CHART_CHECKED;
+ }
+
+ flush_metric(m);
+ }
+}
+
+
+// --------------------------------------------------------------------------------------
+// statsd main thread
+
+int statsd_listen_sockets_setup(void) {
+ return listen_sockets_setup(&statsd.sockets);
+}
+
+void statsd_main_cleanup(void *data) {
+ pthread_t *threads = data;
+
+ int i;
+ for(i = 0; i < statsd.threads ;i++)
+ pthread_cancel(threads[i]);
+
+ listen_sockets_close(&statsd.sockets);
+}
+
+void *statsd_main(void *ptr) {
+ (void)ptr;
+
+ info("STATSD main thread created with task id %d", gettid());
+
+ if(pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, NULL) != 0)
+ error("Cannot set pthread cancel type to DEFERRED.");
+
+ if(pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL) != 0)
+ error("Cannot set pthread cancel state to ENABLE.");
+
+ // ----------------------------------------------------------------------------------------------------------------
+ // statsd configuration
+
+ statsd.enabled = config_get_boolean(CONFIG_SECTION_STATSD, "enabled", statsd.enabled);
+
+ statsd.update_every = default_rrd_update_every;
+ statsd.update_every = (int)config_get_number(CONFIG_SECTION_STATSD, "update every (flushInterval)", statsd.update_every);
+ if(statsd.update_every < default_rrd_update_every) {
+ error("STATSD: minimum flush interval %d given, but the minimum is the update every of netdata. Using %d", statsd.update_every, default_rrd_update_every);
+ statsd.update_every = default_rrd_update_every;
+ }
+
+#ifdef HAVE_RECVMMSG
+ statsd.recvmmsg_size = (size_t)config_get_number(CONFIG_SECTION_STATSD, "udp messages to process at once", (long long)statsd.recvmmsg_size);
+#endif
+
+ statsd.charts_for = simple_pattern_create(config_get(CONFIG_SECTION_STATSD, "create private charts for metrics matching", "*"), SIMPLE_PATTERN_EXACT);
+ statsd.max_private_charts = (size_t)config_get_number(CONFIG_SECTION_STATSD, "max private charts allowed", (long long)statsd.max_private_charts);
+ statsd.max_private_charts_hard = (size_t)config_get_number(CONFIG_SECTION_STATSD, "max private charts hard limit", (long long)statsd.max_private_charts * 5);
+ statsd.private_charts_memory_mode = rrd_memory_mode_id(config_get(CONFIG_SECTION_STATSD, "private charts memory mode", rrd_memory_mode_name(default_rrd_memory_mode)));
+ statsd.private_charts_rrd_history_entries = (int)config_get_number(CONFIG_SECTION_STATSD, "private charts history", default_rrd_history_entries);
+
+ statsd.histogram_percentile = (double)config_get_float(CONFIG_SECTION_STATSD, "histograms and timers percentile (percentThreshold)", statsd.histogram_percentile);
+ if(isless(statsd.histogram_percentile, 0) || isgreater(statsd.histogram_percentile, 100)) {
+ error("STATSD: invalid histograms and timers percentile %0.5f given", statsd.histogram_percentile);
+ statsd.histogram_percentile = 95.0;
+ }
+ {
+ char buffer[100 + 1];
+ snprintf(buffer, 100, "%0.1f%%", statsd.histogram_percentile);
+ statsd.histogram_percentile_str = strdupz(buffer);
+ }
+
+ if(config_get_boolean(CONFIG_SECTION_STATSD, "add dimension for number of events received", 1)) {
+ statsd.gauges.default_options |= STATSD_METRIC_OPTION_CHART_DIMENSION_COUNT;
+ statsd.counters.default_options |= STATSD_METRIC_OPTION_CHART_DIMENSION_COUNT;
+ statsd.meters.default_options |= STATSD_METRIC_OPTION_CHART_DIMENSION_COUNT;
+ statsd.sets.default_options |= STATSD_METRIC_OPTION_CHART_DIMENSION_COUNT;
+ statsd.histograms.default_options |= STATSD_METRIC_OPTION_CHART_DIMENSION_COUNT;
+ statsd.timers.default_options |= STATSD_METRIC_OPTION_CHART_DIMENSION_COUNT;
+ }
+
+ if(config_get_boolean(CONFIG_SECTION_STATSD, "gaps on gauges (deleteGauges)", 0))
+ statsd.gauges.default_options |= STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED;
+
+ if(config_get_boolean(CONFIG_SECTION_STATSD, "gaps on counters (deleteCounters)", 0))
+ statsd.counters.default_options |= STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED;
+
+ if(config_get_boolean(CONFIG_SECTION_STATSD, "gaps on meters (deleteMeters)", 0))
+ statsd.meters.default_options |= STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED;
+
+ if(config_get_boolean(CONFIG_SECTION_STATSD, "gaps on sets (deleteSets)", 0))
+ statsd.sets.default_options |= STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED;
+
+ if(config_get_boolean(CONFIG_SECTION_STATSD, "gaps on histograms (deleteHistograms)", 0))
+ statsd.histograms.default_options |= STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED;
+
+ if(config_get_boolean(CONFIG_SECTION_STATSD, "gaps on timers (deleteTimers)", 0))
+ statsd.timers.default_options |= STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED;
+
+#ifdef STATSD_MULTITHREADED
+ statsd.threads = (int)config_get_number(CONFIG_SECTION_STATSD, "threads", processors);
+ if(statsd.threads < 1) {
+ error("STATSD: Invalid number of threads %d, using %d", statsd.threads, processors);
+ statsd.threads = processors;
+ config_set_number(CONFIG_SECTION_STATSD, "threads", statsd.threads);
+ }
+#else
+ statsd.threads = 1;
+#endif
+
+ // read custom application definitions
+ {
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s/statsd.d", netdata_configured_config_dir);
+ statsd_readdir(filename);
+ }
+
+ // ----------------------------------------------------------------------------------------------------------------
+ // statsd setup
+
+ if(!statsd.enabled) return NULL;
+
+ statsd_listen_sockets_setup();
+ if(!statsd.sockets.opened) {
+ error("STATSD: No statsd sockets to listen to. statsd will be disabled.");
+ pthread_exit(NULL);
+ }
+
+ pthread_t threads[statsd.threads];
+ int thread_ids[statsd.threads];
+ int i;
+
+ for(i = 0; i < statsd.threads ;i++) {
+ // give each collector a stable id; passing &i would race with the loop counter
+ thread_ids[i] = i;
+ if(pthread_create(&threads[i], NULL, statsd_collector_thread, &thread_ids[i]))
+ error("STATSD: failed to create child thread.");
+
+ else if(pthread_detach(threads[i]))
+ error("STATSD: cannot request detach of child thread.");
+ }
+
+ pthread_cleanup_push(statsd_main_cleanup, threads);
+
+ // ----------------------------------------------------------------------------------------------------------------
+ // statsd monitoring charts
+
+ RRDSET *st_metrics = rrdset_create_localhost(
+ "netdata"
+ , "statsd_metrics"
+ , NULL
+ , "statsd"
+ , NULL
+ , "Metrics in the netdata statsd database"
+ , "metrics"
+ , 132000
+ , statsd.update_every
+ , RRDSET_TYPE_STACKED
+ );
+ RRDDIM *rd_metrics_gauge = rrddim_add(st_metrics, "gauges", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ RRDDIM *rd_metrics_counter = rrddim_add(st_metrics, "counters", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ RRDDIM *rd_metrics_timer = rrddim_add(st_metrics, "timers", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ RRDDIM *rd_metrics_meter = rrddim_add(st_metrics, "meters", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ RRDDIM *rd_metrics_histogram = rrddim_add(st_metrics, "histograms", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ RRDDIM *rd_metrics_set = rrddim_add(st_metrics, "sets", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+
+ RRDSET *st_events = rrdset_create_localhost(
+ "netdata"
+ , "statsd_events"
+ , NULL
+ , "statsd"
+ , NULL
+ , "Events processed by the netdata statsd server"
+ , "events/s"
+ , 132001
+ , statsd.update_every
+ , RRDSET_TYPE_STACKED
+ );
+ RRDDIM *rd_events_gauge = rrddim_add(st_events, "gauges", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ RRDDIM *rd_events_counter = rrddim_add(st_events, "counters", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ RRDDIM *rd_events_timer = rrddim_add(st_events, "timers", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ RRDDIM *rd_events_meter = rrddim_add(st_events, "meters", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ RRDDIM *rd_events_histogram = rrddim_add(st_events, "histograms", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ RRDDIM *rd_events_set = rrddim_add(st_events, "sets", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ RRDDIM *rd_events_unknown = rrddim_add(st_events, "unknown", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ RRDDIM *rd_events_errors = rrddim_add(st_events, "errors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ RRDSET *st_reads = rrdset_create_localhost(
+ "netdata"
+ , "statsd_reads"
+ , NULL
+ , "statsd"
+ , NULL
+ , "Read operations made by the netdata statsd server"
+ , "reads/s"
+ , 132002
+ , statsd.update_every
+ , RRDSET_TYPE_STACKED
+ );
+ RRDDIM *rd_reads_tcp = rrddim_add(st_reads, "tcp", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ RRDDIM *rd_reads_udp = rrddim_add(st_reads, "udp", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ RRDSET *st_bytes = rrdset_create_localhost(
+ "netdata"
+ , "statsd_bytes"
+ , NULL
+ , "statsd"
+ , NULL
+ , "Bytes read by the netdata statsd server"
+ , "kbps"
+ , 132003
+ , statsd.update_every
+ , RRDSET_TYPE_STACKED
+ );
+ RRDDIM *rd_bytes_tcp = rrddim_add(st_bytes, "tcp", NULL, 8, 1024, RRD_ALGORITHM_INCREMENTAL);
+ RRDDIM *rd_bytes_udp = rrddim_add(st_bytes, "udp", NULL, 8, 1024, RRD_ALGORITHM_INCREMENTAL);
+
+ RRDSET *st_packets = rrdset_create_localhost(
+ "netdata"
+ , "statsd_packets"
+ , NULL
+ , "statsd"
+ , NULL
+ , "Network packets processed by the netdata statsd server"
+ , "packets/s"
+ , 132004
+ , statsd.update_every
+ , RRDSET_TYPE_STACKED
+ );
+ RRDDIM *rd_packets_tcp = rrddim_add(st_packets, "tcp", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ RRDDIM *rd_packets_udp = rrddim_add(st_packets, "udp", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ RRDSET *st_pcharts = rrdset_create_localhost(
+ "netdata"
+ , "private_charts"
+ , NULL
+ , "statsd"
+ , NULL
+ , "Private metric charts created by the netdata statsd server"
+ , "charts"
+ , 132010
+ , statsd.update_every
+ , RRDSET_TYPE_AREA
+ );
+ RRDDIM *rd_pcharts = rrddim_add(st_pcharts, "charts", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+
+
+ // ----------------------------------------------------------------------------------------------------------------
+ // statsd thread to turn metrics into charts
+
+ usec_t step = statsd.update_every * USEC_PER_SEC;
+ heartbeat_t hb;
+ heartbeat_init(&hb);
+ for(;;) {
+ usec_t hb_dt = heartbeat_next(&hb, step);
+
+ if(unlikely(netdata_exit))
+ break;
+
+ statsd_flush_index_metrics(&statsd.gauges, statsd_flush_gauge);
+ statsd_flush_index_metrics(&statsd.counters, statsd_flush_counter);
+ statsd_flush_index_metrics(&statsd.meters, statsd_flush_meter);
+ statsd_flush_index_metrics(&statsd.timers, statsd_flush_timer);
+ statsd_flush_index_metrics(&statsd.histograms, statsd_flush_histogram);
+ statsd_flush_index_metrics(&statsd.sets, statsd_flush_set);
+
+ statsd_update_all_app_charts();
+
+ if(unlikely(netdata_exit))
+ break;
+
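+ // hb_dt is zero on the first pass, so the charts are not advanced before their first sample is collected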
+ if(hb_dt) {
+ rrdset_next(st_metrics);
+ rrdset_next(st_events);
+ rrdset_next(st_reads);
+ rrdset_next(st_bytes);
+ rrdset_next(st_packets);
+ rrdset_next(st_pcharts);
+ }
+
+ rrddim_set_by_pointer(st_metrics, rd_metrics_gauge, (collected_number)statsd.gauges.metrics);
+ rrddim_set_by_pointer(st_metrics, rd_metrics_counter, (collected_number)statsd.counters.metrics);
+ rrddim_set_by_pointer(st_metrics, rd_metrics_timer, (collected_number)statsd.timers.metrics);
+ rrddim_set_by_pointer(st_metrics, rd_metrics_meter, (collected_number)statsd.meters.metrics);
+ rrddim_set_by_pointer(st_metrics, rd_metrics_histogram, (collected_number)statsd.histograms.metrics);
+ rrddim_set_by_pointer(st_metrics, rd_metrics_set, (collected_number)statsd.sets.metrics);
+
+ rrddim_set_by_pointer(st_events, rd_events_gauge, (collected_number)statsd.gauges.events);
+ rrddim_set_by_pointer(st_events, rd_events_counter, (collected_number)statsd.counters.events);
+ rrddim_set_by_pointer(st_events, rd_events_timer, (collected_number)statsd.timers.events);
+ rrddim_set_by_pointer(st_events, rd_events_meter, (collected_number)statsd.meters.events);
+ rrddim_set_by_pointer(st_events, rd_events_histogram, (collected_number)statsd.histograms.events);
+ rrddim_set_by_pointer(st_events, rd_events_set, (collected_number)statsd.sets.events);
+ rrddim_set_by_pointer(st_events, rd_events_unknown, (collected_number)statsd.unknown_types);
+ rrddim_set_by_pointer(st_events, rd_events_errors, (collected_number)statsd.socket_errors);
+
+ rrddim_set_by_pointer(st_reads, rd_reads_tcp, (collected_number)statsd.tcp_socket_reads);
+ rrddim_set_by_pointer(st_reads, rd_reads_udp, (collected_number)statsd.udp_socket_reads);
+
+ rrddim_set_by_pointer(st_bytes, rd_bytes_tcp, (collected_number)statsd.tcp_bytes_read);
+ rrddim_set_by_pointer(st_bytes, rd_bytes_udp, (collected_number)statsd.udp_bytes_read);
+
+ rrddim_set_by_pointer(st_packets, rd_packets_tcp, (collected_number)statsd.tcp_packets_received);
+ rrddim_set_by_pointer(st_packets, rd_packets_udp, (collected_number)statsd.udp_packets_received);
+
+ rrddim_set_by_pointer(st_pcharts, rd_pcharts, (collected_number)statsd.private_charts);
+
+ if(unlikely(netdata_exit))
+ break;
+
+ rrdset_done(st_metrics);
+ rrdset_done(st_events);
+ rrdset_done(st_reads);
+ rrdset_done(st_bytes);
+ rrdset_done(st_packets);
+ rrdset_done(st_pcharts);
+
+ if(unlikely(netdata_exit))
+ break;
+ }
+
+ pthread_cleanup_pop(1);
+
+ pthread_exit(NULL);
+ return NULL;
+}
diff --git a/src/statsd.h b/src/statsd.h
new file mode 100644
index 00000000..17af098e
--- /dev/null
+++ b/src/statsd.h
@@ -0,0 +1,9 @@
+#ifndef NETDATA_STATSD_H
+#define NETDATA_STATSD_H
+
+#define STATSD_LISTEN_PORT 8125
+#define STATSD_LISTEN_BACKLOG 4096
+
+extern void *statsd_main(void *ptr);
+
+#endif //NETDATA_STATSD_H
diff --git a/src/storage_number.h b/src/storage_number.h
index 74d24a32..34ed0d89 100644
--- a/src/storage_number.h
+++ b/src/storage_number.h
@@ -29,6 +29,7 @@ typedef uint32_t storage_number;
// extract the flags
#define get_storage_number_flags(value) ((((storage_number)value) & (1 << 24)) | (((storage_number)value) & (2 << 24)) | (((storage_number)value) & (4 << 24)))
+#define SN_EMPTY_SLOT 0x00000000
// checks
#define does_storage_number_exist(value) ((get_storage_number_flags(value) != 0)?1:0)
diff --git a/src/sys_fs_cgroup.c b/src/sys_fs_cgroup.c
index 8f31527d..0f9c8854 100644
--- a/src/sys_fs_cgroup.c
+++ b/src/sys_fs_cgroup.c
@@ -147,11 +147,17 @@ void read_cgroup_plugin_configuration() {
enabled_cgroup_patterns = simple_pattern_create(
config_get("plugin:cgroups", "enable by default cgroups matching",
- " /system.slice/docker-*.scope "
- " /qemu.slice/*.scope " // #1949
+ // ----------------------------------------------------------------
+
+ " !*/init.scope " // ignore init.scope
+ " *.scope " // we need all *.scope for sure
+
+ // ----------------------------------------------------------------
+
+ " !*/vcpu* " // libvirtd adds these sub-cgroups
+ " !*/emulator " // libvirtd adds these sub-cgroups
" !*.mount "
" !*.partition "
- " !*.scope "
" !*.service "
" !*.slice "
" !*.swap "
@@ -171,12 +177,14 @@ void read_cgroup_plugin_configuration() {
enabled_cgroup_paths = simple_pattern_create(
config_get("plugin:cgroups", "search for cgroups in subpaths matching",
- " !*-qemu " // #345
+ " !*/init.scope " // ignore init.scope
+ " !*-qemu " // #345
" !/init.scope "
" !/system "
" !/systemd "
" !/user "
" !/user.slice "
+ " !/lxc/*/ns/* " // #2161
" * "
), SIMPLE_PATTERN_EXACT);
@@ -185,13 +193,13 @@ void read_cgroup_plugin_configuration() {
enabled_cgroup_renames = simple_pattern_create(
config_get("plugin:cgroups", "run script to rename cgroups matching",
- " /qemu.slice/*.scope " // #1949
+ " *.scope "
" *docker* "
" *lxc* "
+ " *qemu* "
" !/ "
" !*.mount "
" !*.partition "
- " !*.scope "
" !*.service "
" !*.slice "
" !*.swap "
@@ -893,20 +901,20 @@ static inline struct cgroup *cgroup_add(const char *id) {
static inline void cgroup_free(struct cgroup *cg) {
debug(D_CGROUP, "Removing cgroup '%s' with chart id '%s' (was %s and %s)", cg->id, cg->chart_id, (cg->enabled)?"enabled":"disabled", (cg->available)?"available":"not available");
- if(cg->st_cpu) rrdset_flag_set(cg->st_cpu, RRDSET_FLAG_OBSOLETE);
- if(cg->st_cpu_per_core) rrdset_flag_set(cg->st_cpu_per_core, RRDSET_FLAG_OBSOLETE);
- if(cg->st_mem) rrdset_flag_set(cg->st_mem, RRDSET_FLAG_OBSOLETE);
- if(cg->st_writeback) rrdset_flag_set(cg->st_writeback, RRDSET_FLAG_OBSOLETE);
- if(cg->st_mem_activity) rrdset_flag_set(cg->st_mem_activity, RRDSET_FLAG_OBSOLETE);
- if(cg->st_pgfaults) rrdset_flag_set(cg->st_pgfaults, RRDSET_FLAG_OBSOLETE);
- if(cg->st_mem_usage) rrdset_flag_set(cg->st_mem_usage, RRDSET_FLAG_OBSOLETE);
- if(cg->st_mem_failcnt) rrdset_flag_set(cg->st_mem_failcnt, RRDSET_FLAG_OBSOLETE);
- if(cg->st_io) rrdset_flag_set(cg->st_io, RRDSET_FLAG_OBSOLETE);
- if(cg->st_serviced_ops) rrdset_flag_set(cg->st_serviced_ops, RRDSET_FLAG_OBSOLETE);
- if(cg->st_throttle_io) rrdset_flag_set(cg->st_throttle_io, RRDSET_FLAG_OBSOLETE);
- if(cg->st_throttle_serviced_ops) rrdset_flag_set(cg->st_throttle_serviced_ops, RRDSET_FLAG_OBSOLETE);
- if(cg->st_queued_ops) rrdset_flag_set(cg->st_queued_ops, RRDSET_FLAG_OBSOLETE);
- if(cg->st_merged_ops) rrdset_flag_set(cg->st_merged_ops, RRDSET_FLAG_OBSOLETE);
+ if(cg->st_cpu) rrdset_is_obsolete(cg->st_cpu);
+ if(cg->st_cpu_per_core) rrdset_is_obsolete(cg->st_cpu_per_core);
+ if(cg->st_mem) rrdset_is_obsolete(cg->st_mem);
+ if(cg->st_writeback) rrdset_is_obsolete(cg->st_writeback);
+ if(cg->st_mem_activity) rrdset_is_obsolete(cg->st_mem_activity);
+ if(cg->st_pgfaults) rrdset_is_obsolete(cg->st_pgfaults);
+ if(cg->st_mem_usage) rrdset_is_obsolete(cg->st_mem_usage);
+ if(cg->st_mem_failcnt) rrdset_is_obsolete(cg->st_mem_failcnt);
+ if(cg->st_io) rrdset_is_obsolete(cg->st_io);
+ if(cg->st_serviced_ops) rrdset_is_obsolete(cg->st_serviced_ops);
+ if(cg->st_throttle_io) rrdset_is_obsolete(cg->st_throttle_io);
+ if(cg->st_throttle_serviced_ops) rrdset_is_obsolete(cg->st_throttle_serviced_ops);
+ if(cg->st_queued_ops) rrdset_is_obsolete(cg->st_queued_ops);
+ if(cg->st_merged_ops) rrdset_is_obsolete(cg->st_merged_ops);
freez(cg->cpuacct_usage.cpu_percpu);
diff --git a/src/unit_test.c b/src/unit_test.c
index 0866d215..9b008138 100644
--- a/src/unit_test.c
+++ b/src/unit_test.c
@@ -17,8 +17,8 @@ int check_storage_number(calculated_number n, int debug) {
if(dcdiff < 0) dcdiff = -dcdiff;
- size_t len = print_calculated_number(buffer, d);
- calculated_number p = str2l(buffer);
+ size_t len = (size_t)print_calculated_number(buffer, d);
+ calculated_number p = str2ld(buffer, NULL);
calculated_number pdiff = n - p;
calculated_number pcdiff = pdiff * 100.0 / n;
if(pcdiff < 0) pcdiff = -pcdiff;
@@ -229,6 +229,45 @@ int unit_test_storage()
return r;
}
+int unit_test_str2ld() {
+ char *values[] = {
+ "1.234567", "-35.6", "0.00123", "23842384234234.2", ".1", "1.2e-10",
+ "hello", "1wrong", "nan", "inf", NULL
+ };
+
+ int i;
+ for(i = 0; values[i] ; i++) {
+ char *e_mine = "hello", *e_sys = "world";
+ long double mine = str2ld(values[i], &e_mine);
+ long double sys = strtold(values[i], &e_sys);
+
+ if(isnan(mine)) {
+ if(!isnan(sys)) {
+ fprintf(stderr, "Value '%s' is parsed as %Lf, but system believes it is %Lf.\n", values[i], mine, sys);
+ return -1;
+ }
+ }
+ else if(isinf(mine)) {
+ if(!isinf(sys)) {
+ fprintf(stderr, "Value '%s' is parsed as %Lf, but system believes it is %Lf.\n", values[i], mine, sys);
+ return -1;
+ }
+ }
+ else if(mine != sys && fabsl(mine - sys) > 0.000001) {
+ fprintf(stderr, "Value '%s' is parsed as %Lf, but the system believes it is %Lf, delta %Lf.\n", values[i], mine, sys, sys-mine);
+ return -1;
+ }
+
+ if(e_mine != e_sys) {
+ fprintf(stderr, "Value '%s' is parsed correctly, but endptr is not right\n", values[i]);
+ return -1;
+ }
+
+ fprintf(stderr, "str2ld() parsed value '%s' exactly the same way with strtold(), returned %Lf vs %Lf\n", values[i], mine, sys);
+ }
+
+ return 0;
+}
// --------------------------------------------------------------------------------------------------------------------
@@ -244,7 +283,7 @@ struct test {
int update_every;
unsigned long long multiplier;
unsigned long long divisor;
- int algorithm;
+ RRD_ALGORITHM algorithm;
unsigned long feed_entries;
unsigned long result_entries;
@@ -884,7 +923,7 @@ int run_test(struct test *test)
{
fprintf(stderr, "\nRunning test '%s':\n%s\n", test->name, test->description);
- default_rrd_memory_mode = RRD_MEMORY_MODE_RAM;
+ default_rrd_memory_mode = RRD_MEMORY_MODE_ALLOC;
default_rrd_update_every = test->update_every;
char name[101];
@@ -916,7 +955,9 @@ int run_test(struct test *test)
(float)time_now / 1000000.0,
((calculated_number)test->feed[c].value - (calculated_number)last) * (calculated_number)test->multiplier / (calculated_number)test->divisor,
(((calculated_number)test->feed[c].value - (calculated_number)last) * (calculated_number)test->multiplier / (calculated_number)test->divisor) / (calculated_number)test->feed[c].microseconds * (calculated_number)1000000);
- rrdset_next_usec_unfiltered(st, test->feed[c].microseconds);
+
+ // rrdset_next_usec_unfiltered(st, test->feed[c].microseconds);
+ st->usec_since_last_update = test->feed[c].microseconds;
}
else {
fprintf(stderr, " > %s: feeding position %lu\n", test->name, c+1);
@@ -1091,7 +1132,7 @@ int unit_test(long delay, long shift)
snprintfz(name, 100, "unittest-%d-%ld-%ld", repeat, delay, shift);
//debug_flags = 0xffffffff;
- default_rrd_memory_mode = RRD_MEMORY_MODE_RAM;
+ default_rrd_memory_mode = RRD_MEMORY_MODE_ALLOC;
default_rrd_update_every = 1;
int do_abs = 1;
@@ -1125,7 +1166,8 @@ int unit_test(long delay, long shift)
fprintf(stderr, "\n\nLOOP = %lu, DELAY = %ld, VALUE = " COLLECTED_NUMBER_FORMAT "\n", c, delay, i);
if(c) {
- rrdset_next_usec_unfiltered(st, delay);
+ // rrdset_next_usec_unfiltered(st, delay);
+ st->usec_since_last_update = delay;
}
if(do_abs) rrddim_set(st, "absolute", i);
if(do_inc) rrddim_set(st, "incremental", i);
diff --git a/src/unit_test.h b/src/unit_test.h
index 916ad71f..3240b5f0 100644
--- a/src/unit_test.h
+++ b/src/unit_test.h
@@ -4,5 +4,6 @@
extern int unit_test_storage(void);
extern int unit_test(long delay, long shift);
extern int run_all_mockup_tests(void);
+extern int unit_test_str2ld(void);
#endif /* NETDATA_UNIT_TEST_H */
diff --git a/src/web_api_v1.c b/src/web_api_v1.c
index 0acc43ac..3ffd8c32 100644
--- a/src/web_api_v1.c
+++ b/src/web_api_v1.c
@@ -208,6 +208,10 @@ inline int web_client_api_request_v1_charts(RRDHOST *host, struct web_client *w,
inline int web_client_api_request_v1_allmetrics(RRDHOST *host, struct web_client *w, char *url) {
int format = ALLMETRICS_SHELL;
+ int help = 0, types = 0, names = backend_send_names; // prometheus options
+ const char *prometheus_server = w->client_ip;
+ uint32_t prometheus_options = backend_options;
+ const char *prometheus_prefix = backend_prefix;
while(url) {
char *value = mystrsep(&url, "?&");
@@ -222,11 +226,40 @@ inline int web_client_api_request_v1_allmetrics(RRDHOST *host, struct web_client
format = ALLMETRICS_SHELL;
else if(!strcmp(value, ALLMETRICS_FORMAT_PROMETHEUS))
format = ALLMETRICS_PROMETHEUS;
+ else if(!strcmp(value, ALLMETRICS_FORMAT_PROMETHEUS_ALL_HOSTS))
+ format = ALLMETRICS_PROMETHEUS_ALL_HOSTS;
else if(!strcmp(value, ALLMETRICS_FORMAT_JSON))
format = ALLMETRICS_JSON;
else
format = 0;
}
+ else if(!strcmp(name, "help")) {
+ if(!strcmp(value, "yes"))
+ help = 1;
+ else
+ help = 0;
+ }
+ else if(!strcmp(name, "types")) {
+ if(!strcmp(value, "yes"))
+ types = 1;
+ else
+ types = 0;
+ }
+ else if(!strcmp(name, "names")) {
+ if(!strcmp(value, "yes"))
+ names = 1;
+ else
+ names = 0;
+ }
+ else if(!strcmp(name, "server")) {
+ prometheus_server = value;
+ }
+ else if(!strcmp(name, "prefix")) {
+ prometheus_prefix = value;
+ }
+ else if(!strcmp(name, "data") || !strcmp(name, "source") || !strcmp(name, "data source") || !strcmp(name, "data-source") || !strcmp(name, "data_source") || !strcmp(name, "datasource")) {
+ prometheus_options = backend_parse_data_source(value, prometheus_options);
+ }
}
buffer_flush(w->response.data);
@@ -245,12 +278,17 @@ inline int web_client_api_request_v1_allmetrics(RRDHOST *host, struct web_client
case ALLMETRICS_PROMETHEUS:
w->response.data->contenttype = CT_PROMETHEUS;
- rrd_stats_api_v1_charts_allmetrics_prometheus(host, w->response.data);
+ rrd_stats_api_v1_charts_allmetrics_prometheus_single_host(host, w->response.data, prometheus_server, prometheus_prefix, prometheus_options, help, types, names);
+ return 200;
+
+ case ALLMETRICS_PROMETHEUS_ALL_HOSTS:
+ w->response.data->contenttype = CT_PROMETHEUS;
+ rrd_stats_api_v1_charts_allmetrics_prometheus_all_hosts(host, w->response.data, prometheus_server, prometheus_prefix, prometheus_options, help, types, names);
return 200;
default:
w->response.data->contenttype = CT_TEXT_PLAIN;
- buffer_strcat(w->response.data, "Which format? '" ALLMETRICS_FORMAT_SHELL "', '" ALLMETRICS_FORMAT_PROMETHEUS "' and '" ALLMETRICS_FORMAT_JSON "' are currently supported.");
+ buffer_strcat(w->response.data, "Which format? '" ALLMETRICS_FORMAT_SHELL "', '" ALLMETRICS_FORMAT_PROMETHEUS "', '" ALLMETRICS_FORMAT_PROMETHEUS_ALL_HOSTS "' and '" ALLMETRICS_FORMAT_JSON "' are currently supported.");
return 400;
}
}
diff --git a/src/web_client.c b/src/web_client.c
index b5b25899..7da08070 100644
--- a/src/web_client.c
+++ b/src/web_client.c
@@ -57,13 +57,7 @@ struct web_client *web_client_create(int listener) {
w->mode = WEB_CLIENT_MODE_NORMAL;
{
- struct sockaddr *sadr;
- socklen_t addrlen;
-
- sadr = (struct sockaddr*) &w->clientaddr;
- addrlen = sizeof(w->clientaddr);
-
- w->ifd = accept4(listener, sadr, &addrlen, SOCK_NONBLOCK);
+ w->ifd = accept_socket(listener, SOCK_NONBLOCK, w->client_ip, sizeof(w->client_ip), w->client_port, sizeof(w->client_port));
if (w->ifd == -1) {
error("%llu: Cannot accept new incoming connection.", w->id);
freez(w);
@@ -71,33 +65,6 @@ struct web_client *web_client_create(int listener) {
}
w->ofd = w->ifd;
- if(getnameinfo(sadr, addrlen, w->client_ip, NI_MAXHOST, w->client_port, NI_MAXSERV, NI_NUMERICHOST | NI_NUMERICSERV) != 0) {
- error("Cannot getnameinfo() on received client connection.");
- strncpyz(w->client_ip, "UNKNOWN", NI_MAXHOST);
- strncpyz(w->client_port, "UNKNOWN", NI_MAXSERV);
- }
- w->client_ip[NI_MAXHOST] = '\0';
- w->client_port[NI_MAXSERV] = '\0';
-
- switch(sadr->sa_family) {
- case AF_INET:
- debug(D_WEB_CLIENT_ACCESS, "%llu: New IPv4 web client from %s port %s on socket %d.", w->id, w->client_ip, w->client_port, w->ifd);
- break;
-
- case AF_INET6:
- if(strncmp(w->client_ip, "::ffff:", 7) == 0) {
- memmove(w->client_ip, &w->client_ip[7], strlen(&w->client_ip[7]) + 1);
- debug(D_WEB_CLIENT_ACCESS, "%llu: New IPv4 web client from %s port %s on socket %d.", w->id, w->client_ip, w->client_port, w->ifd);
- }
- else
- debug(D_WEB_CLIENT_ACCESS, "%llu: New IPv6 web client from %s port %s on socket %d.", w->id, w->client_ip, w->client_port, w->ifd);
- break;
-
- default:
- debug(D_WEB_CLIENT_ACCESS, "%llu: New UNKNOWN web client from %s port %s on socket %d.", w->id, w->client_ip, w->client_port, w->ifd);
- break;
- }
-
int flag = 1;
if(setsockopt(w->ofd, IPPROTO_TCP, TCP_NODELAY, (char *) &flag, sizeof(int)) != 0)
error("%llu: failed to enable TCP_NODELAY on socket.", w->id);
@@ -388,8 +355,8 @@ int mysendfile(struct web_client *w, char *filename) {
return 404;
}
}
- if(fcntl(w->ifd, F_SETFL, O_NONBLOCK) < 0)
- error("%llu: Cannot set O_NONBLOCK on file '%s'.", w->id, webfilename);
+
+ sock_setnonblock(w->ifd);
// pick a Content-Type for the file
if(strstr(filename, ".html") != NULL) w->response.data->contenttype = CT_TEXT_HTML;
@@ -995,13 +962,22 @@ static inline void web_client_send_http_header(struct web_client *w) {
web_client_crock_socket(w);
- ssize_t bytes = send(w->ofd, buffer_tostring(w->response.header_output), buffer_strlen(w->response.header_output), 0);
+ size_t count = 0;
+ ssize_t bytes;
+ while((bytes = send(w->ofd, buffer_tostring(w->response.header_output), buffer_strlen(w->response.header_output), 0)) == -1) {
+ count++;
+
+ if(count > 100 || (errno != EAGAIN && errno != EWOULDBLOCK)) {
+ error("Cannot send HTTP headers to web client.");
+ break;
+ }
+ }
+
if(bytes != (ssize_t) buffer_strlen(w->response.header_output)) {
if(bytes > 0)
w->stats_sent_bytes += bytes;
- debug(D_WEB_CLIENT, "%llu: HTTP Header failed to be sent (I sent %zu bytes but the system sent %zd bytes). Closing web client."
- , w->id
+ error("HTTP headers failed to be sent (I sent %zu bytes but the system sent %zd bytes). Closing web client."
, buffer_strlen(w->response.header_output)
, bytes);
diff --git a/src/web_client.h b/src/web_client.h
index 70c5b1ff..617917df 100644
--- a/src/web_client.h
+++ b/src/web_client.h
@@ -83,7 +83,6 @@ struct web_client {
char cookie2[COOKIE_MAX+1];
char origin[ORIGIN_MAX+1];
- struct sockaddr_storage clientaddr;
struct response response;
size_t stats_received_bytes;
diff --git a/src/web_server.c b/src/web_server.c
index 593a82a5..491cd11a 100644
--- a/src/web_server.c
+++ b/src/web_server.c
@@ -1,15 +1,14 @@
#include "common.h"
-int listen_backlog = LISTEN_BACKLOG;
-size_t listen_fds_count = 0;
-int listen_fds[MAX_LISTEN_FDS] = { [0 ... 99] = -1 };
-char *listen_fds_names[MAX_LISTEN_FDS] = { [0 ... 99] = NULL };
-int listen_port = LISTEN_PORT;
+static LISTEN_SOCKETS api_sockets = {
+ .config_section = CONFIG_SECTION_WEB,
+ .default_bind_to = "*",
+ .default_port = API_LISTEN_PORT,
+ .backlog = API_LISTEN_BACKLOG
+};
WEB_SERVER_MODE web_server_mode = WEB_SERVER_MODE_MULTI_THREADED;
-static int shown_server_socket_error = 0;
-
#ifdef NETDATA_INTERNAL_CHECKS
static void log_allocations(void)
{
@@ -47,42 +46,7 @@ static void log_allocations(void)
}
#endif /* NETDATA_INTERNAL_CHECKS */
-#ifndef HAVE_ACCEPT4
-int accept4(int sock, struct sockaddr *addr, socklen_t *addrlen, int flags) {
- int fd = accept(sock, addr, addrlen);
- int newflags = 0;
-
- if (fd < 0) return fd;
-
- if (flags & SOCK_NONBLOCK) {
- newflags |= O_NONBLOCK;
- flags &= ~SOCK_NONBLOCK;
- }
-
-#ifdef SOCK_CLOEXEC
-#ifdef O_CLOEXEC
- if (flags & SOCK_CLOEXEC) {
- newflags |= O_CLOEXEC;
- flags &= ~SOCK_CLOEXEC;
- }
-#endif
-#endif
-
- if (flags) {
- errno = -EINVAL;
- return -1;
- }
-
- if (fcntl(fd, F_SETFL, newflags) < 0) {
- int saved_errno = errno;
- close(fd);
- errno = saved_errno;
- return -1;
- }
-
- return fd;
-}
-#endif
+// --------------------------------------------------------------------------------------
WEB_SERVER_MODE web_server_mode_id(const char *mode) {
if(!strcmp(mode, "none"))
@@ -107,277 +71,15 @@ const char *web_server_mode_name(WEB_SERVER_MODE id) {
}
}
-int create_listen_socket4(const char *ip, int port, int listen_backlog) {
- int sock;
- int sockopt = 1;
-
- debug(D_LISTENER, "IPv4 creating new listening socket on ip '%s' port %d", ip, port);
-
- sock = socket(AF_INET, SOCK_STREAM, 0);
- if(sock < 0) {
- error("IPv4 socket() on ip '%s' port %d failed.", ip, port);
- shown_server_socket_error = 1;
- return -1;
- }
-
- /* avoid "address already in use" */
- if(setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, (void*)&sockopt, sizeof(sockopt)) != 0)
- error("Cannot set SO_REUSEADDR on ip '%s' port's %d.", ip, port);
-
- struct sockaddr_in name;
- memset(&name, 0, sizeof(struct sockaddr_in));
- name.sin_family = AF_INET;
- name.sin_port = htons (port);
-
- int ret = inet_pton(AF_INET, ip, (void *)&name.sin_addr.s_addr);
- if(ret != 1) {
- error("Failed to convert IP '%s' to a valid IPv4 address.", ip);
- shown_server_socket_error = 1;
- close(sock);
- return -1;
- }
-
- if(bind (sock, (struct sockaddr *) &name, sizeof (name)) < 0) {
- close(sock);
- error("IPv4 bind() on ip '%s' port %d failed.", ip, port);
- shown_server_socket_error = 1;
- return -1;
- }
-
- if(listen(sock, listen_backlog) < 0) {
- close(sock);
- error("IPv4 listen() on ip '%s' port %d failed.", ip, port);
- shown_server_socket_error = 1;
- return -1;
- }
-
- debug(D_LISTENER, "Listening on IPv4 ip '%s' port %d", ip, port);
- return sock;
-}
-
-int create_listen_socket6(const char *ip, int port, int listen_backlog) {
- int sock = -1;
- int sockopt = 1;
- int ipv6only = 1;
-
- debug(D_LISTENER, "IPv6 creating new listening socket on ip '%s' port %d", ip, port);
-
- sock = socket(AF_INET6, SOCK_STREAM, 0);
- if (sock < 0) {
- error("IPv6 socket() on ip '%s' port %d failed.", ip, port);
- shown_server_socket_error = 1;
- return -1;
- }
-
- /* avoid "address already in use" */
- if(setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, (void*)&sockopt, sizeof(sockopt)) != 0)
- error("Cannot set SO_REUSEADDR on ip '%s' port's %d.", ip, port);
-
- /* IPv6 only */
- if(setsockopt(sock, IPPROTO_IPV6, IPV6_V6ONLY, (void*)&ipv6only, sizeof(ipv6only)) != 0)
- error("Cannot set IPV6_V6ONLY on ip '%s' port's %d.", ip, port);
-
- struct sockaddr_in6 name;
- memset(&name, 0, sizeof(struct sockaddr_in6));
- name.sin6_family = AF_INET6;
- name.sin6_port = htons ((uint16_t) port);
-
- int ret = inet_pton(AF_INET6, ip, (void *)&name.sin6_addr.s6_addr);
- if(ret != 1) {
- error("Failed to convert IP '%s' to a valid IPv6 address.", ip);
- shown_server_socket_error = 1;
- close(sock);
- return -1;
- }
-
- name.sin6_scope_id = 0;
-
- if (bind (sock, (struct sockaddr *) &name, sizeof (name)) < 0) {
- close(sock);
- error("IPv6 bind() on ip '%s' port %d failed.", ip, port);
- shown_server_socket_error = 1;
- return -1;
- }
-
- if (listen(sock, listen_backlog) < 0) {
- close(sock);
- error("IPv6 listen() on ip '%s' port %d failed.", ip, port);
- shown_server_socket_error = 1;
- return -1;
- }
-
- debug(D_LISTENER, "Listening on IPv6 ip '%s' port %d", ip, port);
- return sock;
-}
-
-static inline int add_listen_socket(int fd, const char *ip, int port) {
- if(listen_fds_count >= MAX_LISTEN_FDS) {
- error("Too many listening sockets. Failed to add listening socket at ip '%s' port %d", ip, port);
- shown_server_socket_error = 1;
- close(fd);
- return -1;
- }
-
- listen_fds[listen_fds_count] = fd;
-
- char buffer[100 + 1];
- snprintfz(buffer, 100, "[%s]:%d", ip, port);
- listen_fds_names[listen_fds_count] = strdupz(buffer);
-
- listen_fds_count++;
- return 0;
-}
-
-int is_listen_socket(int fd) {
- size_t i;
- for(i = 0; i < listen_fds_count ;i++)
- if(listen_fds[i] == fd) return 1;
-
- return 0;
-}
-
-static inline void close_listen_sockets(void) {
- size_t i;
- for(i = 0; i < listen_fds_count ;i++) {
- close(listen_fds[i]);
- listen_fds[i] = -1;
-
- freez(listen_fds_names[i]);
- listen_fds_names[i] = NULL;
- }
-
- listen_fds_count = 0;
-}
-
-static inline int bind_to_one(const char *definition, int default_port, int listen_backlog) {
- int added = 0;
- struct addrinfo hints;
- struct addrinfo *result = NULL, *rp = NULL;
-
- char buffer[strlen(definition) + 1];
- strcpy(buffer, definition);
-
- char buffer2[10 + 1];
- snprintfz(buffer2, 10, "%d", default_port);
-
- char *ip = buffer, *port = buffer2;
-
- char *e = ip;
- if(*e == '[') {
- e = ++ip;
- while(*e && *e != ']') e++;
- if(*e == ']') {
- *e = '\0';
- e++;
- }
- }
- else {
- while(*e && *e != ':') e++;
- }
-
- if(*e == ':') {
- port = e + 1;
- *e = '\0';
- }
-
- if(!*ip || *ip == '*' || !strcmp(ip, "any") || !strcmp(ip, "all"))
- ip = NULL;
- if(!*port)
- port = buffer2;
-
- memset(&hints, 0, sizeof(struct addrinfo));
- hints.ai_family = AF_UNSPEC; /* Allow IPv4 or IPv6 */
- hints.ai_socktype = SOCK_DGRAM; /* Datagram socket */
- hints.ai_flags = AI_PASSIVE; /* For wildcard IP address */
- hints.ai_protocol = 0; /* Any protocol */
- hints.ai_canonname = NULL;
- hints.ai_addr = NULL;
- hints.ai_next = NULL;
-
- int r = getaddrinfo(ip, port, &hints, &result);
- if (r != 0) {
- error("getaddrinfo('%s', '%s'): %s\n", ip, port, gai_strerror(r));
- return -1;
- }
-
- for (rp = result; rp != NULL; rp = rp->ai_next) {
- int fd = -1;
-
- char rip[INET_ADDRSTRLEN + INET6_ADDRSTRLEN] = "INVALID";
- int rport = default_port;
-
- switch (rp->ai_addr->sa_family) {
- case AF_INET: {
- struct sockaddr_in *sin = (struct sockaddr_in *) rp->ai_addr;
- inet_ntop(AF_INET, &sin->sin_addr, rip, INET_ADDRSTRLEN);
- rport = ntohs(sin->sin_port);
- fd = create_listen_socket4(rip, rport, listen_backlog);
- break;
- }
-
- case AF_INET6: {
- struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) rp->ai_addr;
- inet_ntop(AF_INET6, &sin6->sin6_addr, rip, INET6_ADDRSTRLEN);
- rport = ntohs(sin6->sin6_port);
- fd = create_listen_socket6(rip, rport, listen_backlog);
- break;
- }
- }
-
- if (fd == -1)
- error("Cannot bind to ip '%s', port %d", rip, rport);
- else {
- add_listen_socket(fd, rip, rport);
- added++;
- }
- }
-
- freeaddrinfo(result);
-
- return added;
-}
-
-int create_listen_sockets(void) {
- shown_server_socket_error = 0;
-
- listen_backlog = (int) config_get_number(CONFIG_SECTION_WEB, "listen backlog", LISTEN_BACKLOG);
-
- listen_port = (int) config_get_number(CONFIG_SECTION_WEB, "default port", LISTEN_PORT);
- if(listen_port < 1 || listen_port > 65535) {
- error("Invalid listen port %d given. Defaulting to %d.", listen_port, LISTEN_PORT);
- listen_port = (int) config_set_number(CONFIG_SECTION_WEB, "default port", LISTEN_PORT);
- }
- debug(D_OPTIONS, "Default listen port set to %d.", listen_port);
-
- char *s = config_get(CONFIG_SECTION_WEB, "bind to", "*");
- while(*s) {
- char *e = s;
-
- // skip separators, moving both s(tart) and e(nd)
- while(isspace(*e) || *e == ',') s = ++e;
-
- // move e(nd) to the first separator
- while(*e && !isspace(*e) && *e != ',') e++;
-
- // is there anything?
- if(!*s || s == e) break;
-
- char buf[e - s + 1];
- strncpyz(buf, s, e - s);
- bind_to_one(buf, listen_port, listen_backlog);
+// --------------------------------------------------------------------------------------
- s = e;
- }
+int api_listen_sockets_setup(void) {
+ int socks = listen_sockets_setup(&api_sockets);
- if(!listen_fds_count)
- fatal("Cannot listen on any socket. Exiting...");
- else if(shown_server_socket_error) {
- size_t i;
- for(i = 0; i < listen_fds_count ;i++)
- info("Listen socket %s opened.", listen_fds_names[i]);
- }
+ if(!socks)
+ fatal("LISTENER: Cannot listen on any API socket. Exiting...");
- return (int)listen_fds_count;
+ return socks;
}
// --------------------------------------------------------------------------------------
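
The per-protocol listeners removed above (create_listen_socket4/6, bind_to_one,
create_listen_sockets) are replaced by a shared LISTEN_SOCKETS facility. Two
details of the removed code are worth flagging: bind_to_one() filled its
getaddrinfo() hints with SOCK_DGRAM even though the listeners are TCP
(SOCK_STREAM), and all state lived in global arrays capped by MAX_LISTEN_FDS.
The definition of the new structure is not part of this patch; judging only
from the fields used here (opened, fds, fds_names), it presumably looks
roughly like this sketch:

    /* hypothetical sketch, inferred from usage in this patch */
    typedef struct listen_sockets {
        size_t opened;                        /* sockets actually listening */
        int    fds[MAX_LISTEN_SOCKETS];       /* listening fds (cap name assumed) */
        char  *fds_names[MAX_LISTEN_SOCKETS]; /* "[ip]:port" labels for logging */
    } LISTEN_SOCKETS;

    extern int  listen_sockets_setup(LISTEN_SOCKETS *ls); /* returns sockets opened */
    extern void listen_sockets_close(LISTEN_SOCKETS *ls);
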
@@ -422,25 +124,25 @@ void *socket_listen_main_multi_threaded(void *ptr) {
if(pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL) != 0)
error("Cannot set pthread cancel state to ENABLE.");
- if(!listen_fds_count)
+ if(!api_sockets.opened)
fatal("LISTENER: No sockets to listen to.");
- struct pollfd *fds = callocz(sizeof(struct pollfd), listen_fds_count);
+ struct pollfd *fds = callocz(sizeof(struct pollfd), api_sockets.opened);
size_t i;
- for(i = 0; i < listen_fds_count ;i++) {
- fds[i].fd = listen_fds[i];
+ for(i = 0; i < api_sockets.opened ;i++) {
+ fds[i].fd = api_sockets.fds[i];
fds[i].events = POLLIN;
fds[i].revents = 0;
- info("Listening on '%s'", (listen_fds_names[i])?listen_fds_names[i]:"UNKNOWN");
+ info("Listening on '%s'", (api_sockets.fds_names[i])?api_sockets.fds_names[i]:"UNKNOWN");
}
int timeout = 10 * 1000;
for(;;) {
// debug(D_WEB_CLIENT, "LISTENER: Waiting...");
- retval = poll(fds, listen_fds_count, timeout);
+ retval = poll(fds, api_sockets.opened, timeout);
if(unlikely(retval == -1)) {
error("LISTENER: poll() failed.");
@@ -453,7 +155,7 @@ void *socket_listen_main_multi_threaded(void *ptr) {
continue;
}
- for(i = 0 ; i < listen_fds_count ; i++) {
+ for(i = 0 ; i < api_sockets.opened ; i++) {
short int revents = fds[i].revents;
// check for new incoming connections
@@ -486,7 +188,7 @@ void *socket_listen_main_multi_threaded(void *ptr) {
}
debug(D_WEB_CLIENT, "LISTENER: exit!");
- close_listen_sockets();
+ listen_sockets_close(&api_sockets);
freez(fds);
@@ -555,7 +257,7 @@ void *socket_listen_main_single_threaded(void *ptr) {
if(pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL) != 0)
error("Cannot set pthread cancel state to ENABLE.");
- if(!listen_fds_count)
+ if(!api_sockets.opened)
fatal("LISTENER: no listen sockets available.");
size_t i;
@@ -568,16 +270,16 @@ void *socket_listen_main_single_threaded(void *ptr) {
FD_ZERO (&efds);
int fdmax = 0;
- for(i = 0; i < listen_fds_count ; i++) {
- if (listen_fds[i] < 0 || listen_fds[i] >= FD_SETSIZE)
- fatal("LISTENER: Listen socket %d is not ready, or invalid.", listen_fds[i]);
+ for(i = 0; i < api_sockets.opened ; i++) {
+ if (api_sockets.fds[i] < 0 || api_sockets.fds[i] >= FD_SETSIZE)
+ fatal("LISTENER: Listen socket %d is not ready, or invalid.", api_sockets.fds[i]);
- info("Listening on '%s'", (listen_fds_names[i])?listen_fds_names[i]:"UNKNOWN");
+ info("Listening on '%s'", (api_sockets.fds_names[i])?api_sockets.fds_names[i]:"UNKNOWN");
- FD_SET(listen_fds[i], &ifds);
- FD_SET(listen_fds[i], &efds);
- if(fdmax < listen_fds[i])
- fdmax = listen_fds[i];
+ FD_SET(api_sockets.fds[i], &ifds);
+ FD_SET(api_sockets.fds[i], &efds);
+ if(fdmax < api_sockets.fds[i])
+ fdmax = api_sockets.fds[i];
}
for(;;) {
@@ -596,10 +298,10 @@ void *socket_listen_main_single_threaded(void *ptr) {
else if(likely(retval)) {
debug(D_WEB_CLIENT_ACCESS, "LISTENER: got something.");
- for(i = 0; i < listen_fds_count ; i++) {
- if (FD_ISSET(listen_fds[i], &rifds)) {
+ for(i = 0; i < api_sockets.opened ; i++) {
+ if (FD_ISSET(api_sockets.fds[i], &rifds)) {
debug(D_WEB_CLIENT_ACCESS, "LISTENER: new connection.");
- w = web_client_create(listen_fds[i]);
+ w = web_client_create(api_sockets.fds[i]);
if (single_threaded_link_client(w, &ifds, &ofds, &ifds, &fdmax) != 0) {
web_client_free(w);
}
@@ -658,7 +360,7 @@ void *socket_listen_main_single_threaded(void *ptr) {
}
debug(D_WEB_CLIENT, "LISTENER: exit!");
- close_listen_sockets();
+ listen_sockets_close(&api_sockets);
static_thread->enabled = 0;
pthread_exit(NULL);
diff --git a/src/web_server.h b/src/web_server.h
index 41dcfcf0..aa293695 100644
--- a/src/web_server.h
+++ b/src/web_server.h
@@ -6,11 +6,12 @@
#define WEB_PATH_DATASOURCE "datasource"
#define WEB_PATH_GRAPH "graph"
-#define LISTEN_PORT 19999
-#define LISTEN_BACKLOG 100
+#ifndef API_LISTEN_PORT
+#define API_LISTEN_PORT 19999
+#endif
-#ifndef MAX_LISTEN_FDS
-#define MAX_LISTEN_FDS 100
+#ifndef API_LISTEN_BACKLOG
+#define API_LISTEN_BACKLOG 4096
#endif
typedef enum web_server_mode {
@@ -24,23 +25,8 @@ extern WEB_SERVER_MODE web_server_mode;
extern WEB_SERVER_MODE web_server_mode_id(const char *mode);
extern const char *web_server_mode_name(WEB_SERVER_MODE id);
-
extern void *socket_listen_main_multi_threaded(void *ptr);
extern void *socket_listen_main_single_threaded(void *ptr);
-extern int create_listen_sockets(void);
-extern int is_listen_socket(int fd);
-
-#ifndef HAVE_ACCEPT4
-extern int accept4(int sock, struct sockaddr *addr, socklen_t *addrlen, int flags);
-
-#ifndef SOCK_NONBLOCK
-#define SOCK_NONBLOCK 00004000
-#endif /* #ifndef SOCK_NONBLOCK */
-
-#ifndef SOCK_CLOEXEC
-#define SOCK_CLOEXEC 02000000
-#endif /* #ifndef SOCK_CLOEXEC */
-
-#endif /* #ifndef HAVE_ACCEPT4 */
+extern int api_listen_sockets_setup(void);
#endif /* NETDATA_WEB_SERVER_H */
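
The accept4() compatibility shim also leaves this header, presumably moving to
the shared socket code with the listeners. Note too that the default backlog
grows from 100 to 4096; on Linux the value actually applied by listen() is
still capped by the net.core.somaxconn sysctl. For reference, a minimal
fallback of the kind the old declaration implied (an assumed sketch, not code
this patch adds) emulates the two flags with fcntl():

    #include <fcntl.h>
    #include <sys/socket.h>

    #ifndef SOCK_NONBLOCK
    #define SOCK_NONBLOCK 00004000
    #endif
    #ifndef SOCK_CLOEXEC
    #define SOCK_CLOEXEC  02000000
    #endif

    #ifndef HAVE_ACCEPT4
    /* assumed sketch: emulate accept4() where libc lacks it */
    int accept4(int sock, struct sockaddr *addr, socklen_t *addrlen, int flags) {
        int fd = accept(sock, addr, addrlen);
        if(fd != -1) {
            if(flags & SOCK_CLOEXEC)
                (void) fcntl(fd, F_SETFD, FD_CLOEXEC);
            if(flags & SOCK_NONBLOCK)
                (void) fcntl(fd, F_SETFL, fcntl(fd, F_GETFL, 0) | O_NONBLOCK);
        }
        return fd; /* unlike the real accept4(), flags are not applied atomically */
    }
    #endif
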
diff --git a/src/zfs_common.c b/src/zfs_common.c
new file mode 100644
index 00000000..7fa05b03
--- /dev/null
+++ b/src/zfs_common.c
@@ -0,0 +1,677 @@
+#include "common.h"
+#include "zfs_common.h"
+
+extern struct arcstats arcstats;
+
+void generate_charts_arcstats(int update_every) {
+
+ // ARC reads
+ unsigned long long aread = arcstats.hits + arcstats.misses;
+
+ // Demand reads
+ unsigned long long dhit = arcstats.demand_data_hits + arcstats.demand_metadata_hits;
+ unsigned long long dmiss = arcstats.demand_data_misses + arcstats.demand_metadata_misses;
+ unsigned long long dread = dhit + dmiss;
+
+ // Prefetch reads
+ unsigned long long phit = arcstats.prefetch_data_hits + arcstats.prefetch_metadata_hits;
+ unsigned long long pmiss = arcstats.prefetch_data_misses + arcstats.prefetch_metadata_misses;
+ unsigned long long pread = phit + pmiss;
+
+ // Metadata reads
+ unsigned long long mhit = arcstats.prefetch_metadata_hits + arcstats.demand_metadata_hits;
+ unsigned long long mmiss = arcstats.prefetch_metadata_misses + arcstats.demand_metadata_misses;
+ unsigned long long mread = mhit + mmiss;
+
+    // l2 reads
+    unsigned long long l2hit = arcstats.l2_hits;
+    unsigned long long l2miss = arcstats.l2_misses;
+    unsigned long long l2read = l2hit + l2miss;
+
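+    // the same hit + miss = read identity holds for each pair above; e.g. with
+    // hypothetical counters demand_data_hits=70, demand_metadata_hits=10,
+    // demand_data_misses=15 and demand_metadata_misses=5:
+    //   dhit = 80, dmiss = 20, dread = 100
+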
+ // --------------------------------------------------------------------
+
+ {
+ static RRDSET *st_arc_size = NULL;
+ static RRDDIM *rd_arc_size = NULL;
+ static RRDDIM *rd_arc_target_size = NULL;
+ static RRDDIM *rd_arc_target_min_size = NULL;
+ static RRDDIM *rd_arc_target_max_size = NULL;
+
+ if (unlikely(!st_arc_size)) {
+ st_arc_size = rrdset_create_localhost(
+ "zfs"
+ , "arc_size"
+ , NULL
+ , ZFS_FAMILY_SIZE
+ , NULL
+ , "ZFS ARC Size"
+ , "MB"
+ , 2000
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+
+ rd_arc_size = rrddim_add(st_arc_size, "size", "arcsz", 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ rd_arc_target_size = rrddim_add(st_arc_size, "target", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ rd_arc_target_min_size = rrddim_add(st_arc_size, "min", "min (hard limit)", 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ rd_arc_target_max_size = rrddim_add(st_arc_size, "max", "max (high water)", 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else
+ rrdset_next(st_arc_size);
+
+ rrddim_set_by_pointer(st_arc_size, rd_arc_size, arcstats.size);
+ rrddim_set_by_pointer(st_arc_size, rd_arc_target_size, arcstats.c);
+ rrddim_set_by_pointer(st_arc_size, rd_arc_target_min_size, arcstats.c_min);
+ rrddim_set_by_pointer(st_arc_size, rd_arc_target_max_size, arcstats.c_max);
+ rrdset_done(st_arc_size);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(likely(l2exist)) {
+ static RRDSET *st_l2_size = NULL;
+ static RRDDIM *rd_l2_size = NULL;
+ static RRDDIM *rd_l2_asize = NULL;
+
+ if (unlikely(!st_l2_size)) {
+ st_l2_size = rrdset_create_localhost(
+ "zfs"
+ , "l2_size"
+ , NULL
+ , ZFS_FAMILY_SIZE
+ , NULL
+ , "ZFS L2 ARC Size"
+ , "MB"
+ , 2000
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+
+ rd_l2_asize = rrddim_add(st_l2_size, "actual", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ rd_l2_size = rrddim_add(st_l2_size, "size", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else
+ rrdset_next(st_l2_size);
+
+ rrddim_set_by_pointer(st_l2_size, rd_l2_size, arcstats.l2_size);
+ rrddim_set_by_pointer(st_l2_size, rd_l2_asize, arcstats.l2_asize);
+ rrdset_done(st_l2_size);
+ }
+
+ // --------------------------------------------------------------------
+
+ {
+ static RRDSET *st_reads = NULL;
+ static RRDDIM *rd_aread = NULL;
+ static RRDDIM *rd_dread = NULL;
+ static RRDDIM *rd_pread = NULL;
+ static RRDDIM *rd_mread = NULL;
+ static RRDDIM *rd_l2read = NULL;
+
+ if (unlikely(!st_reads)) {
+ st_reads = rrdset_create_localhost(
+ "zfs"
+ , "reads"
+ , NULL
+ , ZFS_FAMILY_ACCESSES
+ , NULL
+ , "ZFS Reads"
+ , "reads/s"
+ , 2010
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+
+ rd_aread = rrddim_add(st_reads, "areads", "arc", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_dread = rrddim_add(st_reads, "dreads", "demand", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_pread = rrddim_add(st_reads, "preads", "prefetch", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_mread = rrddim_add(st_reads, "mreads", "metadata", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ if(l2exist)
+ rd_l2read = rrddim_add(st_reads, "l2reads", "l2", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else
+ rrdset_next(st_reads);
+
+ rrddim_set_by_pointer(st_reads, rd_aread, aread);
+ rrddim_set_by_pointer(st_reads, rd_dread, dread);
+ rrddim_set_by_pointer(st_reads, rd_pread, pread);
+ rrddim_set_by_pointer(st_reads, rd_mread, mread);
+
+ if(l2exist)
+ rrddim_set_by_pointer(st_reads, rd_l2read, l2read);
+
+ rrdset_done(st_reads);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(likely(l2exist)) {
+ static RRDSET *st_l2bytes = NULL;
+ static RRDDIM *rd_l2_read_bytes = NULL;
+ static RRDDIM *rd_l2_write_bytes = NULL;
+
+ if (unlikely(!st_l2bytes)) {
+ st_l2bytes = rrdset_create_localhost(
+ "zfs"
+ , "bytes"
+ , NULL
+ , ZFS_FAMILY_ACCESSES
+ , NULL
+ , "ZFS ARC L2 Read/Write Rate"
+ , "kilobytes/s"
+ , 2200
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+
+ rd_l2_read_bytes = rrddim_add(st_l2bytes, "read", NULL, 1, 1024, RRD_ALGORITHM_INCREMENTAL);
+ rd_l2_write_bytes = rrddim_add(st_l2bytes, "write", NULL, -1, 1024, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else
+ rrdset_next(st_l2bytes);
+
+ rrddim_set_by_pointer(st_l2bytes, rd_l2_read_bytes, arcstats.l2_read_bytes);
+ rrddim_set_by_pointer(st_l2bytes, rd_l2_write_bytes, arcstats.l2_write_bytes);
+ rrdset_done(st_l2bytes);
+ }
+
+ // --------------------------------------------------------------------
+
+ {
+ static RRDSET *st_ahits = NULL;
+ static RRDDIM *rd_ahits = NULL;
+ static RRDDIM *rd_amisses = NULL;
+
+ if (unlikely(!st_ahits)) {
+ st_ahits = rrdset_create_localhost(
+ "zfs"
+ , "hits"
+ , NULL
+ , ZFS_FAMILY_EFFICIENCY
+ , NULL
+ , "ZFS ARC Hits"
+ , "percentage"
+ , 2020
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ rd_ahits = rrddim_add(st_ahits, "hits", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ rd_amisses = rrddim_add(st_ahits, "misses", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ }
+ else
+ rrdset_next(st_ahits);
+
+ rrddim_set_by_pointer(st_ahits, rd_ahits, arcstats.hits);
+ rrddim_set_by_pointer(st_ahits, rd_amisses, arcstats.misses);
+ rrdset_done(st_ahits);
+ }
+
+ // --------------------------------------------------------------------
+
+ {
+ static RRDSET *st_dhits = NULL;
+ static RRDDIM *rd_dhits = NULL;
+ static RRDDIM *rd_dmisses = NULL;
+
+ if (unlikely(!st_dhits)) {
+ st_dhits = rrdset_create_localhost(
+ "zfs"
+ , "dhits"
+ , NULL
+ , ZFS_FAMILY_EFFICIENCY
+ , NULL
+ , "ZFS Demand Hits"
+ , "percentage"
+ , 2030
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ rd_dhits = rrddim_add(st_dhits, "hits", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ rd_dmisses = rrddim_add(st_dhits, "misses", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ }
+ else
+ rrdset_next(st_dhits);
+
+ rrddim_set_by_pointer(st_dhits, rd_dhits, dhit);
+ rrddim_set_by_pointer(st_dhits, rd_dmisses, dmiss);
+ rrdset_done(st_dhits);
+ }
+
+ // --------------------------------------------------------------------
+
+ {
+ static RRDSET *st_phits = NULL;
+ static RRDDIM *rd_phits = NULL;
+ static RRDDIM *rd_pmisses = NULL;
+
+ if (unlikely(!st_phits)) {
+ st_phits = rrdset_create_localhost(
+ "zfs"
+ , "phits"
+ , NULL
+ , ZFS_FAMILY_EFFICIENCY
+ , NULL
+ , "ZFS Prefetch Hits"
+ , "percentage"
+ , 2040
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ rd_phits = rrddim_add(st_phits, "hits", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ rd_pmisses = rrddim_add(st_phits, "misses", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ }
+ else
+ rrdset_next(st_phits);
+
+ rrddim_set_by_pointer(st_phits, rd_phits, phit);
+ rrddim_set_by_pointer(st_phits, rd_pmisses, pmiss);
+ rrdset_done(st_phits);
+ }
+
+ // --------------------------------------------------------------------
+
+ {
+ static RRDSET *st_mhits = NULL;
+ static RRDDIM *rd_mhits = NULL;
+ static RRDDIM *rd_mmisses = NULL;
+
+ if (unlikely(!st_mhits)) {
+ st_mhits = rrdset_create_localhost(
+ "zfs"
+ , "mhits"
+ , NULL
+ , ZFS_FAMILY_EFFICIENCY
+ , NULL
+ , "ZFS Metadata Hits"
+ , "percentage"
+ , 2050
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ rd_mhits = rrddim_add(st_mhits, "hits", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ rd_mmisses = rrddim_add(st_mhits, "misses", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ }
+ else
+ rrdset_next(st_mhits);
+
+ rrddim_set_by_pointer(st_mhits, rd_mhits, mhit);
+ rrddim_set_by_pointer(st_mhits, rd_mmisses, mmiss);
+ rrdset_done(st_mhits);
+ }
+
+ // --------------------------------------------------------------------
+
+ if(likely(l2exist)) {
+ static RRDSET *st_l2hits = NULL;
+ static RRDDIM *rd_l2hits = NULL;
+ static RRDDIM *rd_l2misses = NULL;
+
+ if (unlikely(!st_l2hits)) {
+ st_l2hits = rrdset_create_localhost(
+ "zfs"
+ , "l2hits"
+ , NULL
+ , ZFS_FAMILY_EFFICIENCY
+ , NULL
+ , "ZFS L2 Hits"
+ , "percentage"
+ , 2060
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ rd_l2hits = rrddim_add(st_l2hits, "hits", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ rd_l2misses = rrddim_add(st_l2hits, "misses", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ }
+ else
+ rrdset_next(st_l2hits);
+
+ rrddim_set_by_pointer(st_l2hits, rd_l2hits, l2hit);
+ rrddim_set_by_pointer(st_l2hits, rd_l2misses, l2miss);
+ rrdset_done(st_l2hits);
+ }
+
+ // --------------------------------------------------------------------
+
+ {
+ static RRDSET *st_list_hits = NULL;
+ static RRDDIM *rd_mfu = NULL;
+ static RRDDIM *rd_mru = NULL;
+ static RRDDIM *rd_mfug = NULL;
+ static RRDDIM *rd_mrug = NULL;
+
+ if (unlikely(!st_list_hits)) {
+ st_list_hits = rrdset_create_localhost(
+ "zfs"
+ , "list_hits"
+ , NULL
+ , ZFS_FAMILY_EFFICIENCY
+ , NULL
+ , "ZFS List Hits"
+ , "hits/s"
+ , 2100
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+
+ rd_mfu = rrddim_add(st_list_hits, "mfu", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_mfug = rrddim_add(st_list_hits, "mfug", "mfu ghost", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_mru = rrddim_add(st_list_hits, "mru", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_mrug = rrddim_add(st_list_hits, "mrug", "mru ghost", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else
+ rrdset_next(st_list_hits);
+
+ rrddim_set_by_pointer(st_list_hits, rd_mfu, arcstats.mfu_hits);
+ rrddim_set_by_pointer(st_list_hits, rd_mru, arcstats.mru_hits);
+ rrddim_set_by_pointer(st_list_hits, rd_mfug, arcstats.mfu_ghost_hits);
+ rrddim_set_by_pointer(st_list_hits, rd_mrug, arcstats.mru_ghost_hits);
+ rrdset_done(st_list_hits);
+ }
+}
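+
+// Every chart above follows the same create-once pattern: the RRDSET/RRDDIM
+// pointers are static, so a chart is created on the first call and merely
+// updated (rrdset_next() then rrdset_done()) on later ones. The "percentage"
+// charts feed raw counters; RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL presumably
+// renders each dimension as its share of the per-interval deltas, e.g. with
+// hypothetical counters hits 1000->1080 and misses 200->220 in one interval:
+//   hits% = 100 * 80 / (80 + 20) = 80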
+
+void generate_charts_arc_summary(int update_every) {
+ unsigned long long arc_accesses_total = arcstats.hits + arcstats.misses;
+ unsigned long long real_hits = arcstats.mfu_hits + arcstats.mru_hits;
+ unsigned long long real_misses = arc_accesses_total - real_hits;
+
+ //unsigned long long anon_hits = arcstats.hits - (arcstats.mfu_hits + arcstats.mru_hits + arcstats.mfu_ghost_hits + arcstats.mru_ghost_hits);
+
+ unsigned long long arc_size = arcstats.size;
+ unsigned long long mru_size = arcstats.p;
+ //unsigned long long target_min_size = arcstats.c_min;
+ //unsigned long long target_max_size = arcstats.c_max;
+ unsigned long long target_size = arcstats.c;
+ //unsigned long long target_size_ratio = (target_max_size / target_min_size);
+
+ unsigned long long mfu_size;
+ if(arc_size > target_size)
+ mfu_size = arc_size - mru_size;
+ else
+ mfu_size = target_size - mru_size;
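+
+    // arcstats.p is the ARC's adaptive target for the MRU list, so the MFU share
+    // is the remainder of whichever is larger: the actual ARC size or its target.
+    // hypothetical example: size=900MB, c=1000MB, p=400MB gives mru_size=400MB
+    // and, since size <= c, mfu_size = c - p = 600MB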
+
+ // --------------------------------------------------------------------
+
+ {
+ static RRDSET *st_arc_size_breakdown = NULL;
+ static RRDDIM *rd_most_recent = NULL;
+ static RRDDIM *rd_most_frequent = NULL;
+
+ if (unlikely(!st_arc_size_breakdown)) {
+ st_arc_size_breakdown = rrdset_create_localhost(
+ "zfs"
+ , "arc_size_breakdown"
+ , NULL
+ , ZFS_FAMILY_EFFICIENCY
+ , NULL
+ , "ZFS ARC Size Breakdown"
+ , "percentage"
+ , 2020
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ rd_most_recent = rrddim_add(st_arc_size_breakdown, "recent", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_ROW_TOTAL);
+ rd_most_frequent = rrddim_add(st_arc_size_breakdown, "frequent", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_ROW_TOTAL);
+ }
+ else
+ rrdset_next(st_arc_size_breakdown);
+
+ rrddim_set_by_pointer(st_arc_size_breakdown, rd_most_recent, mru_size);
+ rrddim_set_by_pointer(st_arc_size_breakdown, rd_most_frequent, mfu_size);
+ rrdset_done(st_arc_size_breakdown);
+ }
+
+ // --------------------------------------------------------------------
+
+ {
+ static RRDSET *st_memory = NULL;
+#ifndef __FreeBSD__
+ static RRDDIM *rd_direct = NULL;
+#endif
+ static RRDDIM *rd_throttled = NULL;
+#ifndef __FreeBSD__
+ static RRDDIM *rd_indirect = NULL;
+#endif
+
+ if (unlikely(!st_memory)) {
+ st_memory = rrdset_create_localhost(
+ "zfs"
+ , "memory_ops"
+ , NULL
+ , ZFS_FAMILY_OPERATIONS
+ , NULL
+ , "ZFS Memory Operations"
+ , "operations/s"
+ , 2023
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+#ifndef __FreeBSD__
+ rd_direct = rrddim_add(st_memory, "direct", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+#endif
+ rd_throttled = rrddim_add(st_memory, "throttled", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+#ifndef __FreeBSD__
+ rd_indirect = rrddim_add(st_memory, "indirect", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+#endif
+ }
+ else
+ rrdset_next(st_memory);
+
+#ifndef __FreeBSD__
+ rrddim_set_by_pointer(st_memory, rd_direct, arcstats.memory_direct_count);
+#endif
+ rrddim_set_by_pointer(st_memory, rd_throttled, arcstats.memory_throttle_count);
+#ifndef __FreeBSD__
+ rrddim_set_by_pointer(st_memory, rd_indirect, arcstats.memory_indirect_count);
+#endif
+ rrdset_done(st_memory);
+ }
+
+ // --------------------------------------------------------------------
+
+ {
+ static RRDSET *st_important_ops = NULL;
+ static RRDDIM *rd_deleted = NULL;
+ static RRDDIM *rd_mutex_misses = NULL;
+ static RRDDIM *rd_evict_skips = NULL;
+ static RRDDIM *rd_hash_collisions = NULL;
+
+ if (unlikely(!st_important_ops)) {
+ st_important_ops = rrdset_create_localhost(
+ "zfs"
+ , "important_ops"
+ , NULL
+ , ZFS_FAMILY_OPERATIONS
+ , NULL
+ , "ZFS Important Operations"
+ , "operations/s"
+ , 2022
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_evict_skips = rrddim_add(st_important_ops, "eskip", "evict skip", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_deleted = rrddim_add(st_important_ops, "deleted", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_mutex_misses = rrddim_add(st_important_ops, "mtxmis", "mutex miss", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_hash_collisions = rrddim_add(st_important_ops, "hash_collisions", "hash collisions", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else
+ rrdset_next(st_important_ops);
+
+ rrddim_set_by_pointer(st_important_ops, rd_deleted, arcstats.deleted);
+ rrddim_set_by_pointer(st_important_ops, rd_evict_skips, arcstats.evict_skip);
+ rrddim_set_by_pointer(st_important_ops, rd_mutex_misses, arcstats.mutex_miss);
+ rrddim_set_by_pointer(st_important_ops, rd_hash_collisions, arcstats.hash_collisions);
+ rrdset_done(st_important_ops);
+ }
+
+ // --------------------------------------------------------------------
+
+ {
+ static RRDSET *st_actual_hits = NULL;
+ static RRDDIM *rd_actual_hits = NULL;
+ static RRDDIM *rd_actual_misses = NULL;
+
+ if (unlikely(!st_actual_hits)) {
+ st_actual_hits = rrdset_create_localhost(
+ "zfs"
+ , "actual_hits"
+ , NULL
+ , ZFS_FAMILY_EFFICIENCY
+ , NULL
+ , "ZFS Actual Cache Hits"
+ , "percentage"
+ , 2019
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ rd_actual_hits = rrddim_add(st_actual_hits, "hits", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ rd_actual_misses = rrddim_add(st_actual_hits, "misses", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ }
+ else
+ rrdset_next(st_actual_hits);
+
+ rrddim_set_by_pointer(st_actual_hits, rd_actual_hits, real_hits);
+ rrddim_set_by_pointer(st_actual_hits, rd_actual_misses, real_misses);
+ rrdset_done(st_actual_hits);
+ }
+
+ // --------------------------------------------------------------------
+
+ {
+ static RRDSET *st_demand_data_hits = NULL;
+ static RRDDIM *rd_demand_data_hits = NULL;
+ static RRDDIM *rd_demand_data_misses = NULL;
+
+ if (unlikely(!st_demand_data_hits)) {
+ st_demand_data_hits = rrdset_create_localhost(
+ "zfs"
+ , "demand_data_hits"
+ , NULL
+ , ZFS_FAMILY_EFFICIENCY
+ , NULL
+ , "ZFS Data Demand Efficiency"
+ , "percentage"
+ , 2031
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ rd_demand_data_hits = rrddim_add(st_demand_data_hits, "hits", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ rd_demand_data_misses = rrddim_add(st_demand_data_hits, "misses", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ }
+ else
+ rrdset_next(st_demand_data_hits);
+
+ rrddim_set_by_pointer(st_demand_data_hits, rd_demand_data_hits, arcstats.demand_data_hits);
+ rrddim_set_by_pointer(st_demand_data_hits, rd_demand_data_misses, arcstats.demand_data_misses);
+ rrdset_done(st_demand_data_hits);
+ }
+
+ // --------------------------------------------------------------------
+
+ {
+ static RRDSET *st_prefetch_data_hits = NULL;
+ static RRDDIM *rd_prefetch_data_hits = NULL;
+ static RRDDIM *rd_prefetch_data_misses = NULL;
+
+ if (unlikely(!st_prefetch_data_hits)) {
+ st_prefetch_data_hits = rrdset_create_localhost(
+ "zfs"
+ , "prefetch_data_hits"
+ , NULL
+ , ZFS_FAMILY_EFFICIENCY
+ , NULL
+ , "ZFS Data Prefetch Efficiency"
+ , "percentage"
+ , 2032
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ rd_prefetch_data_hits = rrddim_add(st_prefetch_data_hits, "hits", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ rd_prefetch_data_misses = rrddim_add(st_prefetch_data_hits, "misses", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ }
+ else
+ rrdset_next(st_prefetch_data_hits);
+
+ rrddim_set_by_pointer(st_prefetch_data_hits, rd_prefetch_data_hits, arcstats.prefetch_data_hits);
+ rrddim_set_by_pointer(st_prefetch_data_hits, rd_prefetch_data_misses, arcstats.prefetch_data_misses);
+ rrdset_done(st_prefetch_data_hits);
+ }
+
+ // --------------------------------------------------------------------
+
+ {
+ static RRDSET *st_hash_elements = NULL;
+ static RRDDIM *rd_hash_elements_current = NULL;
+ static RRDDIM *rd_hash_elements_max = NULL;
+
+ if (unlikely(!st_hash_elements)) {
+ st_hash_elements = rrdset_create_localhost(
+ "zfs"
+ , "hash_elements"
+ , NULL
+ , ZFS_FAMILY_HASH
+ , NULL
+ , "ZFS ARC Hash Elements"
+ , "elements"
+ , 2300
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_hash_elements_current = rrddim_add(st_hash_elements, "current", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ rd_hash_elements_max = rrddim_add(st_hash_elements, "max", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else
+ rrdset_next(st_hash_elements);
+
+ rrddim_set_by_pointer(st_hash_elements, rd_hash_elements_current, arcstats.hash_elements);
+ rrddim_set_by_pointer(st_hash_elements, rd_hash_elements_max, arcstats.hash_elements_max);
+ rrdset_done(st_hash_elements);
+ }
+
+ // --------------------------------------------------------------------
+
+ {
+ static RRDSET *st_hash_chains = NULL;
+ static RRDDIM *rd_hash_chains_current = NULL;
+ static RRDDIM *rd_hash_chains_max = NULL;
+
+ if (unlikely(!st_hash_chains)) {
+ st_hash_chains = rrdset_create_localhost(
+ "zfs"
+ , "hash_chains"
+ , NULL
+ , ZFS_FAMILY_HASH
+ , NULL
+ , "ZFS ARC Hash Chains"
+ , "chains"
+ , 2310
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_hash_chains_current = rrddim_add(st_hash_chains, "current", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ rd_hash_chains_max = rrddim_add(st_hash_chains, "max", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else
+ rrdset_next(st_hash_chains);
+
+ rrddim_set_by_pointer(st_hash_chains, rd_hash_chains_current, arcstats.hash_chains);
+ rrddim_set_by_pointer(st_hash_chains, rd_hash_chains_max, arcstats.hash_chain_max);
+ rrdset_done(st_hash_chains);
+ }
+
+ // --------------------------------------------------------------------
+
+}
\ No newline at end of file
diff --git a/src/zfs_common.h b/src/zfs_common.h
new file mode 100644
index 00000000..9d3aa7df
--- /dev/null
+++ b/src/zfs_common.h
@@ -0,0 +1,109 @@
+#ifndef NETDATA_ZFS_COMMON_H
+#define NETDATA_ZFS_COMMON_H
+
+#define ZFS_FAMILY_SIZE "size"
+#define ZFS_FAMILY_EFFICIENCY "efficiency"
+#define ZFS_FAMILY_ACCESSES "accesses"
+#define ZFS_FAMILY_OPERATIONS "operations"
+#define ZFS_FAMILY_HASH "hashes"
+
+struct arcstats {
+ unsigned long long hits;
+ unsigned long long misses;
+ unsigned long long demand_data_hits;
+ unsigned long long demand_data_misses;
+ unsigned long long demand_metadata_hits;
+ unsigned long long demand_metadata_misses;
+ unsigned long long prefetch_data_hits;
+ unsigned long long prefetch_data_misses;
+ unsigned long long prefetch_metadata_hits;
+ unsigned long long prefetch_metadata_misses;
+ unsigned long long mru_hits;
+ unsigned long long mru_ghost_hits;
+ unsigned long long mfu_hits;
+ unsigned long long mfu_ghost_hits;
+ unsigned long long deleted;
+ unsigned long long mutex_miss;
+ unsigned long long evict_skip;
+ unsigned long long evict_not_enough;
+ unsigned long long evict_l2_cached;
+ unsigned long long evict_l2_eligible;
+ unsigned long long evict_l2_ineligible;
+ unsigned long long evict_l2_skip;
+ unsigned long long hash_elements;
+ unsigned long long hash_elements_max;
+ unsigned long long hash_collisions;
+ unsigned long long hash_chains;
+ unsigned long long hash_chain_max;
+ unsigned long long p;
+ unsigned long long c;
+ unsigned long long c_min;
+ unsigned long long c_max;
+ unsigned long long size;
+ unsigned long long hdr_size;
+ unsigned long long data_size;
+ unsigned long long metadata_size;
+ unsigned long long other_size;
+ unsigned long long anon_size;
+ unsigned long long anon_evictable_data;
+ unsigned long long anon_evictable_metadata;
+ unsigned long long mru_size;
+ unsigned long long mru_evictable_data;
+ unsigned long long mru_evictable_metadata;
+ unsigned long long mru_ghost_size;
+ unsigned long long mru_ghost_evictable_data;
+ unsigned long long mru_ghost_evictable_metadata;
+ unsigned long long mfu_size;
+ unsigned long long mfu_evictable_data;
+ unsigned long long mfu_evictable_metadata;
+ unsigned long long mfu_ghost_size;
+ unsigned long long mfu_ghost_evictable_data;
+ unsigned long long mfu_ghost_evictable_metadata;
+ unsigned long long l2_hits;
+ unsigned long long l2_misses;
+ unsigned long long l2_feeds;
+ unsigned long long l2_rw_clash;
+ unsigned long long l2_read_bytes;
+ unsigned long long l2_write_bytes;
+ unsigned long long l2_writes_sent;
+ unsigned long long l2_writes_done;
+ unsigned long long l2_writes_error;
+ unsigned long long l2_writes_lock_retry;
+ unsigned long long l2_evict_lock_retry;
+ unsigned long long l2_evict_reading;
+ unsigned long long l2_evict_l1cached;
+ unsigned long long l2_free_on_write;
+ unsigned long long l2_cdata_free_on_write;
+ unsigned long long l2_abort_lowmem;
+ unsigned long long l2_cksum_bad;
+ unsigned long long l2_io_error;
+ unsigned long long l2_size;
+ unsigned long long l2_asize;
+ unsigned long long l2_hdr_size;
+ unsigned long long l2_compress_successes;
+ unsigned long long l2_compress_zeros;
+ unsigned long long l2_compress_failures;
+ unsigned long long memory_throttle_count;
+ unsigned long long duplicate_buffers;
+ unsigned long long duplicate_buffers_size;
+ unsigned long long duplicate_reads;
+ unsigned long long memory_direct_count;
+ unsigned long long memory_indirect_count;
+ unsigned long long arc_no_grow;
+ unsigned long long arc_tempreserve;
+ unsigned long long arc_loaned_bytes;
+ unsigned long long arc_prune;
+ unsigned long long arc_meta_used;
+ unsigned long long arc_meta_limit;
+ unsigned long long arc_meta_max;
+ unsigned long long arc_meta_min;
+ unsigned long long arc_need_free;
+ unsigned long long arc_sys_free;
+};
+
+int l2exist;
+
+void generate_charts_arcstats(int update_every);
+void generate_charts_arc_summary(int update_every);
+
+#endif //NETDATA_ZFS_COMMON_H
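
Note that "int l2exist;" above is a tentative definition living in a header:
every translation unit including zfs_common.h emits a common symbol and the
linker merges them, which works under the traditional -fcommon behavior but
breaks with -fno-common. The conventional split (shown as an assumed
alternative, not what this patch does) keeps the definition in one file:

    /* zfs_common.h */
    extern int l2exist;   /* declared once, visible everywhere */

    /* zfs_common.c */
    int l2exist = 0;      /* defined exactly once */
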
diff --git a/system/Makefile.in b/system/Makefile.in
index aa0a60e0..ff19c946 100644
--- a/system/Makefile.in
+++ b/system/Makefile.in
@@ -1,9 +1,8 @@
-# Makefile.in generated by automake 1.11.3 from Makefile.am.
+# Makefile.in generated by automake 1.14.1 from Makefile.am.
# @configure_input@
-# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
-# 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 Free Software
-# Foundation, Inc.
+# Copyright (C) 1994-2013 Free Software Foundation, Inc.
+
# This Makefile.in is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
@@ -16,6 +15,51 @@
@SET_MAKE@
VPATH = @srcdir@
+am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
pkgdatadir = $(datadir)/@PACKAGE@
pkgincludedir = $(includedir)/@PACKAGE@
pkglibdir = $(libdir)/@PACKAGE@
@@ -34,8 +78,8 @@ PRE_UNINSTALL = :
POST_UNINSTALL = :
build_triplet = @build@
host_triplet = @host@
-DIST_COMMON = $(dist_noinst_DATA) $(srcdir)/Makefile.am \
- $(srcdir)/Makefile.in $(top_srcdir)/build/subst.inc
+DIST_COMMON = $(top_srcdir)/build/subst.inc $(srcdir)/Makefile.in \
+ $(srcdir)/Makefile.am $(dist_noinst_DATA)
subdir = system
ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
am__aclocal_m4_deps = $(top_srcdir)/m4/ax_c___atomic.m4 \
@@ -52,12 +96,31 @@ mkinstalldirs = $(install_sh) -d
CONFIG_HEADER = $(top_builddir)/config.h
CONFIG_CLEAN_FILES =
CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
SOURCES =
DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
DATA = $(dist_noinst_DATA) $(nodist_noinst_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
AUTOCONF = @AUTOCONF@
AUTOHEADER = @AUTOHEADER@
AUTOMAKE = @AUTOMAKE@
@@ -265,11 +328,11 @@ $(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
$(am__aclocal_m4_deps):
-tags: TAGS
-TAGS:
+tags TAGS:
+
+ctags CTAGS:
-ctags: CTAGS
-CTAGS:
+cscope cscopelist:
distdir: $(DISTFILES)
@@ -406,15 +469,16 @@ uninstall-am:
.MAKE: install-am install-strip
-.PHONY: all all-am check check-am clean clean-generic distclean \
- distclean-generic distdir dvi dvi-am html html-am info info-am \
- install install-am install-data install-data-am install-dvi \
- install-dvi-am install-exec install-exec-am install-html \
- install-html-am install-info install-info-am install-man \
- install-pdf install-pdf-am install-ps install-ps-am \
- install-strip installcheck installcheck-am installdirs \
- maintainer-clean maintainer-clean-generic mostlyclean \
- mostlyclean-generic pdf pdf-am ps ps-am uninstall uninstall-am
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
+ pdf-am ps ps-am tags-am uninstall uninstall-am
.in:
if sed \
diff --git a/web/Makefile.am b/web/Makefile.am
index 03a48759..b587f5a1 100644
--- a/web/Makefile.am
+++ b/web/Makefile.am
@@ -17,6 +17,7 @@ dist_web_DATA = \
favicon.ico \
goto-host-from-alarm.html \
index.html \
+ infographic.html \
netdata-swagger.yaml \
netdata-swagger.json \
robots.txt \
diff --git a/web/Makefile.in b/web/Makefile.in
index 9ec69e6a..42939d2a 100644
--- a/web/Makefile.in
+++ b/web/Makefile.in
@@ -1,9 +1,8 @@
-# Makefile.in generated by automake 1.11.3 from Makefile.am.
+# Makefile.in generated by automake 1.14.1 from Makefile.am.
# @configure_input@
-# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
-# 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 Free Software
-# Foundation, Inc.
+# Copyright (C) 1994-2013 Free Software Foundation, Inc.
+
# This Makefile.in is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
@@ -16,6 +15,51 @@
@SET_MAKE@
VPATH = @srcdir@
+am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
pkgdatadir = $(datadir)/@PACKAGE@
pkgincludedir = $(includedir)/@PACKAGE@
pkglibdir = $(libdir)/@PACKAGE@
@@ -35,11 +79,11 @@ POST_UNINSTALL = :
build_triplet = @build@
host_triplet = @host@
subdir = web
-DIST_COMMON = $(dist_web_DATA) $(dist_webcss_DATA) $(dist_webdnt_DATA) \
+DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \
+ $(dist_web_DATA) $(dist_webcss_DATA) $(dist_webdnt_DATA) \
$(dist_webfonts_DATA) $(dist_webimages_DATA) \
$(dist_weblib_DATA) $(dist_webold_DATA) \
- $(dist_webwellknown_DATA) $(srcdir)/Makefile.am \
- $(srcdir)/Makefile.in
+ $(dist_webwellknown_DATA)
ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
am__aclocal_m4_deps = $(top_srcdir)/m4/ax_c___atomic.m4 \
$(top_srcdir)/m4/ax_c__generic.m4 $(top_srcdir)/m4/ax_c_lto.m4 \
@@ -55,8 +99,25 @@ mkinstalldirs = $(install_sh) -d
CONFIG_HEADER = $(top_builddir)/config.h
CONFIG_CLEAN_FILES =
CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
SOURCES =
DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
am__vpath_adj = case $$p in \
$(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
@@ -92,9 +153,11 @@ DATA = $(dist_web_DATA) $(dist_webcss_DATA) $(dist_webdnt_DATA) \
$(dist_webfonts_DATA) $(dist_webimages_DATA) \
$(dist_weblib_DATA) $(dist_webold_DATA) \
$(dist_webwellknown_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
AUTOCONF = @AUTOCONF@
AUTOHEADER = @AUTOHEADER@
AUTOMAKE = @AUTOMAKE@
@@ -255,6 +318,7 @@ dist_web_DATA = \
favicon.ico \
goto-host-from-alarm.html \
index.html \
+ infographic.html \
netdata-swagger.yaml \
netdata-swagger.json \
robots.txt \
@@ -388,8 +452,11 @@ $(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
$(am__aclocal_m4_deps):
install-dist_webDATA: $(dist_web_DATA)
@$(NORMAL_INSTALL)
- test -z "$(webdir)" || $(MKDIR_P) "$(DESTDIR)$(webdir)"
@list='$(dist_web_DATA)'; test -n "$(webdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(webdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(webdir)" || exit 1; \
+ fi; \
for p in $$list; do \
if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
echo "$$d$$p"; \
@@ -406,8 +473,11 @@ uninstall-dist_webDATA:
dir='$(DESTDIR)$(webdir)'; $(am__uninstall_files_from_dir)
install-dist_webcssDATA: $(dist_webcss_DATA)
@$(NORMAL_INSTALL)
- test -z "$(webcssdir)" || $(MKDIR_P) "$(DESTDIR)$(webcssdir)"
@list='$(dist_webcss_DATA)'; test -n "$(webcssdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(webcssdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(webcssdir)" || exit 1; \
+ fi; \
for p in $$list; do \
if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
echo "$$d$$p"; \
@@ -424,8 +494,11 @@ uninstall-dist_webcssDATA:
dir='$(DESTDIR)$(webcssdir)'; $(am__uninstall_files_from_dir)
install-dist_webdntDATA: $(dist_webdnt_DATA)
@$(NORMAL_INSTALL)
- test -z "$(webdntdir)" || $(MKDIR_P) "$(DESTDIR)$(webdntdir)"
@list='$(dist_webdnt_DATA)'; test -n "$(webdntdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(webdntdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(webdntdir)" || exit 1; \
+ fi; \
for p in $$list; do \
if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
echo "$$d$$p"; \
@@ -442,8 +515,11 @@ uninstall-dist_webdntDATA:
dir='$(DESTDIR)$(webdntdir)'; $(am__uninstall_files_from_dir)
install-dist_webfontsDATA: $(dist_webfonts_DATA)
@$(NORMAL_INSTALL)
- test -z "$(webfontsdir)" || $(MKDIR_P) "$(DESTDIR)$(webfontsdir)"
@list='$(dist_webfonts_DATA)'; test -n "$(webfontsdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(webfontsdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(webfontsdir)" || exit 1; \
+ fi; \
for p in $$list; do \
if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
echo "$$d$$p"; \
@@ -460,8 +536,11 @@ uninstall-dist_webfontsDATA:
dir='$(DESTDIR)$(webfontsdir)'; $(am__uninstall_files_from_dir)
install-dist_webimagesDATA: $(dist_webimages_DATA)
@$(NORMAL_INSTALL)
- test -z "$(webimagesdir)" || $(MKDIR_P) "$(DESTDIR)$(webimagesdir)"
@list='$(dist_webimages_DATA)'; test -n "$(webimagesdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(webimagesdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(webimagesdir)" || exit 1; \
+ fi; \
for p in $$list; do \
if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
echo "$$d$$p"; \
@@ -478,8 +557,11 @@ uninstall-dist_webimagesDATA:
dir='$(DESTDIR)$(webimagesdir)'; $(am__uninstall_files_from_dir)
install-dist_weblibDATA: $(dist_weblib_DATA)
@$(NORMAL_INSTALL)
- test -z "$(weblibdir)" || $(MKDIR_P) "$(DESTDIR)$(weblibdir)"
@list='$(dist_weblib_DATA)'; test -n "$(weblibdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(weblibdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(weblibdir)" || exit 1; \
+ fi; \
for p in $$list; do \
if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
echo "$$d$$p"; \
@@ -496,8 +578,11 @@ uninstall-dist_weblibDATA:
dir='$(DESTDIR)$(weblibdir)'; $(am__uninstall_files_from_dir)
install-dist_weboldDATA: $(dist_webold_DATA)
@$(NORMAL_INSTALL)
- test -z "$(webolddir)" || $(MKDIR_P) "$(DESTDIR)$(webolddir)"
@list='$(dist_webold_DATA)'; test -n "$(webolddir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(webolddir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(webolddir)" || exit 1; \
+ fi; \
for p in $$list; do \
if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
echo "$$d$$p"; \
@@ -514,8 +599,11 @@ uninstall-dist_weboldDATA:
dir='$(DESTDIR)$(webolddir)'; $(am__uninstall_files_from_dir)
install-dist_webwellknownDATA: $(dist_webwellknown_DATA)
@$(NORMAL_INSTALL)
- test -z "$(webwellknowndir)" || $(MKDIR_P) "$(DESTDIR)$(webwellknowndir)"
@list='$(dist_webwellknown_DATA)'; test -n "$(webwellknowndir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(webwellknowndir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(webwellknowndir)" || exit 1; \
+ fi; \
for p in $$list; do \
if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
echo "$$d$$p"; \
@@ -530,11 +618,11 @@ uninstall-dist_webwellknownDATA:
@list='$(dist_webwellknown_DATA)'; test -n "$(webwellknowndir)" || list=; \
files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
dir='$(DESTDIR)$(webwellknowndir)'; $(am__uninstall_files_from_dir)
-tags: TAGS
-TAGS:
+tags TAGS:
+
+ctags CTAGS:
-ctags: CTAGS
-CTAGS:
+cscope cscopelist:
distdir: $(DISTFILES)
@@ -679,10 +767,10 @@ uninstall-am: uninstall-dist_webDATA uninstall-dist_webcssDATA \
.MAKE: install-am install-strip
-.PHONY: all all-am check check-am clean clean-generic distclean \
- distclean-generic distdir dvi dvi-am html html-am info info-am \
- install install-am install-data install-data-am \
- install-dist_webDATA install-dist_webcssDATA \
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dist_webDATA install-dist_webcssDATA \
install-dist_webdntDATA install-dist_webfontsDATA \
install-dist_webimagesDATA install-dist_weblibDATA \
install-dist_weboldDATA install-dist_webwellknownDATA \
@@ -691,11 +779,12 @@ uninstall-am: uninstall-dist_webDATA uninstall-dist_webcssDATA \
install-man install-pdf install-pdf-am install-ps \
install-ps-am install-strip installcheck installcheck-am \
installdirs maintainer-clean maintainer-clean-generic \
- mostlyclean mostlyclean-generic pdf pdf-am ps ps-am uninstall \
- uninstall-am uninstall-dist_webDATA uninstall-dist_webcssDATA \
- uninstall-dist_webdntDATA uninstall-dist_webfontsDATA \
- uninstall-dist_webimagesDATA uninstall-dist_weblibDATA \
- uninstall-dist_weboldDATA uninstall-dist_webwellknownDATA
+ mostlyclean mostlyclean-generic pdf pdf-am ps ps-am tags-am \
+ uninstall uninstall-am uninstall-dist_webDATA \
+ uninstall-dist_webcssDATA uninstall-dist_webdntDATA \
+ uninstall-dist_webfontsDATA uninstall-dist_webimagesDATA \
+ uninstall-dist_weblibDATA uninstall-dist_weboldDATA \
+ uninstall-dist_webwellknownDATA
version.txt:
diff --git a/web/dashboard.css b/web/dashboard.css
index 8eeaa8be..2147c603 100644
--- a/web/dashboard.css
+++ b/web/dashboard.css
@@ -49,6 +49,38 @@ body {
/* width and height is given per chart with data-width and data-height */
}
+.netdata-container-gauge {
+ display: inline-block;
+ overflow: hidden;
+
+ /* required for child elements to have absolute position */
+ position: relative;
+
+ /* width and height is given per chart with data-width and data-height */
+}
+
+.netdata-container-gauge:after {
+ padding-top: 60%;
+ display: block;
+ content: '';
+}
+
+.netdata-container-easypiechart {
+ display: inline-block;
+ overflow: hidden;
+
+ /* required for child elements to have absolute position */
+ position: relative;
+
+ /* width and height is given per chart with data-width and data-height */
+}
+
+.netdata-container-easypiechart:after {
+ padding-top: 100%;
+ display: block;
+ content: '';
+}
+
.netdata-aspect {
position: relative;
width: 100%;
@@ -128,12 +160,15 @@ body {
.netdata-message {
display: inline-block;
+ position: absolute;
+ top: 0;
+ left: 0;
+ right: 0;
+ bottom: 0;
text-align: left;
vertical-align: top;
font-weight: bold;
font-size: x-small;
- width: 100%;
- height: 100%;
overflow: hidden;
background: inherit;
z-index: 0;
@@ -399,7 +434,7 @@ body {
margin-left: 18%;
text-align: center;
color: #999999;
- font-weight: normal;
+ font-weight: bold;
}
.easyPieChartUnits {
@@ -423,6 +458,8 @@ body {
position: absolute;
top: 0;
left: 0;
+ bottom: 0;
+ right: 0;
z-index: 0;
}
@@ -471,7 +508,7 @@ body {
position: absolute;
float: left;
left: 0;
- bottom: 10%;
+ bottom: 8%;
width: 92%;
margin-left: 8%;
text-align: left;
@@ -484,7 +521,7 @@ body {
position: absolute;
float: left;
left: 0;
- bottom: 10%;
+ bottom: 8%;
width: 95%;
margin-right: 5%;
text-align: right;
diff --git a/web/dashboard.html b/web/dashboard.html
index 75de65ad..4453c996 100644
--- a/web/dashboard.html
+++ b/web/dashboard.html
@@ -652,4 +652,4 @@ So, to avoid flashing the charts, we destroy and re-create the charts on each up
<!-- <script> netdataServer = "http://box:19999"; </script> -->
<!-- load the dashboard manager - it will do the rest -->
-<script type="text/javascript" src="dashboard.js?v20170105-7"></script>
+<script type="text/javascript" src="dashboard.js?v20170715-1"></script>
diff --git a/web/dashboard.js b/web/dashboard.js
index b34accde..1f240a4c 100644
--- a/web/dashboard.js
+++ b/web/dashboard.js
@@ -144,7 +144,7 @@ var NETDATA = window.NETDATA || {};
NETDATA.themes = {
white: {
bootstrap_css: NETDATA.serverDefault + 'css/bootstrap-3.3.7.css',
- dashboard_css: NETDATA.serverDefault + 'dashboard.css?v20161229-2',
+ dashboard_css: NETDATA.serverDefault + 'dashboard.css?v20170605-2',
background: '#FFFFFF',
foreground: '#000000',
grid: '#F0F0F0',
@@ -161,7 +161,7 @@ var NETDATA = window.NETDATA || {};
},
slate: {
bootstrap_css: NETDATA.serverDefault + 'css/bootstrap-slate-flat-3.3.7.css?v20161229-1',
- dashboard_css: NETDATA.serverDefault + 'dashboard.slate.css?v20161229-2',
+ dashboard_css: NETDATA.serverDefault + 'dashboard.slate.css?v20170605-2',
background: '#272b30',
foreground: '#C8C8C8',
grid: '#283236',
@@ -287,7 +287,7 @@ var NETDATA = window.NETDATA || {};
// rendering the chart that is panned or zoomed).
// Used with .current.global_pan_sync_time
- last_resized: Date.now(), // the timestamp of the last resize request
+ last_page_resize: Date.now(), // the timestamp of the last resize request
last_page_scroll: 0, // the timestamp the last time the page was scrolled
@@ -571,7 +571,7 @@ var NETDATA = window.NETDATA || {};
NETDATA.onresizeCallback = null;
NETDATA.onresize = function() {
- NETDATA.options.last_resized = Date.now();
+ NETDATA.options.last_page_resize = Date.now();
NETDATA.onscroll();
if(typeof NETDATA.onresizeCallback === 'function')
@@ -602,18 +602,18 @@ var NETDATA = window.NETDATA || {};
// we have to cancel pending requests too
while (len--) {
- if (targets[len]._updating === true) {
+ if (targets[len].fetching_data === true) {
if (typeof targets[len].xhr !== 'undefined') {
targets[len].xhr.abort();
targets[len].running = false;
- targets[len]._updating = false;
+ targets[len].fetching_data = false;
}
targets[len].isVisible();
}
}
}
else {
- // just find which chart is visible
+ // just find which charts are visible
while (len--)
targets[len].isVisible();
@@ -644,9 +644,14 @@ var NETDATA = window.NETDATA || {};
NETDATA.onscroll_updater_running = false;
};
+ NETDATA.scrollUp = false;
+ NETDATA.scrollY = window.scrollY;
NETDATA.onscroll = function() {
// console.log('onscroll');
+ NETDATA.scrollUp = (window.scrollY > NETDATA.scrollY);
+ NETDATA.scrollY = window.scrollY;
+
NETDATA.options.last_page_scroll = Date.now();
NETDATA.options.auto_refresher_stop_until = 0;
@@ -734,6 +739,254 @@ var NETDATA = window.NETDATA || {};
};
// ----------------------------------------------------------------------------------------------------------------
+ // fast numbers formatting
+
+ NETDATA.fastNumberFormat = {
+ formatters_fixed: [],
+ formatters_zero_based: [],
+
+ // this is the fastest and the preferred
+ getIntlNumberFormat: function(min, max) {
+ var key = max;
+ if(min === max) {
+ if(typeof this.formatters_fixed[key] === 'undefined')
+ this.formatters_fixed[key] = new Intl.NumberFormat(undefined, {
+ // style: 'decimal',
+ // minimumIntegerDigits: 1,
+ // minimumSignificantDigits: 1,
+ // maximumSignificantDigits: 1,
+ useGrouping: true,
+ minimumFractionDigits: min,
+ maximumFractionDigits: max
+ });
+
+ return this.formatters_fixed[key];
+ }
+ else if(min === 0) {
+ if(typeof this.formatters_zero_based[key] === 'undefined')
+ this.formatters_zero_based[key] = new Intl.NumberFormat(undefined, {
+ // style: 'decimal',
+ // minimumIntegerDigits: 1,
+ // minimumSignificantDigits: 1,
+ // maximumSignificantDigits: 1,
+ useGrouping: true,
+ minimumFractionDigits: min,
+ maximumFractionDigits: max
+ });
+
+ return this.formatters_zero_based[key];
+ }
+ else {
+ // this is never used
+ // it is added just for completeness
+ return new Intl.NumberFormat(undefined, {
+ // style: 'decimal',
+ // minimumIntegerDigits: 1,
+ // minimumSignificantDigits: 1,
+ // maximumSignificantDigits: 1,
+ useGrouping: true,
+ minimumFractionDigits: min,
+ maximumFractionDigits: max
+ });
+ }
+ },
+
+ // this respects locale
+ getLocaleString: function(min, max) {
+ var key = max;
+ if(min === max) {
+ if(typeof this.formatters_fixed[key] === 'undefined')
+ this.formatters_fixed[key] = {
+ format: function (value) {
+ return value.toLocaleString(undefined, {
+ // style: 'decimal',
+ // minimumIntegerDigits: 1,
+ // minimumSignificantDigits: 1,
+ // maximumSignificantDigits: 1,
+ useGrouping: true,
+ minimumFractionDigits: min,
+ maximumFractionDigits: max
+ });
+ }
+ };
+
+ return this.formatters_fixed[key];
+ }
+ else if(min === 0) {
+ if(typeof this.formatters_zero_based[key] === 'undefined')
+ this.formatters_zero_based[key] = {
+ format: function (value) {
+ return value.toLocaleString(undefined, {
+ // style: 'decimal',
+ // minimumIntegerDigits: 1,
+ // minimumSignificantDigits: 1,
+ // maximumSignificantDigits: 1,
+ useGrouping: true,
+ minimumFractionDigits: min,
+ maximumFractionDigits: max
+ });
+ }
+ };
+
+ return this.formatters_zero_based[key];
+ }
+ else {
+ return {
+ format: function (value) {
+ return value.toLocaleString(undefined, {
+ // style: 'decimal',
+ // minimumIntegerDigits: 1,
+ // minimumSignificantDigits: 1,
+ // maximumSignificantDigits: 1,
+ useGrouping: true,
+ minimumFractionDigits: min,
+ maximumFractionDigits: max
+ });
+ }
+ };
+ }
+ },
+
+ getFixed: function(min, max) {
+ var key = max;
+ if(min === max) {
+ if(typeof this.formatters_fixed[key] === 'undefined')
+ this.formatters_fixed[key] = {
+ format: function (value) {
+ if(value === 0) return "0";
+ return value.toFixed(max);
+ }
+ };
+
+ return this.formatters_fixed[key];
+ }
+ else if(min === 0) {
+ if(typeof this.formatters_zero_based[key] === 'undefined')
+ this.formatters_zero_based[key] = {
+ format: function (value) {
+ if(value === 0) return "0";
+ return value.toFixed(max);
+ }
+ };
+
+ return this.formatters_zero_based[key];
+ }
+ else {
+ return {
+ format: function (value) {
+ if(value === 0) return "0";
+ return value.toFixed(max);
+ }
+ };
+ }
+ },
+
+ testIntlNumberFormat: function() {
+ var n = 1.12345;
+ var e1 = "1.12", e2 = "1,12";
+ var s = "";
+
+ try {
+ var x = new Intl.NumberFormat(undefined, {
+ useGrouping: true,
+ minimumFractionDigits: 2,
+ maximumFractionDigits: 2
+ });
+
+ s = x.format(n);
+ }
+ catch(e) {
+ s = "";
+ }
+
+ // console.log('NumberFormat: ', s);
+ return (s === e1 || s === e2);
+ },
+
+ testLocaleString: function() {
+ var n = 1.12345;
+ var e1 = "1.12", e2 = "1,12";
+ var s = "";
+
+ try {
+ s = n.toLocaleString(undefined, {
+ useGrouping: true,
+ minimumFractionDigits: 2,
+ maximumFractionDigits: 2
+ });
+ }
+ catch(e) {
+ s = "";
+ }
+
+ // console.log('localeString: ', s);
+ return (s === e1 || s === e2);
+ },
+
+ // on first run we decide which formatter to use
+ get: function(min, max) {
+ if(this.testIntlNumberFormat()) {
+ // console.log('numberformat');
+ this.get = this.getIntlNumberFormat;
+ }
+ else if(this.testLocaleString()) {
+ // console.log('localestring');
+ this.get = this.getLocaleString;
+ }
+ else {
+ // console.log('fixed');
+ this.get = this.getFixed;
+ }
+ return this.get(min, max);
+ }
+ };
+
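The object above decides once which formatter the browser supports and then rebinds `get()`, so every later call skips the feature tests entirely. A minimal standalone sketch of the same detect-once-and-memoize pattern (`fastFormat` and its member names are illustrative, not part of the patch):

```js
// Sketch: pick the best formatter on first use, then cache one
// instance per precision. The patch keeps two caches (fixed vs
// zero-based minimums) to avoid key collisions; this sketch keys
// by the maximum only, assuming min === max.
var fastFormat = {
    cache: [],

    viaIntl: function(min, max) {
        if (typeof this.cache[max] === 'undefined')
            this.cache[max] = new Intl.NumberFormat(undefined, {
                useGrouping: true,
                minimumFractionDigits: min,
                maximumFractionDigits: max
            });
        return this.cache[max];
    },

    viaToFixed: function(min, max) {
        return { format: function(v) { return v.toFixed(max); } };
    },

    get: function(min, max) {
        // the first call picks a strategy and replaces this.get
        this.get = (typeof Intl === 'object' && typeof Intl.NumberFormat === 'function')
            ? this.viaIntl
            : this.viaToFixed;
        return this.get(min, max);
    }
};

// fastFormat.get(2, 2).format(1234.5) -> "1,234.50" in an en-US locale
```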
+ // ----------------------------------------------------------------------------------------------------------------
+ // element data attributes
+
+ NETDATA.dataAttribute = function(element, attribute, def) {
+ var key = 'data-' + attribute.toString();
+ if(element.hasAttribute(key) === true) {
+ var data = element.getAttribute(key);
+
+ if(data === 'true') return true;
+ if(data === 'false') return false;
+ if(data === 'null') return null;
+
+ // Only convert to a number if it doesn't change the string
+ if(data === +data + '') return +data;
+
+ if(/^(?:\{[\w\W]*\}|\[[\w\W]*\])$/.test(data))
+ return JSON.parse(data);
+
+ return data;
+ }
+ else return def;
+ };
+
+ NETDATA.dataAttributeBoolean = function(element, attribute, def) {
+ var value = NETDATA.dataAttribute(element, attribute, def);
+
+ if(value === true || value === false)
+ return value;
+
+ if(typeof(value) === 'string') {
+ if(value === 'yes' || value === 'on')
+ return true;
+
+ if(value === '' || value === 'no' || value === 'off' || value === 'null')
+ return false;
+
+ return def;
+ }
+
+ if(typeof(value) === 'number')
+ return value !== 0;
+
+ return def;
+ };
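Assuming the two helpers above are loaded, their coercion rules are easy to demonstrate (the element and attribute values here are hypothetical):

```js
var el = document.createElement('div');
el.setAttribute('data-netdata', 'system.cpu');            // plain string
el.setAttribute('data-points', '300');                    // numeric string
el.setAttribute('data-legend', 'no');                     // boolean-ish string
el.setAttribute('data-dimensions', '["user","system"]');  // JSON array

NETDATA.dataAttribute(el, 'netdata', null);        // -> "system.cpu"
NETDATA.dataAttribute(el, 'points', null);         // -> 300 (a number)
NETDATA.dataAttribute(el, 'dimensions', null);     // -> ["user", "system"]
NETDATA.dataAttribute(el, 'missing', 42);          // -> 42 (the default)
NETDATA.dataAttributeBoolean(el, 'legend', true);  // -> false ('no' wins over the default)
```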
+
+ // ----------------------------------------------------------------------------------------------------------------
// commonMin & commonMax
NETDATA.commonMin = {
@@ -741,14 +994,13 @@ var NETDATA = window.NETDATA || {};
latest: {},
get: function(state) {
- if(typeof state.__commonMin === 'undefined') {
+ if(typeof state.tmp.__commonMin === 'undefined') {
// get the commonMin setting
- var self = $(state.element);
- state.__commonMin = self.data('common-min') || null;
+ state.tmp.__commonMin = NETDATA.dataAttribute(state.element, 'common-min', null);
}
var min = state.data.min;
- var name = state.__commonMin;
+ var name = state.tmp.__commonMin;
if(name === null) {
// we don't need commonMin
@@ -766,11 +1018,11 @@ var NETDATA = window.NETDATA || {};
var uuid = state.uuid;
if(typeof t[uuid] !== 'undefined') {
if(t[uuid] === min) {
- //state.log('commonMin ' + state.__commonMin + ' not changed: ' + this.latest[name]);
+ //state.log('commonMin ' + state.tmp.__commonMin + ' not changed: ' + this.latest[name]);
return this.latest[name];
}
else if(min < this.latest[name]) {
- //state.log('commonMin ' + state.__commonMin + ' increased: ' + min);
+ //state.log('commonMin ' + state.tmp.__commonMin + ' increased: ' + min);
t[uuid] = min;
this.latest[name] = min;
return min;
@@ -785,7 +1037,7 @@ var NETDATA = window.NETDATA || {};
for(var i in t)
if(t.hasOwnProperty(i) && t[i] < m) m = t[i];
- //state.log('commonMin ' + state.__commonMin + ' updated: ' + m);
+ //state.log('commonMin ' + state.tmp.__commonMin + ' updated: ' + m);
this.latest[name] = m;
return m;
}
@@ -796,14 +1048,13 @@ var NETDATA = window.NETDATA || {};
latest: {},
get: function(state) {
- if(typeof state.__commonMax === 'undefined') {
+ if(typeof state.tmp.__commonMax === 'undefined') {
// get the commonMax setting
- var self = $(state.element);
- state.__commonMax = self.data('common-max') || null;
+ state.tmp.__commonMax = NETDATA.dataAttribute(state.element, 'common-max', null);
}
var max = state.data.max;
- var name = state.__commonMax;
+ var name = state.tmp.__commonMax;
if(name === null) {
// we don't need commonMax
@@ -821,11 +1072,11 @@ var NETDATA = window.NETDATA || {};
var uuid = state.uuid;
if(typeof t[uuid] !== 'undefined') {
if(t[uuid] === max) {
- //state.log('commonMax ' + state.__commonMax + ' not changed: ' + this.latest[name]);
+ //state.log('commonMax ' + state.tmp.__commonMax + ' not changed: ' + this.latest[name]);
return this.latest[name];
}
else if(max > this.latest[name]) {
- //state.log('commonMax ' + state.__commonMax + ' increased: ' + max);
+ //state.log('commonMax ' + state.tmp.__commonMax + ' increased: ' + max);
t[uuid] = max;
this.latest[name] = max;
return max;
@@ -840,7 +1091,7 @@ var NETDATA = window.NETDATA || {};
for(var i in t)
if(t.hasOwnProperty(i) && t[i] > m) m = t[i];
- //state.log('commonMax ' + state.__commonMax + ' updated: ' + m);
+ //state.log('commonMax ' + state.tmp.__commonMax + ' updated: ' + m);
this.latest[name] = m;
return m;
}
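Both `commonMin` and `commonMax` act as a small registry: every chart that declares the same group name reports its own extreme, and the group answers with the winner, so all members render on the same scale. A reduced sketch of the max side (names are illustrative):

```js
// Reduced sketch of the commonMax registry: one entry per group name,
// one reported value per chart uuid, the group-wide winner cached.
var commonMaxSketch = {
    keys: {},    // keys[name][uuid] = the max this chart reported
    latest: {},  // latest[name]     = the current group-wide max

    report: function(name, uuid, max) {
        if (typeof this.keys[name] === 'undefined') {
            this.keys[name] = {};
            this.latest[name] = 0;
        }
        var t = this.keys[name];
        t[uuid] = max;

        // recompute the winner; the patched code short-circuits this
        // scan when the reported value cannot change the result
        var m = max;
        for (var i in t)
            if (t.hasOwnProperty(i) && t[i] > m) m = t[i];

        this.latest[name] = m;
        return m;
    }
};

// two charts sharing the 'traffic' group get the same scale:
commonMaxSketch.report('traffic', 'uuid-1', 120);  // -> 120
commonMaxSketch.report('traffic', 'uuid-2', 80);   // -> 120
```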
@@ -1237,13 +1488,15 @@ var NETDATA = window.NETDATA || {};
// Our state object, where all per-chart values are stored
var chartState = function(element) {
- var self = $(element);
this.element = element;
// IMPORTANT:
// all private functions should use 'that' instead of 'this'
var that = this;
+ // ============================================================================================================
+ // ERROR HANDLING
+
/* error() - private
* show an error instead of the chart
*/
@@ -1261,18 +1514,34 @@ var NETDATA = window.NETDATA || {};
}
};
+ // console logging
+ this.log = function(msg) {
+ console.log(this.id + ' (' + this.library_name + ' ' + this.uuid + '): ' + msg);
+ };
+
+
+ // ============================================================================================================
+ // EARLY INITIALIZATION
+
+ // These are variables that should exist even if the chart is never to be rendered.
+ // Be careful what you add here - there may be thousands of charts on the page.
+
// GUID - a unique identifier for the chart
this.uuid = NETDATA.guid();
// string - the name of chart
- this.id = self.data('netdata');
+ this.id = NETDATA.dataAttribute(this.element, 'netdata', undefined);
+ if(typeof this.id === 'undefined') {
+ error("netdata elements need data-netdata");
+ return;
+ }
// string - the key for localStorage settings
- this.settings_id = self.data('id') || null;
+ this.settings_id = NETDATA.dataAttribute(this.element, 'id', null);
// the user given dimensions of the element
- this.width = self.data('width') || NETDATA.chartDefaults.width;
- this.height = self.data('height') || NETDATA.chartDefaults.height;
+ this.width = NETDATA.dataAttribute(this.element, 'width', NETDATA.chartDefaults.width);
+ this.height = NETDATA.dataAttribute(this.element, 'height', NETDATA.chartDefaults.height);
this.height_original = this.height;
if(this.settings_id !== null) {
@@ -1285,86 +1554,24 @@ var NETDATA = window.NETDATA || {};
});
}
- // string - the netdata server URL, without any path
- this.host = self.data('host') || NETDATA.chartDefaults.host;
-
- // make sure the host does not end with /
- // all netdata API requests use absolute paths
- while(this.host.slice(-1) === '/')
- this.host = this.host.substring(0, this.host.length - 1);
-
- // string - the grouping method requested by the user
- this.method = self.data('method') || NETDATA.chartDefaults.method;
-
- // the time-range requested by the user
- this.after = self.data('after') || NETDATA.chartDefaults.after;
- this.before = self.data('before') || NETDATA.chartDefaults.before;
-
- // the pixels per point requested by the user
- this.pixels_per_point = self.data('pixels-per-point') || 1;
- this.points = self.data('points') || null;
-
- // the dimensions requested by the user
- this.dimensions = self.data('dimensions') || null;
-
// the chart library requested by the user
- this.library_name = self.data('chart-library') || NETDATA.chartDefaults.library;
-
- // how many retries we have made to load chart data from the server
- this.retries_on_data_failures = 0;
-
- // object - the chart library used
- this.library = null;
-
- // color management
- this.colors = null;
- this.colors_assigned = {};
- this.colors_available = null;
-
- // the element already created by the user
- this.element_message = null;
-
- // the element with the chart
- this.element_chart = null;
-
- // the element with the legend of the chart (if created by us)
- this.element_legend = null;
- this.element_legend_childs = {
- hidden: null,
- title_date: null,
- title_time: null,
- title_units: null,
- perfect_scroller: null, // the container to apply perfect scroller to
- series: null
- };
-
- this.chart_url = null; // string - the url to download chart info
- this.chart = null; // object - the chart as downloaded from the server
-
- this.title = self.data('title') || null; // the title of the chart
- this.units = self.data('units') || null; // the units of the chart dimensions
- this.append_options = self.data('append-options') || null; // additional options to pass to netdata
- this.override_options = self.data('override-options') || null; // override options to pass to netdata
-
- this.running = false; // boolean - true when the chart is being refreshed now
- this.enabled = true; // boolean - is the chart enabled for refresh?
- this.paused = false; // boolean - is the chart paused for any reason?
- this.selected = false; // boolean - is the chart shown a selection?
- this.debug = false; // boolean - console.log() debug info about this chart
-
- this.netdata_first = 0; // milliseconds - the first timestamp in netdata
- this.netdata_last = 0; // milliseconds - the last timestamp in netdata
- this.requested_after = null; // milliseconds - the timestamp of the request after param
- this.requested_before = null; // milliseconds - the timestamp of the request before param
- this.requested_padding = null;
- this.view_after = 0;
- this.view_before = 0;
+ this.library_name = NETDATA.dataAttribute(this.element, 'chart-library', NETDATA.chartDefaults.library);
- this.value_decimal_detail = -1;
- var d = self.data('decimal-digits');
- if(typeof d === 'number') {
- this.value_decimal_detail = d;
+ // check the requested library is available
+ // we don't initialize it here - it will be initialized when
+ // this chart is first used
+ if(typeof NETDATA.chartLibraries[this.library_name] === 'undefined') {
+ NETDATA.error(402, this.library_name);
+ error('chart library "' + this.library_name + '" is not found');
+ this.enabled = false;
+ }
+ else if(NETDATA.chartLibraries[this.library_name].enabled === false) {
+ NETDATA.error(403, this.library_name);
+ error('chart library "' + this.library_name + '" is not enabled');
+ this.enabled = false;
}
+ else
+ this.library = NETDATA.chartLibraries[this.library_name];
this.auto = {
name: 'auto',
@@ -1392,79 +1599,192 @@ var NETDATA = window.NETDATA || {};
// auto, pan, zoom
this.current = this.auto;
- // check the requested library is available
- // we don't initialize it here - it will be initialized when
- // this chart will be first used
- if(typeof NETDATA.chartLibraries[that.library_name] === 'undefined') {
- NETDATA.error(402, that.library_name);
- error('chart library "' + that.library_name + '" is not found');
- return;
- }
- else if(NETDATA.chartLibraries[that.library_name].enabled === false) {
- NETDATA.error(403, that.library_name);
- error('chart library "' + that.library_name + '" is not enabled');
- return;
- }
- else
- that.library = NETDATA.chartLibraries[that.library_name];
+ this.running = false; // boolean - true when the chart is being refreshed now
+ this.enabled = true; // boolean - is the chart enabled for refresh?
- // milliseconds - the time the last refresh took
- this.refresh_dt_ms = 0;
+ that.tmp = {};
- // if we need to report the rendering speed
- // find the element that needs to be updated
- var refresh_dt_element_name = self.data('dt-element-name') || null; // string - the element to print refresh_dt_ms
+ // ============================================================================================================
+ // PRIVATE FUNCTIONS
- if(refresh_dt_element_name !== null) {
- this.refresh_dt_element = document.getElementById(refresh_dt_element_name) || null;
- }
- else
- this.refresh_dt_element = null;
+ // reset the runtime status variables to their defaults
+ var runtimeInit = function() {
+ that.paused = false; // boolean - is the chart paused for any reason?
+ that.selected = false; // boolean - is the chart showing a selection?
- this.dimensions_visibility = new dimensionsVisibility(this);
+ that.chart_created = false; // boolean - has library.create() been called?
+ that.dom_created = false; // boolean - has the chart DOM been created?
+ that.fetching_data = false; // boolean - true while we fetch data via ajax
- this._updating = false;
+ that.updates_counter = 0; // numeric - the number of refreshes made so far
+ that.updates_since_last_unhide = 0; // numeric - the number of refreshes made since the last time the chart was unhidden
+ that.updates_since_last_creation = 0; // numeric - the number of refreshes made since the last time the chart was created
- // ============================================================================================================
- // PRIVATE FUNCTIONS
+ that.tm = {
+ last_initialized: 0, // milliseconds - the timestamp it was last initialized
+ last_dom_created: 0, // milliseconds - the timestamp its DOM was last created
+ last_mode_switch: 0, // milliseconds - the timestamp it switched modes
+
+ last_info_downloaded: 0, // milliseconds - the timestamp we downloaded the chart
+ last_updated: 0, // the timestamp the chart last updated with data
+ pan_and_zoom_seq: 0, // the sequence number of the global synchronization
+ // between charts.
+ // Used with NETDATA.globalPanAndZoom.seq
+ last_visible_check: 0, // the time we last checked if it is visible
+ last_resized: 0, // the time the chart was resized
+ last_hidden: 0, // the time the chart was hidden
+ last_unhidden: 0, // the time the chart was unhidden
+ last_autorefreshed: 0 // the time the chart was last refreshed
+ };
- var createDOM = function() {
+ that.data = null; // the last data as downloaded from the netdata server
+ that.data_url = 'invalid://'; // string - the last url used to update the chart
+ that.data_points = 0; // number - the number of points returned from netdata
+ that.data_after = 0; // milliseconds - the first timestamp of the data
+ that.data_before = 0; // milliseconds - the last timestamp of the data
+ that.data_update_every = 0; // milliseconds - the frequency to update the data
+
+ that.tmp = {}; // members that can be destroyed to save memory
+ };
+
+ // initialize all the variables that are required for the chart to be rendered
+ var lateInitialization = function() {
+ if(typeof that.host !== 'undefined')
+ return;
+
+ // string - the netdata server URL, without any path
+ that.host = NETDATA.dataAttribute(that.element, 'host', NETDATA.chartDefaults.host);
+
+ // make sure the host does not end with /
+ // all netdata API requests use absolute paths
+ while(that.host.slice(-1) === '/')
+ that.host = that.host.substring(0, that.host.length - 1);
+
+ // string - the grouping method requested by the user
+ that.method = NETDATA.dataAttribute(that.element, 'method', NETDATA.chartDefaults.method);
+
+ // the time-range requested by the user
+ that.after = NETDATA.dataAttribute(that.element, 'after', NETDATA.chartDefaults.after);
+ that.before = NETDATA.dataAttribute(that.element, 'before', NETDATA.chartDefaults.before);
+
+ // the pixels per point requested by the user
+ that.pixels_per_point = NETDATA.dataAttribute(that.element, 'pixels-per-point', 1);
+ that.points = NETDATA.dataAttribute(that.element, 'points', null);
+
+ // the dimensions requested by the user
+ that.dimensions = NETDATA.dataAttribute(that.element, 'dimensions', null);
+
+ that.title = NETDATA.dataAttribute(that.element, 'title', null); // the title of the chart
+ that.units = NETDATA.dataAttribute(that.element, 'units', null); // the units of the chart dimensions
+ that.append_options = NETDATA.dataAttribute(that.element, 'append-options', null); // additional options to pass to netdata
+ that.override_options = NETDATA.dataAttribute(that.element, 'override-options', null); // override options to pass to netdata
+
+ that.debug = NETDATA.dataAttributeBoolean(that.element, 'debug', false);
+
+ that.value_decimal_detail = -1;
+ var d = NETDATA.dataAttribute(that.element, 'decimal-digits', -1);
+ if(typeof d === 'number')
+ that.value_decimal_detail = d;
+ else if(typeof d !== 'undefined')
+ that.log('ignoring decimal-digits value: ' + d.toString());
+
+ // if we need to report the rendering speed
+ // find the element that needs to be updated
+ var refresh_dt_element_name = NETDATA.dataAttribute(that.element, 'dt-element-name', null); // string - the element to print refresh_dt_ms
+
+ if(refresh_dt_element_name !== null) {
+ that.refresh_dt_element = document.getElementById(refresh_dt_element_name) || null;
+ }
+ else
+ that.refresh_dt_element = null;
+
+ that.dimensions_visibility = new dimensionsVisibility(that);
+
+ that.netdata_first = 0; // milliseconds - the first timestamp in netdata
+ that.netdata_last = 0; // milliseconds - the last timestamp in netdata
+ that.requested_after = null; // milliseconds - the timestamp of the request after param
+ that.requested_before = null; // milliseconds - the timestamp of the request before param
+ that.requested_padding = null;
+ that.view_after = 0;
+ that.view_before = 0;
+
+ that.refresh_dt_ms = 0; // milliseconds - the time the last refresh took
+
+ // how many retries we have made to load chart data from the server
+ that.retries_on_data_failures = 0;
+
+ // color management
+ that.colors = null;
+ that.colors_assigned = {};
+ that.colors_available = null;
+ that.colors_custom = null;
+
+ that.element_message = null; // the element already created by the user
+ that.element_chart = null; // the element with the chart
+ that.element_legend = null; // the element with the legend of the chart (if created by us)
+ that.element_legend_childs = {
+ hidden: null,
+ title_date: null,
+ title_time: null,
+ title_units: null,
+ perfect_scroller: null, // the container to apply perfect scroller to
+ series: null
+ };
+
+ that.chart_url = null; // string - the url to download chart info
+ that.chart = null; // object - the chart as downloaded from the server
+ };
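Splitting `runtimeInit()` from `lateInitialization()` is what keeps never-rendered charts cheap: the constructor touches only a handful of fields, and the expensive attribute parsing runs once, on first DOM creation. The guard is the usual lazy-initialization idiom, sketched here with a hypothetical class:

```js
// Hypothetical sketch of the guard used by lateInitialization():
// `host` doubles as the "already initialized" flag, so second and
// later calls return immediately.
function LazyChart(element) {
    this.element = element;   // early, always-present fields only
}

LazyChart.prototype.lateInit = function() {
    if (typeof this.host !== 'undefined') return;   // already done

    this.host = this.element.getAttribute('data-host') || '';
    while (this.host.slice(-1) === '/')             // strip trailing slashes
        this.host = this.host.substring(0, this.host.length - 1);
    // ... the rest of the attribute parsing would follow here ...
};
```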
+
+ var destroyDOM = function() {
if(that.enabled === false) return;
- if(that.element_message !== null) that.element_message.innerHTML = '';
- if(that.element_legend !== null) that.element_legend.innerHTML = '';
- if(that.element_chart !== null) that.element_chart.innerHTML = '';
+ if(that.debug === true)
+ that.log('destroyDOM()');
+ // that.element.className = 'netdata-message icon';
+ // that.element.innerHTML = '<i class="fa fa-refresh"></i> netdata';
that.element.innerHTML = '';
+ that.element_message = null;
+ that.element_legend = null;
+ that.element_chart = null;
+ that.element_legend_childs.series = null;
+
+ that.chart_created = false;
+ that.dom_created = false;
+
+ that.tm.last_resized = 0;
+ that.tm.last_dom_created = 0;
+ };
+
+ var createDOM = function() {
+ if(that.enabled === false) return;
+ lateInitialization();
+
+ destroyDOM();
+
+ if(that.debug === true)
+ that.log('createDOM()');
that.element_message = document.createElement('div');
that.element_message.className = 'netdata-message icon hidden';
that.element.appendChild(that.element_message);
- that.element_chart = document.createElement('div');
- that.element_chart.id = that.library_name + '-' + that.uuid + '-chart';
- that.element.appendChild(that.element_chart);
+ that.dom_created = true;
+ that.chart_created = false;
- if(that.hasLegend() === true) {
- that.element.className = "netdata-container-with-legend";
- that.element_chart.className = 'netdata-chart-with-legend-right netdata-' + that.library_name + '-chart-with-legend-right';
+ that.tm.last_dom_created =
+ that.tm.last_resized = Date.now();
- that.element_legend = document.createElement('div');
- that.element_legend.className = 'netdata-chart-legend netdata-' + that.library_name + '-legend';
- that.element.appendChild(that.element_legend);
- }
- else {
- that.element.className = "netdata-container";
- that.element_chart.className = ' netdata-chart netdata-' + that.library_name + '-chart';
+ showLoading();
+ };
- that.element_legend = null;
- }
- that.element_legend_childs.series = null;
+ var initDOM = function() {
+ that.element.className = that.library.container_class(that);
if(typeof(that.width) === 'string')
- $(that.element).css('width', that.width);
+ that.element.style.width = that.width;
else if(typeof(that.width) === 'number')
- $(that.element).css('width', that.width + 'px');
+ that.element.style.width = that.width.toString() + 'px';
if(typeof(that.library.aspect_ratio) === 'undefined') {
if(typeof(that.height) === 'string')
@@ -1472,23 +1792,9 @@ var NETDATA = window.NETDATA || {};
else if(typeof(that.height) === 'number')
that.element.style.height = that.height.toString() + 'px';
}
- else {
- var w = that.element.offsetWidth;
- if(w === null || w === 0) {
- // the div is hidden
- // this will resize the chart when next viewed
- that.tm.last_resized = 0;
- }
- else
- that.element.style.height = (w * that.library.aspect_ratio / 100).toString() + 'px';
- }
if(NETDATA.chartDefaults.min_width !== null)
- $(that.element).css('min-width', NETDATA.chartDefaults.min_width);
-
- that.tm.last_dom_created = Date.now();
-
- showLoading();
+ that.element.style.minWidth = NETDATA.chartDefaults.min_width;
};
/* init() private
@@ -1496,45 +1802,18 @@ var NETDATA = window.NETDATA || {};
* destroy all (possibly) created state elements
* create the basic DOM for a chart
*/
- var init = function() {
+ var init = function(opt) {
if(that.enabled === false) return;
- that.paused = false;
- that.selected = false;
-
- that.chart_created = false; // boolean - is the library.create() been called?
- that.updates_counter = 0; // numeric - the number of refreshes made so far
- that.updates_since_last_unhide = 0; // numeric - the number of refreshes made since the last time the chart was unhidden
- that.updates_since_last_creation = 0; // numeric - the number of refreshes made since the last time the chart was created
-
- that.tm = {
- last_initialized: 0, // milliseconds - the timestamp it was last initialized
- last_dom_created: 0, // milliseconds - the timestamp its DOM was last created
- last_mode_switch: 0, // milliseconds - the timestamp it switched modes
-
- last_info_downloaded: 0, // milliseconds - the timestamp we downloaded the chart
- last_updated: 0, // the timestamp the chart last updated with data
- pan_and_zoom_seq: 0, // the sequence number of the global synchronization
- // between chart.
- // Used with NETDATA.globalPanAndZoom.seq
- last_visible_check: 0, // the time we last checked if it is visible
- last_resized: 0, // the time the chart was resized
- last_hidden: 0, // the time the chart was hidden
- last_unhidden: 0, // the time the chart was unhidden
- last_autorefreshed: 0 // the time the chart was last refreshed
- };
-
- that.data = null; // the last data as downloaded from the netdata server
- that.data_url = 'invalid://'; // string - the last url used to update the chart
- that.data_points = 0; // number - the number of points returned from netdata
- that.data_after = 0; // milliseconds - the first timestamp of the data
- that.data_before = 0; // milliseconds - the last timestamp of the data
- that.data_update_every = 0; // milliseconds - the frequency to update the data
+ runtimeInit();
that.tm.last_initialized = Date.now();
- createDOM();
-
that.setMode('auto');
+
+ if(opt !== 'fast') {
+ if (that.isVisible(true) || opt === 'force')
+ createDOM();
+ }
};
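The patch's call sites use the new `opt` parameter in three ways, summarized here:

```js
// The three init() modes this patch introduces:
init('fast');   // constructor path: reset state only, defer all DOM work
init();         // default: create the DOM only if the chart is visible
init('force');  // hide/unhide and recreation paths: always rebuild the DOM
```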
var maxMessageFontSize = function() {
@@ -1575,12 +1854,12 @@ var NETDATA = window.NETDATA || {};
that.element_message.innerHTML = icon;
maxMessageFontSize();
$(that.element_message).removeClass('hidden');
- that.___messageHidden___ = undefined;
+ that.tmp.___messageHidden___ = undefined;
};
var hideMessage = function() {
- if(typeof that.___messageHidden___ === 'undefined') {
- that.___messageHidden___ = true;
+ if(typeof that.tmp.___messageHidden___ === 'undefined') {
+ that.tmp.___messageHidden___ = true;
$(that.element_message).addClass('hidden');
}
};
@@ -1608,7 +1887,7 @@ var NETDATA = window.NETDATA || {};
};
var isHidden = function() {
- return (typeof that.___chartIsHidden___ !== 'undefined');
+ return (typeof that.tmp.___chartIsHidden___ !== 'undefined');
};
// hide the chart, when it is not visible - called from isVisible()
@@ -1619,12 +1898,15 @@ var NETDATA = window.NETDATA || {};
if(that.chart_created === true) {
if(NETDATA.options.current.destroy_on_hide === true) {
// we should destroy it
- init();
+ init('force');
}
else {
showRendering();
that.element_chart.style.display = 'none';
if(that.element_legend !== null) that.element_legend.style.display = 'none';
+ if(that.element_legend_childs.toolbox !== null) that.element_legend_childs.toolbox.style.display = 'none';
+ if(that.element_legend_childs.resize_handler !== null) that.element_legend_childs.resize_handler.style.display = 'none';
+
that.tm.last_hidden = Date.now();
// de-allocate data
@@ -1635,25 +1917,27 @@ var NETDATA = window.NETDATA || {};
}
}
- that.___chartIsHidden___ = true;
+ that.tmp.___chartIsHidden___ = true;
};
// unhide the chart, when it is visible - called from isVisible()
var unhideChart = function() {
if(isHidden() === false) return;
- that.___chartIsHidden___ = undefined;
+ that.tmp.___chartIsHidden___ = undefined;
that.updates_since_last_unhide = 0;
if(that.chart_created === false) {
// we need to re-initialize it, to show our background
// logo in bootstrap tabs, until the chart loads
- init();
+ init('force');
}
else {
that.tm.last_unhidden = Date.now();
that.element_chart.style.display = '';
if(that.element_legend !== null) that.element_legend.style.display = '';
+ if(that.element_legend_childs.toolbox !== null) that.element_legend_childs.toolbox.style.display = '';
+ if(that.element_legend_childs.resize_handler !== null) that.element_legend_childs.resize_handler.style.display = '';
resizeChart();
hideMessage();
}
@@ -1724,11 +2008,11 @@ var NETDATA = window.NETDATA || {};
// to be called just before the chart library to make sure that
// a properly sized dom is available
var resizeChart = function() {
- if(that.isVisible() === true && that.tm.last_resized < NETDATA.options.last_resized) {
+ if(that.isVisible() === true && that.tm.last_resized < NETDATA.options.last_page_resize) {
if(that.chart_created === false) return;
if(that.needsRecreation()) {
- init();
+ init('force');
}
else if(typeof that.library.resize === 'function') {
that.library.resize(that);
@@ -1900,7 +2184,7 @@ var NETDATA = window.NETDATA || {};
// that.data_update_every = 30 * 1000;
//that.element_chart.style.display = 'none';
//if(that.element_legend !== null) that.element_legend.style.display = 'none';
- //that.___chartIsHidden___ = true;
+ //that.tmp.___chartIsHidden___ = true;
};
// ============================================================================================================
@@ -2104,11 +2388,6 @@ var NETDATA = window.NETDATA || {};
// ----------------------------------------------------------------------------------------------------------------
- // console logging
- this.log = function(msg) {
- console.log(this.id + ' (' + this.library_name + ' ' + this.uuid + '): ' + msg);
- };
-
this.pauseChart = function() {
if(this.paused === false) {
if(this.debug === true)
@@ -2248,6 +2527,7 @@ var NETDATA = window.NETDATA || {};
var __legendFormatValueChartDecimalsLastMin = undefined;
var __legendFormatValueChartDecimalsLastMax = undefined;
var __legendFormatValueChartDecimals = -1;
+ var __intlNumberFormat = null;
this.legendFormatValueDecimalsFromMinMax = function(min, max) {
if(min === __legendFormatValueChartDecimalsLastMin && max === __legendFormatValueChartDecimalsLastMax)
return;
@@ -2255,13 +2535,18 @@ var NETDATA = window.NETDATA || {};
__legendFormatValueChartDecimalsLastMin = min;
__legendFormatValueChartDecimalsLastMax = max;
+ var old = __legendFormatValueChartDecimals;
+
if(this.data !== null && this.data.min === this.data.max)
+ // it is a fixed number, let the visualizer decide based on the value
__legendFormatValueChartDecimals = -1;
else if(this.value_decimal_detail !== -1)
+ // there is an override
__legendFormatValueChartDecimals = this.value_decimal_detail;
else {
+ // ok, let's calculate the proper number of decimal digits
var delta;
if (min === max)
@@ -2275,39 +2560,39 @@ var NETDATA = window.NETDATA || {};
else if (delta > 0.1) __legendFormatValueChartDecimals = 2;
else __legendFormatValueChartDecimals = 4;
}
+
+ if(__legendFormatValueChartDecimals !== old) {
+ if(__legendFormatValueChartDecimals < 0)
+ __intlNumberFormat = null;
+ else
+ __intlNumberFormat = NETDATA.fastNumberFormat.get(
+ __legendFormatValueChartDecimals,
+ __legendFormatValueChartDecimals
+ );
+ }
};
this.legendFormatValue = function(value) {
if(typeof value !== 'number') return '-';
- var dmin, dmax;
-
- if(__legendFormatValueChartDecimals < 0) {
- dmin = 0;
- var abs = value;
- if(abs > 1000) dmax = 0;
- else if(abs > 10 ) dmax = 1;
- else if(abs > 1) dmax = 2;
- else if(abs > 0.1) dmax = 2;
- else dmax = 4;
- }
- else {
- dmin = dmax = __legendFormatValueChartDecimals;
- }
+ if(__intlNumberFormat !== null)
+ return __intlNumberFormat.format(value);
+ var dmin, dmax;
if(this.value_decimal_detail !== -1) {
dmin = dmax = this.value_decimal_detail;
}
+ else {
+ dmin = 0;
+ var abs = (value < 0) ? -value : value;
+ if (abs > 1000) dmax = 0;
+ else if (abs > 10) dmax = 1;
+ else if (abs > 1) dmax = 2;
+ else if (abs > 0.1) dmax = 2;
+ else dmax = 4;
+ }
- return value.toLocaleString(undefined, {
- // style: 'decimal',
- // minimumIntegerDigits: 1,
- // minimumSignificantDigits: 1,
- // maximumSignificantDigits: 1,
- useGrouping: true,
- minimumFractionDigits: dmin,
- maximumFractionDigits: dmax
- });
+ return NETDATA.fastNumberFormat.get(dmin, dmax).format(value);
};
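When no chart-wide precision has been locked in, the fallback above derives the decimals from the value's magnitude. A worked mirror of that path, assuming the `fastNumberFormat` hunk earlier in this patch and an en-US locale:

```js
// Mirror of the fallback branch of legendFormatValue():
// dmin is always 0, dmax shrinks as the magnitude grows.
function fallbackFormat(value) {
    var dmin = 0, dmax;
    var abs = (value < 0) ? -value : value;
    if (abs > 1000)     dmax = 0;
    else if (abs > 10)  dmax = 1;
    else if (abs > 1)   dmax = 2;
    else if (abs > 0.1) dmax = 2;
    else                dmax = 4;
    return NETDATA.fastNumberFormat.get(dmin, dmax).format(value);
}

fallbackFormat(1234.7);   // -> "1,235"  (no decimals above 1000)
fallbackFormat(42.34);    // -> "42.3"   (one decimal above 10)
fallbackFormat(0.01234);  // -> "0.0123" (four decimals for tiny values)
```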
this.legendSetLabelValue = function(label, value) {
@@ -2357,23 +2642,23 @@ var NETDATA = window.NETDATA || {};
};
this.__legendSetDateString = function(date) {
- if(date !== this.__last_shown_legend_date) {
+ if(date !== this.tmp.__last_shown_legend_date) {
this.element_legend_childs.title_date.innerText = date;
- this.__last_shown_legend_date = date;
+ this.tmp.__last_shown_legend_date = date;
}
};
this.__legendSetTimeString = function(time) {
- if(time !== this.__last_shown_legend_time) {
+ if(time !== this.tmp.__last_shown_legend_time) {
this.element_legend_childs.title_time.innerText = time;
- this.__last_shown_legend_time = time;
+ this.tmp.__last_shown_legend_time = time;
}
};
this.__legendSetUnitsString = function(units) {
- if(units !== this.__last_shown_legend_units) {
+ if(units !== this.tmp.__last_shown_legend_units) {
this.element_legend_childs.title_units.innerText = units;
- this.__last_shown_legend_units = units;
+ this.tmp.__last_shown_legend_units = units;
}
};
@@ -2468,16 +2753,28 @@ var NETDATA = window.NETDATA || {};
};
// this should be called just ONCE per dimension per chart
- this._chartDimensionColor = function(label) {
- if(this.colors === null) this.chartColors();
+ this.__chartDimensionColor = function(label) {
+ this.chartPrepareColorPalette();
if(typeof this.colors_assigned[label] === 'undefined') {
+
if(this.colors_available.length === 0) {
- var len = NETDATA.themes.current.colors.length;
+ var len;
+
+ // copy the custom colors
+ if(this.colors_custom !== null) {
+ len = this.colors_custom.length;
+ while (len--)
+ this.colors_available.unshift(this.colors_custom[len]);
+ }
+
+ // copy the standard palette colors
+ len = NETDATA.themes.current.colors.length;
while(len--)
this.colors_available.unshift(NETDATA.themes.current.colors[len]);
}
+ // assign a color to this dimension
this.colors_assigned[label] = this.colors_available.shift();
if(this.debug === true)
@@ -2492,39 +2789,77 @@ var NETDATA = window.NETDATA || {};
return this.colors_assigned[label];
};
- this.chartColors = function() {
- if(this.colors !== null) return this.colors;
+ this.chartPrepareColorPalette = function() {
+ var len;
+
+ if(this.colors_custom !== null) return;
+
+ if(this.debug === true)
+ this.log("Preparing chart color palette");
this.colors = [];
this.colors_available = [];
+ this.colors_custom = [];
// add the standard colors
- var len = NETDATA.themes.current.colors.length;
+ len = NETDATA.themes.current.colors.length;
while(len--)
this.colors_available.unshift(NETDATA.themes.current.colors[len]);
// add the user supplied colors
- var c = $(this.element).data('colors');
+ var c = NETDATA.dataAttribute(this.element, 'colors', undefined);
// this.log('read colors: ' + c);
- if(typeof c !== 'undefined' && c !== null && c.length > 0) {
- if(typeof c !== 'string') {
- this.log('invalid color given: ' + c + ' (give a space separated list of colors)');
- }
- else {
- c = c.split(' ');
- var added = 0;
-
- while(added < 20) {
- len = c.length;
- while(len--) {
- added++;
- this.colors_available.unshift(c[len]);
- // this.log('adding color: ' + c[len]);
- }
- }
+ if(typeof c === 'string' && c.length > 0) {
+ c = c.split(' ');
+ len = c.length;
+ while(len--) {
+ if(this.debug === true)
+ this.log("Adding custom color " + c[len].toString() + " to palette");
+
+ this.colors_custom.unshift(c[len]);
+ this.colors_available.unshift(c[len]);
}
}
+ if(this.debug === true) {
+ this.log("colors_custom:");
+ this.log(this.colors_custom);
+ this.log("colors_available:");
+ this.log(this.colors_available);
+ }
+ };
+
+ // get the ordered list of chart colors
+ // this includes user defined colors
+ this.chartCustomColors = function() {
+ this.chartPrepareColorPalette();
+
+ var colors;
+ if(this.colors_custom.length)
+ colors = this.colors_custom;
+ else
+ colors = this.colors;
+
+ if(this.debug === true) {
+ this.log("chartCustomColors() returns:");
+ this.log(colors);
+ }
+
+ return colors;
+ };
+
+ // get the ordered list of chart ASSIGNED colors
+ // (this returns only the colors that have been
+ // assigned to dimensions, prepended with any
+ // custom colors defined)
+ this.chartColors = function() {
+ this.chartPrepareColorPalette();
+
+ if(this.debug === true) {
+ this.log("chartColors() returns:");
+ this.log(this.colors);
+ }
+
return this.colors;
};
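The palette refactor keeps three lists: `colors_custom` (parsed once from `data-colors`), `colors_available` (the draw pile dimensions shift colors from), and `colors` (what the current result actually used). The reverse walk with `unshift()` preserves the user's order, which is easy to verify (element and colors below are hypothetical):

```js
var el = document.createElement('div');
el.setAttribute('data-colors', '#ff0000 #00ff00');

var c = NETDATA.dataAttribute(el, 'colors', undefined);  // "#ff0000 #00ff00"
var custom = [];
var parts = c.split(' ');
var len = parts.length;
while (len--)
    custom.unshift(parts[len]);   // walk backwards, prepend forwards

// custom == ['#ff0000', '#00ff00'] - the order the user wrote
```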
@@ -2555,7 +2890,7 @@ var NETDATA = window.NETDATA || {};
if(needed === false) {
// make sure colors available
- this.chartColors();
+ this.chartPrepareColorPalette();
// do we have to update the current values?
// we do this, only when the visible chart is current
@@ -2577,12 +2912,12 @@ var NETDATA = window.NETDATA || {};
keys = Object.keys(this.chart.dimensions);
len = keys.length;
for(i = 0; i < len ;i++)
- this._chartDimensionColor(this.chart.dimensions[keys[i]].name);
+ this.__chartDimensionColor(this.chart.dimensions[keys[i]].name);
}
}
// we will re-generate the colors for the chart
- // based on the selected dimensions
- this.colors = null;
+ // based on the dimensions this result has data for
+ this.colors = [];
if(this.debug === true)
this.log('updating Legend DOM');
@@ -2591,12 +2926,12 @@ var NETDATA = window.NETDATA || {};
this.dimensions_visibility.invalidateAll();
var genLabel = function(state, parent, dim, name, count) {
- var color = state._chartDimensionColor(name);
+ var color = state.__chartDimensionColor(name);
var user_element = null;
- var user_id = self.data('show-value-of-' + name.toLowerCase() + '-at') || null;
+ var user_id = NETDATA.dataAttribute(state.element, 'show-value-of-' + name.toLowerCase() + '-at', null);
if(user_id === null)
- user_id = self.data('show-value-of-' + dim.toLowerCase() + '-at') || null;
+ user_id = NETDATA.dataAttribute(state.element, 'show-value-of-' + dim.toLowerCase() + '-at', null);
if(user_id !== null) {
user_element = document.getElementById(user_id) || null;
if (user_element === null)
@@ -2635,7 +2970,26 @@ var NETDATA = window.NETDATA || {};
var content = document.createElement('div');
- if(this.hasLegend()) {
+ if(this.element_chart === null) {
+ this.element_chart = document.createElement('div');
+ this.element_chart.id = this.library_name + '-' + this.uuid + '-chart';
+ this.element.appendChild(this.element_chart);
+
+ if(this.hasLegend() === true)
+ this.element_chart.className = 'netdata-chart-with-legend-right netdata-' + this.library_name + '-chart-with-legend-right';
+ else
+ this.element_chart.className = ' netdata-chart netdata-' + this.library_name + '-chart';
+ }
+
+ if(this.hasLegend() === true) {
+ if(this.element_legend === null) {
+ this.element_legend = document.createElement('div');
+ this.element_legend.className = 'netdata-chart-legend netdata-' + this.library_name + '-legend';
+ this.element.appendChild(this.element_legend);
+ }
+ else
+ this.element_legend.innerHTML = '';
+
this.element_legend_childs = {
content: content,
resize_handler: document.createElement('div'),
@@ -2653,10 +3007,7 @@ var NETDATA = window.NETDATA || {};
series: {}
};
- this.element_legend.innerHTML = '';
-
if(this.library.toolboxPanAndZoom !== null) {
-
var get_pan_and_zoom_step = function(event) {
if (event.ctrlKey)
return NETDATA.options.current.pan_and_zoom_factor * NETDATA.options.current.pan_and_zoom_factor_multiplier_control;
@@ -2717,7 +3068,7 @@ var NETDATA = window.NETDATA || {};
title: 'Chart Reset',
content: 'Reset all the charts to their default auto-refreshing state. You can also <b>double click</b> the chart contents with your mouse or your finger (on touch devices).<br/><small>Help can be disabled from the settings.</small>'
});
-
+
this.element_legend_childs.toolbox_right.className += ' netdata-legend-toolbox-button';
this.element_legend_childs.toolbox_right.innerHTML = '<i class="fa fa-forward"></i>';
this.element_legend_childs.toolbox.appendChild(this.element_legend_childs.toolbox_right);
@@ -2741,7 +3092,7 @@ var NETDATA = window.NETDATA || {};
content: 'Pan the chart to the right. You can also <b>drag it</b> with your mouse or your finger (on touch devices).<br/><small>Help can be disabled from the settings.</small>'
});
-
+
this.element_legend_childs.toolbox_zoomin.className += ' netdata-legend-toolbox-button';
this.element_legend_childs.toolbox_zoomin.innerHTML = '<i class="fa fa-plus"></i>';
this.element_legend_childs.toolbox.appendChild(this.element_legend_childs.toolbox_zoomin);
@@ -2763,7 +3114,7 @@ var NETDATA = window.NETDATA || {};
title: 'Chart Zoom In',
content: 'Zoom in the chart. You can also press SHIFT and select an area of the chart to zoom in. On Chrome and Opera, you can press the SHIFT or the ALT keys and then use the mouse wheel to zoom in or out.<br/><small>Help can be disabled from the settings.</small>'
});
-
+
this.element_legend_childs.toolbox_zoomout.className += ' netdata-legend-toolbox-button';
this.element_legend_childs.toolbox_zoomout.innerHTML = '<i class="fa fa-minus"></i>';
this.element_legend_childs.toolbox.appendChild(this.element_legend_childs.toolbox_zoomout);
@@ -2786,7 +3137,7 @@ var NETDATA = window.NETDATA || {};
title: 'Chart Zoom Out',
content: 'Zoom out the chart. On Chrome and Opera, you can also press the SHIFT or the ALT keys and then use the mouse wheel to zoom in or out.<br/><small>Help can be disabled from the settings.</small>'
});
-
+
//this.element_legend_childs.toolbox_volume.className += ' netdata-legend-toolbox-button';
//this.element_legend_childs.toolbox_volume.innerHTML = '<i class="fa fa-sort-amount-desc"></i>';
//this.element_legend_childs.toolbox_volume.title = 'Visible Volume';
@@ -2805,7 +3156,7 @@ var NETDATA = window.NETDATA || {};
this.element_legend_childs.toolbox_zoomout = null;
this.element_legend_childs.toolbox_volume = null;
}
-
+
this.element_legend_childs.resize_handler.className += " netdata-legend-resize-handler";
this.element_legend_childs.resize_handler.innerHTML = '<i class="fa fa-chevron-up"></i><i class="fa fa-chevron-down"></i>';
this.element.appendChild(this.element_legend_childs.resize_handler);
@@ -2834,19 +3185,19 @@ var NETDATA = window.NETDATA || {};
this.element_legend_childs.title_date.className += " netdata-legend-title-date";
this.element_legend.appendChild(this.element_legend_childs.title_date);
- this.__last_shown_legend_date = undefined;
+ this.tmp.__last_shown_legend_date = undefined;
this.element_legend.appendChild(document.createElement('br'));
this.element_legend_childs.title_time.className += " netdata-legend-title-time";
this.element_legend.appendChild(this.element_legend_childs.title_time);
- this.__last_shown_legend_time = undefined;
+ this.tmp.__last_shown_legend_time = undefined;
this.element_legend.appendChild(document.createElement('br'));
this.element_legend_childs.title_units.className += " netdata-legend-title-units";
this.element_legend.appendChild(this.element_legend_childs.title_units);
- this.__last_shown_legend_units = undefined;
+ this.tmp.__last_shown_legend_units = undefined;
this.element_legend.appendChild(document.createElement('br'));
@@ -2940,16 +3291,14 @@ var NETDATA = window.NETDATA || {};
};
this.hasLegend = function() {
- if(typeof this.___hasLegendCache___ !== 'undefined')
- return this.___hasLegendCache___;
+ if(typeof this.tmp.___hasLegendCache___ !== 'undefined')
+ return this.tmp.___hasLegendCache___;
var leg = false;
- if(this.library && this.library.legend(this) === 'right-side') {
- var legend = $(this.element).data('legend') || 'yes';
- if(legend === 'yes') leg = true;
- }
+ if(this.library && this.library.legend(this) === 'right-side')
+ leg = NETDATA.dataAttributeBoolean(this.element, 'legend', true);
- this.___hasLegendCache___ = leg;
+ this.tmp.___hasLegendCache___ = leg;
return leg;
};
@@ -2983,12 +3332,17 @@ var NETDATA = window.NETDATA || {};
};
this.needsRecreation = function() {
- return (
+ var ret = (
this.chart_created === true
&& this.library
&& this.library.autoresize() === false
- && this.tm.last_resized < NETDATA.options.last_resized
+ && this.tm.last_resized < NETDATA.options.last_page_resize
);
+
+ if(this.debug === true)
+ this.log('needsRecreation(): ' + ret.toString() + ', chart_created = ' + this.chart_created.toString());
+
+ return ret;
};
this.chartURL = function() {
@@ -3139,7 +3493,7 @@ var NETDATA = window.NETDATA || {};
if(this.debug === true)
this.log('max updates of ' + this.updates_since_last_creation.toString() + ' reached. Forcing re-generation.');
- init();
+ init('force');
return;
}
@@ -3191,9 +3545,9 @@ var NETDATA = window.NETDATA || {};
this.updateChart = function(callback) {
if(this.debug === true)
- this.log('updateChart() called.');
+ this.log('updateChart()');
- if(this._updating === true) {
+ if(this.fetching_data === true) {
if(this.debug === true)
this.log('I am already updating...');
@@ -3222,6 +3576,9 @@ var NETDATA = window.NETDATA || {};
return;
}
+ if(that.dom_created !== true)
+ createDOM();
+
if(this.chart === null)
return this.getChart(function() {
return that.updateChart(callback);
@@ -3255,7 +3612,7 @@ var NETDATA = window.NETDATA || {};
if(NETDATA.statistics.refreshes_active > NETDATA.statistics.refreshes_active_max)
NETDATA.statistics.refreshes_active_max = NETDATA.statistics.refreshes_active;
- this._updating = true;
+ this.fetching_data = true;
this.xhr = $.ajax( {
url: this.data_url,
@@ -3296,60 +3653,45 @@ var NETDATA = window.NETDATA || {};
that.xhr = undefined;
NETDATA.statistics.refreshes_active--;
- that._updating = false;
+ that.fetching_data = false;
if(typeof callback === 'function')
return callback();
});
};
- this.isVisible = function(nocache) {
- if(typeof nocache === 'undefined')
- nocache = false;
-
- // this.log('last_visible_check: ' + this.tm.last_visible_check + ', last_page_scroll: ' + NETDATA.options.last_page_scroll);
-
- // caching - we do not evaluate the charts visibility
- // if the page has not been scrolled since the last check
- if(nocache === false && this.tm.last_visible_check > NETDATA.options.last_page_scroll)
- return this.___isVisible___;
+ var __isVisible = function() {
+ // tolerance is the number of pixels a chart can be off-screen
+ // to consider it as visible and refresh it as if it were visible
+ var tolerance = 0;
- this.tm.last_visible_check = Date.now();
+ that.tm.last_visible_check = Date.now();
- var wh = window.innerHeight;
- var x = this.element.getBoundingClientRect();
- var ret = 0;
- var tolerance = 0;
+ var rect = that.element.getBoundingClientRect();
- if(x.width === 0 || x.height === 0) {
- hideChart();
- this.___isVisible___ = false;
- return this.___isVisible___;
- }
+ var screenTop = window.scrollY;
+ var screenBottom = screenTop + window.innerHeight;
- if(x.top < 0 && -x.top > x.height) {
- // the chart is entirely above
- ret = -x.top - x.height;
- }
- else if(x.top > wh) {
- // the chart is entirely below
- ret = x.top - wh;
- }
+ var chartTop = rect.top + screenTop;
+ var chartBottom = chartTop + rect.height;
- if(ret > tolerance) {
- // the chart is too far
+ return !(rect.width === 0 || rect.height === 0 || chartBottom + tolerance < screenTop || chartTop - tolerance > screenBottom);
+ };
- hideChart();
- this.___isVisible___ = false;
- return this.___isVisible___;
- }
- else {
- // the chart is inside or very close
+ this.isVisible = function(nocache) {
+ // this.log('last_visible_check: ' + this.tm.last_visible_check + ', last_page_scroll: ' + NETDATA.options.last_page_scroll);
- unhideChart();
- this.___isVisible___ = true;
- return this.___isVisible___;
- }
+ // caching - we do not evaluate the charts visibility
+ // if the page has not been scrolled since the last check
+ if((typeof nocache === 'undefined' || nocache === false)
+ && typeof this.tmp.___isVisible___ !== 'undefined'
+ && this.tm.last_visible_check > NETDATA.options.last_page_scroll)
+ return this.tmp.___isVisible___;
+
+ this.tmp.___isVisible___ = __isVisible();
+ if(this.tmp.___isVisible___ === true) unhideChart();
+ else hideChart();
+ return this.tmp.___isVisible___;
};
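The rewritten check converts the viewport-relative `getBoundingClientRect()` box into document coordinates before comparing it against the scroll window, and the result is cached until the next scroll event. A standalone version of the same test (the patch keeps `tolerance` at 0):

```js
// Standalone sketch of __isVisible(): true when the element's
// document-space box overlaps [scrollY, scrollY + innerHeight].
function elementIsVisible(el, tolerance) {
    var rect = el.getBoundingClientRect();   // viewport-relative
    var screenTop = window.scrollY;
    var screenBottom = screenTop + window.innerHeight;

    var top = rect.top + screenTop;          // document-relative
    var bottom = top + rect.height;

    return !(rect.width === 0 || rect.height === 0
          || bottom + tolerance < screenTop
          || top - tolerance > screenBottom);
}
```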
this.isAutoRefreshable = function() {
@@ -3399,8 +3741,8 @@ var NETDATA = window.NETDATA || {};
if(this.isAutoRefreshable() === true) {
// allow the first update, even if the page is not visible
if(this.updates_counter && this.updates_since_last_unhide && NETDATA.options.page_is_visible === false) {
- if(NETDATA.options.debug.focus === true || this.debug === true)
- this.log('canBeAutoRefreshed(): page does not have focus');
+ // if(NETDATA.options.debug.focus === true || this.debug === true)
+ // this.log('canBeAutoRefreshed(): page does not have focus');
return false;
}
@@ -3473,7 +3815,7 @@ var NETDATA = window.NETDATA || {};
}
};
- this._defaultsFromDownloadedChart = function(chart) {
+ this.__defaultsFromDownloadedChart = function(chart) {
this.chart = chart;
this.chart_url = chart.url;
this.data_update_every = chart.update_every * 1000;
@@ -3491,7 +3833,7 @@ var NETDATA = window.NETDATA || {};
this.getChart = function(callback) {
this.chart = NETDATA.chartRegistry.get(this.host, this.id);
if(this.chart) {
- this._defaultsFromDownloadedChart(this.chart);
+ this.__defaultsFromDownloadedChart(this.chart);
if(typeof callback === 'function')
return callback();
@@ -3510,7 +3852,7 @@ var NETDATA = window.NETDATA || {};
})
.done(function(chart) {
chart.url = that.chart_url;
- that._defaultsFromDownloadedChart(chart);
+ that.__defaultsFromDownloadedChart(chart);
NETDATA.chartRegistry.add(that.host, that.id, chart);
})
.fail(function() {
@@ -3527,7 +3869,8 @@ var NETDATA = window.NETDATA || {};
// ============================================================================================================
// INITIALIZATION
- init();
+ initDOM();
+ init('fast');
};
NETDATA.resetAllCharts = function(state) {
@@ -3556,10 +3899,12 @@ var NETDATA = window.NETDATA || {};
// get or create a chart state, given a DOM element
NETDATA.chartState = function(element) {
- var state = $(element).data('netdata-state-object') || null;
+ var self = $(element);
+
+ var state = self.data('netdata-state-object') || null;
if(state === null) {
state = new chartState(element);
- $(element).data('netdata-state-object', state);
+ self.data('netdata-state-object', state);
}
return state;
};
@@ -3710,9 +4055,11 @@ var NETDATA = window.NETDATA || {};
if(NETDATA.options.debug.main_loop === true)
console.log('fast rendering...');
- state.autoRefresh(function() {
- NETDATA.chartRefresherNoParallel(++index);
- });
+ setTimeout(function() {
+ state.autoRefresh(function () {
+ NETDATA.chartRefresherNoParallel(++index);
+ });
+ }, 0);
}
else {
if(NETDATA.options.debug.main_loop === true) console.log('waiting for next refresh...');
@@ -3783,7 +4130,10 @@ var NETDATA = window.NETDATA || {};
}
}
- parallel.unshift(state);
+ if(NETDATA.scrollUp === true)
+ parallel.unshift(state);
+ else
+ parallel.push(state);
}
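`NETDATA.scrollUp`, recorded by the `onscroll` hunk earlier in this patch, decides which end of the refresh queue a visible chart joins, so refresh order follows the most recent scroll direction instead of always following DOM order. The mechanism in isolation:

```js
// Sketch: the refresher walks charts in DOM order, but the last
// scroll direction decides whether new entries lead or trail the queue.
var queue = [];
function enqueue(state) {
    if (NETDATA.scrollUp === true)
        queue.unshift(state);   // refresh ahead of DOM-earlier charts
    else
        queue.push(state);      // keep plain DOM order
}
```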
if(parallel.length > 0) {
@@ -3911,12 +4261,12 @@ var NETDATA = window.NETDATA || {};
NETDATA.peityChartUpdate = function(state, data) {
state.peity_instance.innerHTML = data.result;
- if(state.peity_options.stroke !== state.chartColors()[0]) {
- state.peity_options.stroke = state.chartColors()[0];
+ if(state.peity_options.stroke !== state.chartCustomColors()[0]) {
+ state.peity_options.stroke = state.chartCustomColors()[0];
if(state.chart.chart_type === 'line')
state.peity_options.fill = NETDATA.themes.current.background;
else
- state.peity_options.fill = NETDATA.colorLuminance(state.chartColors()[0], NETDATA.chartDefaults.fill_luminance);
+ state.peity_options.fill = NETDATA.colorLuminance(state.chartCustomColors()[0], NETDATA.chartDefaults.fill_luminance);
}
$(state.peity_instance).peity('line', state.peity_options);
@@ -3927,10 +4277,9 @@ var NETDATA = window.NETDATA || {};
state.peity_instance = document.createElement('div');
state.element_chart.appendChild(state.peity_instance);
- var self = $(state.element);
state.peity_options = {
stroke: NETDATA.themes.current.foreground,
- strokeWidth: self.data('peity-strokewidth') || 1,
+ strokeWidth: NETDATA.dataAttribute(state.element, 'peity-strokewidth', 1),
width: state.chartWidth(),
height: state.chartHeight(),
fill: NETDATA.themes.current.foreground
@@ -3979,52 +4328,51 @@ var NETDATA = window.NETDATA || {};
};
NETDATA.sparklineChartCreate = function(state, data) {
- var self = $(state.element);
- var type = self.data('sparkline-type') || 'line';
- var lineColor = self.data('sparkline-linecolor') || state.chartColors()[0];
- var fillColor = self.data('sparkline-fillcolor') || ((state.chart.chart_type === 'line')?NETDATA.themes.current.background:NETDATA.colorLuminance(lineColor, NETDATA.chartDefaults.fill_luminance));
- var chartRangeMin = self.data('sparkline-chartrangemin') || undefined;
- var chartRangeMax = self.data('sparkline-chartrangemax') || undefined;
- var composite = self.data('sparkline-composite') || undefined;
- var enableTagOptions = self.data('sparkline-enabletagoptions') || undefined;
- var tagOptionPrefix = self.data('sparkline-tagoptionprefix') || undefined;
- var tagValuesAttribute = self.data('sparkline-tagvaluesattribute') || undefined;
- var disableHiddenCheck = self.data('sparkline-disablehiddencheck') || undefined;
- var defaultPixelsPerValue = self.data('sparkline-defaultpixelspervalue') || undefined;
- var spotColor = self.data('sparkline-spotcolor') || undefined;
- var minSpotColor = self.data('sparkline-minspotcolor') || undefined;
- var maxSpotColor = self.data('sparkline-maxspotcolor') || undefined;
- var spotRadius = self.data('sparkline-spotradius') || undefined;
- var valueSpots = self.data('sparkline-valuespots') || undefined;
- var highlightSpotColor = self.data('sparkline-highlightspotcolor') || undefined;
- var highlightLineColor = self.data('sparkline-highlightlinecolor') || undefined;
- var lineWidth = self.data('sparkline-linewidth') || undefined;
- var normalRangeMin = self.data('sparkline-normalrangemin') || undefined;
- var normalRangeMax = self.data('sparkline-normalrangemax') || undefined;
- var drawNormalOnTop = self.data('sparkline-drawnormalontop') || undefined;
- var xvalues = self.data('sparkline-xvalues') || undefined;
- var chartRangeClip = self.data('sparkline-chartrangeclip') || undefined;
- var chartRangeMinX = self.data('sparkline-chartrangeminx') || undefined;
- var chartRangeMaxX = self.data('sparkline-chartrangemaxx') || undefined;
- var disableInteraction = self.data('sparkline-disableinteraction') || false;
- var disableTooltips = self.data('sparkline-disabletooltips') || false;
- var disableHighlight = self.data('sparkline-disablehighlight') || false;
- var highlightLighten = self.data('sparkline-highlightlighten') || 1.4;
- var highlightColor = self.data('sparkline-highlightcolor') || undefined;
- var tooltipContainer = self.data('sparkline-tooltipcontainer') || undefined;
- var tooltipClassname = self.data('sparkline-tooltipclassname') || undefined;
- var tooltipFormat = self.data('sparkline-tooltipformat') || undefined;
- var tooltipPrefix = self.data('sparkline-tooltipprefix') || undefined;
- var tooltipSuffix = self.data('sparkline-tooltipsuffix') || ' ' + state.units;
- var tooltipSkipNull = self.data('sparkline-tooltipskipnull') || true;
- var tooltipValueLookups = self.data('sparkline-tooltipvaluelookups') || undefined;
- var tooltipFormatFieldlist = self.data('sparkline-tooltipformatfieldlist') || undefined;
- var tooltipFormatFieldlistKey = self.data('sparkline-tooltipformatfieldlistkey') || undefined;
- var numberFormatter = self.data('sparkline-numberformatter') || function(n){ return n.toFixed(2); };
- var numberDigitGroupSep = self.data('sparkline-numberdigitgroupsep') || undefined;
- var numberDecimalMark = self.data('sparkline-numberdecimalmark') || undefined;
- var numberDigitGroupCount = self.data('sparkline-numberdigitgroupcount') || undefined;
- var animatedZooms = self.data('sparkline-animatedzooms') || false;
+ var type = NETDATA.dataAttribute(state.element, 'sparkline-type', 'line');
+ var lineColor = NETDATA.dataAttribute(state.element, 'sparkline-linecolor', state.chartCustomColors()[0]);
+ var fillColor = NETDATA.dataAttribute(state.element, 'sparkline-fillcolor', ((state.chart.chart_type === 'line')?NETDATA.themes.current.background:NETDATA.colorLuminance(lineColor, NETDATA.chartDefaults.fill_luminance)));
+ var chartRangeMin = NETDATA.dataAttribute(state.element, 'sparkline-chartrangemin', undefined);
+ var chartRangeMax = NETDATA.dataAttribute(state.element, 'sparkline-chartrangemax', undefined);
+ var composite = NETDATA.dataAttribute(state.element, 'sparkline-composite', undefined);
+ var enableTagOptions = NETDATA.dataAttribute(state.element, 'sparkline-enabletagoptions', undefined);
+ var tagOptionPrefix = NETDATA.dataAttribute(state.element, 'sparkline-tagoptionprefix', undefined);
+ var tagValuesAttribute = NETDATA.dataAttribute(state.element, 'sparkline-tagvaluesattribute', undefined);
+ var disableHiddenCheck = NETDATA.dataAttribute(state.element, 'sparkline-disablehiddencheck', undefined);
+ var defaultPixelsPerValue = NETDATA.dataAttribute(state.element, 'sparkline-defaultpixelspervalue', undefined);
+ var spotColor = NETDATA.dataAttribute(state.element, 'sparkline-spotcolor', undefined);
+ var minSpotColor = NETDATA.dataAttribute(state.element, 'sparkline-minspotcolor', undefined);
+ var maxSpotColor = NETDATA.dataAttribute(state.element, 'sparkline-maxspotcolor', undefined);
+ var spotRadius = NETDATA.dataAttribute(state.element, 'sparkline-spotradius', undefined);
+ var valueSpots = NETDATA.dataAttribute(state.element, 'sparkline-valuespots', undefined);
+ var highlightSpotColor = NETDATA.dataAttribute(state.element, 'sparkline-highlightspotcolor', undefined);
+ var highlightLineColor = NETDATA.dataAttribute(state.element, 'sparkline-highlightlinecolor', undefined);
+ var lineWidth = NETDATA.dataAttribute(state.element, 'sparkline-linewidth', undefined);
+ var normalRangeMin = NETDATA.dataAttribute(state.element, 'sparkline-normalrangemin', undefined);
+ var normalRangeMax = NETDATA.dataAttribute(state.element, 'sparkline-normalrangemax', undefined);
+ var drawNormalOnTop = NETDATA.dataAttribute(state.element, 'sparkline-drawnormalontop', undefined);
+ var xvalues = NETDATA.dataAttribute(state.element, 'sparkline-xvalues', undefined);
+ var chartRangeClip = NETDATA.dataAttribute(state.element, 'sparkline-chartrangeclip', undefined);
+ var chartRangeMinX = NETDATA.dataAttribute(state.element, 'sparkline-chartrangeminx', undefined);
+ var chartRangeMaxX = NETDATA.dataAttribute(state.element, 'sparkline-chartrangemaxx', undefined);
+ var disableInteraction = NETDATA.dataAttributeBoolean(state.element, 'sparkline-disableinteraction', false);
+ var disableTooltips = NETDATA.dataAttributeBoolean(state.element, 'sparkline-disabletooltips', false);
+ var disableHighlight = NETDATA.dataAttributeBoolean(state.element, 'sparkline-disablehighlight', false);
+ var highlightLighten = NETDATA.dataAttribute(state.element, 'sparkline-highlightlighten', 1.4);
+ var highlightColor = NETDATA.dataAttribute(state.element, 'sparkline-highlightcolor', undefined);
+ var tooltipContainer = NETDATA.dataAttribute(state.element, 'sparkline-tooltipcontainer', undefined);
+ var tooltipClassname = NETDATA.dataAttribute(state.element, 'sparkline-tooltipclassname', undefined);
+ var tooltipFormat = NETDATA.dataAttribute(state.element, 'sparkline-tooltipformat', undefined);
+ var tooltipPrefix = NETDATA.dataAttribute(state.element, 'sparkline-tooltipprefix', undefined);
+ var tooltipSuffix = NETDATA.dataAttribute(state.element, 'sparkline-tooltipsuffix', ' ' + state.units);
+ var tooltipSkipNull = NETDATA.dataAttributeBoolean(state.element, 'sparkline-tooltipskipnull', true);
+ var tooltipValueLookups = NETDATA.dataAttribute(state.element, 'sparkline-tooltipvaluelookups', undefined);
+ var tooltipFormatFieldlist = NETDATA.dataAttribute(state.element, 'sparkline-tooltipformatfieldlist', undefined);
+ var tooltipFormatFieldlistKey = NETDATA.dataAttribute(state.element, 'sparkline-tooltipformatfieldlistkey', undefined);
+ var numberFormatter = NETDATA.dataAttribute(state.element, 'sparkline-numberformatter', function(n){ return n.toFixed(2); });
+ var numberDigitGroupSep = NETDATA.dataAttribute(state.element, 'sparkline-numberdigitgroupsep', undefined);
+ var numberDecimalMark = NETDATA.dataAttribute(state.element, 'sparkline-numberdecimalmark', undefined);
+ var numberDigitGroupCount = NETDATA.dataAttribute(state.element, 'sparkline-numberdigitgroupcount', undefined);
+ var animatedZooms = NETDATA.dataAttributeBoolean(state.element, 'sparkline-animatedzooms', false);
if(spotColor === 'disable') spotColor='';
if(minSpotColor === 'disable') minSpotColor='';
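// NOTE: the NETDATA.dataAttribute()/dataAttributeBoolean() helpers used above
// are defined elsewhere in dashboard.js. A minimal sketch of their assumed
// behavior, to show why the refactor matters: the old jQuery pattern
// `self.data('x') || def` returned the default whenever the attribute parsed
// to a falsy value (false, 0, ''), so booleans and zeros could never be set
// explicitly. Reading the raw attribute and falling back only when it is
// absent fixes that. The function names below are hypothetical stand-ins.
function dataAttributeSketch(element, attribute, def) {
    var key = 'data-' + attribute.toString();
    if(element.hasAttribute(key) === false) return def;
    return element.getAttribute(key); // the real helper also parses numbers/JSON
}
function dataAttributeBooleanSketch(element, attribute, def) {
    var value = dataAttributeSketch(element, attribute, def);
    if(value === true || value === 'true') return true;
    if(value === false || value === 'false') return false;
    return def; // anything unrecognized falls back to the default
}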
@@ -4104,19 +4452,19 @@ var NETDATA = window.NETDATA || {};
state.setMode('zoom');
state.globalSelectionSyncStop();
state.globalSelectionSyncDelay();
- state.dygraph_user_action = true;
- state.dygraph_force_zoom = true;
+ state.tmp.dygraph_user_action = true;
+ state.tmp.dygraph_force_zoom = true;
state.updateChartPanOrZoom(after, before);
NETDATA.globalPanAndZoom.setMaster(state, after, before);
};
NETDATA.dygraphSetSelection = function(state, t) {
- if(typeof state.dygraph_instance !== 'undefined') {
+ if(typeof state.tmp.dygraph_instance !== 'undefined') {
var r = state.calculateRowForTime(t);
if(r !== -1)
- state.dygraph_instance.setSelection(r);
+ state.tmp.dygraph_instance.setSelection(r);
else {
- state.dygraph_instance.clearSelection();
+ state.tmp.dygraph_instance.clearSelection();
state.legendShowUndefined();
}
}
@@ -4125,8 +4473,8 @@ var NETDATA = window.NETDATA || {};
};
NETDATA.dygraphClearSelection = function(state) {
- if(typeof state.dygraph_instance !== 'undefined') {
- state.dygraph_instance.clearSelection();
+ if(typeof state.tmp.dygraph_instance !== 'undefined') {
+ state.tmp.dygraph_instance.clearSelection();
}
return true;
};
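// NOTE on the pervasive rename in this diff: library-private fields
// (dygraph_instance, easyPieChart_*, gauge_*, __commonMin, ...) move from the
// chart state object into a state.tmp sub-object. A plausible motivation,
// hedged: everything a renderer attaches can then be discarded with a single
// `state.tmp = {};` when the chart is destroyed or re-created with another
// library, instead of deleting many ad-hoc properties one by one.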
@@ -4181,7 +4529,7 @@ var NETDATA = window.NETDATA || {};
};
NETDATA.dygraphChartUpdate = function(state, data) {
- var dygraph = state.dygraph_instance;
+ var dygraph = state.tmp.dygraph_instance;
if(typeof dygraph === 'undefined')
return NETDATA.dygraphChartCreate(state, data);
@@ -4191,7 +4539,7 @@ var NETDATA = window.NETDATA || {};
// its element size as 0x0.
// this will make it re-appear properly
- if(state.tm.last_unhidden > state.dygraph_last_rendered)
+ if(state.tm.last_unhidden > state.tmp.dygraph_last_rendered)
dygraph.resize();
var options = {
@@ -4202,13 +4550,13 @@ var NETDATA = window.NETDATA || {};
visibility: state.dimensions_visibility.selected2BooleanArray(state.data.dimension_names)
};
- if(state.dygraph_force_zoom === true) {
+ if(state.tmp.dygraph_force_zoom === true) {
if(NETDATA.options.debug.dygraph === true || state.debug === true)
state.log('dygraphChartUpdate() forced zoom update');
options.dateWindow = (state.requested_padding !== null)?[ state.view_after, state.view_before ]:null;
options.isZoomedIgnoreProgrammaticZoom = true;
- state.dygraph_force_zoom = false;
+ state.tmp.dygraph_force_zoom = false;
}
else if(state.current.name !== 'auto') {
if(NETDATA.options.debug.dygraph === true || state.debug === true)
@@ -4222,21 +4570,21 @@ var NETDATA = window.NETDATA || {};
options.isZoomedIgnoreProgrammaticZoom = true;
}
- options.valueRange = state.dygraph_options.valueRange;
+ options.valueRange = state.tmp.dygraph_options.valueRange;
var oldMax = null, oldMin = null;
- if(state.__commonMin !== null) {
- state.data.min = state.dygraph_instance.axes_[0].extremeRange[0];
+ if (state.tmp.__commonMin !== null) {
+ state.data.min = state.tmp.dygraph_instance.axes_[0].extremeRange[0];
oldMin = options.valueRange[0] = NETDATA.commonMin.get(state);
}
- if(state.__commonMax !== null) {
- state.data.max = state.dygraph_instance.axes_[0].extremeRange[1];
+ if (state.tmp.__commonMax !== null) {
+ state.data.max = state.tmp.dygraph_instance.axes_[0].extremeRange[1];
oldMax = options.valueRange[1] = NETDATA.commonMax.get(state);
}
- if(state.dygraph_smooth_eligible === true) {
- if((NETDATA.options.current.smooth_plot === true && state.dygraph_options.plotter !== smoothPlotter)
- || (NETDATA.options.current.smooth_plot === false && state.dygraph_options.plotter === smoothPlotter)) {
+ if(state.tmp.dygraph_smooth_eligible === true) {
+ if((NETDATA.options.current.smooth_plot === true && state.tmp.dygraph_options.plotter !== smoothPlotter)
+ || (NETDATA.options.current.smooth_plot === false && state.tmp.dygraph_options.plotter === smoothPlotter)) {
NETDATA.dygraphChartCreate(state, data);
return;
}
@@ -4245,13 +4593,13 @@ var NETDATA = window.NETDATA || {};
dygraph.updateOptions(options);
var redraw = false;
- if(oldMin !== null && oldMin > state.dygraph_instance.axes_[0].extremeRange[0]) {
- state.data.min = state.dygraph_instance.axes_[0].extremeRange[0];
+ if(oldMin !== null && oldMin > state.tmp.dygraph_instance.axes_[0].extremeRange[0]) {
+ state.data.min = state.tmp.dygraph_instance.axes_[0].extremeRange[0];
options.valueRange[0] = NETDATA.commonMin.get(state);
redraw = true;
}
- if(oldMax !== null && oldMax < state.dygraph_instance.axes_[0].extremeRange[1]) {
- state.data.max = state.dygraph_instance.axes_[0].extremeRange[1];
+ if(oldMax !== null && oldMax < state.tmp.dygraph_instance.axes_[0].extremeRange[1]) {
+ state.data.max = state.tmp.dygraph_instance.axes_[0].extremeRange[1];
options.valueRange[1] = NETDATA.commonMax.get(state);
redraw = true;
}
@@ -4261,7 +4609,7 @@ var NETDATA = window.NETDATA || {};
dygraph.updateOptions(options);
}
- state.dygraph_last_rendered = Date.now();
+ state.tmp.dygraph_last_rendered = Date.now();
return true;
};
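// NOTE: NETDATA.commonMin/commonMax, used above to align axes, are outside
// this hunk. A hedged sketch of the idea: charts that opt into a shared scale
// report their local extremes into a per-group registry, and get() returns
// the group-wide extreme so all members render with the same range.
// Illustrative only; the real registry is keyed by chart uuid and its get()
// takes the chart state:
var commonMinSketch = {
    latest: {},                         // group name -> { chart uuid -> value }
    set: function(name, uuid, value) {
        if(typeof this.latest[name] === 'undefined') this.latest[name] = {};
        this.latest[name][uuid] = value;
    },
    get: function(name) {
        var group = this.latest[name] || {}, min = null;
        for(var uuid in group)
            if(min === null || group[uuid] < min) min = group[uuid];
        return min;
    }
};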
@@ -4269,185 +4617,93 @@ var NETDATA = window.NETDATA || {};
if(NETDATA.options.debug.dygraph === true || state.debug === true)
state.log('dygraphChartCreate()');
- var self = $(state.element);
-
- var chart_type = self.data('dygraph-type') || state.chart.chart_type;
+ var chart_type = NETDATA.dataAttribute(state.element, 'dygraph-type', state.chart.chart_type);
if(chart_type === 'stacked' && data.dimensions === 1) chart_type = 'area';
var highlightCircleSize = (NETDATA.chartLibraries.dygraph.isSparkline(state) === true)?3:4;
var smooth = (NETDATA.dygraph.smooth === true)
- ?(self.data('dygraph-smooth') || (chart_type === 'line' && NETDATA.chartLibraries.dygraph.isSparkline(state) === false))
+ ?(NETDATA.dataAttributeBoolean(state.element, 'dygraph-smooth', (chart_type === 'line' && NETDATA.chartLibraries.dygraph.isSparkline(state) === false)))
:false;
- state.dygraph_options = {
- colors: self.data('dygraph-colors') || state.chartColors(),
+ state.tmp.dygraph_options = {
+ colors: NETDATA.dataAttribute(state.element, 'dygraph-colors', state.chartColors()),
// leave a few pixels empty on the right of the chart
- rightGap: self.data('dygraph-rightgap')
- || 5,
-
- showRangeSelector: self.data('dygraph-showrangeselector')
- || false,
-
- showRoller: self.data('dygraph-showroller')
- || false,
-
- title: self.data('dygraph-title')
- || state.title,
-
- titleHeight: self.data('dygraph-titleheight')
- || 19,
-
- legend: self.data('dygraph-legend')
- || 'always', // we need this to get selection events
-
+ rightGap: NETDATA.dataAttribute(state.element, 'dygraph-rightgap', 5),
+ showRangeSelector: NETDATA.dataAttributeBoolean(state.element, 'dygraph-showrangeselector', false),
+ showRoller: NETDATA.dataAttributeBoolean(state.element, 'dygraph-showroller', false),
+ title: NETDATA.dataAttribute(state.element, 'dygraph-title', state.title),
+ titleHeight: NETDATA.dataAttribute(state.element, 'dygraph-titleheight', 19),
+ legend: NETDATA.dataAttribute(state.element, 'dygraph-legend', 'always'), // we need this to get selection events
labels: data.result.labels,
-
- labelsDiv: self.data('dygraph-labelsdiv')
- || state.element_legend_childs.hidden,
-
- labelsDivStyles: self.data('dygraph-labelsdivstyles')
- || { 'fontSize':'1px' },
-
- labelsDivWidth: self.data('dygraph-labelsdivwidth')
- || state.chartWidth() - 70,
-
- labelsSeparateLines: self.data('dygraph-labelsseparatelines')
- || true,
-
- labelsShowZeroValues: self.data('dygraph-labelsshowzerovalues')
- || true,
-
+ labelsDiv: NETDATA.dataAttribute(state.element, 'dygraph-labelsdiv', state.element_legend_childs.hidden),
+ labelsDivStyles: NETDATA.dataAttribute(state.element, 'dygraph-labelsdivstyles', { 'fontSize':'1px' }),
+ labelsDivWidth: NETDATA.dataAttribute(state.element, 'dygraph-labelsdivwidth', state.chartWidth() - 70),
+ labelsSeparateLines: NETDATA.dataAttributeBoolean(state.element, 'dygraph-labelsseparatelines', true),
+ labelsShowZeroValues: NETDATA.dataAttributeBoolean(state.element, 'dygraph-labelsshowzerovalues', true),
labelsKMB: false,
labelsKMG2: false,
-
- showLabelsOnHighlight: self.data('dygraph-showlabelsonhighlight')
- || true,
-
- hideOverlayOnMouseOut: self.data('dygraph-hideoverlayonmouseout')
- || true,
-
- includeZero: self.data('dygraph-includezero')
- || (chart_type === 'stacked'),
-
- xRangePad: self.data('dygraph-xrangepad')
- || 0,
-
- yRangePad: self.data('dygraph-yrangepad')
- || 1,
-
- valueRange: self.data('dygraph-valuerange')
- || [ null, null ],
-
+ showLabelsOnHighlight: NETDATA.dataAttributeBoolean(state.element, 'dygraph-showlabelsonhighlight', true),
+ hideOverlayOnMouseOut: NETDATA.dataAttributeBoolean(state.element, 'dygraph-hideoverlayonmouseout', true),
+ includeZero: NETDATA.dataAttribute(state.element, 'dygraph-includezero', (chart_type === 'stacked')),
+ xRangePad: NETDATA.dataAttribute(state.element, 'dygraph-xrangepad', 0),
+ yRangePad: NETDATA.dataAttribute(state.element, 'dygraph-yrangepad', 1),
+ valueRange: NETDATA.dataAttribute(state.element, 'dygraph-valuerange', [ null, null ]),
ylabel: state.units,
-
- yLabelWidth: self.data('dygraph-ylabelwidth')
- || 12,
+ yLabelWidth: NETDATA.dataAttribute(state.element, 'dygraph-ylabelwidth', 12),
// the function to plot the chart
plotter: null,
// The width of the lines connecting data points.
// This can be used to increase the contrast of some graphs.
- strokeWidth: self.data('dygraph-strokewidth')
- || ((chart_type === 'stacked')?0.1:((smooth === true)?1.5:0.7)),
-
- strokePattern: self.data('dygraph-strokepattern')
- || undefined,
+ strokeWidth: NETDATA.dataAttribute(state.element, 'dygraph-strokewidth', ((chart_type === 'stacked')?0.1:((smooth === true)?1.5:0.7))),
+ strokePattern: NETDATA.dataAttribute(state.element, 'dygraph-strokepattern', undefined),
// The size of the dot to draw on each point in pixels (see drawPoints).
// A dot is always drawn when a point is "isolated",
// i.e. there is a missing point on either side of it.
// This also controls the size of those dots.
- drawPoints: self.data('dygraph-drawpoints')
- || false,
+ drawPoints: NETDATA.dataAttributeBoolean(state.element, 'dygraph-drawpoints', false),
// Draw points at the edges of gaps in the data.
// This improves visibility of small data segments or other data irregularities.
- drawGapEdgePoints: self.data('dygraph-drawgapedgepoints')
- || true,
-
- connectSeparatedPoints: self.data('dygraph-connectseparatedpoints')
- || false,
-
- pointSize: self.data('dygraph-pointsize')
- || 1,
+ drawGapEdgePoints: NETDATA.dataAttributeBoolean(state.element, 'dygraph-drawgapedgepoints', true),
+ connectSeparatedPoints: NETDATA.dataAttributeBoolean(state.element, 'dygraph-connectseparatedpoints', false),
+ pointSize: NETDATA.dataAttribute(state.element, 'dygraph-pointsize', 1),
// enabling this renders the chart with stepped, square-cornered lines
- stepPlot: self.data('dygraph-stepplot')
- || false,
+ stepPlot: NETDATA.dataAttributeBoolean(state.element, 'dygraph-stepplot', false),
// Draw a border around graph lines to make crossing lines more easily
// distinguishable. Useful for graphs with many lines.
- strokeBorderColor: self.data('dygraph-strokebordercolor')
- || NETDATA.themes.current.background,
-
- strokeBorderWidth: self.data('dygraph-strokeborderwidth')
- || (chart_type === 'stacked')?0.0:0.0,
-
- fillGraph: self.data('dygraph-fillgraph')
- || (chart_type === 'area' || chart_type === 'stacked'),
-
- fillAlpha: self.data('dygraph-fillalpha')
- || ((chart_type === 'stacked')
+ strokeBorderColor: NETDATA.dataAttribute(state.element, 'dygraph-strokebordercolor', NETDATA.themes.current.background),
+ strokeBorderWidth: NETDATA.dataAttribute(state.element, 'dygraph-strokeborderwidth', 0.0),
+ fillGraph: NETDATA.dataAttribute(state.element, 'dygraph-fillgraph', (chart_type === 'area' || chart_type === 'stacked')),
+ fillAlpha: NETDATA.dataAttribute(state.element, 'dygraph-fillalpha',
+ ((chart_type === 'stacked')
?NETDATA.options.current.color_fill_opacity_stacked
- :NETDATA.options.current.color_fill_opacity_area),
-
- stackedGraph: self.data('dygraph-stackedgraph')
- || (chart_type === 'stacked'),
-
- stackedGraphNaNFill: self.data('dygraph-stackedgraphnanfill')
- || 'none',
-
- drawAxis: self.data('dygraph-drawaxis')
- || true,
-
- axisLabelFontSize: self.data('dygraph-axislabelfontsize')
- || 10,
-
- axisLineColor: self.data('dygraph-axislinecolor')
- || NETDATA.themes.current.axis,
-
- axisLineWidth: self.data('dygraph-axislinewidth')
- || 1.0,
-
- drawGrid: self.data('dygraph-drawgrid')
- || true,
-
- gridLinePattern: self.data('dygraph-gridlinepattern')
- || null,
-
- gridLineWidth: self.data('dygraph-gridlinewidth')
- || 1.0,
-
- gridLineColor: self.data('dygraph-gridlinecolor')
- || NETDATA.themes.current.grid,
-
- maxNumberWidth: self.data('dygraph-maxnumberwidth')
- || 8,
-
- sigFigs: self.data('dygraph-sigfigs')
- || null,
-
- digitsAfterDecimal: self.data('dygraph-digitsafterdecimal')
- || 2,
-
- valueFormatter: self.data('dygraph-valueformatter')
- || undefined,
-
- highlightCircleSize: self.data('dygraph-highlightcirclesize')
- || highlightCircleSize,
-
- highlightSeriesOpts: self.data('dygraph-highlightseriesopts')
- || null, // TOO SLOW: { strokeWidth: 1.5 },
-
- highlightSeriesBackgroundAlpha: self.data('dygraph-highlightseriesbackgroundalpha')
- || null, // TOO SLOW: (chart_type === 'stacked')?0.7:0.5,
-
- pointClickCallback: self.data('dygraph-pointclickcallback')
- || undefined,
-
+ :NETDATA.options.current.color_fill_opacity_area)
+ ),
+ stackedGraph: NETDATA.dataAttribute(state.element, 'dygraph-stackedgraph', (chart_type === 'stacked')),
+ stackedGraphNaNFill: NETDATA.dataAttribute(state.element, 'dygraph-stackedgraphnanfill', 'none'),
+ drawAxis: NETDATA.dataAttributeBoolean(state.element, 'dygraph-drawaxis', true),
+ axisLabelFontSize: NETDATA.dataAttribute(state.element, 'dygraph-axislabelfontsize', 10),
+ axisLineColor: NETDATA.dataAttribute(state.element, 'dygraph-axislinecolor', NETDATA.themes.current.axis),
+ axisLineWidth: NETDATA.dataAttribute(state.element, 'dygraph-axislinewidth', 1.0),
+ drawGrid: NETDATA.dataAttributeBoolean(state.element, 'dygraph-drawgrid', true),
+ gridLinePattern: NETDATA.dataAttribute(state.element, 'dygraph-gridlinepattern', null),
+ gridLineWidth: NETDATA.dataAttribute(state.element, 'dygraph-gridlinewidth', 1.0),
+ gridLineColor: NETDATA.dataAttribute(state.element, 'dygraph-gridlinecolor', NETDATA.themes.current.grid),
+ maxNumberWidth: NETDATA.dataAttribute(state.element, 'dygraph-maxnumberwidth', 8),
+ sigFigs: NETDATA.dataAttribute(state.element, 'dygraph-sigfigs', null),
+ digitsAfterDecimal: NETDATA.dataAttribute(state.element, 'dygraph-digitsafterdecimal', 2),
+ valueFormatter: NETDATA.dataAttribute(state.element, 'dygraph-valueformatter', undefined),
+ highlightCircleSize: NETDATA.dataAttribute(state.element, 'dygraph-highlightcirclesize', highlightCircleSize),
+ highlightSeriesOpts: NETDATA.dataAttribute(state.element, 'dygraph-highlightseriesopts', null), // TOO SLOW: { strokeWidth: 1.5 },
+ highlightSeriesBackgroundAlpha: NETDATA.dataAttribute(state.element, 'dygraph-highlightseriesbackgroundalpha', null), // TOO SLOW: (chart_type === 'stacked')?0.7:0.5,
+ pointClickCallback: NETDATA.dataAttribute(state.element, 'dygraph-pointclickcallback', undefined),
visibility: state.dimensions_visibility.selected2BooleanArray(state.data.dimension_names),
axes: {
@@ -4495,8 +4751,8 @@ var NETDATA = window.NETDATA || {};
return '';
},
drawCallback: function(dygraph, is_initial) {
- if(state.current.name !== 'auto' && state.dygraph_user_action === true) {
- state.dygraph_user_action = false;
+ if(state.current.name !== 'auto' && state.tmp.dygraph_user_action === true) {
+ state.tmp.dygraph_user_action = false;
var x_range = dygraph.xAxisRange();
var after = Math.round(x_range[0]);
@@ -4520,8 +4776,8 @@ var NETDATA = window.NETDATA || {};
state.setMode('zoom');
// refresh it to the greatest possible zoom level
- state.dygraph_user_action = true;
- state.dygraph_force_zoom = true;
+ state.tmp.dygraph_user_action = true;
+ state.tmp.dygraph_force_zoom = true;
state.updateChartPanOrZoom(minDate, maxDate);
},
highlightCallback: function(event, x, points, row, seriesName) {
@@ -4543,7 +4799,7 @@ var NETDATA = window.NETDATA || {};
// fix legend zIndex using the internal structures of dygraph legend module
// this works, but it is a hack!
- // state.dygraph_instance.plugins_[0].plugin.legend_div_.style.zIndex = 10000;
+ // state.tmp.dygraph_instance.plugins_[0].plugin.legend_div_.style.zIndex = 10000;
},
unhighlightCallback: function(event) {
void(event);
@@ -4559,7 +4815,7 @@ var NETDATA = window.NETDATA || {};
if(NETDATA.options.debug.dygraph === true || state.debug === true)
state.log('interactionModel.mousedown()');
- state.dygraph_user_action = true;
+ state.tmp.dygraph_user_action = true;
state.globalSelectionSyncStop();
if(NETDATA.options.debug.dygraph === true)
@@ -4600,7 +4856,7 @@ var NETDATA = window.NETDATA || {};
state.log('interactionModel.mousemove()');
if(context.isPanning) {
- state.dygraph_user_action = true;
+ state.tmp.dygraph_user_action = true;
state.globalSelectionSyncStop();
state.globalSelectionSyncDelay();
state.setMode('pan');
@@ -4608,7 +4864,7 @@ var NETDATA = window.NETDATA || {};
Dygraph.movePan(event, dygraph, context);
}
else if(context.isZooming) {
- state.dygraph_user_action = true;
+ state.tmp.dygraph_user_action = true;
state.globalSelectionSyncStop();
state.globalSelectionSyncDelay();
state.setMode('zoom');
@@ -4620,12 +4876,12 @@ var NETDATA = window.NETDATA || {};
state.log('interactionModel.mouseup()');
if (context.isPanning) {
- state.dygraph_user_action = true;
+ state.tmp.dygraph_user_action = true;
state.globalSelectionSyncDelay();
Dygraph.endPan(event, dygraph, context);
}
else if (context.isZooming) {
- state.dygraph_user_action = true;
+ state.tmp.dygraph_user_action = true;
state.globalSelectionSyncDelay();
Dygraph.endZoom(event, dygraph, context);
}
@@ -4714,7 +4970,7 @@ var NETDATA = window.NETDATA || {};
}
if(event.altKey || event.shiftKey) {
- state.dygraph_user_action = true;
+ state.tmp.dygraph_user_action = true;
state.globalSelectionSyncStop();
state.globalSelectionSyncDelay();
@@ -4766,7 +5022,7 @@ var NETDATA = window.NETDATA || {};
if(NETDATA.options.debug.dygraph === true || state.debug === true)
state.log('interactionModel.touchstart()');
- state.dygraph_user_action = true;
+ state.tmp.dygraph_user_action = true;
state.setMode('zoom');
state.pauseChart();
@@ -4788,7 +5044,7 @@ var NETDATA = window.NETDATA || {};
if(NETDATA.options.debug.dygraph === true || state.debug === true)
state.log('interactionModel.touchmove()');
- state.dygraph_user_action = true;
+ state.tmp.dygraph_user_action = true;
Dygraph.defaultInteractionModel.touchmove(event, dygraph, context);
state.dygraph_last_touch_move = Date.now();
@@ -4797,7 +5053,7 @@ var NETDATA = window.NETDATA || {};
if(NETDATA.options.debug.dygraph === true || state.debug === true)
state.log('interactionModel.touchend()');
- state.dygraph_user_action = true;
+ state.tmp.dygraph_user_action = true;
Dygraph.defaultInteractionModel.touchend(event, dygraph, context);
// if it didn't move, it is a selection
@@ -4826,41 +5082,48 @@ var NETDATA = window.NETDATA || {};
};
if(NETDATA.chartLibraries.dygraph.isSparkline(state)) {
- state.dygraph_options.drawGrid = false;
- state.dygraph_options.drawAxis = false;
- state.dygraph_options.title = undefined;
- state.dygraph_options.ylabel = undefined;
- state.dygraph_options.yLabelWidth = 0;
- state.dygraph_options.labelsDivWidth = 120;
- state.dygraph_options.labelsDivStyles.width = '120px';
- state.dygraph_options.labelsSeparateLines = true;
- state.dygraph_options.rightGap = 0;
- state.dygraph_options.yRangePad = 1;
+ state.tmp.dygraph_options.drawGrid = false;
+ state.tmp.dygraph_options.drawAxis = false;
+ state.tmp.dygraph_options.title = undefined;
+ state.tmp.dygraph_options.ylabel = undefined;
+ state.tmp.dygraph_options.yLabelWidth = 0;
+ state.tmp.dygraph_options.labelsDivWidth = 120;
+ state.tmp.dygraph_options.labelsDivStyles.width = '120px';
+ state.tmp.dygraph_options.labelsSeparateLines = true;
+ state.tmp.dygraph_options.rightGap = 0;
+ state.tmp.dygraph_options.yRangePad = 1;
}
if(smooth === true) {
- state.dygraph_smooth_eligible = true;
+ state.tmp.dygraph_smooth_eligible = true;
if(NETDATA.options.current.smooth_plot === true)
- state.dygraph_options.plotter = smoothPlotter;
+ state.tmp.dygraph_options.plotter = smoothPlotter;
}
- else state.dygraph_smooth_eligible = false;
+ else state.tmp.dygraph_smooth_eligible = false;
- state.dygraph_instance = new Dygraph(state.element_chart,
- data.result.data, state.dygraph_options);
+ state.tmp.dygraph_instance = new Dygraph(state.element_chart,
+ data.result.data, state.tmp.dygraph_options);
- state.dygraph_force_zoom = false;
- state.dygraph_user_action = false;
- state.dygraph_last_rendered = Date.now();
+ state.tmp.dygraph_force_zoom = false;
+ state.tmp.dygraph_user_action = false;
+ state.tmp.dygraph_last_rendered = Date.now();
- if(typeof state.dygraph_instance.axes_[0].extremeRange !== 'undefined') {
- state.__commonMin = self.data('common-min') || null;
- state.__commonMax = self.data('common-max') || null;
+ if(state.tmp.dygraph_options.valueRange[0] === null && state.tmp.dygraph_options.valueRange[1] === null) {
+ if (typeof state.tmp.dygraph_instance.axes_[0].extremeRange !== 'undefined') {
+ state.tmp.__commonMin = NETDATA.dataAttribute(state.element, 'common-min', null);
+ state.tmp.__commonMax = NETDATA.dataAttribute(state.element, 'common-max', null);
+ }
+ else {
+ state.log('incompatible version of Dygraph detected');
+ state.tmp.__commonMin = null;
+ state.tmp.__commonMax = null;
+ }
}
else {
- state.log('incompatible version of Dygraph detected');
- state.__commonMin = null;
- state.__commonMax = null;
+ // if the user gave a valueRange, respect it
+ state.tmp.__commonMin = null;
+ state.tmp.__commonMax = null;
}
return true;
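// NOTE: a hypothetical chart definition exercising a few of the attributes
// read in dygraphChartCreate() above (attribute names appear in this
// function; the chart id and values are made up):
// <div data-netdata="system.cpu"
//      data-dygraph-type="area"
//      data-dygraph-smooth="true"
//      data-dygraph-valuerange="[0, 100]"
//      ></div>
// with a user-supplied valuerange like this, the new branch above skips the
// common-min/common-max registration and respects the given range.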
@@ -5289,7 +5552,7 @@ var NETDATA = window.NETDATA || {};
// ----------------------------------------------------------------------------------------------------------------
- NETDATA.easypiechartPercentFromValueMinMax = function(value, min, max) {
+ NETDATA.easypiechartPercentFromValueMinMax = function(state, value, min, max) {
if(typeof value !== 'number') value = 0;
if(typeof min !== 'number') min = 0;
if(typeof max !== 'number') max = 0;
@@ -5298,8 +5561,9 @@ var NETDATA = window.NETDATA || {};
if(max < value) max = value;
// make sure it is zero based
- if(min > 0) min = 0;
- if(max < 0) max = 0;
+ // but only if they have not been set by the user
+ if(state.tmp.easyPieChartMin === null && min > 0) min = 0;
+ if(state.tmp.easyPieChartMax === null && max < 0) max = 0;
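+ // e.g. a chart declaring data-easypiechart-min-value="50" keeps
+ // min=50 here (state.tmp.easyPieChartMin is non-null); previously a
+ // positive min was unconditionally forced to 0, so user-set minimums
+ // above zero were ignored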
var pcent = 0;
if(value >= 0) {
@@ -5347,22 +5611,22 @@ var NETDATA = window.NETDATA || {};
};
NETDATA.easypiechartClearSelection = function(state) {
- if(typeof state.easyPieChartEvent !== 'undefined') {
- if(state.easyPieChartEvent.timer !== undefined) {
- clearTimeout(state.easyPieChartEvent.timer);
+ if(typeof state.tmp.easyPieChartEvent !== 'undefined') {
+ if(state.tmp.easyPieChartEvent.timer !== undefined) {
+ clearTimeout(state.tmp.easyPieChartEvent.timer);
}
- state.easyPieChartEvent.timer = undefined;
+ state.tmp.easyPieChartEvent.timer = undefined;
}
if(state.isAutoRefreshable() === true && state.data !== null) {
NETDATA.easypiechartChartUpdate(state, state.data);
}
else {
- state.easyPieChartLabel.innerText = state.legendFormatValue(null);
- state.easyPieChart_instance.update(0);
+ state.tmp.easyPieChartLabel.innerText = state.legendFormatValue(null);
+ state.tmp.easyPieChart_instance.update(0);
}
- state.easyPieChart_instance.enableAnimation();
+ state.tmp.easyPieChart_instance.enableAnimation();
return true;
};
@@ -5375,8 +5639,8 @@ var NETDATA = window.NETDATA || {};
if(slot < 0 || slot >= state.data.result.length)
return NETDATA.easypiechartClearSelection(state);
- if(typeof state.easyPieChartEvent === 'undefined') {
- state.easyPieChartEvent = {
+ if(typeof state.tmp.easyPieChartEvent === 'undefined') {
+ state.tmp.easyPieChartEvent = {
timer: undefined,
value: 0,
pcent: 0
@@ -5384,20 +5648,20 @@ var NETDATA = window.NETDATA || {};
}
var value = state.data.result[state.data.result.length - 1 - slot];
- var min = (state.easyPieChartMin === null)?NETDATA.commonMin.get(state):state.easyPieChartMin;
- var max = (state.easyPieChartMax === null)?NETDATA.commonMax.get(state):state.easyPieChartMax;
- var pcent = NETDATA.easypiechartPercentFromValueMinMax(value, min, max);
+ var min = (state.tmp.easyPieChartMin === null)?NETDATA.commonMin.get(state):state.tmp.easyPieChartMin;
+ var max = (state.tmp.easyPieChartMax === null)?NETDATA.commonMax.get(state):state.tmp.easyPieChartMax;
+ var pcent = NETDATA.easypiechartPercentFromValueMinMax(state, value, min, max);
- state.easyPieChartEvent.value = value;
- state.easyPieChartEvent.pcent = pcent;
- state.easyPieChartLabel.innerText = state.legendFormatValue(value);
+ state.tmp.easyPieChartEvent.value = value;
+ state.tmp.easyPieChartEvent.pcent = pcent;
+ state.tmp.easyPieChartLabel.innerText = state.legendFormatValue(value);
- if(state.easyPieChartEvent.timer === undefined) {
- state.easyPieChart_instance.disableAnimation();
+ if(state.tmp.easyPieChartEvent.timer === undefined) {
+ state.tmp.easyPieChart_instance.disableAnimation();
- state.easyPieChartEvent.timer = setTimeout(function() {
- state.easyPieChartEvent.timer = undefined;
- state.easyPieChart_instance.update(state.easyPieChartEvent.pcent);
+ state.tmp.easyPieChartEvent.timer = setTimeout(function() {
+ state.tmp.easyPieChartEvent.timer = undefined;
+ state.tmp.easyPieChart_instance.update(state.tmp.easyPieChartEvent.pcent);
}, NETDATA.options.current.charts_selection_animation_delay);
}
@@ -5413,88 +5677,76 @@ var NETDATA = window.NETDATA || {};
}
else {
value = data.result[0];
- min = (state.easyPieChartMin === null)?NETDATA.commonMin.get(state):state.easyPieChartMin;
- max = (state.easyPieChartMax === null)?NETDATA.commonMax.get(state):state.easyPieChartMax;
- pcent = NETDATA.easypiechartPercentFromValueMinMax(value, min, max);
+ min = (state.tmp.easyPieChartMin === null)?NETDATA.commonMin.get(state):state.tmp.easyPieChartMin;
+ max = (state.tmp.easyPieChartMax === null)?NETDATA.commonMax.get(state):state.tmp.easyPieChartMax;
+ pcent = NETDATA.easypiechartPercentFromValueMinMax(state, value, min, max);
}
- state.easyPieChartLabel.innerText = state.legendFormatValue(value);
- state.easyPieChart_instance.update(pcent);
+ state.tmp.easyPieChartLabel.innerText = state.legendFormatValue(value);
+ state.tmp.easyPieChart_instance.update(pcent);
return true;
};
NETDATA.easypiechartChartCreate = function(state, data) {
- var self = $(state.element);
var chart = $(state.element_chart);
var value = data.result[0];
- var min = self.data('easypiechart-min-value') || null;
- var max = self.data('easypiechart-max-value') || null;
- var adjust = self.data('easypiechart-adjust') || null;
+ var min = NETDATA.dataAttribute(state.element, 'easypiechart-min-value', null);
+ var max = NETDATA.dataAttribute(state.element, 'easypiechart-max-value', null);
if(min === null) {
min = NETDATA.commonMin.get(state);
- state.easyPieChartMin = null;
+ state.tmp.easyPieChartMin = null;
}
else
- state.easyPieChartMin = min;
+ state.tmp.easyPieChartMin = min;
if(max === null) {
max = NETDATA.commonMax.get(state);
- state.easyPieChartMax = null;
+ state.tmp.easyPieChartMax = null;
}
else
- state.easyPieChartMax = max;
+ state.tmp.easyPieChartMax = max;
- var pcent = NETDATA.easypiechartPercentFromValueMinMax(value, min, max);
+ var pcent = NETDATA.easypiechartPercentFromValueMinMax(state, value, min, max);
chart.data('data-percent', pcent);
- var size;
- switch(adjust) {
- case 'width': size = state.chartHeight(); break;
- case 'min': size = Math.min(state.chartWidth(), state.chartHeight()); break;
- case 'max': size = Math.max(state.chartWidth(), state.chartHeight()); break;
- case 'height':
- default: size = state.chartWidth(); break;
- }
- state.element.style.width = size + 'px';
- state.element.style.height = size + 'px';
-
+ var size = state.chartWidth();
var stroke = Math.floor(size / 22);
if(stroke < 3) stroke = 2;
var valuefontsize = Math.floor((size * 2 / 3) / 5);
var valuetop = Math.round((size - valuefontsize - (size / 40)) / 2);
- state.easyPieChartLabel = document.createElement('span');
- state.easyPieChartLabel.className = 'easyPieChartLabel';
- state.easyPieChartLabel.innerText = state.legendFormatValue(value);
- state.easyPieChartLabel.style.fontSize = valuefontsize + 'px';
- state.easyPieChartLabel.style.top = valuetop.toString() + 'px';
- state.element_chart.appendChild(state.easyPieChartLabel);
+ state.tmp.easyPieChartLabel = document.createElement('span');
+ state.tmp.easyPieChartLabel.className = 'easyPieChartLabel';
+ state.tmp.easyPieChartLabel.innerText = state.legendFormatValue(value);
+ state.tmp.easyPieChartLabel.style.fontSize = valuefontsize + 'px';
+ state.tmp.easyPieChartLabel.style.top = valuetop.toString() + 'px';
+ state.element_chart.appendChild(state.tmp.easyPieChartLabel);
var titlefontsize = Math.round(valuefontsize * 1.6 / 3);
var titletop = Math.round(valuetop - (titlefontsize * 2) - (size / 40));
- state.easyPieChartTitle = document.createElement('span');
- state.easyPieChartTitle.className = 'easyPieChartTitle';
- state.easyPieChartTitle.innerText = state.title;
- state.easyPieChartTitle.style.fontSize = titlefontsize + 'px';
- state.easyPieChartTitle.style.lineHeight = titlefontsize + 'px';
- state.easyPieChartTitle.style.top = titletop.toString() + 'px';
- state.element_chart.appendChild(state.easyPieChartTitle);
+ state.tmp.easyPieChartTitle = document.createElement('span');
+ state.tmp.easyPieChartTitle.className = 'easyPieChartTitle';
+ state.tmp.easyPieChartTitle.innerText = state.title;
+ state.tmp.easyPieChartTitle.style.fontSize = titlefontsize + 'px';
+ state.tmp.easyPieChartTitle.style.lineHeight = titlefontsize + 'px';
+ state.tmp.easyPieChartTitle.style.top = titletop.toString() + 'px';
+ state.element_chart.appendChild(state.tmp.easyPieChartTitle);
var unitfontsize = Math.round(titlefontsize * 0.9);
var unittop = Math.round(valuetop + (valuefontsize + unitfontsize) + (size / 40));
- state.easyPieChartUnits = document.createElement('span');
- state.easyPieChartUnits.className = 'easyPieChartUnits';
- state.easyPieChartUnits.innerText = state.units;
- state.easyPieChartUnits.style.fontSize = unitfontsize + 'px';
- state.easyPieChartUnits.style.top = unittop.toString() + 'px';
- state.element_chart.appendChild(state.easyPieChartUnits);
-
- var barColor = self.data('easypiechart-barcolor');
+ state.tmp.easyPieChartUnits = document.createElement('span');
+ state.tmp.easyPieChartUnits.className = 'easyPieChartUnits';
+ state.tmp.easyPieChartUnits.innerText = state.units;
+ state.tmp.easyPieChartUnits.style.fontSize = unitfontsize + 'px';
+ state.tmp.easyPieChartUnits.style.top = unittop.toString() + 'px';
+ state.element_chart.appendChild(state.tmp.easyPieChartUnits);
+
+ var barColor = NETDATA.dataAttribute(state.element, 'easypiechart-barcolor', undefined);
if(typeof barColor === 'undefined' || barColor === null)
- barColor = state.chartColors()[0];
+ barColor = state.chartCustomColors()[0];
else {
// <div ... data-easypiechart-barcolor="(function(percent){return(percent < 50 ? '#5cb85c' : percent < 85 ? '#f0ad4e' : '#cb3935');})" ...></div>
var tmp = eval(barColor);
@@ -5504,28 +5756,28 @@ var NETDATA = window.NETDATA || {};
chart.easyPieChart({
barColor: barColor,
- trackColor: self.data('easypiechart-trackcolor') || NETDATA.themes.current.easypiechart_track,
- scaleColor: self.data('easypiechart-scalecolor') || NETDATA.themes.current.easypiechart_scale,
- scaleLength: self.data('easypiechart-scalelength') || 5,
- lineCap: self.data('easypiechart-linecap') || 'round',
- lineWidth: self.data('easypiechart-linewidth') || stroke,
- trackWidth: self.data('easypiechart-trackwidth') || undefined,
- size: self.data('easypiechart-size') || size,
- rotate: self.data('easypiechart-rotate') || 0,
- animate: self.data('easypiechart-animate') || {duration: 500, enabled: true},
- easing: self.data('easypiechart-easing') || undefined
+ trackColor: NETDATA.dataAttribute(state.element, 'easypiechart-trackcolor', NETDATA.themes.current.easypiechart_track),
+ scaleColor: NETDATA.dataAttribute(state.element, 'easypiechart-scalecolor', NETDATA.themes.current.easypiechart_scale),
+ scaleLength: NETDATA.dataAttribute(state.element, 'easypiechart-scalelength', 5),
+ lineCap: NETDATA.dataAttribute(state.element, 'easypiechart-linecap', 'round'),
+ lineWidth: NETDATA.dataAttribute(state.element, 'easypiechart-linewidth', stroke),
+ trackWidth: NETDATA.dataAttribute(state.element, 'easypiechart-trackwidth', undefined),
+ size: NETDATA.dataAttribute(state.element, 'easypiechart-size', size),
+ rotate: NETDATA.dataAttribute(state.element, 'easypiechart-rotate', 0),
+ animate: NETDATA.dataAttribute(state.element, 'easypiechart-animate', {duration: 500, enabled: true}),
+ easing: NETDATA.dataAttribute(state.element, 'easypiechart-easing', undefined)
});
// when we just re-create the chart
// do not animate the first update
var animate = true;
- if(typeof state.easyPieChart_instance !== 'undefined')
+ if(typeof state.tmp.easyPieChart_instance !== 'undefined')
animate = false;
- state.easyPieChart_instance = chart.data('easyPieChart');
- if(animate === false) state.easyPieChart_instance.disableAnimation();
- state.easyPieChart_instance.update(pcent);
- if(animate === false) state.easyPieChart_instance.enableAnimation();
+ state.tmp.easyPieChart_instance = chart.data('easyPieChart');
+ if(animate === false) state.tmp.easyPieChart_instance.disableAnimation();
+ state.tmp.easyPieChart_instance.update(pcent);
+ if(animate === false) state.tmp.easyPieChart_instance.enableAnimation();
return true;
};
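// NOTE: hypothetical markup for the attributes consumed above (attribute
// names are taken from this function; the chart id and values are made up):
// <div data-netdata="system.ram"
//      data-chart-library="easypiechart"
//      data-easypiechart-min-value="0"
//      data-easypiechart-max-value="100"
//      data-easypiechart-linewidth="4"
//      ></div>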
@@ -5568,8 +5820,8 @@ var NETDATA = window.NETDATA || {};
speed = status;
// console.log('gauge speed ' + speed);
- state.gauge_instance.animationSpeed = speed;
- state.___gaugeOld__.speed = speed;
+ state.tmp.gauge_instance.animationSpeed = speed;
+ state.tmp.___gaugeOld__.speed = speed;
};
NETDATA.gaugeSet = function(state, value, min, max) {
@@ -5601,36 +5853,36 @@ var NETDATA = window.NETDATA || {};
if(pcent < 0.001) pcent = 0.001;
if(pcent > 99.999) pcent = 99.999;
- state.gauge_instance.set(pcent);
+ state.tmp.gauge_instance.set(pcent);
// console.log('gauge set ' + pcent + ', value ' + value + ', min ' + min + ', max ' + max);
- state.___gaugeOld__.value = value;
- state.___gaugeOld__.min = min;
- state.___gaugeOld__.max = max;
+ state.tmp.___gaugeOld__.value = value;
+ state.tmp.___gaugeOld__.min = min;
+ state.tmp.___gaugeOld__.max = max;
};
NETDATA.gaugeSetLabels = function(state, value, min, max) {
- if(state.___gaugeOld__.valueLabel !== value) {
- state.___gaugeOld__.valueLabel = value;
- state.gaugeChartLabel.innerText = state.legendFormatValue(value);
+ if(state.tmp.___gaugeOld__.valueLabel !== value) {
+ state.tmp.___gaugeOld__.valueLabel = value;
+ state.tmp.gaugeChartLabel.innerText = state.legendFormatValue(value);
}
- if(state.___gaugeOld__.minLabel !== min) {
- state.___gaugeOld__.minLabel = min;
- state.gaugeChartMin.innerText = state.legendFormatValue(min);
+ if(state.tmp.___gaugeOld__.minLabel !== min) {
+ state.tmp.___gaugeOld__.minLabel = min;
+ state.tmp.gaugeChartMin.innerText = state.legendFormatValue(min);
}
- if(state.___gaugeOld__.maxLabel !== max) {
- state.___gaugeOld__.maxLabel = max;
- state.gaugeChartMax.innerText = state.legendFormatValue(max);
+ if(state.tmp.___gaugeOld__.maxLabel !== max) {
+ state.tmp.___gaugeOld__.maxLabel = max;
+ state.tmp.gaugeChartMax.innerText = state.legendFormatValue(max);
}
};
NETDATA.gaugeClearSelection = function(state) {
- if(typeof state.gaugeEvent !== 'undefined') {
- if(state.gaugeEvent.timer !== undefined) {
- clearTimeout(state.gaugeEvent.timer);
+ if(typeof state.tmp.gaugeEvent !== 'undefined') {
+ if(state.tmp.gaugeEvent.timer !== undefined) {
+ clearTimeout(state.tmp.gaugeEvent.timer);
}
- state.gaugeEvent.timer = undefined;
+ state.tmp.gaugeEvent.timer = undefined;
}
if(state.isAutoRefreshable() === true && state.data !== null) {
@@ -5654,8 +5906,8 @@ var NETDATA = window.NETDATA || {};
if(slot < 0 || slot >= state.data.result.length)
return NETDATA.gaugeClearSelection(state);
- if(typeof state.gaugeEvent === 'undefined') {
- state.gaugeEvent = {
+ if(typeof state.tmp.gaugeEvent === 'undefined') {
+ state.tmp.gaugeEvent = {
timer: undefined,
value: 0,
min: 0,
@@ -5664,24 +5916,25 @@ var NETDATA = window.NETDATA || {};
}
var value = state.data.result[state.data.result.length - 1 - slot];
- var min = (state.gaugeMin === null)?NETDATA.commonMin.get(state):state.gaugeMin;
- var max = (state.gaugeMax === null)?NETDATA.commonMax.get(state):state.gaugeMax;
+ var min = (state.tmp.gaugeMin === null)?NETDATA.commonMin.get(state):state.tmp.gaugeMin;
+ var max = (state.tmp.gaugeMax === null)?NETDATA.commonMax.get(state):state.tmp.gaugeMax;
// make sure it is zero based
- if(min > 0) min = 0;
- if(max < 0) max = 0;
+ // but only if it has not been set by the user
+ if(state.tmp.gaugeMin === null && min > 0) min = 0;
+ if(state.tmp.gaugeMax === null && max < 0) max = 0;
- state.gaugeEvent.value = value;
- state.gaugeEvent.min = min;
- state.gaugeEvent.max = max;
+ state.tmp.gaugeEvent.value = value;
+ state.tmp.gaugeEvent.min = min;
+ state.tmp.gaugeEvent.max = max;
NETDATA.gaugeSetLabels(state, value, min, max);
- if(state.gaugeEvent.timer === undefined) {
+ if(state.tmp.gaugeEvent.timer === undefined) {
NETDATA.gaugeAnimation(state, false);
- state.gaugeEvent.timer = setTimeout(function() {
- state.gaugeEvent.timer = undefined;
- NETDATA.gaugeSet(state, state.gaugeEvent.value, state.gaugeEvent.min, state.gaugeEvent.max);
+ state.tmp.gaugeEvent.timer = setTimeout(function() {
+ state.tmp.gaugeEvent.timer = undefined;
+ NETDATA.gaugeSet(state, state.tmp.gaugeEvent.value, state.tmp.gaugeEvent.min, state.tmp.gaugeEvent.max);
}, NETDATA.options.current.charts_selection_animation_delay);
}
@@ -5699,14 +5952,15 @@ var NETDATA = window.NETDATA || {};
}
else {
value = data.result[0];
- min = (state.gaugeMin === null)?NETDATA.commonMin.get(state):state.gaugeMin;
- max = (state.gaugeMax === null)?NETDATA.commonMax.get(state):state.gaugeMax;
+ min = (state.tmp.gaugeMin === null)?NETDATA.commonMin.get(state):state.tmp.gaugeMin;
+ max = (state.tmp.gaugeMax === null)?NETDATA.commonMax.get(state):state.tmp.gaugeMax;
if(value < min) min = value;
if(value > max) max = value;
// make sure it is zero based
- if(min > 0) min = 0;
- if(max < 0) max = 0;
+ // but only if it has not been set by the user
+ if(state.tmp.gaugeMin === null && min > 0) min = 0;
+ if(state.tmp.gaugeMax === null && max < 0) max = 0;
NETDATA.gaugeSetLabels(state, value, min, max);
}
@@ -5716,38 +5970,39 @@ var NETDATA = window.NETDATA || {};
};
NETDATA.gaugeChartCreate = function(state, data) {
- var self = $(state.element);
// var chart = $(state.element_chart);
var value = data.result[0];
- var min = self.data('gauge-min-value') || null;
- var max = self.data('gauge-max-value') || null;
- var adjust = self.data('gauge-adjust') || null;
- var pointerColor = self.data('gauge-pointer-color') || NETDATA.themes.current.gauge_pointer;
- var strokeColor = self.data('gauge-stroke-color') || NETDATA.themes.current.gauge_stroke;
- var startColor = self.data('gauge-start-color') || state.chartColors()[0];
- var stopColor = self.data('gauge-stop-color') || void 0;
- var generateGradient = self.data('gauge-generate-gradient') || false;
+ var min = NETDATA.dataAttribute(state.element, 'gauge-min-value', null);
+ var max = NETDATA.dataAttribute(state.element, 'gauge-max-value', null);
+ // var adjust = NETDATA.dataAttribute(state.element, 'gauge-adjust', null);
+ var pointerColor = NETDATA.dataAttribute(state.element, 'gauge-pointer-color', NETDATA.themes.current.gauge_pointer);
+ var strokeColor = NETDATA.dataAttribute(state.element, 'gauge-stroke-color', NETDATA.themes.current.gauge_stroke);
+ var startColor = NETDATA.dataAttribute(state.element, 'gauge-start-color', state.chartCustomColors()[0]);
+ var stopColor = NETDATA.dataAttribute(state.element, 'gauge-stop-color', void 0);
+ var generateGradient = NETDATA.dataAttributeBoolean(state.element, 'gauge-generate-gradient', false);
if(min === null) {
min = NETDATA.commonMin.get(state);
- state.gaugeMin = null;
+ state.tmp.gaugeMin = null;
}
else
- state.gaugeMin = min;
+ state.tmp.gaugeMin = min;
if(max === null) {
max = NETDATA.commonMax.get(state);
- state.gaugeMax = null;
+ state.tmp.gaugeMax = null;
}
else
- state.gaugeMax = max;
+ state.tmp.gaugeMax = max;
// make sure it is zero based
- if(min > 0) min = 0;
- if(max < 0) max = 0;
+ // but only if it has not been set by the user
+ if(state.tmp.gaugeMin === null && min > 0) min = 0;
+ if(state.tmp.gaugeMax === null && max < 0) max = 0;
var width = state.chartWidth(), height = state.chartHeight(); //, ratio = 1.5;
+ // console.log('gauge width: ' + width.toString() + ', height: ' + height.toString());
//switch(adjust) {
// case 'width': width = height * ratio; break;
// case 'height':
@@ -5760,12 +6015,12 @@ var NETDATA = window.NETDATA || {};
var options = {
lines: 12, // The number of lines to draw
- angle: 0.15, // The span of the gauge arc
- lineWidth: 0.50, // The line thickness
- radiusScale: 0.85, // Relative radius
+ angle: 0.14, // The span of the gauge arc
+ lineWidth: 0.57, // The line thickness
+ radiusScale: 1.0, // Relative radius
pointer: {
- length: 0.8, // 0.9 The radius of the inner circle
- strokeWidth: 0.035, // The rotation offset
+ length: 0.85, // the length of the pointer, relative to the gauge radius
+ strokeWidth: 0.045, // the thickness of the pointer
color: pointerColor // Fill color
},
limitMax: true, // If false, the max value of the gauge will be updated if the value surpasses max
@@ -5789,7 +6044,7 @@ var NETDATA = window.NETDATA || {};
var len = generateGradient.length;
while(len--) {
var pcent = generateGradient[len];
- var color = self.attr('data-gauge-gradient-percent-color-' + pcent.toString()) || false;
+ var color = NETDATA.dataAttribute(state.element, 'gauge-gradient-percent-color-' + pcent.toString(), false);
if(color !== false) {
var a = [];
a[0] = pcent / 100;
@@ -5816,57 +6071,57 @@ var NETDATA = window.NETDATA || {};
[1.0, NETDATA.colorLuminance(startColor, 0.0)]];
}
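// NOTE: hypothetical markup for the gradient branch above; the loop reads one
// 'data-gauge-gradient-percent-color-<p>' attribute per entry of the
// generate-gradient array (the colors here are made up):
// <div data-netdata="system.load"
//      data-chart-library="gauge"
//      data-gauge-generate-gradient="[0, 50, 100]"
//      data-gauge-gradient-percent-color-0="#43a047"
//      data-gauge-gradient-percent-color-50="#f0ad4e"
//      data-gauge-gradient-percent-color-100="#cb3935"
//      ></div>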
- state.gauge_canvas = document.createElement('canvas');
- state.gauge_canvas.id = 'gauge-' + state.uuid + '-canvas';
- state.gauge_canvas.className = 'gaugeChart';
- state.gauge_canvas.width = width;
- state.gauge_canvas.height = height;
- state.element_chart.appendChild(state.gauge_canvas);
+ state.tmp.gauge_canvas = document.createElement('canvas');
+ state.tmp.gauge_canvas.id = 'gauge-' + state.uuid + '-canvas';
+ state.tmp.gauge_canvas.className = 'gaugeChart';
+ state.tmp.gauge_canvas.width = width;
+ state.tmp.gauge_canvas.height = height;
+ state.element_chart.appendChild(state.tmp.gauge_canvas);
- var valuefontsize = Math.floor(height / 6);
- var valuetop = Math.round((height - valuefontsize - (height / 6)) / 2);
- state.gaugeChartLabel = document.createElement('span');
- state.gaugeChartLabel.className = 'gaugeChartLabel';
- state.gaugeChartLabel.style.fontSize = valuefontsize + 'px';
- state.gaugeChartLabel.style.top = valuetop.toString() + 'px';
- state.element_chart.appendChild(state.gaugeChartLabel);
+ var valuefontsize = Math.floor(height / 5);
+ var valuetop = Math.round((height - valuefontsize) / 3.2);
+ state.tmp.gaugeChartLabel = document.createElement('span');
+ state.tmp.gaugeChartLabel.className = 'gaugeChartLabel';
+ state.tmp.gaugeChartLabel.style.fontSize = valuefontsize + 'px';
+ state.tmp.gaugeChartLabel.style.top = valuetop.toString() + 'px';
+ state.element_chart.appendChild(state.tmp.gaugeChartLabel);
- var titlefontsize = Math.round(valuefontsize / 2);
+ var titlefontsize = Math.round(valuefontsize / 2.1);
var titletop = 0;
- state.gaugeChartTitle = document.createElement('span');
- state.gaugeChartTitle.className = 'gaugeChartTitle';
- state.gaugeChartTitle.innerText = state.title;
- state.gaugeChartTitle.style.fontSize = titlefontsize + 'px';
- state.gaugeChartTitle.style.lineHeight = titlefontsize + 'px';
- state.gaugeChartTitle.style.top = titletop.toString() + 'px';
- state.element_chart.appendChild(state.gaugeChartTitle);
+ state.tmp.gaugeChartTitle = document.createElement('span');
+ state.tmp.gaugeChartTitle.className = 'gaugeChartTitle';
+ state.tmp.gaugeChartTitle.innerText = state.title;
+ state.tmp.gaugeChartTitle.style.fontSize = titlefontsize + 'px';
+ state.tmp.gaugeChartTitle.style.lineHeight = titlefontsize + 'px';
+ state.tmp.gaugeChartTitle.style.top = titletop.toString() + 'px';
+ state.element_chart.appendChild(state.tmp.gaugeChartTitle);
var unitfontsize = Math.round(titlefontsize * 0.9);
- state.gaugeChartUnits = document.createElement('span');
- state.gaugeChartUnits.className = 'gaugeChartUnits';
- state.gaugeChartUnits.innerText = state.units;
- state.gaugeChartUnits.style.fontSize = unitfontsize + 'px';
- state.element_chart.appendChild(state.gaugeChartUnits);
-
- state.gaugeChartMin = document.createElement('span');
- state.gaugeChartMin.className = 'gaugeChartMin';
- state.gaugeChartMin.style.fontSize = Math.round(valuefontsize * 0.75).toString() + 'px';
- state.element_chart.appendChild(state.gaugeChartMin);
-
- state.gaugeChartMax = document.createElement('span');
- state.gaugeChartMax.className = 'gaugeChartMax';
- state.gaugeChartMax.style.fontSize = Math.round(valuefontsize * 0.75).toString() + 'px';
- state.element_chart.appendChild(state.gaugeChartMax);
+ state.tmp.gaugeChartUnits = document.createElement('span');
+ state.tmp.gaugeChartUnits.className = 'gaugeChartUnits';
+ state.tmp.gaugeChartUnits.innerText = state.units;
+ state.tmp.gaugeChartUnits.style.fontSize = unitfontsize + 'px';
+ state.element_chart.appendChild(state.tmp.gaugeChartUnits);
+
+ state.tmp.gaugeChartMin = document.createElement('span');
+ state.tmp.gaugeChartMin.className = 'gaugeChartMin';
+ state.tmp.gaugeChartMin.style.fontSize = Math.round(valuefontsize * 0.75).toString() + 'px';
+ state.element_chart.appendChild(state.tmp.gaugeChartMin);
+
+ state.tmp.gaugeChartMax = document.createElement('span');
+ state.tmp.gaugeChartMax.className = 'gaugeChartMax';
+ state.tmp.gaugeChartMax.style.fontSize = Math.round(valuefontsize * 0.75).toString() + 'px';
+ state.element_chart.appendChild(state.tmp.gaugeChartMax);
// when we just re-create the chart
// do not animate the first update
var animate = true;
- if(typeof state.gauge_instance !== 'undefined')
+ if(typeof state.tmp.gauge_instance !== 'undefined')
animate = false;
- state.gauge_instance = new Gauge(state.gauge_canvas).setOptions(options); // create sexy gauge!
+ state.tmp.gauge_instance = new Gauge(state.tmp.gauge_canvas).setOptions(options); // create sexy gauge!
- state.___gaugeOld__ = {
+ state.tmp.___gaugeOld__ = {
value: value,
min: min,
max: max,
@@ -5876,8 +6131,8 @@ var NETDATA = window.NETDATA || {};
};
// we will always feed a percentage
- state.gauge_instance.minValue = 0;
- state.gauge_instance.maxValue = 100;
+ state.tmp.gauge_instance.minValue = 0;
+ state.tmp.gauge_instance.maxValue = 100;
NETDATA.gaugeAnimation(state, animate);
NETDATA.gaugeSet(state, value, min, max);
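// NOTE: because minValue/maxValue are pinned to 0 and 100 above, the gauge
// instance is always fed a percentage; gaugeSet() (earlier hunk) derives it
// from (value, min, max) and clamps it to (0.001, 99.999) so the needle
// never rests exactly on either end stop.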
@@ -5895,8 +6150,8 @@ var NETDATA = window.NETDATA || {};
create: NETDATA.dygraphChartCreate,
update: NETDATA.dygraphChartUpdate,
resize: function(state) {
- if(typeof state.dygraph_instance.resize === 'function')
- state.dygraph_instance.resize();
+ if(typeof state.tmp.dygraph_instance.resize === 'function')
+ state.tmp.dygraph_instance.resize();
},
setSelection: NETDATA.dygraphSetSelection,
clearSelection: NETDATA.dygraphClearSelection,
@@ -5915,11 +6170,16 @@ var NETDATA = window.NETDATA || {};
return (this.isSparkline(state) === false)?3:2;
},
isSparkline: function(state) {
- if(typeof state.dygraph_sparkline === 'undefined') {
- var t = $(state.element).data('dygraph-theme');
- state.dygraph_sparkline = (t === 'sparkline');
+ if(typeof state.tmp.dygraph_sparkline === 'undefined') {
+ var t = NETDATA.dataAttribute(state.element, 'dygraph-theme', undefined);
+ state.tmp.dygraph_sparkline = (t === 'sparkline');
}
- return state.dygraph_sparkline;
+ return state.tmp.dygraph_sparkline;
+ },
+ container_class: function(state) {
+ if(this.legend(state) !== null)
+ return 'netdata-container-with-legend';
+ return 'netdata-container';
}
},
"sparkline": {
@@ -5938,7 +6198,8 @@ var NETDATA = window.NETDATA || {};
autoresize: function(state) { void(state); return false; },
max_updates_to_recreate: function(state) { void(state); return 5000; },
track_colors: function(state) { void(state); return false; },
- pixels_per_point: function(state) { void(state); return 3; }
+ pixels_per_point: function(state) { void(state); return 3; },
+ container_class: function(state) { void(state); return 'netdata-container'; }
},
"peity": {
initialize: NETDATA.peityInitialize,
@@ -5956,7 +6217,8 @@ var NETDATA = window.NETDATA || {};
autoresize: function(state) { void(state); return false; },
max_updates_to_recreate: function(state) { void(state); return 5000; },
track_colors: function(state) { void(state); return false; },
- pixels_per_point: function(state) { void(state); return 3; }
+ pixels_per_point: function(state) { void(state); return 3; },
+ container_class: function(state) { void(state); return 'netdata-container'; }
},
"morris": {
initialize: NETDATA.morrisInitialize,
@@ -5974,7 +6236,8 @@ var NETDATA = window.NETDATA || {};
autoresize: function(state) { void(state); return false; },
max_updates_to_recreate: function(state) { void(state); return 50; },
track_colors: function(state) { void(state); return false; },
- pixels_per_point: function(state) { void(state); return 15; }
+ pixels_per_point: function(state) { void(state); return 15; },
+ container_class: function(state) { void(state); return 'netdata-container'; }
},
"google": {
initialize: NETDATA.googleInitialize,
@@ -5992,7 +6255,8 @@ var NETDATA = window.NETDATA || {};
autoresize: function(state) { void(state); return false; },
max_updates_to_recreate: function(state) { void(state); return 300; },
track_colors: function(state) { void(state); return false; },
- pixels_per_point: function(state) { void(state); return 4; }
+ pixels_per_point: function(state) { void(state); return 4; },
+ container_class: function(state) { void(state); return 'netdata-container'; }
},
"raphael": {
initialize: NETDATA.raphaelInitialize,
@@ -6010,7 +6274,8 @@ var NETDATA = window.NETDATA || {};
autoresize: function(state) { void(state); return false; },
max_updates_to_recreate: function(state) { void(state); return 5000; },
track_colors: function(state) { void(state); return false; },
- pixels_per_point: function(state) { void(state); return 3; }
+ pixels_per_point: function(state) { void(state); return 3; },
+ container_class: function(state) { void(state); return 'netdata-container'; }
},
"c3": {
initialize: NETDATA.c3Initialize,
@@ -6028,7 +6293,8 @@ var NETDATA = window.NETDATA || {};
autoresize: function(state) { void(state); return false; },
max_updates_to_recreate: function(state) { void(state); return 5000; },
track_colors: function(state) { void(state); return false; },
- pixels_per_point: function(state) { void(state); return 15; }
+ pixels_per_point: function(state) { void(state); return 15; },
+ container_class: function(state) { void(state); return 'netdata-container'; }
},
"d3": {
initialize: NETDATA.d3Initialize,
@@ -6046,7 +6312,8 @@ var NETDATA = window.NETDATA || {};
autoresize: function(state) { void(state); return false; },
max_updates_to_recreate: function(state) { void(state); return 5000; },
track_colors: function(state) { void(state); return false; },
- pixels_per_point: function(state) { void(state); return 3; }
+ pixels_per_point: function(state) { void(state); return 3; },
+ container_class: function(state) { void(state); return 'netdata-container'; }
},
"easypiechart": {
initialize: NETDATA.easypiechartInitialize,
@@ -6065,7 +6332,8 @@ var NETDATA = window.NETDATA || {};
max_updates_to_recreate: function(state) { void(state); return 5000; },
track_colors: function(state) { void(state); return true; },
pixels_per_point: function(state) { void(state); return 3; },
- aspect_ratio: 100
+ aspect_ratio: 100,
+ container_class: function(state) { void(state); return 'netdata-container-easypiechart'; }
},
"gauge": {
initialize: NETDATA.gaugeInitialize,
@@ -6084,7 +6352,8 @@ var NETDATA = window.NETDATA || {};
max_updates_to_recreate: function(state) { void(state); return 5000; },
track_colors: function(state) { void(state); return true; },
pixels_per_point: function(state) { void(state); return 3; },
- aspect_ratio: 70
+ aspect_ratio: 60,
+ container_class: function(state) { void(state); return 'netdata-container-gauge'; }
}
};
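// NOTE: the call site of the new per-library container_class() hook is not
// part of this diff; presumably the chart bootstrapper applies it along the
// lines of:
//     state.element.className = state.library.container_class(state);
// giving easypiechart and gauge dedicated container classes, which the CSS
// below uses to enforce square (100%) and 60% aspect ratios.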
@@ -6203,7 +6472,7 @@ var NETDATA = window.NETDATA || {};
NETDATA.alarms = {
onclick: null, // the callback to handle the click - it will be called with the alarm log entry
- chart_div_offset: 100, // give that space above the chart when scrolling to it
+ chart_div_offset: -50, // offset added to the chart's position when scrolling to it (negative stops above the chart)
chart_div_id_prefix: 'chart_', // the chart DIV IDs have this prefix (they should be NETDATA.name2id(chart.id))
chart_div_animation_duration: 0,// the duration of the animation while scrolling to a chart
@@ -6349,7 +6618,7 @@ var NETDATA = window.NETDATA || {};
if(typeof chart_id === 'string') {
var offset = $('#' + NETDATA.alarms.chart_div_id_prefix + NETDATA.name2id(chart_id)).offset();
if(typeof offset !== 'undefined') {
- $('html, body').animate({ scrollTop: offset.top - NETDATA.alarms.chart_div_offset }, NETDATA.alarms.chart_div_animation_duration);
+ $('html, body').animate({ scrollTop: offset.top + NETDATA.alarms.chart_div_offset }, NETDATA.alarms.chart_div_animation_duration);
return true;
}
}
diff --git a/web/dashboard.slate.css b/web/dashboard.slate.css
index 36ea6dc6..f12a6aab 100644
--- a/web/dashboard.slate.css
+++ b/web/dashboard.slate.css
@@ -63,6 +63,38 @@ code {
/* width and height is given per chart with data-width and data-height */
}
+.netdata-container-gauge {
+ display: inline-block;
+ overflow: hidden;
+
+ /* required for child elements to have absolute position */
+ position: relative;
+
+ /* width and height is given per chart with data-width and data-height */
+}
+
+.netdata-container-gauge:after {
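+ /* the padded :after pseudo-element reserves a height of 60% of the width, the gauge aspect ratio */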
+ padding-top: 60%;
+ display: block;
+ content: '';
+}
+
+.netdata-container-easypiechart {
+ display: inline-block;
+ overflow: hidden;
+
+ /* required for child elements to have absolute position */
+ position: relative;
+
+ /* width and height is given per chart with data-width and data-height */
+}
+
+.netdata-container-easypiechart:after {
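+ /* the padded :after pseudo-element reserves a square (1:1) area for the pie chart */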
+ padding-top: 100%;
+ display: block;
+ content: '';
+}
+
.netdata-aspect {
position: relative;
width: 100%;
@@ -142,12 +174,15 @@ code {
.netdata-message {
display: inline-block;
+ position: absolute;
+ top: 0;
+ left: 0;
+ right: 0;
+ bottom: 0;
text-align: left;
vertical-align: top;
font-weight: bold;
font-size: x-small;
- width: 100%;
- height: 100%;
overflow: hidden;
background: inherit;
z-index: 0;
@@ -417,7 +452,7 @@ code {
margin-left: 18%;
text-align: center;
color: #676b70;
- font-weight: normal;
+ font-weight: bold;
}
.easyPieChartUnits {
@@ -441,6 +476,8 @@ code {
position: absolute;
top: 0;
left: 0;
+ bottom: 0;
+ right: 0;
z-index: 0;
}
@@ -489,7 +526,7 @@ code {
position: absolute;
float: left;
left: 0;
- bottom: 10%;
+ bottom: 8%;
width: 92%;
margin-left: 8%;
text-align: left;
@@ -502,7 +539,7 @@ code {
position: absolute;
float: left;
left: 0;
- bottom: 10%;
+ bottom: 8%;
width: 95%;
margin-right: 5%;
text-align: right;
diff --git a/web/dashboard_info.js b/web/dashboard_info.js
index c348da30..91e007a1 100644
--- a/web/dashboard_info.js
+++ b/web/dashboard_info.js
@@ -61,6 +61,12 @@ netdataDashboard.menu = {
info: 'Performance metrics of the netfilter components.'
},
+ 'ipfw': {
+ title: 'Firewall (ipfw)',
+ icon: '<i class="fa fa-shield" aria-hidden="true"></i>',
+ info: 'Counters and memory usage for the ipfw rules.'
+ },
+
'cpu': {
title: 'CPUs',
icon: '<i class="fa fa-bolt" aria-hidden="true"></i>',
@@ -91,6 +97,12 @@ netdataDashboard.menu = {
info: 'The Intelligent Platform Management Interface (IPMI) is a set of computer interface specifications for an autonomous computer subsystem that provides management and monitoring capabilities independently of the host system\'s CPU, firmware (BIOS or UEFI) and operating system.'
},
+ 'samba': {
+ title: 'Samba',
+ icon: "<i class=\"fa fa-folder-open\" aria-hidden=\"true\"></i>",
+ info: 'Performance metrics of the Samba file share operations of this system. Samba is an implementation of Windows services, including Windows SMB protocol file shares.'
+ },
+
'nfsd': {
title: 'NFS Server',
icon: '<i class="fa fa-folder-open" aria-hidden="true"></i>',
@@ -103,6 +115,12 @@ netdataDashboard.menu = {
info: 'Performance metrics of the NFS operations of this system, acting as an NFS client.'
},
+ 'zfs': {
+ title: 'ZFS filesystem',
+ icon: '<i class="fa fa-folder-open" aria-hidden="true"></i>',
+ info: 'Performance metrics of the ZFS filesystem. The following charts visualize all metrics reported by <a href="https://github.com/zfsonlinux/zfs/blob/master/cmd/arcstat/arcstat.py" target="_blank">arcstat.py</a> and <a href="https://github.com/zfsonlinux/zfs/blob/master/cmd/arc_summary/arc_summary.py" target="_blank">arc_summary.py</a>.'
+ },
+
'apps': {
title: 'Applications',
icon: '<i class="fa fa-heartbeat" aria-hidden="true"></i>',
@@ -225,10 +243,16 @@ netdataDashboard.menu = {
info: undefined
},
+ 'lighttpd': {
+ title: 'Lighttpd',
+ icon: '<i class="fa fa-eye" aria-hidden="true"></i>',
+ info: undefined
+ },
+
'web_log': {
title: undefined,
icon: '<i class="fa fa-file-text-o" aria-hidden="true"></i>',
- info: 'Information extracted from a web server log file. <code>web_log</code> plugin incrementally parses the web server log file to provide, in real-time, a break down of key web server performance metrics. An extended log file format may optionally be used (for <code>nginx</code> and <code>apache</code>) offering timing information and bandwidth for both requests and responses. <code>web_log</code> plugin may also be configured to provide a break down of requests per URL pattern (check <a href="https://github.com/firehol/netdata/blob/master/conf.d/python.d/web_log.conf" target="_blank"><code>/etc/netdata/python.d/web_log.conf</code></a>).'
+ info: 'Information extracted from a server log file. <code>web_log</code> plugin incrementally parses the server log file to provide, in real-time, a breakdown of key server performance metrics. For web servers, an extended log file format may optionally be used (for <code>nginx</code> and <code>apache</code>) offering timing information and bandwidth for both requests and responses. <code>web_log</code> plugin may also be configured to provide a breakdown of requests per URL pattern (check <a href="https://github.com/firehol/netdata/blob/master/conf.d/python.d/web_log.conf" target="_blank"><code>/etc/netdata/python.d/web_log.conf</code></a>).'
},
'named': {
@@ -261,10 +285,22 @@ netdataDashboard.menu = {
info: undefined
},
+ 'fronius': {
+ title: 'Fronius',
+ icon: '<i class="fa fa-sun-o" aria-hidden="true"></i>',
+ info: undefined
+ },
+
'snmp': {
title: 'SNMP',
icon: '<i class="fa fa-random" aria-hidden="true"></i>',
info: undefined
+ },
+
+ 'go_expvar': {
+ title: 'Go - expvars',
+ icon: '<i class="fa fa-eye" aria-hidden="true"></i>',
+ info: 'Statistics about running Go applications exposed by the <a href="https://golang.org/pkg/expvar/" target="_blank">expvar package</a>.'
}
};
@@ -277,6 +313,44 @@ netdataDashboard.menu = {
// information about the submenus
netdataDashboard.submenu = {
+ 'web_log.squid_bandwidth': {
+ title: 'bandwidth',
+ info: 'Bandwidth of responses (<code>sent</code>) by squid. This chart may present unusual spikes, since the bandwidth is accounted at the time the log line is saved by the server, even if the time needed to serve it spans a longer duration. We suggest using QoS (e.g. <a href="http://firehol.org/#fireqos" target="_blank">FireQOS</a>) for accurate accounting of the server bandwidth.'
+ },
+
+ 'web_log.squid_responses': {
+ title: 'responses',
+ info: 'Information related to the responses sent by squid.'
+ },
+
+ 'web_log.squid_requests': {
+ title: 'requests',
+ info: 'Information related to the requests squid has received.'
+ },
+
+ 'web_log.squid_hierarchy': {
+ title: 'hierarchy',
+ info: 'Performance metrics for the squid hierarchy used to serve the requests.'
+ },
+
+ 'web_log.squid_squid_transport': {
+ title: 'transport'
+ },
+
+ 'web_log.squid_squid_cache': {
+ title: 'cache',
+ info: 'Performance metrics for the performance of the squid cache.'
+ },
+
+ 'web_log.squid_timings': {
+ title: 'timings',
+ info: 'Duration of squid requests. Unrealistic spikes may be reported, since squid logs the total time of the requests when they complete. Especially for HTTPS, the clients get a tunnel from the proxy and exchange requests directly with the upstream servers, so squid cannot evaluate the individual requests and reports the total time the tunnel was open.'
+ },
+
+ 'web_log.squid_clients': {
+ title: 'clients'
+ },
+
'web_log.bandwidth': {
info: 'Bandwidth of requests (<code>received</code>) and responses (<code>sent</code>). <code>received</code> requires an extended log format (without it, the web server log does not have this information). This chart may present unusual spikes, since the bandwidth is accounted at the time the log line is saved by the web server, even if the time needed to serve it spans a longer duration. We suggest using QoS (e.g. <a href="http://firehol.org/#fireqos" target="_blank">FireQOS</a>) for accurate accounting of the web server bandwidth.'
},
@@ -321,6 +395,11 @@ netdataDashboard.submenu = {
info: 'DDoS protection performance metrics. <a href="https://github.com/firehol/firehol/wiki/Working-with-SYNPROXY" target="_blank">SYNPROXY</a> is a TCP SYN packets proxy. It is used to protect any TCP server (like a web server) from SYN floods and similar DDoS attacks. It is a netfilter module, in the Linux kernel (since version 3.12). It is optimized to handle millions of packets per second utilizing all CPUs available without any concurrency locking between the connections. It can be used for any kind of TCP traffic (even encrypted), since it does not interfere with the content itself.'
},
+ 'ipfw.dynamic_rules': {
+ title: 'dynamic rules',
+ info: 'Number of dynamic rules, created by the corresponding stateful firewall rules.'
+ },
+
'system.softnet_stat': {
title: 'softnet',
info: function(os) {
@@ -339,6 +418,11 @@ netdataDashboard.submenu = {
else
return 'Statistics for per CPUs core SoftIRQs related to network receive work. Total for all CPU cores can be found at <a href="#menu_system_submenu_softnet_stat">System / softnet statistics</a>.';
}
+ },
+
+ 'go_expvar.memstats': {
+ title: 'Memory statistics',
+ info: 'Go runtime memory statistics. See <a href="https://golang.org/pkg/runtime/#MemStats" target="_blank">runtime.MemStats</a> documentation for more info about each chart and the values.'
}
};
@@ -420,7 +504,6 @@ netdataDashboard.context = {
},
'system.idlejitter': {
- colors: '#5555AA',
info: 'Idle jitter is calculated by netdata. A thread is spawned that requests to sleep for a few microseconds. When the system wakes it up, it measures how many microseconds have passed. The difference between the requested and the actual duration of the sleep is the <b>idle jitter</b>. This number is useful in real-time environments, where CPU jitter can affect the quality of the service (like VoIP media gateways).'
},
@@ -441,6 +524,30 @@ netdataDashboard.context = {
},
// ------------------------------------------------------------------------
+ // CPU charts
+
+ 'cpu.cpu': {
+ commonMin: true,
+ commonMax: true,
+ valueRange: "[0, 100]"
+ },
+
+ 'cpu.interrupts': {
+ commonMin: true,
+ commonMax: true
+ },
+
+ 'cpu.softirqs': {
+ commonMin: true,
+ commonMax: true
+ },
+
+ 'cpu.softnet_stat': {
+ commonMin: true,
+ commonMax: true
+ },
+
+ // ------------------------------------------------------------------------
// MEMORY
'mem.ksm_savings': {
@@ -783,6 +890,66 @@ netdataDashboard.context = {
// ------------------------------------------------------------------------
+ // LIGHTTPD
+
+ 'lighttpd.connections': {
+ colors: NETDATA.colors[4],
+ mainheads: [
+ netdataDashboard.gaugeChart('Connections', '12%', '', NETDATA.colors[4])
+ ]
+ },
+
+ 'lighttpd.requests': {
+ colors: NETDATA.colors[0],
+ mainheads: [
+ netdataDashboard.gaugeChart('Requests', '12%', '', NETDATA.colors[0])
+ ]
+ },
+
+ 'lighttpd.net': {
+ colors: NETDATA.colors[3],
+ mainheads: [
+ netdataDashboard.gaugeChart('Bandwidth', '12%', '', NETDATA.colors[3])
+ ]
+ },
+
+ 'lighttpd.workers': {
+ mainheads: [
+ function(os, id) {
+ void(os);
+ return '<div data-netdata="' + id + '"'
+ + ' data-dimensions="busy"'
+ + ' data-append-options="percentage"'
+ + ' data-gauge-max-value="100"'
+ + ' data-chart-library="gauge"'
+ + ' data-title="Servers Utilization"'
+ + ' data-units="percentage %"'
+ + ' data-gauge-adjust="width"'
+ + ' data-width="12%"'
+ + ' data-before="0"'
+ + ' data-after="-CHART_DURATION"'
+ + ' data-points="CHART_DURATION"'
+ + ' role="application"></div>';
+ }
+ ]
+ },
+
+ 'lighttpd.bytesperreq': {
+ colors: NETDATA.colors[3],
+ height: 0.5
+ },
+
+ 'lighttpd.reqpersec': {
+ colors: NETDATA.colors[4],
+ height: 0.5
+ },
+
+ 'lighttpd.bytespersec': {
+ colors: NETDATA.colors[6],
+ height: 0.5
+ },
+
+ // ------------------------------------------------------------------------
// NGINX
'nginx.connections': {
@@ -853,6 +1020,9 @@ netdataDashboard.context = {
height: 0.5
},
+ // ------------------------------------------------------------------------
+ // web_log
+
'web_log.response_statuses': {
info: 'Web server responses by type. <code>success</code> includes <b>1xx</b>, <b>2xx</b> and <b>304</b>, <code>error</code> includes <b>5xx</b>, <code>redirect</code> includes <b>3xx</b> except <b>304</b>, <code>bad</code> includes <b>4xx</b>, <code>other</code> are all the other responses.',
mainheads: [
@@ -931,7 +1101,14 @@ netdataDashboard.context = {
},
'web_log.response_codes': {
- info: 'Web server responses by code family. According to the standards <code>1xx</code> are informational responses, <code>2xx</code> are successful responses, <code>3xx</code> are redirects (although they include <b>304</b> which is used as "<b>not modified</b>"), <code>4xx</code> are bad requests, <code>5xx</code> are internal server errors, <code>other</code> are non-standard responses, <code>unmatched</code> counts the lines in the log file that are not matched by the plugin (<a href="https://github.com/firehol/netdata/issues/new?title=web_log%20reports%20unmatched%20lines&body=web_log%20plugin%20reports%20unmatched%20lines.%0A%0AThis%20is%20my%20log:%0A%0A%60%60%60txt%0A%0Aplease%20paste%20your%20web%20server%20log%20here%0A%0A%60%60%60" target="_blank">let us know</a> if you have any unmatched).'
+ info: 'Web server responses by code family. ' +
+ 'According to the standards <code>1xx</code> are informational responses, ' +
+ '<code>2xx</code> are successful responses, ' +
+ '<code>3xx</code> are redirects (although they include <b>304</b> which is used as "<b>not modified</b>"), ' +
+ '<code>4xx</code> are bad requests, ' +
+ '<code>5xx</code> are internal server errors, ' +
+ '<code>other</code> are non-standard responses, ' +
+ '<code>unmatched</code> counts the lines in the log file that are not matched by the plugin (<a href="https://github.com/firehol/netdata/issues/new?title=web_log%20reports%20unmatched%20lines&body=web_log%20plugin%20reports%20unmatched%20lines.%0A%0AThis%20is%20my%20log:%0A%0A%60%60%60txt%0A%0Aplease%20paste%20your%20web%20server%20log%20here%0A%0A%60%60%60" target="_blank">let us know</a> if you have any unmatched).'
},
'web_log.response_time': {
@@ -969,6 +1146,212 @@ netdataDashboard.context = {
'web_log.clients_all': {
info: 'Unique client IPs accessing the web server since the last restart of netdata. This plugin keeps in memory all the unique IPs that have accessed the web server. On very busy web servers (several millions of unique IPs) you may want to disable this chart (check <a href="https://github.com/firehol/netdata/blob/master/conf.d/python.d/web_log.conf" target="_blank"><code>/etc/netdata/python.d/web_log.conf</code></a>).'
- }
+ },
+
+ // ------------------------------------------------------------------------
+ // web_log for squid
+
+ 'web_log.squid_response_statuses': {
+ info: 'Squid responses by type. ' +
+ '<code>success</code> includes <b>1xx</b>, <b>2xx</b>, <b>000</b>, <b>304</b>, ' +
+ '<code>error</code> includes <b>5xx</b> and <b>6xx</b>, ' +
+ '<code>redirect</code> includes <b>3xx</b> except <b>304</b>, ' +
+ '<code>bad</code> includes <b>4xx</b>, ' +
+ '<code>other</code> are all the other responses.',
+ mainheads: [
+ function(os, id) {
+ void(os);
+ return '<div data-netdata="' + id + '"'
+ + ' data-dimensions="success"'
+ + ' data-chart-library="gauge"'
+ + ' data-title="Successful"'
+ + ' data-units="requests/s"'
+ + ' data-gauge-adjust="width"'
+ + ' data-width="12%"'
+ + ' data-before="0"'
+ + ' data-after="-CHART_DURATION"'
+ + ' data-points="CHART_DURATION"'
+ + ' data-common-max="' + id + '"'
+ + ' data-colors="' + NETDATA.colors[0] + '"'
+ + ' data-decimal-digits="0"'
+ + ' role="application"></div>';
+ },
+
+ function(os, id) {
+ void(os);
+ return '<div data-netdata="' + id + '"'
+ + ' data-dimensions="redirect"'
+ + ' data-chart-library="gauge"'
+ + ' data-title="Redirects"'
+ + ' data-units="requests/s"'
+ + ' data-gauge-adjust="width"'
+ + ' data-width="12%"'
+ + ' data-before="0"'
+ + ' data-after="-CHART_DURATION"'
+ + ' data-points="CHART_DURATION"'
+ + ' data-common-max="' + id + '"'
+ + ' data-colors="' + NETDATA.colors[2] + '"'
+ + ' data-decimal-digits="0"'
+ + ' role="application"></div>';
+ },
+ function(os, id) {
+ void(os);
+ return '<div data-netdata="' + id + '"'
+ + ' data-dimensions="bad"'
+ + ' data-chart-library="gauge"'
+ + ' data-title="Bad Requests"'
+ + ' data-units="requests/s"'
+ + ' data-gauge-adjust="width"'
+ + ' data-width="12%"'
+ + ' data-before="0"'
+ + ' data-after="-CHART_DURATION"'
+ + ' data-points="CHART_DURATION"'
+ + ' data-common-max="' + id + '"'
+ + ' data-colors="' + NETDATA.colors[3] + '"'
+ + ' data-decimal-digits="0"'
+ + ' role="application"></div>';
+ },
+
+ function(os, id) {
+ void(os);
+ return '<div data-netdata="' + id + '"'
+ + ' data-dimensions="error"'
+ + ' data-chart-library="gauge"'
+ + ' data-title="Server Errors"'
+ + ' data-units="requests/s"'
+ + ' data-gauge-adjust="width"'
+ + ' data-width="12%"'
+ + ' data-before="0"'
+ + ' data-after="-CHART_DURATION"'
+ + ' data-points="CHART_DURATION"'
+ + ' data-common-max="' + id + '"'
+ + ' data-colors="' + NETDATA.colors[1] + '"'
+ + ' data-decimal-digits="0"'
+ + ' role="application"></div>';
+ }
+ ]
+ },
+
+ 'web_log.squid_response_codes': {
+ info: 'Squid responses by code family. ' +
+ 'According to HTTP standards <code>1xx</code> are informational responses, ' +
+ '<code>2xx</code> are successful responses, ' +
+ '<code>3xx</code> are redirects (although they include <b>304</b> which is used as "<b>not modified</b>"), ' +
+ '<code>4xx</code> are bad requests, ' +
+ '<code>5xx</code> are internal server errors. ' +
+ 'Squid also defines <code>000</code> mostly for UDP requests, and ' +
+ '<code>6xx</code> for broken upstream servers sending wrong headers. ' +
+ 'Finally, <code>other</code> are non-standard responses, and ' +
+ '<code>unmatched</code> counts the lines in the log file that are not matched by the plugin (<a href="https://github.com/firehol/netdata/issues/new?title=web_log%20reports%20unmatched%20lines&body=web_log%20plugin%20reports%20unmatched%20lines.%0A%0AThis%20is%20my%20log:%0A%0A%60%60%60txt%0A%0Aplease%20paste%20your%20web%20server%20log%20here%0A%0A%60%60%60" target="_blank">let us know</a> if you have any unmatched).'
+ },
+
+ 'web_log.squid_duration': {
+ mainheads: [
+ function(os, id) {
+ void(os);
+ return '<div data-netdata="' + id + '"'
+ + ' data-dimensions="avg"'
+ + ' data-chart-library="gauge"'
+ + ' data-title="Average Response Time"'
+ + ' data-units="milliseconds"'
+ + ' data-gauge-adjust="width"'
+ + ' data-width="12%"'
+ + ' data-before="0"'
+ + ' data-after="-CHART_DURATION"'
+ + ' data-points="CHART_DURATION"'
+ + ' data-colors="' + NETDATA.colors[4] + '"'
+ + ' data-decimal-digits="2"'
+ + ' role="application"></div>';
+ }
+ ]
+ },
+
+ 'web_log.squid_detailed_response_codes': {
+ info: 'Number of responses for each response code individually.'
+ },
+
+ 'web_log.squid_clients': {
+ info: 'Unique client IPs accessing squid, within each data collection iteration. If data collection is <b>per second</b>, this chart shows <b>unique client IPs per second</b>.'
+ },
+
+ 'web_log.squid_clients_all': {
+ info: 'Unique client IPs accessing squid since the last restart of netdata. This plugin keeps in memory all the unique IPs that have accessed the server. On very busy squid servers (several millions of unique IPs) you may want to disable this chart (check <a href="https://github.com/firehol/netdata/blob/master/conf.d/python.d/web_log.conf" target="_blank"><code>/etc/netdata/python.d/web_log.conf</code></a>).'
+ },
+
+ 'web_log.squid_transport_methods': {
+ info: 'Breakdown per delivery method: <code>TCP</code> are requests on the HTTP port (usually 3128), ' +
+ '<code>UDP</code> are requests on the ICP port (usually 3130), or HTCP port (usually 4128). ' +
+ 'If ICP logging was disabled using the log_icp_queries option, no ICP replies will be logged. ' +
+ '<code>NONE</code> is used to state that squid delivered an unusual response or no response at all. ' +
+ 'Seen with cachemgr requests and errors, usually when the transaction fails before being classified into one of the above outcomes. ' +
+ 'Also seen with responses to <code>CONNECT</code> requests.'
+ },
+
+ 'web_log.squid_code': {
+ info: 'These are combined squid result status codes. A breakdown per component is given in the following charts. ' +
+ 'Check the <a href="http://wiki.squid-cache.org/SquidFaq/SquidLogs">squid documentation about them</a>.'
+ },
+
+ 'web_log.squid_handling_opts': {
+ info: 'These tags are optional and describe why the particular handling was performed or where the request came from. ' +
+ '<code>CLIENT</code> means that the client request placed limits affecting the response. Usually seen when the client issued a <b>no-cache</b> or analogous cache-control command along with the request. Thus, the cache has to validate the object. ' +
+ '<code>IMS</code> states that the client sent a revalidation (conditional) request. ' +
+ '<code>ASYNC</code> is used when the request was generated internally by Squid. Usually this is background fetches for cache information exchanges, background revalidation from stale-while-revalidate cache controls, or ESI sub-objects being loaded. ' +
+ '<code>SWAPFAIL</code> is assigned when the object was believed to be in the cache, but could not be accessed. A new copy was requested from the server. ' +
+ '<code>REFRESH</code> when a revalidation (conditional) request was sent to the server. ' +
+ '<code>SHARED</code> when this request was combined with an existing transaction by collapsed forwarding. NOTE: the existing request is not marked as SHARED. ' +
+ '<code>REPLY</code> when particular handling was requested in the HTTP reply from the server or peer. Usually seen on DENIED due to http_reply_access ACLs preventing delivery of the server\'s response object to the client.'
+ },
+
+ 'web_log.squid_object_types': {
+ info: 'These tags are optional and describe what type of object was produced. ' +
+ '<code>NEGATIVE</code> is only seen on HIT responses, indicating the response was a cached error response, e.g. <b>404 not found</b>. ' +
+ '<code>STALE</code> means the object was cached and served stale. This is usually caused by stale-while-revalidate or stale-if-error cache controls. ' +
+ '<code>OFFLINE</code> when the requested object was retrieved from the cache during offline_mode. The offline mode never validates any object. ' +
+ '<code>INVALID</code> when an invalid request was received. An error response was delivered indicating what the problem was. ' +
+ '<code>FAIL</code> is only seen on <code>REFRESH</code> to indicate the revalidation request failed. The response object may be the server-provided network error or the stale object which was being revalidated, depending on the stale-if-error cache control. ' +
+ '<code>MODIFIED</code> is only seen on <code>REFRESH</code> responses to indicate revalidation produced a new modified object. ' +
+ '<code>UNMODIFIED</code> is only seen on <code>REFRESH</code> responses to indicate revalidation produced a <b>304</b> (Not Modified) status, which was relayed to the client. ' +
+ '<code>REDIRECT</code> when squid generated an HTTP redirect response to this request.'
+ },
+
+ 'web_log.squid_cache_events': {
+ info: 'These tags are optional and describe whether the response was loaded from cache, network, or otherwise. ' +
+ '<code>HIT</code> when the response object delivered was the local cache object. ' +
+ '<code>MEM</code> when the response object came from memory cache, avoiding disk accesses. Only seen on HIT responses. ' +
+ '<code>MISS</code> when the response object delivered was the network response object. ' +
+ '<code>DENIED</code> when the request was denied by access controls. ' +
+ '<code>NOFETCH</code> is an ICP-specific type, indicating the service is alive but not to be used for this request. It is sent during "-Y" startup or during frequent failures; a cache in hit-only mode will return either UDP_HIT or UDP_MISS_NOFETCH, so neighbours will only fetch hits. ' +
+ '<code>TUNNEL</code> when a binary tunnel was established for this transaction.'
+ },
+
+ 'web_log.squid_transport_errors': {
+ info: 'These tags are optional and describe some error conditions which occurred during response delivery (if any). ' +
+ '<code>ABORTED</code> when the response was not completed due to the connection being aborted (usually by the client). ' +
+ '<code>TIMEOUT</code> when the response was not completed due to a connection timeout.'
+ },
+
+ // ------------------------------------------------------------------------
+ // Fronius Solar Power
+
+ 'fronius.power': {
+ info: 'Positive <code>Grid</code> values mean that power is coming from the grid. Negative values are excess power that is going back into the grid, possibly being sold. ' +
+ '<code>Photovoltaics</code> is the power generated from the solar panels. ' +
+ '<code>Accumulator</code> is the stored power in the accumulator, if one is present.'
+ },
+
+ 'fronius.autonomy': {
+ commonMin: true,
+ commonMax: true,
+ valueRange: "[0, 100]",
+ info: 'The <code>Autonomy</code> is the degree of self-sufficiency of the installation, as a percentage. An autonomy of 100 % means that the installation is producing more energy than is needed. ' +
+ 'The <code>Self consumption</code> indicates the ratio between the current power generated and the current load. When it reaches 100 %, the <code>Autonomy</code> declines, since the solar panels cannot produce enough energy and need support from the grid.'
+ },
+
+ 'fronius.energy.today': {
+ commonMin: true,
+ commonMax: true,
+ valueRange: "[0, null]"
+ }
};
diff --git a/web/index.html b/web/index.html
index 250dbfed..be944e34 100644
--- a/web/index.html
+++ b/web/index.html
@@ -592,6 +592,43 @@
}
// --------------------------------------------------------------------
+ // natural sorting
+ // http://www.davekoelle.com/files/alphanum.js - LGPL
+
+ function naturalSortChunkify(t) {
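+ // split the string into alternating chunks of digit/dot runs and
+ // everything else, e.g. "eth10" -> ["eth", "10"], so numbers can compare numerically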
+ var tz = [];
+ var x = 0, y = -1, n = 0, i, j;
+
+ while (i = (j = t.charAt(x++)).charCodeAt(0)) {
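+ // char code 46 is '.', 48-57 are '0'-'9'; m marks numeric chunks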
+ var m = (i === 46 || (i >= 48 && i <= 57));
+ if (m !== n) {
+ tz[++y] = "";
+ n = m;
+ }
+ tz[y] += j;
+ }
+
+ return tz;
+ }
+
+ function naturalSortCompare(a, b) {
+ var aa = naturalSortChunkify(a.toLowerCase());
+ var bb = naturalSortChunkify(b.toLowerCase());
+
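+ // walk both chunk lists in parallel: purely numeric chunks compare as numbers, the rest as strings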
+ for (var x = 0; aa[x] && bb[x]; x++) {
+ if (aa[x] !== bb[x]) {
+ var c = Number(aa[x]), d = Number(bb[x]);
+ if (c.toString() === aa[x] && d.toString() === bb[x])
+ return c - d;
+ else
+ return (aa[x] > bb[x]) ? 1 : -1;
+ }
+ }
+
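+ // every shared chunk matched: the shorter string sorts first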
+ return aa.length - bb.length;
+ }
+
+ // --------------------------------------------------------------------
// registry call back to render my-netdata menu
var netdataRegistryCallback = function(machines_array) {
@@ -616,16 +653,14 @@
var master = options.hosts[0].hostname;
var sorted = options.hosts.sort(function(a, b) {
if(a.hostname === master) return -1;
- if(a.hostname === b.hostname) return 0;
- else if(a.hostname > b.hostname) return 1;
- return -1;
+ return naturalSortCompare(a.hostname, b.hostname);
});
i = 0;
len = sorted.length;
while(len--) {
hostname = sorted[i].hostname;
- if(hostname == master) {
+ if(hostname === master) {
url = base + "/";
icon = "home";
}
@@ -656,14 +691,13 @@
saveLocalStorage("registryCallback", JSON.stringify(machines_array));
var machines = machines_array.sort(function (a, b) {
- if (a.name > b.name) return -1;
- if (a.name < b.name) return 1;
- return 0;
+ return naturalSortCompare(a.name, b.name);
});
+ i = 0;
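+ // len-- only counts the iterations; i walks the sorted array forward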
len = machines.length;
while(len--) {
- var u = machines[len];
+ var u = machines[i++];
found++;
el += '<li id="registry_server_' + u.guid + '"><a class="registry_link" href="' + u.url + '" onClick="return gotoServerModalHandler(\'' + u.guid + '\');">' + u.name + '</a></li>';
a1 += '<li id="registry_action_' + u.guid + '"><a href="#" onclick="deleteRegistryModalHandler(\'' + u.guid + '\',\'' + u.name + '\',\'' + u.url + '\'); return false;"><i class="fa fa-trash-o" aria-hidden="true" style="color: #999;"></i></a></li>';
@@ -936,6 +970,8 @@
};
function chartsPerRow(total) {
+ void(total);
+
if(options.chartsPerRow === 0) {
return 1;
//var width = Math.floor(total / options.chartsMinWidth);
@@ -948,8 +984,7 @@
function prioritySort(a, b) {
if(a.priority < b.priority) return -1;
if(a.priority > b.priority) return 1;
- if(a.name < b.name) return -1;
- return 1;
+ return naturalSortCompare(a.name, b.name);
}
function sortObjectByPriority(object) {
@@ -968,8 +1003,7 @@
sorted.sort(function(a, b) {
if(idx[a].priority < idx[b].priority) return -1;
if(idx[a].priority > idx[b].priority) return 1;
- if(a < b) return -1;
- return 1;
+ return naturalSortCompare(a, b);
});
return sorted;
@@ -980,10 +1014,10 @@
// scroll to a section, without changing the browser history
function scrollToId(hash) {
- if(hash && hash != '' && document.getElementById(hash) !== null) {
+ if(hash && hash !== '' && document.getElementById(hash) !== null) {
var offset = $('#' + hash).offset();
if(typeof offset !== 'undefined')
- $('html, body').animate({ scrollTop: offset.top }, 0);
+ $('html, body').animate({ scrollTop: offset.top - 30 }, 0);
}
// we must return false to prevent the default action
@@ -1035,7 +1069,7 @@
key = key + '.' + this.sparklines_registry[key].count;
- return prefix + '<div data-netdata="' + chart + '" data-after="-120" data-width="25%" data-height="15px" data-chart-library="dygraph" data-dygraph-theme="sparkline" data-dimensions="' + dimension + '" data-show-value-of-' + dimension + '-at="' + key + '"></div> (<span id="' + key + '" style="display: inline-block; min-width: 50px; text-align: right;">X</span>' + units + ')' + suffix;
+ return prefix + '<div class="netdata-container" data-netdata="' + chart + '" data-after="-120" data-width="25%" data-height="15px" data-chart-library="dygraph" data-dygraph-theme="sparkline" data-dimensions="' + dimension + '" data-show-value-of-' + dimension + '-at="' + key + '"></div> (<span id="' + key + '" style="display: inline-block; min-width: 50px; text-align: right;">X</span>' + units + ')' + suffix;
},
gaugeChart: function(title, width, dimensions, colors) {
@@ -1045,7 +1079,7 @@
if(typeof dimensions === 'undefined')
dimensions = '';
- return '<div data-netdata="CHART_UNIQUE_ID"'
+ return '<div class="netdata-container" data-netdata="CHART_UNIQUE_ID"'
+ ' data-dimensions="' + dimensions + '"'
+ ' data-chart-library="gauge"'
+ ' data-gauge-adjust="width"'
@@ -1164,6 +1198,7 @@
case 'ap':
case 'net':
case 'disk':
+ case 'statsd':
chart.menu = tmp;
break;
@@ -1191,6 +1226,14 @@
chart.menu_pattern = 'cgroup';
break;
+ case 'go':
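+ // group charts like go_expvar.* under a single "go_expvar" menu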
+ chart.menu = chart.type;
+ if(parts.length > 2 && parts[1] === 'expvar')
+ chart.menu_pattern = tmp + '_' + parts[1];
+ else if(parts.length > 1)
+ chart.menu_pattern = tmp;
+ break;
+
case 'isc':
chart.menu = chart.type;
if(parts.length > 2 && parts[1] === 'dhcpd')
@@ -1260,14 +1303,14 @@
var head = '';
if(typeof charts['system.swap'] !== 'undefined')
- head += '<div style="margin-right: 10px;" data-netdata="system.swap"'
+ head += '<div class="netdata-container" style="margin-right: 10px;" data-netdata="system.swap"'
+ ' data-dimensions="used"'
+ ' data-append-options="percentage"'
+ ' data-chart-library="easypiechart"'
+ ' data-title="Used Swap"'
+ ' data-units="%"'
+ ' data-easypiechart-max-value="100"'
- + ' data-width="8%"'
+ + ' data-width="9%"'
+ ' data-before="0"'
+ ' data-after="-' + duration.toString() + '"'
+ ' data-points="' + duration.toString() + '"'
@@ -1275,21 +1318,21 @@
+ ' role="application"></div>';
if(typeof charts['system.io'] !== 'undefined') {
- head += '<div style="margin-right: 10px;" data-netdata="system.io"'
+ head += '<div class="netdata-container" style="margin-right: 10px;" data-netdata="system.io"'
+ ' data-dimensions="in"'
+ ' data-chart-library="easypiechart"'
+ ' data-title="Disk Read"'
- + ' data-width="10%"'
+ + ' data-width="11%"'
+ ' data-before="0"'
+ ' data-after="-' + duration.toString() + '"'
+ ' data-points="' + duration.toString() + '"'
+ ' role="application"></div>';
- head += '<div style="margin-right: 10px;" data-netdata="system.io"'
+ head += '<div class="netdata-container" style="margin-right: 10px;" data-netdata="system.io"'
+ ' data-dimensions="out"'
+ ' data-chart-library="easypiechart"'
+ ' data-title="Disk Write"'
- + ' data-width="10%"'
+ + ' data-width="11%"'
+ ' data-before="0"'
+ ' data-after="-' + duration.toString() + '"'
+ ' data-points="' + duration.toString() + '"'
@@ -1297,56 +1340,56 @@
}
if(typeof charts['system.cpu'] !== 'undefined')
- head += '<div data-netdata="system.cpu"'
+ head += '<div class="netdata-container" style="margin-right: 10px;" data-netdata="system.cpu"'
+ ' data-chart-library="gauge"'
+ ' data-title="CPU"'
+ ' data-units="%"'
+ ' data-gauge-max-value="100"'
- + ' data-width="18%"'
+ + ' data-width="20%"'
+ ' data-after="-' + duration.toString() + '"'
+ ' data-points="' + duration.toString() + '"'
+ ' data-colors="' + NETDATA.colors[12] + '"'
+ ' role="application"></div>';
if(typeof charts['system.ipv4'] !== 'undefined') {
- head += '<div style="margin-right: 10px;" data-netdata="system.ipv4"'
+ head += '<div class="netdata-container" style="margin-right: 10px;" data-netdata="system.ipv4"'
+ ' data-dimensions="received"'
+ ' data-chart-library="easypiechart"'
+ ' data-title="IPv4 Inbound"'
- + ' data-width="10%"'
+ + ' data-width="11%"'
+ ' data-before="0"'
+ ' data-after="-' + duration.toString() + '"'
+ ' data-points="' + duration.toString() + '"'
+ ' role="application"></div>';
- head += '<div style="margin-right: 10px;" data-netdata="system.ipv4"'
+ head += '<div class="netdata-container" style="margin-right: 10px;" data-netdata="system.ipv4"'
+ ' data-dimensions="sent"'
+ ' data-chart-library="easypiechart"'
+ ' data-title="IPv4 Outbound"'
- + ' data-width="10%"'
+ + ' data-width="11%"'
+ ' data-before="0"'
+ ' data-after="-' + duration.toString() + '"'
+ ' data-points="' + duration.toString() + '"'
+ ' role="application"></div>';
}
else if(typeof charts['system.ipv6'] !== 'undefined') {
- head += '<div style="margin-right: 10px;" data-netdata="system.ipv6"'
+ head += '<div class="netdata-container" style="margin-right: 10px;" data-netdata="system.ipv6"'
+ ' data-dimensions="received"'
+ ' data-chart-library="easypiechart"'
+ ' data-title="IPv6 Inbound"'
+ ' data-units="kbps"'
- + ' data-width="10%"'
+ + ' data-width="11%"'
+ ' data-before="0"'
+ ' data-after="-' + duration.toString() + '"'
+ ' data-points="' + duration.toString() + '"'
+ ' role="application"></div>';
- head += '<div style="margin-right: 10px;" data-netdata="system.ipv6"'
+ head += '<div class="netdata-container" style="margin-right: 10px;" data-netdata="system.ipv6"'
+ ' data-dimensions="sent"'
+ ' data-chart-library="easypiechart"'
+ ' data-title="IPv6 Outbound"'
+ ' data-units="kbps"'
- + ' data-width="10%"'
+ + ' data-width="11%"'
+ ' data-before="0"'
+ ' data-after="-' + duration.toString() + '"'
+ ' data-points="' + duration.toString() + '"'
@@ -1354,14 +1397,14 @@
}
if(typeof charts['system.ram'] !== 'undefined')
- head += '<div style="margin-right: 10px;" data-netdata="system.ram"'
+ head += '<div class="netdata-container" style="margin-right: 10px;" data-netdata="system.ram"'
+ ' data-dimensions="used|buffers|active|wired"' // active and wired are FreeBSD stats
+ ' data-append-options="percentage"'
+ ' data-chart-library="easypiechart"'
+ ' data-title="Used RAM"'
+ ' data-units="%"'
+ ' data-easypiechart-max-value="100"'
- + ' data-width="8%"'
+ + ' data-width="9%"'
+ ' data-after="-' + duration.toString() + '"'
+ ' data-points="' + duration.toString() + '"'
+ ' data-colors="' + NETDATA.colors[7] + '"'
@@ -1446,8 +1489,24 @@
mhead += generateHeadCharts('mainheads', chart, duration);
head += generateHeadCharts('heads', chart, duration);
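+ // when a context sets commonMin/commonMax, emit data-common-min/max
+ // attributes so charts with the same family, context and units share one scale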
+ function chartCommonMin(family, context, units) {
+ var x = netdataDashboard.anyAttribute(netdataDashboard.context, 'commonMin', context, undefined);
+ if(typeof x !== 'undefined')
+ return ' data-common-min="' + family + '/' + context + '/' + units + '"';
+ else
+ return '';
+ }
+
+ function chartCommonMax(family, context, units) {
+ var x = netdataDashboard.anyAttribute(netdataDashboard.context, 'commonMax', context, undefined);
+ if(typeof x !== 'undefined')
+ return ' data-common-max="' + family + '/' + context + '/' + units + '"';
+ else
+ return '';
+ }
+
// generate the chart
- chtml += netdataDashboard.contextInfo(chart.context) + '<div id="chart_' + NETDATA.name2id(chart.id) + '" data-netdata="' + chart.id + '"'
+ chtml += netdataDashboard.contextInfo(chart.context) + '<div class="netdata-container" id="chart_' + NETDATA.name2id(chart.id) + '" data-netdata="' + chart.id + '"'
+ ' data-width="' + pcent_width.toString() + '%"'
+ ' data-height="' + netdataDashboard.contextHeight(chart.context, options.chartsHeight).toString() + 'px"'
+ ' data-dygraph-valuerange="' + netdataDashboard.contextValueRange(chart.context) + '"'
@@ -1455,6 +1514,8 @@
+ ' data-after="-' + duration.toString() + '"'
+ ' data-id="' + NETDATA.name2id(options.hostname + '/' + chart.id) + '"'
+ ' data-colors="' + netdataDashboard.anyAttribute(netdataDashboard.context, 'colors', chart.context, '') + '"'
+ + chartCommonMin(chart.family, chart.context, chart.units)
+ + chartCommonMax(chart.family, chart.context, chart.units)
+ ' role="application"></div>';
// console.log(' \------- ' + chart.id + ' (' + chart.priority + '): ' + chart.context + ' height: ' + menus[menu].submenus[submenu].height);
@@ -1471,7 +1532,7 @@
sidebar += '<li class="" style="padding-top:15px;"><a href="https://github.com/firehol/netdata/wiki/Add-more-charts-to-netdata" target="_blank"><i class="fa fa-plus" aria-hidden="true"></i> add more charts</a></li>';
sidebar += '<li class=""><a href="https://github.com/firehol/netdata/wiki/Add-more-alarms-to-netdata" target="_blank"><i class="fa fa-plus" aria-hidden="true"></i> add more alarms</a></li>';
- sidebar += '<li class="" style="margin:20px;color:#666;"><small>netdata on <b>' + data.hostname.toString() + '</b>, collects every ' + ((data.update_every == 1)?'second':data.update_every.toString() + ' seconds') + ' <b>' + data.dimensions_count.toLocaleString() + '</b> metrics, presented as <b>' + data.charts_count.toLocaleString() + '</b> charts and monitored by <b>' + data.alarms_count.toLocaleString() + '</b> alarms, using ' + Math.round(data.rrd_memory_bytes / 1024 / 1024).toLocaleString() + ' MB of memory for ' + seconds4human(data.update_every * data.history) + ' of real-time history.<br/>&nbsp;<br/><b>netdata</b><br/>v' + data.version.toString() +'</small></li>';
+ sidebar += '<li class="" style="margin:20px;color:#666;"><small>netdata on <b>' + data.hostname.toString() + '</b>, collects every ' + ((data.update_every === 1)?'second':data.update_every.toString() + ' seconds') + ' <b>' + data.dimensions_count.toLocaleString() + '</b> metrics, presented as <b>' + data.charts_count.toLocaleString() + '</b> charts and monitored by <b>' + data.alarms_count.toLocaleString() + '</b> alarms, using ' + Math.round(data.rrd_memory_bytes / 1024 / 1024).toLocaleString() + ' MB of memory for ' + seconds4human(data.update_every * data.history) + ' of real-time history.<br/>&nbsp;<br/><b>netdata</b><br/>v' + data.version.toString() +'</small></li>';
sidebar += '</ul>';
div.innerHTML = html;
document.getElementById('sidebar').innerHTML = sidebar;
@@ -1619,7 +1680,7 @@
var t = new Date(timestamp * 1000);
var now = new Date();
- if(t.toDateString() == now.toDateString())
+ if(t.toDateString() === now.toDateString())
return t.toLocaleTimeString();
return t.toLocaleDateString() + space + t.toLocaleTimeString();
@@ -1676,8 +1737,8 @@
}
var delay = '';
- if((alarm.delay_up_duration > 0 || alarm.delay_down_duration > 0) && alarm.delay_multiplier != 0 && alarm.delay_max_duration > 0) {
- if(alarm.delay_up_duration == alarm.delay_down_duration) {
+ if((alarm.delay_up_duration > 0 || alarm.delay_down_duration > 0) && alarm.delay_multiplier !== 0 && alarm.delay_max_duration > 0) {
+ if(alarm.delay_up_duration === alarm.delay_down_duration) {
delay += '<small><br/>hysteresis ' + seconds4human(alarm.delay_up_duration, { negative_suffix: '' });
}
else {
@@ -1689,7 +1750,7 @@
delay += 'on&nbsp;recovery&nbsp;<code>' + seconds4human(alarm.delay_down_duration, { negative_suffix: '' }) + '</code>, ';
}
}
- if(alarm.delay_multiplier != 1.0) {
+ if(alarm.delay_multiplier !== 1.0) {
delay += 'multiplied&nbsp;by&nbsp;<code>' + alarm.delay_multiplier.toString() + '</code>';
delay += ',&nbsp;up&nbsp;to&nbsp;<code>' + seconds4human(alarm.delay_max_duration, { negative_suffix: '' }) + '</code>';
}
@@ -1762,15 +1823,16 @@
// sort the families, like the dashboard menu does
var families_sorted = families_sort.sort(function (a, b) {
- if (a.priority > b.priority) return -1;
- if (a.priority < b.priority) return 1;
- return 0;
+ if (a.priority < b.priority) return -1;
+ if (a.priority > b.priority) return 1;
+ return naturalSortCompare(a.name, b.name);
});
+ var i = 0;
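+ // families_sorted is in ascending order; i walks it forward while len-- counts down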
var fc = 0;
var len = families_sorted.length;
while(len--) {
- family = families_sorted[len].name;
+ family = families_sorted[i++].name;
var active_family_added = false;
var expanded = 'true';
var collapsed = '';
@@ -2253,7 +2315,7 @@
if(hours > 1) txt += hours.toString() + options.space + options.hours;
else if(hours === 1) txt += hours.toString() + options.space + options.hour;
- if(hours > 0 && minutes > 0 && seconds == 0)
+ if(hours > 0 && minutes > 0 && seconds === 0)
txt += options.space + options.and + options.space;
else if(hours > 0 && minutes > 0 && seconds > 0)
txt += ',' + options.space;
@@ -2293,7 +2355,7 @@
options.version = data.version;
netdataDashboard.os = data.os;
- if(typeof data.hosts != 'undefined')
+ if(typeof data.hosts !== 'undefined')
options.hosts = data.hosts;
// update the dashboard hostname
@@ -2328,7 +2390,7 @@
netdata_url = NETDATA.serverDefault;
// initialize clickable alarms
- NETDATA.alarms.chart_div_offset = 100;
+ NETDATA.alarms.chart_div_offset = -50;
NETDATA.alarms.chart_div_id_prefix = 'chart_';
NETDATA.alarms.chart_div_animation_duration = 0;
@@ -2337,7 +2399,7 @@
// download all the charts the server knows
NETDATA.chartRegistry.downloadAll(netdata_url, function(data) {
- if(data != null) {
+ if(data !== null) {
if(typeof data.custom_info !== 'undefined' && data.custom_info !== "") {
loadJs(data.custom_info, function () {
$.extend(true, netdataDashboard, customDashboard);
@@ -2362,7 +2424,7 @@
var s = options.version.split('-');
if(s.length !== 3) return null;
- if(s[2][0] == 'g') {
+ if(s[2][0] === 'g') {
var v = s[2].split('_')[0].substring(1, 8);
if(v.length === 7) {
versionLog('Installed git commit id of netdata is ' + v);
@@ -2566,17 +2628,17 @@
var tagName = null;
for (var i = 0, r = 0; r <= config.showChars; i++) {
- if (content[i] == '<' && !inTag) {
+ if (content[i] === '<' && !inTag) {
inTag = true;
// This could be "tag" or "/tag"
tagName = content.substring(i + 1, content.indexOf('>', i));
// If its a closing tag
- if (tagName[0] == '/') {
+ if (tagName[0] === '/') {
- if (tagName != '/' + openTags[0]) {
+ if (tagName !== ('/' + openTags[0])) {
config.errMsg = 'ERROR en HTML: the top of the stack should be the tag that closes';
} else {
openTags.shift(); // Pops the last tag from the open tag stack (the tag is closed in the result HTML!)
@@ -2584,12 +2646,12 @@
} else {
// There are some nasty tags that don't have a close tag like <br/>
- if (tagName.toLowerCase() != 'br') {
+ if (tagName.toLowerCase() !== 'br') {
openTags.unshift(tagName); // Add to start the name of the tag that opens
}
}
}
- if (inTag && content[i] == '>') {
+ if (inTag && content[i] === '>') {
inTag = false;
}
@@ -2668,17 +2730,21 @@
});
/* activate bootstrap scrollspy (needed for sidebar) */
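+ // offset: a fifth of the viewport height, clamped to the range 50-200px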
+ var scrollspyOffset = $(window).height() / 5;
+ if(scrollspyOffset > 200) scrollspyOffset = 200;
+ if(scrollspyOffset < 50) scrollspyOffset = 50;
$(document.body).scrollspy({
target: '#sidebar',
- offset: $(window).height() / 5 // controls the diff of the <hX> element to the top, to select it
+ offset: scrollspyOffset // controls the diff of the <hX> element to the top, to select it
});
// change the URL based on the current position of the screen
$sidebar.on('activate.bs.scrollspy', function (e) {
- // console.log(e);
+ //console.log(e);
var el = $(e.target);
- //if(el.find('ul').size() == 0) {
+ //if(el.find('ul').size() === 0) {
var hash = el.find('a').attr('href');
+ // console.log(hash);
if(typeof hash === 'string' && hash.substring(0, 1) === '#' && urlOptions.hash.startsWith(hash + '_submenu_') === false) {
urlOptions.hash = hash;
//console.log(urlOptions.hash);
@@ -2895,7 +2961,7 @@
});
NETDATA.requiredJs.push({
- url: NETDATA.serverDefault + 'dashboard_info.js?v20170308-1',
+ url: NETDATA.serverDefault + 'dashboard_info.js?v20170530-1',
async: false,
isAlreadyLoaded: function() { return false; }
});
@@ -2993,7 +3059,7 @@
<b>Hover</b> on them too!
</div>
<div class="col-md-6">
- <div data-netdata="system.intr" data-chart-library="dygraph" data-dygraph-theme="sparkline" data-dygraph-type="line" data-dygraph-strokewidth="3" data-dygraph-smooth="true" data-dygraph-highlightcirclesize="6" data-after="-90" data-height="60px" data-colors="#C66"></div>
+ <div class="netdata-container" data-netdata="system.intr" data-chart-library="dygraph" data-dygraph-theme="sparkline" data-dygraph-type="line" data-dygraph-strokewidth="3" data-dygraph-smooth="true" data-dygraph-highlightcirclesize="6" data-after="-90" data-height="60px" data-colors="#C66"></div>
</div>
</div>
</div>
@@ -3523,4 +3589,4 @@
</div>
</body>
</html>
-<script type="text/javascript" src="dashboard.js?v20170211-2"></script>
+<script type="text/javascript" src="dashboard.js?v20170715-1"></script>
diff --git a/web/infographic.html b/web/infographic.html
new file mode 100644
index 00000000..0bb57187
--- /dev/null
+++ b/web/infographic.html
@@ -0,0 +1,170 @@
+<!doctype html>
+<html lang=en-us>
+<head>
+ <meta charset=utf-8>
+ <title>NetData: Get control of your Linux Servers. Simple. Effective. Awesome.</title>
+ <meta name=author content="Costa Tsaousis">
+ <meta name=description content="Unparalleled insights, in real-time, of everything happening on your Linux systems and applications, with stunning, interactive web dashboards and powerful performance and health alarms.">
+
+ <meta name=viewport content="width=device-width,initial-scale=1">
+ <link rel=apple-touch-icon href=apple-touch-icon.png>
+ <link rel="icon" type="image/png" sizes="32x32" href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAYAAABzenr0AAACNklEQVRYhcXXv2tUQRAH8M+FEIJISBHCIWIhIQSUILERi4AiiqCggiIiomAjlhaC4j+ghYWISgqNohZaCBZBC8Ei8QdEUCutFBsxCBqDYkgci/cunkfuJffjJQPD8mZm5/vd2WV2HzlJ0Bs8CvrywsgCHwy+BpGOg0sJfjj4nYKX9FdwKG9gwZlgtgK8pLOpPxfw1mCoCnClDgWtzQTvCEYWCV7SkWAlFBoEb8dlDKBF8t2bMWUSH/AHr3CiEfz5CPUusPJLkRCdk5ZqyeqUrQv4R7E5TwK7M3zTeIKduRAIitiWEfIY69GdCwGcRFuG/xqONRkzkaA7+J5x+MaDtWmHvJ4HgeEM8Nn0bridfv9HoOFyBAdwJCPkqqTzHWwUaz7wgeBHxupfBKuCj2W25mxBsCGYyAB/FxTT27HcPlyep64tCLbjKbqqhLzBlgKfF8pVE4FgRXABI+ioEnYfOyzcFWsCbg+OV+xlpU4ER4O+4HVwL51b3xYEXcGu4Ao+YQhr5gmdxHmsQyfG0b/YxbWmLfRWmnxa0s06VbTMCpnBS9zFzQKTwR5cXCzwHIE02Sl8wSZsRI/kgLVJqjSd+t9LVjiG1diPszhdK3A5gR48k5zYMTwscC59sfT799CYKvA8EttbSeXgTr3gJQKl91kR+yTlvyG5uUbLYh9gb+ovltkb6qYtNSRo3kOygsBSzGlKsubf43USWLYK5CLLXoFWyU/CtzLbVDpW2n+m40yN9ukqdvAX9ac/EIgOapcAAAAASUVORK5CYII=">
+
+ <meta property="og:url" content="https://my-netdata.io/infographic.html" />
+ <meta property="og:type" content="website" />
+ <meta property="og:title" content="netdata infographic" />
+ <meta property="og:description" content="Unparalleled insights, in real-time, of everything happening on your Linux systems and applications, with stunning, interactive web dashboards and powerful performance and health alarms." />
+ <meta property="og:image" content="https://cloud.githubusercontent.com/assets/2662304/25580009/bf7016a4-2e85-11e7-9a7a-b36c57db7b91.png" />
+ <meta property="og:image:type" content="image/png" />
+ <meta property="fb:app_id" content="1200089276712916" />
+
+ <meta name="twitter:card" content="summary" />
+ <meta name="twitter:site" content="@linuxnetdata" />
+ <meta name="twitter:title" content="netdata infographic" />
+ <meta name="twitter:description" content="Unparalleled insights, in real-time, of everything happening on your Linux systems and applications, with stunning, interactive web dashboards and powerful performance and health alarms." />
+ <meta name="twitter:image" content="https://cloud.githubusercontent.com/assets/2662304/25580009/bf7016a4-2e85-11e7-9a7a-b36c57db7b91.png" />
+
+ <meta name="google-site-verification" content="3Xmk2kyCvai8p9HEnYHoQ9RBW20-b1NvPAgu07Fkkds" />
+ <meta name="msvalidate.01" content="896DCA31C9A664CE359FCF1A645DD476" />
+
+ <style>/*! normalize.css v4.1.1 | MIT License | github.com/necolas/normalize.css */
+ html {
+ line-height: 1.15;
+ -ms-text-size-adjust: 100%;
+ -webkit-text-size-adjust: 100%;
+ font: 17px/1.4 'Open Sans', sans-serif;
+ text-align: center
+ }
+ </style>
+</head>
+<body>
+<div style="width: 100%;text-align:center;">
+ <div>
+ <p style="font-size: 16pt">
+ <b>Interactive infographic of netdata features and functions</b>
+ </p>
+ <p>
+ Hover over and click on the infographic to open the related wiki page.
+ <br/>
+ <small>
+ The links and the docs are still a work in progress.
+ The interactive infographic is a feature of <b>draw.io</b>.
+ </small>
+ </p>
+ </div>
+ <div class=site-footer role=contentinfo>
+ <p>
+ <div style="display: inline-block;">
+ <div style="vertical-align:top;display:inline-block; height: 34px;margin-top:3px;"><a class=twitter-share-button href=https://twitter.com/share data-count=none data-lang=en data-via=linuxnetdata data-size=small data-text="Get control of your Linux servers. Simple. Effective. Awesome." data-url=https://my-netdata.io/infographic.html >Tweet</a></div>
+ <div style="vertical-align:top;display:inline-block; height: 34px;margin-top:3px;"><a class=twitter-follow-button href=https://twitter.com/linuxnetdata data-show-count=false data-lang=en data-size=small>Follow @linuxnetdata</a></div>
+ </div>
+ <div style="display: inline-block;">
+ <div class="fb-like" data-href="https://my-netdata.io/" data-layout="button" data-action="like" data-show-faces="false" data-share="false" style="vertical-align:top;display:inline-block; height: 34px;"></div>
+ <div class="fb-share-button" data-href="https://my-netdata.io/" data-layout="button" data-size="small" data-mobile-iframe="true"><a class="fb-xfbml-parse-ignore" target="_blank" href="https://www.facebook.com/sharer/sharer.php?u=https%3A%2F%2Fmy-netdata.io%2Finfographic.html&amp;src=sdkpreparse" style="vertical-align:top;display:inline-block; height: 34px;">Share</a></div>
+ </div>
+ </div>
+ <div>
+ <p style="font-size: 14pt">
+ <b>New to netdata?</b> <a href="//my-netdata.io" target="_blank">Have a look at a netdata demo</a>. You will love it!
+ </p>
+ <p>
+ <embed style="padding-top: 10px; padding-bottom: 25px;" src="//registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_entries&dimensions=persons&label=user%20base&units=null&value_color=blue&precision=0&v42" type="image/svg+xml" height="20" />
+ <embed style="padding-top: 10px; padding-bottom: 25px;" src="//registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_entries&dimensions=machines&label=servers%20monitored&units=null&value_color=orange&precision=0&v42" type="image/svg+xml" height="20" />
+ <embed style="padding-top: 10px; padding-bottom: 25px;" src="//registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_sessions&label=sessions%20served&units=null&value_color=yellowgreen&precision=0&v42" type="image/svg+xml" height="20" />
+ </p>
+ <hr/>
+ </div>
+ <div style="width:90%;display:inline-block;max-width:1300px;text-align:left;">
+ <div id="drawing" class="mxgraph" style="max-width:100%;border:1px solid transparent;"></div>
+ </div>
+</div>
+</body>
+
+<script>
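+ // options for the draw.io embed viewer; the diagram XML is fetched from the netdata repository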
+ var opts = {
+ "highlight":"#0000ff",
+ "target":"blank",
+ "lightbox":false,
+ "nav":false,
+ "resize":true,
+ "toolbar":"",
+ "auto-fit":true,
+ "check-visible-state":false,
+ "edit":"https://raw.githubusercontent.com/ktsaou/netdata/master/diagrams/netdata-overview.xml",
+ "url":"https://raw.githubusercontent.com/ktsaou/netdata/master/diagrams/netdata-overview.xml"
+ };
+ document.getElementById("drawing").dataset.mxgraph = JSON.stringify(opts);
+</script>
+
+<script>
+ if(window.location.hostname !== 'my-netdata.io' || window.location.protocol !== 'https:') {
+ var canonical = document.createElement('link');
+ canonical.rel = 'canonical';
+ canonical.href = 'https://my-netdata.io/infographic.html';
+ document.head.appendChild(canonical);
+ }
+</script>
+
+<script>!function (t, e) {
+ "use strict";
+ function a(t, n) {
+ return t.hasAttribute(n) === !0 ? t : t.parentNode !== r.body ? a(t.parentNode, n) : e
+ }
+
+ function n(n) {
+ var o, i, r, c, g, u = a(n.target, "data-ga-action"), l = !1;
+ u !== e && (o = u.getAttribute("data-ga-action") || e, i = u.getAttribute("data-ga-category") || e, r = u.getAttribute("data-ga-label") || e, c = u.getAttribute("href"), g = parseInt(u.getAttribute("data-ga-value"), 10) || e, ga !== e && i !== e && o !== e && (n.preventDefault(), "Download" !== i && n.ctrlKey !== !0 && n.metaKey !== !0 && 2 !== n.which || (l = !0, t.open(c)), function (a) {
+ var n;
+ ga("send", "event", i, o, r, g, {
+ hitCallback: function () {
+ l === !1 && (n !== e && clearTimeout(n), t.location = a)
+ }
+ }), n = setTimeout(function () {
+ l === !1 && (t.location.href = a)
+ }, 1e3)
+ }(c)))
+ }
+
+ function o() {
+ !function (t, e, a, n, o, i) {
+ t.GoogleAnalyticsObject = n, t[n] || (t[n] = function () {
+ (t[n].q = t[n].q || []).push(arguments)
+ }), t[n].l = +new Date, o = e.createElement(a), i = e.getElementsByTagName(a)[0], o.src = "//www.google-analytics.com/analytics.js", i.parentNode.insertBefore(o, i)
+ }(t, r, "script", "ga"), ga("create", "UA-64295674-3", "auto"), ga("send", "pageview"), t.document.addEventListener("click", n)
+ }
+
+ function i() {
+ !function (t, e, a) {
+ var n, o = t.getElementsByTagName(e)[0];
+ t.getElementById(a) || (n = t.createElement(e), n.id = a, n.src = "//platform.twitter.com/widgets.js", o.parentNode.insertBefore(n, o))
+ }(r, "script", "twitter-wjs")
+ }
+
+ var r = t.document;
+ o(), t.onload = i
+}(window)</script>
+
+<!-- facebook sdk -->
+<div id="fb-root"></div>
+<script>
+ window.fbAsyncInit = function() {
+ FB.init({
+ appId : '1200089276712916',
+ xfbml : true,
+ version : 'v2.8'
+ });
+ };
+
+ (function(d, s, id){
+ var js, fjs = d.getElementsByTagName(s)[0];
+ if (d.getElementById(id)) {return;}
+ js = d.createElement(s); js.id = id;
+ js.src = "//connect.facebook.net/en_US/sdk.js";
+ fjs.parentNode.insertBefore(js, fjs);
+ }(document, 'script', 'facebook-jssdk'));
+</script>
+
+<script type="text/javascript" src="https://www.draw.io/embed2.js?s=arrows2;mscae/cloud;azure;office/users;office/servers&fetch=https%3A%2F%2Fraw.githubusercontent.com%2Ffirehol%2Fnetdata%2Fmaster%2Fdiagrams%2Fnetdata-overview.xml"></script>
+
+</html>
+
diff --git a/web/version.txt b/web/version.txt
index a7ffee85..4c72a5b0 100644
--- a/web/version.txt
+++ b/web/version.txt
@@ -1 +1 @@
-f5fa346a188e906a8f2cce3c2cf32a88ce81c666
+4016e2d9e3c2fcf5f6d59827bf5f81083d6645ba