Diffstat:
-rw-r--r--  collectors/charts.d.plugin/.keep | 0
-rw-r--r--  collectors/charts.d.plugin/Makefile.am | 61
-rw-r--r--  collectors/charts.d.plugin/README.md | 195
-rw-r--r--  collectors/charts.d.plugin/ap/Makefile.inc | 13
-rw-r--r--  collectors/charts.d.plugin/ap/README.md | 86
-rw-r--r--  collectors/charts.d.plugin/ap/ap.chart.sh | 179
-rw-r--r--  collectors/charts.d.plugin/ap/ap.conf | 23
-rw-r--r--  collectors/charts.d.plugin/apache/Makefile.inc | 13
-rw-r--r--  collectors/charts.d.plugin/apache/README.md | 129
-rw-r--r--  collectors/charts.d.plugin/apache/apache.chart.sh | 251
-rw-r--r--  collectors/charts.d.plugin/apache/apache.conf | 30
-rw-r--r--  collectors/charts.d.plugin/apcupsd/Makefile.inc | 13
-rw-r--r--  collectors/charts.d.plugin/apcupsd/README.md | 7
-rw-r--r--  collectors/charts.d.plugin/apcupsd/apcupsd.chart.sh | 202
-rw-r--r--  collectors/charts.d.plugin/apcupsd/apcupsd.conf | 25
-rw-r--r--  collectors/charts.d.plugin/charts.d.conf | 63
-rwxr-xr-x  collectors/charts.d.plugin/charts.d.dryrun-helper.sh | 72
-rwxr-xr-x  collectors/charts.d.plugin/charts.d.plugin.in | 691
-rw-r--r--  collectors/charts.d.plugin/cpu_apps/Makefile.inc | 13
-rw-r--r--  collectors/charts.d.plugin/cpu_apps/README.md | 6
-rw-r--r--  collectors/charts.d.plugin/cpu_apps/cpu_apps.chart.sh | 70
-rw-r--r--  collectors/charts.d.plugin/cpu_apps/cpu_apps.conf | 19
-rw-r--r--  collectors/charts.d.plugin/cpufreq/Makefile.inc | 13
-rw-r--r--  collectors/charts.d.plugin/cpufreq/README.md | 6
-rw-r--r--  collectors/charts.d.plugin/cpufreq/cpufreq.chart.sh | 88
-rw-r--r--  collectors/charts.d.plugin/cpufreq/cpufreq.conf | 24
-rw-r--r--  collectors/charts.d.plugin/example/Makefile.inc | 13
-rw-r--r--  collectors/charts.d.plugin/example/README.md | 6
-rw-r--r--  collectors/charts.d.plugin/example/example.chart.sh | 123
-rw-r--r--  collectors/charts.d.plugin/example/example.conf | 21
-rw-r--r--  collectors/charts.d.plugin/exim/Makefile.inc | 13
-rw-r--r--  collectors/charts.d.plugin/exim/README.md | 6
-rw-r--r--  collectors/charts.d.plugin/exim/exim.chart.sh | 46
-rw-r--r--  collectors/charts.d.plugin/exim/exim.conf | 24
-rw-r--r--  collectors/charts.d.plugin/hddtemp/Makefile.inc | 13
-rw-r--r--  collectors/charts.d.plugin/hddtemp/README.md | 30
-rw-r--r--  collectors/charts.d.plugin/hddtemp/hddtemp.chart.sh | 77
-rw-r--r--  collectors/charts.d.plugin/hddtemp/hddtemp.conf | 23
-rw-r--r--  collectors/charts.d.plugin/libreswan/Makefile.inc | 13
-rw-r--r--  collectors/charts.d.plugin/libreswan/README.md | 44
-rw-r--r--  collectors/charts.d.plugin/libreswan/libreswan.chart.sh | 172
-rw-r--r--  collectors/charts.d.plugin/libreswan/libreswan.conf | 29
-rw-r--r--  collectors/charts.d.plugin/load_average/Makefile.inc | 13
-rw-r--r--  collectors/charts.d.plugin/load_average/README.md | 6
-rw-r--r--  collectors/charts.d.plugin/load_average/load_average.chart.sh | 69
-rw-r--r--  collectors/charts.d.plugin/load_average/load_average.conf | 22
-rw-r--r--  collectors/charts.d.plugin/loopsleepms.sh.inc | 219
-rw-r--r--  collectors/charts.d.plugin/mem_apps/Makefile.inc | 13
-rw-r--r--  collectors/charts.d.plugin/mem_apps/README.md | 6
-rw-r--r--  collectors/charts.d.plugin/mem_apps/mem_apps.chart.sh | 62
-rw-r--r--  collectors/charts.d.plugin/mem_apps/mem_apps.conf | 19
-rw-r--r--  collectors/charts.d.plugin/mysql/Makefile.inc | 13
-rw-r--r--  collectors/charts.d.plugin/mysql/README.md | 83
-rw-r--r--  collectors/charts.d.plugin/mysql/mysql.chart.sh | 511
-rw-r--r--  collectors/charts.d.plugin/mysql/mysql.conf | 23
-rw-r--r--  collectors/charts.d.plugin/nginx/Makefile.inc | 13
-rw-r--r--  collectors/charts.d.plugin/nginx/README.md | 6
-rw-r--r--  collectors/charts.d.plugin/nginx/nginx.chart.sh | 141
-rw-r--r--  collectors/charts.d.plugin/nginx/nginx.conf | 23
-rw-r--r--  collectors/charts.d.plugin/nut/Makefile.inc | 13
-rw-r--r--  collectors/charts.d.plugin/nut/README.md | 61
-rw-r--r--  collectors/charts.d.plugin/nut/nut.chart.sh | 232
-rw-r--r--  collectors/charts.d.plugin/nut/nut.conf | 33
-rw-r--r--  collectors/charts.d.plugin/opensips/Makefile.inc | 13
-rw-r--r--  collectors/charts.d.plugin/opensips/README.md | 7
-rw-r--r--  collectors/charts.d.plugin/opensips/opensips.chart.sh | 324
-rw-r--r--  collectors/charts.d.plugin/opensips/opensips.conf | 21
-rw-r--r--  collectors/charts.d.plugin/phpfpm/Makefile.inc | 13
-rw-r--r--  collectors/charts.d.plugin/phpfpm/README.md | 6
-rw-r--r--  collectors/charts.d.plugin/phpfpm/phpfpm.chart.sh | 169
-rw-r--r--  collectors/charts.d.plugin/phpfpm/phpfpm.conf | 27
-rw-r--r--  collectors/charts.d.plugin/postfix/Makefile.inc | 13
-rw-r--r--  collectors/charts.d.plugin/postfix/README.md | 28
-rw-r--r--  collectors/charts.d.plugin/postfix/postfix.chart.sh | 87
-rw-r--r--  collectors/charts.d.plugin/postfix/postfix.conf | 25
-rw-r--r--  collectors/charts.d.plugin/sensors/Makefile.inc | 13
-rw-r--r--  collectors/charts.d.plugin/sensors/README.md | 55
-rw-r--r--  collectors/charts.d.plugin/sensors/sensors.chart.sh | 250
-rw-r--r--  collectors/charts.d.plugin/sensors/sensors.conf | 32
-rw-r--r--  collectors/charts.d.plugin/squid/Makefile.inc | 13
-rw-r--r--  collectors/charts.d.plugin/squid/README.md | 67
-rw-r--r--  collectors/charts.d.plugin/squid/squid.chart.sh | 141
-rw-r--r--  collectors/charts.d.plugin/squid/squid.conf | 26
-rw-r--r--  collectors/charts.d.plugin/tomcat/Makefile.inc | 13
-rw-r--r--  collectors/charts.d.plugin/tomcat/README.md | 6
-rw-r--r--  collectors/charts.d.plugin/tomcat/tomcat.chart.sh | 152
-rw-r--r--  collectors/charts.d.plugin/tomcat/tomcat.conf | 38
87 files changed, 6065 insertions(+), 0 deletions(-)
diff --git a/collectors/charts.d.plugin/.keep b/collectors/charts.d.plugin/.keep
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/collectors/charts.d.plugin/.keep
diff --git a/collectors/charts.d.plugin/Makefile.am b/collectors/charts.d.plugin/Makefile.am
new file mode 100644
index 0000000..2989b4b
--- /dev/null
+++ b/collectors/charts.d.plugin/Makefile.am
@@ -0,0 +1,61 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+CLEANFILES = \
+ charts.d.plugin \
+ $(NULL)
+
+include $(top_srcdir)/build/subst.inc
+SUFFIXES = .in
+
+dist_libconfig_DATA = \
+ charts.d.conf \
+ $(NULL)
+
+dist_plugins_SCRIPTS = \
+ charts.d.dryrun-helper.sh \
+ charts.d.plugin \
+ loopsleepms.sh.inc \
+ $(NULL)
+
+dist_noinst_DATA = \
+ charts.d.plugin.in \
+ README.md \
+ $(NULL)
+
+dist_charts_SCRIPTS = \
+ $(NULL)
+
+dist_charts_DATA = \
+ $(NULL)
+
+userchartsconfigdir=$(configdir)/charts.d
+dist_userchartsconfig_DATA = \
+ .keep \
+ $(NULL)
+
+chartsconfigdir=$(libconfigdir)/charts.d
+dist_chartsconfig_DATA = \
+ $(NULL)
+
+include ap/Makefile.inc
+include apache/Makefile.inc
+include apcupsd/Makefile.inc
+include cpu_apps/Makefile.inc
+include cpufreq/Makefile.inc
+include example/Makefile.inc
+include exim/Makefile.inc
+include hddtemp/Makefile.inc
+include libreswan/Makefile.inc
+include load_average/Makefile.inc
+include mem_apps/Makefile.inc
+include mysql/Makefile.inc
+include nginx/Makefile.inc
+include nut/Makefile.inc
+include opensips/Makefile.inc
+include phpfpm/Makefile.inc
+include postfix/Makefile.inc
+include sensors/Makefile.inc
+include squid/Makefile.inc
+include tomcat/Makefile.inc
diff --git a/collectors/charts.d.plugin/README.md b/collectors/charts.d.plugin/README.md
new file mode 100644
index 0000000..3d318f2
--- /dev/null
+++ b/collectors/charts.d.plugin/README.md
@@ -0,0 +1,195 @@
+# charts.d.plugin
+
+`charts.d.plugin` is a netdata external plugin. It is an **orchestrator** for data collection modules written in `BASH` v4+.
+
+1. It runs as an independent process (`ps fax` shows it)
+2. It is started and stopped automatically by netdata
+3. It communicates with netdata via a unidirectional pipe (sending data to the netdata daemon)
+4. Supports any number of data collection **modules**
+
+`charts.d.plugin` has been designed so that the script doing the actual data collection stays permanently in
+memory, collecting values with as little overhead as possible
+(i.e. it initializes once, then repeatedly collects values with minimal overhead).
+
+`charts.d.plugin` looks for scripts in `/usr/libexec/netdata/charts.d`.
+The scripts should have the filename suffix: `.chart.sh`.
+
+## Configuration
+
+`charts.d.plugin` itself can be configured using the configuration file `/etc/netdata/charts.d.conf`
+(to edit it on your system run `/etc/netdata/edit-config charts.d.conf`). This file is also a BASH script.
+
+In this file, you can place statements like this:
+
+```sh
+enable_all_charts="yes"
+X="yes"
+Y="no"
+```
+
+where `X` and `Y` are the names of individual charts.d collector scripts.
+When set to `yes`, charts.d will evaluate the collector script (see below).
+When set to `no`, charts.d will ignore the collector script.
+
+The variable `enable_all_charts` sets the default enable/disable state for all charts.
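+
+For example, to run only the `ap` and `nut` modules, you could disable everything by default and enable these two explicitly:
+
+```
+enable_all_charts="no"
+ap="yes"
+nut="yes"
+```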
+
+## A charts.d module
+
+A `charts.d.plugin` module is a BASH script defining a few functions.
+
+For a module called `X`, the following criteria must be met:
+
+1. The module script must be called `X.chart.sh` and placed in `/usr/libexec/netdata/charts.d`.
+
+2. If the module needs a configuration, it should be called `X.conf` and placed in `/etc/netdata/charts.d`.
+ The configuration file `X.conf` is also a BASH script itself.
+ To edit the default files supplied by netdata run `/etc/netdata/edit-config charts.d/X.conf`,
+ where `X` is the name of the module.
+
+3. All functions and global variables defined in the script and its configuration must begin with `X_`.
+
+4. The following functions must be defined:
+
+ - `X_check()` - returns 0 or 1, depending on whether the module is able to run or not
+ (following the standard Linux command line return codes: 0 = OK, the collector can operate; 1 = FAILED,
+ the collector cannot be used).
+
+ - `X_create()` - creates the netdata charts, following the standard netdata plugin guides as described in
+ **[External Plugins](../plugins.d/)** (commands `CHART` and `DIMENSION`).
+ The return value does matter: 0 = OK, 1 = FAILED.
+
+ - `X_update()` - collects the values for the defined charts, following the standard netdata plugin guides
+ as described in **[External Plugins](../plugins.d/)** (commands `BEGIN`, `SET`, `END`).
+ The return value also matters: 0 = OK, 1 = FAILED.
+
+5. The following global variables are available to be set:
+ - `X_update_every` - is the data collection frequency for the module script, in seconds.
+
+The module script may define more functions or variables, but all of them must begin with `X_`.
+
+The standard netdata plugin variables are also available (check **[External Plugins](../plugins.d/)**).
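+
+Putting it all together, here is a minimal sketch of a hypothetical module named `example2` (illustrative only, it is not shipped with netdata):
+
+```sh
+# example2.chart.sh - a minimal charts.d module sketch (illustrative only)
+
+# the data collection frequency for this module, in seconds (optional)
+example2_update_every=5
+
+example2_check() {
+	# can we collect data? 0 = OK, 1 = disable the module
+	[ -r /proc/uptime ] || return 1
+	return 0
+}
+
+example2_create() {
+	# define one chart with one dimension
+	cat <<EOF
+CHART example2.uptime '' "System Uptime" "seconds" uptime example2.uptime line 150000 $example2_update_every
+DIMENSION uptime '' absolute 1 1
+EOF
+	return 0
+}
+
+example2_update() {
+	# $1 is the microseconds since the last run - append it to BEGIN
+	local microseconds="$1" up
+	up=$(cut -d '.' -f 1 /proc/uptime)
+	cat <<EOF
+BEGIN example2.uptime $microseconds
+SET uptime = $up
+END
+EOF
+	return 0
+}
+```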
+
+### X_check()
+
+The purpose of the BASH function `X_check()` is to check if the module can collect data (or check its config).
+
+For example, if the module is about monitoring a local mysql database, the `X_check()` function may attempt to
+connect to a local mysql database to find out if it can read the values it needs.
+
+`X_check()` is run only once for the lifetime of the module.
+
+### X_create()
+
+The purpose of the BASH function `X_create()` is to create the charts and dimensions using the standard netdata
+plugin guides (**[External Plugins](../plugins.d/)**).
+
+`X_create()` will be called just once and only after `X_check()` was successful.
+You can, however, call it yourself when there is a need for it (for example, to add a new dimension to an existing chart).
+
+A non-zero return value will disable the collector.
+
+### X_update()
+
+`X_update()` will be called repeatedly every `X_update_every` seconds, to collect new values and send them to netdata,
+following the netdata plugin guides (**[External Plugins](../plugins.d/)**).
+
+The function will be called with one parameter: microseconds since the last time it was run. This value should be
+appended to the `BEGIN` statement of every chart updated by the collector script.
+
+A non-zero return value will disable the collector.
+
+### Useful functions charts.d provides
+
+Module scripts can use the following charts.d functions:
+
+#### require_cmd command
+
+`require_cmd()` will check if a command is available in the running system.
+
+For example, your `X_check()` function may use it like this:
+
+```sh
+mysql_check() {
+ require_cmd mysql || return 1
+ return 0
+}
+```
+
+Using the above, if the command `mysql` is not available in the system, the `mysql` module will be disabled.
+
+#### fixid "string"
+
+`fixid()` takes a string and returns a properly formatted id for a chart or dimension.
+
+This is an expensive function that should not be used in `X_update()`.
+You can keep the generated id in a BASH associative array to have the values available in `X_update()`, like this:
+
+```sh
+declare -A X_ids=()
+X_create() {
+ local name="a very bad name for id"
+
+ X_ids[$name]="$(fixid "$name")"
+}
+
+X_update() {
+ local microseconds="$1"
+
+ ...
+ local name="a very bad name for id"
+ ...
+
+ echo "BEGIN ${X_ids[$name]} $microseconds"
+ ...
+}
+```
+
+### Debugging your collectors
+
+You can run `charts.d.plugin` by hand with something like this:
+
+```sh
+# become user netdata
+sudo su -s /bin/sh netdata
+
+# run the plugin in debug mode
+/usr/libexec/netdata/plugins.d/charts.d.plugin debug 1 X Y Z
+```
+
+Charts.d will run in `debug` mode, with an update frequency of `1`, evaluating only the collector scripts
+`X`, `Y` and `Z`. You can list zero or more module scripts. If none are given, charts.d will evaluate all
+available module scripts.
+
+Keep in mind that if your configs are not in `/etc/netdata`, you should do the following before running
+`charts.d.plugin`:
+
+```sh
+export NETDATA_USER_CONFIG_DIR="/path/to/etc/netdata"
+```
+
+Also, remember that netdata runs `charts.d.plugin` as user `netdata` (or any other user the netdata daemon is configured to run as).
+
+
+## Running multiple instances of charts.d.plugin
+
+`charts.d.plugin` will call the `X_update()` functions of all its modules one after another. This means that a delay in collector `X`
+will also delay the collection of `Y` and `Z`.
+
+You can run multiple instances of `charts.d.plugin` to overcome this problem.
+
+This is what you need to do:
+
+1. Decide a new name for the new charts.d instance: for example `charts2.d`.
+
+2. Create/edit the files `/etc/netdata/charts.d.conf` and `/etc/netdata/charts2.d.conf` and enable/disable the
+ modules you want each of them to run. Remember to set `enable_all_charts="no"` in both of them, and enable the individual
+ modules for each.
+
+3. Link `/usr/libexec/netdata/plugins.d/charts.d.plugin` to `/usr/libexec/netdata/plugins.d/charts2.d.plugin`.
+ Netdata will spawn a new charts.d process.
+
+Execute the above in this order, since netdata will (by default) attempt to start new plugins soon after they are
+created in `/usr/libexec/netdata/plugins.d/`.
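+
+A rough sketch of the above steps on a typical installation (paths may differ on your system):
+
+```sh
+# create/edit the config files for both instances
+# (set enable_all_charts="no" in both, then enable different modules in each)
+vi /etc/netdata/charts.d.conf
+vi /etc/netdata/charts2.d.conf
+
+# link the plugin under the new name - netdata will start it as a separate process
+ln -s /usr/libexec/netdata/plugins.d/charts.d.plugin \
+      /usr/libexec/netdata/plugins.d/charts2.d.plugin
+```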
+
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/charts.d.plugin/ap/Makefile.inc b/collectors/charts.d.plugin/ap/Makefile.inc
new file mode 100644
index 0000000..a2dd375
--- /dev/null
+++ b/collectors/charts.d.plugin/ap/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_charts_DATA += ap/ap.chart.sh
+dist_chartsconfig_DATA += ap/ap.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += ap/README.md ap/Makefile.inc
+
diff --git a/collectors/charts.d.plugin/ap/README.md b/collectors/charts.d.plugin/ap/README.md
new file mode 100644
index 0000000..962a856
--- /dev/null
+++ b/collectors/charts.d.plugin/ap/README.md
@@ -0,0 +1,86 @@
+# Access Point Plugin (ap)
+
+The `ap` collector visualizes data related to access points.
+
+## Example netdata charts
+
+![image](https://cloud.githubusercontent.com/assets/2662304/12377654/9f566e88-bd2d-11e5-855a-e0ba96b8fd98.png)
+
+## How it works
+
+It does the following:
+
+1. Runs `iw dev` searching for interfaces that have `type AP`.
+
+ From the same output it collects the SSIDs each AP supports by looking for lines `ssid NAME`.
+
+ Example:
+```sh
+# iw dev
+phy#0
+ Interface wlan0
+ ifindex 3
+ wdev 0x1
+ addr 7c:dd:90:77:34:2a
+ ssid TSAOUSIS
+ type AP
+ channel 7 (2442 MHz), width: 20 MHz, center1: 2442 MHz
+```
+
+
+2. For each interface found, it runs `iw INTERFACE station dump`.
+
+ From the output it collects:
+
+ - rx/tx bytes
+ - rx/tx packets
+ - tx retries
+ - tx failed
+ - signal strength
+ - rx/tx bitrate
+ - expected throughput
+
+ Example:
+
+```sh
+# iw wlan0 station dump
+Station 40:b8:37:5a:ed:5e (on wlan0)
+ inactive time: 910 ms
+ rx bytes: 15588897
+ rx packets: 127772
+ tx bytes: 52257763
+ tx packets: 95802
+ tx retries: 2162
+ tx failed: 28
+ signal: -43 dBm
+ signal avg: -43 dBm
+ tx bitrate: 65.0 MBit/s MCS 7
+ rx bitrate: 1.0 MBit/s
+ expected throughput: 32.125Mbps
+ authorized: yes
+ authenticated: yes
+ preamble: long
+ WMM/WME: yes
+ MFP: no
+ TDLS peer: no
+```
+
+3. For each interface found, it creates 6 charts:
+
+ - Number of Connected clients
+ - Bandwidth for all clients
+ - Packets for all clients
+ - Transmit Issues for all clients
+ - Average Signal among all clients
+ - Average Bitrate (including average expected throughput) among all clients
+
+## Configuration
+
+The only option you can set in `/etc/netdata/charts.d/ap.conf` is `ap_update_every=NUMBER`, which controls the data collection frequency.
+To edit this file on your system run `/etc/netdata/edit-config charts.d/ap.conf`.
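+
+For example, to collect access point data every 10 seconds:
+
+```sh
+ap_update_every=10
+```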
+
+## Auto-detection
+
+The plugin is able to auto-detect if you are running access points on your Linux box.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fap%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/charts.d.plugin/ap/ap.chart.sh b/collectors/charts.d.plugin/ap/ap.chart.sh
new file mode 100644
index 0000000..a2d04c0
--- /dev/null
+++ b/collectors/charts.d.plugin/ap/ap.chart.sh
@@ -0,0 +1,179 @@
+# shellcheck shell=bash
+# no need for shebang - this file is loaded from charts.d.plugin
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2016 Costa Tsaousis <costa@tsaousis.gr>
+#
+
+# _update_every is a special variable - it holds the number of seconds
+# between the calls of the _update() function
+ap_update_every=
+ap_priority=6900
+
+declare -A ap_devs=()
+
+# _check is called once, to find out if this chart should be enabled or not
+ap_check() {
+ require_cmd iw || return 1
+ local ev
+ ev=$(run iw dev | awk '
+ BEGIN {
+ i = "";
+ ssid = "";
+ ap = 0;
+ }
+ /^[ \t]+Interface / {
+ if( ap == 1 ) {
+ print "ap_devs[" i "]=\"" ssid "\""
+ }
+
+ i = $2;
+ ssid = "";
+ ap = 0;
+ }
+ /^[ \t]+ssid / { ssid = $2; }
+ /^[ \t]+type AP$/ { ap = 1; }
+ END {
+ if( ap == 1 ) {
+ print "ap_devs[" i "]=\"" ssid "\""
+ }
+ }
+ ')
+ eval "${ev}"
+
+ # this should return:
+ # - 0 to enable the chart
+ # - 1 to disable the chart
+
+ [ ${#ap_devs[@]} -gt 0 ] && return 0
+ error "no devices found in AP mode, with 'iw dev'"
+ return 1
+}
+
+# _create is called once, to create the charts
+ap_create() {
+ local ssid dev
+
+ for dev in "${!ap_devs[@]}"; do
+ ssid="${ap_devs[${dev}]}"
+
+ # create the chart with 3 dimensions
+ cat <<EOF
+CHART ap_clients.${dev} '' "Connected clients to ${ssid} on ${dev}" "clients" ${dev} ap.clients line $((ap_priority + 1)) $ap_update_every
+DIMENSION clients '' absolute 1 1
+
+CHART ap_bandwidth.${dev} '' "Bandwidth for ${ssid} on ${dev}" "kilobits/s" ${dev} ap.net area $((ap_priority + 2)) $ap_update_every
+DIMENSION received '' incremental 8 1024
+DIMENSION sent '' incremental -8 1024
+
+CHART ap_packets.${dev} '' "Packets for ${ssid} on ${dev}" "packets/s" ${dev} ap.packets line $((ap_priority + 3)) $ap_update_every
+DIMENSION received '' incremental 1 1
+DIMENSION sent '' incremental -1 1
+
+CHART ap_issues.${dev} '' "Transmit Issues for ${ssid} on ${dev}" "issues/s" ${dev} ap.issues line $((ap_priority + 4)) $ap_update_every
+DIMENSION retries 'tx retries' incremental 1 1
+DIMENSION failures 'tx failures' incremental -1 1
+
+CHART ap_signal.${dev} '' "Average Signal for ${ssid} on ${dev}" "dBm" ${dev} ap.signal line $((ap_priority + 5)) $ap_update_every
+DIMENSION signal 'average signal' absolute 1 1000
+
+CHART ap_bitrate.${dev} '' "Bitrate for ${ssid} on ${dev}" "Mbps" ${dev} ap.bitrate line $((ap_priority + 6)) $ap_update_every
+DIMENSION receive '' absolute 1 1000
+DIMENSION transmit '' absolute -1 1000
+DIMENSION expected 'expected throughput' absolute 1 1000
+EOF
+ done
+
+ return 0
+}
+
+# _update is called continuously, to collect the values
+ap_update() {
+ # the first argument to this function is the microseconds since last update
+ # pass this parameter to the BEGIN statement (see below).
+
+ # do all the work to collect / calculate the values
+ # for each dimension
+ # remember: KEEP IT SIMPLE AND SHORT
+
+ for dev in "${!ap_devs[@]}"; do
+ echo
+ echo "DEVICE ${dev}"
+ iw "${dev}" station dump
+ done | awk '
+ function zero_data() {
+ dev = "";
+ c = 0;
+ rb = 0;
+ tb = 0;
+ rp = 0;
+ tp = 0;
+ tr = 0;
+ tf = 0;
+ tt = 0;
+ rt = 0;
+ s = 0;
+ g = 0;
+ e = 0;
+ }
+ function print_device() {
+ if(dev != "" && length(dev) > 0) {
+ print "BEGIN ap_clients." dev;
+ print "SET clients = " c;
+ print "END";
+ print "BEGIN ap_bandwidth." dev;
+ print "SET received = " rb;
+ print "SET sent = " tb;
+ print "END";
+ print "BEGIN ap_packets." dev;
+ print "SET received = " rp;
+ print "SET sent = " tp;
+ print "END";
+ print "BEGIN ap_issues." dev;
+ print "SET retries = " tr;
+ print "SET failures = " tf;
+ print "END";
+
+ if( c == 0 ) c = 1;
+ print "BEGIN ap_signal." dev;
+ print "SET signal = " int(s / c);
+ print "END";
+ print "BEGIN ap_bitrate." dev;
+ print "SET receive = " int(rt / c);
+ print "SET transmit = " int(tt / c);
+ print "SET expected = " int(e / c);
+ print "END";
+ }
+ zero_data();
+ }
+ BEGIN {
+ zero_data();
+ }
+ /^DEVICE / {
+ print_device();
+ dev = $2;
+ }
+ /^Station/ { c++; }
+ /^[ \t]+rx bytes:/ { rb += $3; }
+ /^[ \t]+tx bytes:/ { tb += $3; }
+ /^[ \t]+rx packets:/ { rp += $3; }
+ /^[ \t]+tx packets:/ { tp += $3; }
+ /^[ \t]+tx retries:/ { tr += $3; }
+ /^[ \t]+tx failed:/ { tf += $3; }
+ /^[ \t]+signal:/ { x = $2; s += x * 1000; }
+ /^[ \t]+rx bitrate:/ { x = $3; rt += x * 1000; }
+ /^[ \t]+tx bitrate:/ { x = $3; tt += x * 1000; }
+ /^[ \t]+expected throughput:(.*)Mbps/ {
+ x=$3;
+ sub(/Mbps/, "", x);
+ e += x * 1000;
+ }
+ END {
+ print_device();
+ }
+ '
+
+ return 0
+}
diff --git a/collectors/charts.d.plugin/ap/ap.conf b/collectors/charts.d.plugin/ap/ap.conf
new file mode 100644
index 0000000..38fc157
--- /dev/null
+++ b/collectors/charts.d.plugin/ap/ap.conf
@@ -0,0 +1,23 @@
+# no need for shebang - this file is loaded from charts.d.plugin
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2018 Costa Tsaousis <costa@tsaousis.gr>
+# GPL v3+
+
+# nothing fancy to configure.
+# this module will run
+# iw dev - to find wireless devices in AP mode
+# iw ${dev} station dump - to get connected clients
+# based on the above, it generates several charts
+
+# the data collection frequency
+# if unset, will inherit the netdata update frequency
+#ap_update_every=
+
+# the charts priority on the dashboard
+#ap_priority=6900
+
+# the number of retries to do in case of failure
+# before disabling the module
+#ap_retries=10
diff --git a/collectors/charts.d.plugin/apache/Makefile.inc b/collectors/charts.d.plugin/apache/Makefile.inc
new file mode 100644
index 0000000..4b360ea
--- /dev/null
+++ b/collectors/charts.d.plugin/apache/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_charts_DATA += apache/apache.chart.sh
+dist_chartsconfig_DATA += apache/apache.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += apache/README.md apache/Makefile.inc
+
diff --git a/collectors/charts.d.plugin/apache/README.md b/collectors/charts.d.plugin/apache/README.md
new file mode 100644
index 0000000..2739791
--- /dev/null
+++ b/collectors/charts.d.plugin/apache/README.md
@@ -0,0 +1,129 @@
+# Apache
+
+> THIS MODULE IS OBSOLETE.
+> USE [THE PYTHON ONE](../../python.d.plugin/apache) - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT
+
+---
+
+The `apache` collector visualizes key performance data for an apache web server.
+
+## Example netdata charts
+
+For apache 2.2:
+
+![image](https://cloud.githubusercontent.com/assets/2662304/12530273/421c4d14-c1e2-11e5-9fb6-ca6d6dd3b1dd.png)
+
+For apache 2.4:
+
+![image](https://cloud.githubusercontent.com/assets/2662304/12530376/29ec26de-c1e6-11e5-9af1-e48aaf781795.png)
+
+## How it works
+
+It runs `curl "http://apache.host/server-status?auto"` to fetch the current status of apache.
+
+It has been tested with apache 2.2 and apache 2.4. The latter also provides connections information (total and a breakdown by status).
+
+Apache 2.2 response:
+
+```sh
+$ curl "http://127.0.0.1/server-status?auto"
+Total Accesses: 80057
+Total kBytes: 223017
+CPULoad: .018287
+Uptime: 64472
+ReqPerSec: 1.24173
+BytesPerSec: 3542.15
+BytesPerReq: 2852.59
+BusyWorkers: 1
+IdleWorkers: 49
+Scoreboard: _________________________......................................._W_______________________.......................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................
+```
+
+Apache 2.4 response:
+
+```sh
+$ curl "http://127.0.0.1/server-status?auto"
+127.0.0.1
+ServerVersion: Apache/2.4.18 (Unix)
+ServerMPM: event
+Server Built: Dec 14 2015 08:05:54
+CurrentTime: Saturday, 23-Jan-2016 14:42:06 EET
+RestartTime: Saturday, 23-Jan-2016 04:57:13 EET
+ParentServerConfigGeneration: 2
+ParentServerMPMGeneration: 1
+ServerUptimeSeconds: 35092
+ServerUptime: 9 hours 44 minutes 52 seconds
+Load1: 0.32
+Load5: 0.32
+Load15: 0.27
+Total Accesses: 32403
+Total kBytes: 34464
+CPUUser: 30.37
+CPUSystem: 29.55
+CPUChildrenUser: 0
+CPUChildrenSystem: 0
+CPULoad: .170751
+Uptime: 35092
+ReqPerSec: .923373
+BytesPerSec: 1005.67
+BytesPerReq: 1089.13
+BusyWorkers: 1
+IdleWorkers: 99
+ConnsTotal: 0
+ConnsAsyncWriting: 0
+ConnsAsyncKeepAlive: 0
+ConnsAsyncClosing: 0
+Scoreboard: __________________________________________________________________________________________W_________............................................................................................................................................................................................................................................................................................................
+```
+
+From the apache status output it collects:
+
+ - total accesses (incremental value, rendered as requests/s)
+ - total bandwidth (incremental value, rendered as bandwidth/s)
+ - requests per second (this appears to be calculated by apache as an average for its lifetime, while the one calculated by netdata using the total accesses counter is real-time)
+ - bytes per second (average for the lifetime of the apache server)
+ - bytes per request (average for the lifetime of the apache server)
+ - workers by status (`busy` and `idle`)
+ - total connections (currently active connections - offered by apache 2.4+)
+ - async connections per status (`keepalive`, `writing`, `closing` - offered by apache 2.4+)
+
+## Configuration
+
+The configuration is stored in `/etc/netdata/charts.d/apache.conf`.
+To edit this file on your system run `/etc/netdata/edit-config charts.d/apache.conf`.
+
+The internal default is:
+
+```sh
+# the URL your apache server is responding with mod_status information.
+apache_url="http://127.0.0.1:80/server-status?auto"
+
+# use this to set custom curl options you may need
+apache_curl_opts=
+
+# set this to a NUMBER to overwrite the update frequency
+# it is in seconds
+apache_update_every=
+```
+
+The default `apache_update_every` is configured in netdata.
+
+## Auto-detection
+
+If you have configured your apache server to offer server-status information to localhost clients, the defaults should work fine.
+
+## Apache Configuration
+
+Apache configuration differs between distributions. Please check your distribution's documentation for information on enabling apache's `mod_status` module.
+
+If you can successfully run this command by hand:
+
+```sh
+curl "http://127.0.0.1:80/server-status?auto"
+```
+
+netdata will be able to do it too.
+
+Note: You may need to have the default `000-default.conf` website enabled in order for `mod_status` to work.
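+
+On Debian/Ubuntu style installations, enabling the module and the default site usually amounts to something like the following (a sketch only - package, module and site names vary by distribution):
+
+```sh
+a2enmod status            # enable apache's mod_status
+a2ensite 000-default      # make sure the default site is enabled
+systemctl reload apache2  # apply the changes
+```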
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fapache%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/charts.d.plugin/apache/apache.chart.sh b/collectors/charts.d.plugin/apache/apache.chart.sh
new file mode 100644
index 0000000..7d09ee6
--- /dev/null
+++ b/collectors/charts.d.plugin/apache/apache.chart.sh
@@ -0,0 +1,251 @@
+# shellcheck shell=bash
+# no need for shebang - this file is loaded from charts.d.plugin
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2016 Costa Tsaousis <costa@tsaousis.gr>
+#
+
+# the URL to download apache status info
+apache_url="http://127.0.0.1:80/server-status?auto"
+apache_curl_opts=
+
+# _update_every is a special variable - it holds the number of seconds
+# between the calls of the _update() function
+apache_update_every=
+
+apache_priority=60000
+
+# convert apache floating point values
+# to integer using this multiplier
+# this only affects precision - the values
+# will be in the proper units
+apache_decimal_detail=1000000
+
+declare -a apache_response=()
+apache_accesses=0
+apache_kbytes=0
+apache_reqpersec=0
+apache_bytespersec=0
+apache_bytesperreq=0
+apache_busyworkers=0
+apache_idleworkers=0
+apache_connstotal=0
+apache_connsasyncwriting=0
+apache_connsasynckeepalive=0
+apache_connsasyncclosing=0
+
+apache_keys_detected=0
+apache_has_conns=0
+apache_key_accesses=
+apache_key_kbytes=
+apache_key_reqpersec=
+apache_key_bytespersec=
+apache_key_bytesperreq=
+apache_key_busyworkers=
+apache_key_idleworkers=
+apache_key_scoreboard=
+apache_key_connstotal=
+apache_key_connsasyncwriting=
+apache_key_connsasynckeepalive=
+apache_key_connsasyncclosing=
+apache_detect() {
+ local i=0
+ for x in "${@}"; do
+ case "${x}" in
+ 'Total Accesses') apache_key_accesses=$((i + 1)) ;;
+ 'Total kBytes') apache_key_kbytes=$((i + 1)) ;;
+ 'ReqPerSec') apache_key_reqpersec=$((i + 1)) ;;
+ 'BytesPerSec') apache_key_bytespersec=$((i + 1)) ;;
+ 'BytesPerReq') apache_key_bytesperreq=$((i + 1)) ;;
+ 'BusyWorkers') apache_key_busyworkers=$((i + 1)) ;;
+ 'IdleWorkers') apache_key_idleworkers=$((i + 1)) ;;
+ 'ConnsTotal') apache_key_connstotal=$((i + 1)) ;;
+ 'ConnsAsyncWriting') apache_key_connsasyncwriting=$((i + 1)) ;;
+ 'ConnsAsyncKeepAlive') apache_key_connsasynckeepalive=$((i + 1)) ;;
+ 'ConnsAsyncClosing') apache_key_connsasyncclosing=$((i + 1)) ;;
+ 'Scoreboard') apache_key_scoreboard=$((i)) ;;
+ esac
+
+ i=$((i + 1))
+ done
+
+ # we will not check for the Conns*
+ # keys, since these are apache 2.4 specific
+ [ -z "${apache_key_accesses}" ] && error "missing 'Total Accesses' from apache server: ${*}" && return 1
+ [ -z "${apache_key_kbytes}" ] && error "missing 'Total kBytes' from apache server: ${*}" && return 1
+ [ -z "${apache_key_reqpersec}" ] && error "missing 'ReqPerSec' from apache server: ${*}" && return 1
+ [ -z "${apache_key_bytespersec}" ] && error "missing 'BytesPerSec' from apache server: ${*}" && return 1
+ [ -z "${apache_key_bytesperreq}" ] && error "missing 'BytesPerReq' from apache server: ${*}" && return 1
+ [ -z "${apache_key_busyworkers}" ] && error "missing 'BusyWorkers' from apache server: ${*}" && return 1
+ [ -z "${apache_key_idleworkers}" ] && error "missing 'IdleWorkers' from apache server: ${*}" && return 1
+ [ -z "${apache_key_scoreboard}" ] && error "missing 'Scoreboard' from apache server: ${*}" && return 1
+
+ if [ ! -z "${apache_key_connstotal}" ] &&
+ [ ! -z "${apache_key_connsasyncwriting}" ] &&
+ [ ! -z "${apache_key_connsasynckeepalive}" ] &&
+ [ ! -z "${apache_key_connsasyncclosing}" ]; then
+ apache_has_conns=1
+ else
+ apache_has_conns=0
+ fi
+
+ return 0
+}
+
+apache_get() {
+ local oIFS="${IFS}" ret
+ # shellcheck disable=2207
+ IFS=$':\n' apache_response=($(run curl -Ss ${apache_curl_opts} "${apache_url}"))
+ ret=$?
+ IFS="${oIFS}"
+
+ if [ $ret -ne 0 ] || [ "${#apache_response[@]}" -eq 0 ]; then
+ return 1
+ fi
+
+ # the last line on the apache output is "Scoreboard"
+ # we use this label to detect that the output has a new word count
+ if [ ${apache_keys_detected} -eq 0 ] || [ "${apache_response[${apache_key_scoreboard}]}" != "Scoreboard" ]; then
+ apache_detect "${apache_response[@]}" || return 1
+ apache_keys_detected=1
+ fi
+
+ apache_accesses="${apache_response[${apache_key_accesses}]}"
+ apache_kbytes="${apache_response[${apache_key_kbytes}]}"
+
+ float2int "${apache_response[${apache_key_reqpersec}]}" ${apache_decimal_detail}
+ apache_reqpersec=${FLOAT2INT_RESULT}
+
+ float2int "${apache_response[${apache_key_bytespersec}]}" ${apache_decimal_detail}
+ apache_bytespersec=${FLOAT2INT_RESULT}
+
+ float2int "${apache_response[${apache_key_bytesperreq}]}" ${apache_decimal_detail}
+ apache_bytesperreq=${FLOAT2INT_RESULT}
+
+ apache_busyworkers="${apache_response[${apache_key_busyworkers}]}"
+ apache_idleworkers="${apache_response[${apache_key_idleworkers}]}"
+
+ if
+ [ -z "${apache_accesses}" ] ||
+ [ -z "${apache_kbytes}" ] ||
+ [ -z "${apache_reqpersec}" ] ||
+ [ -z "${apache_bytespersec}" ] ||
+ [ -z "${apache_bytesperreq}" ] ||
+ [ -z "${apache_busyworkers}" ] ||
+ [ -z "${apache_idleworkers}" ]
+ then
+ error "empty values got from apache server: ${apache_response[*]}"
+ return 1
+ fi
+
+ if [ ${apache_has_conns} -eq 1 ]; then
+ apache_connstotal="${apache_response[${apache_key_connstotal}]}"
+ apache_connsasyncwriting="${apache_response[${apache_key_connsasyncwriting}]}"
+ apache_connsasynckeepalive="${apache_response[${apache_key_connsasynckeepalive}]}"
+ apache_connsasyncclosing="${apache_response[${apache_key_connsasyncclosing}]}"
+ fi
+
+ return 0
+}
+
+# _check is called once, to find out if this chart should be enabled or not
+apache_check() {
+
+ apache_get
+ # shellcheck disable=2181
+ if [ $? -ne 0 ]; then
+ # shellcheck disable=2154
+ error "cannot find server-status on URL '${apache_url}'. Please set apache_url='http://apache.server:80/server-status?auto' in $confd/apache.conf"
+ return 1
+ fi
+
+ # this should return:
+ # - 0 to enable the chart
+ # - 1 to disable the chart
+
+ return 0
+}
+
+# _create is called once, to create the charts
+apache_create() {
+ cat <<EOF
+CHART apache_local.bytesperreq '' "apache Lifetime Avg. Response Size" "bytes/request" statistics apache.bytesperreq area $((apache_priority + 8)) $apache_update_every
+DIMENSION size '' absolute 1 ${apache_decimal_detail}
+CHART apache_local.workers '' "apache Workers" "workers" workers apache.workers stacked $((apache_priority + 5)) $apache_update_every
+DIMENSION idle '' absolute 1 1
+DIMENSION busy '' absolute 1 1
+CHART apache_local.reqpersec '' "apache Lifetime Avg. Requests/s" "requests/s" statistics apache.reqpersec line $((apache_priority + 6)) $apache_update_every
+DIMENSION requests '' absolute 1 ${apache_decimal_detail}
+CHART apache_local.bytespersec '' "apache Lifetime Avg. Bandwidth/s" "kilobits/s" statistics apache.bytespersec area $((apache_priority + 7)) $apache_update_every
+DIMENSION sent '' absolute 8 $((apache_decimal_detail * 1000))
+CHART apache_local.requests '' "apache Requests" "requests/s" requests apache.requests line $((apache_priority + 1)) $apache_update_every
+DIMENSION requests '' incremental 1 1
+CHART apache_local.net '' "apache Bandwidth" "kilobits/s" bandwidth apache.net area $((apache_priority + 3)) $apache_update_every
+DIMENSION sent '' incremental 8 1
+EOF
+
+ if [ ${apache_has_conns} -eq 1 ]; then
+ cat <<EOF2
+CHART apache_local.connections '' "apache Connections" "connections" connections apache.connections line $((apache_priority + 2)) $apache_update_every
+DIMENSION connections '' absolute 1 1
+CHART apache_local.conns_async '' "apache Async Connections" "connections" connections apache.conns_async stacked $((apache_priority + 4)) $apache_update_every
+DIMENSION keepalive '' absolute 1 1
+DIMENSION closing '' absolute 1 1
+DIMENSION writing '' absolute 1 1
+EOF2
+ fi
+
+ return 0
+}
+
+# _update is called continuously, to collect the values
+apache_update() {
+ # the first argument to this function is the microseconds since last update
+ # pass this parameter to the BEGIN statement (see below).
+
+ # do all the work to collect / calculate the values
+ # for each dimension
+ # remember: KEEP IT SIMPLE AND SHORT
+
+ apache_get || return 1
+
+ # write the result of the work.
+ cat <<VALUESEOF
+BEGIN apache_local.requests $1
+SET requests = $((apache_accesses))
+END
+BEGIN apache_local.net $1
+SET sent = $((apache_kbytes))
+END
+BEGIN apache_local.reqpersec $1
+SET requests = $((apache_reqpersec))
+END
+BEGIN apache_local.bytespersec $1
+SET sent = $((apache_bytespersec))
+END
+BEGIN apache_local.bytesperreq $1
+SET size = $((apache_bytesperreq))
+END
+BEGIN apache_local.workers $1
+SET idle = $((apache_idleworkers))
+SET busy = $((apache_busyworkers))
+END
+VALUESEOF
+
+ if [ ${apache_has_conns} -eq 1 ]; then
+ cat <<VALUESEOF2
+BEGIN apache_local.connections $1
+SET connections = $((apache_connstotal))
+END
+BEGIN apache_local.conns_async $1
+SET keepalive = $((apache_connsasynckeepalive))
+SET closing = $((apache_connsasyncclosing))
+SET writing = $((apache_connsasyncwriting))
+END
+VALUESEOF2
+ fi
+
+ return 0
+}
diff --git a/collectors/charts.d.plugin/apache/apache.conf b/collectors/charts.d.plugin/apache/apache.conf
new file mode 100644
index 0000000..50914cf
--- /dev/null
+++ b/collectors/charts.d.plugin/apache/apache.conf
@@ -0,0 +1,30 @@
+# no need for shebang - this file is loaded from charts.d.plugin
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2018 Costa Tsaousis <costa@tsaousis.gr>
+# GPL v3+
+
+# THIS PLUGIN IS DEPRECATED
+# USE THE PYTHON.D ONE
+
+# the URL to download apache status info
+#apache_url="http://127.0.0.1:80/server-status?auto"
+#apache_curl_opts=
+
+# convert apache floating point values
+# to integer using this multiplier
+# this only affects precision - the values
+# will be in the proper units
+#apache_decimal_detail=1000000
+
+# the data collection frequency
+# if unset, will inherit the netdata update frequency
+#apache_update_every=
+
+# the charts priority on the dashboard
+#apache_priority=60000
+
+# the number of retries to do in case of failure
+# before disabling the module
+#apache_retries=10
diff --git a/collectors/charts.d.plugin/apcupsd/Makefile.inc b/collectors/charts.d.plugin/apcupsd/Makefile.inc
new file mode 100644
index 0000000..19cb9ca
--- /dev/null
+++ b/collectors/charts.d.plugin/apcupsd/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_charts_DATA += apcupsd/apcupsd.chart.sh
+dist_chartsconfig_DATA += apcupsd/apcupsd.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += apcupsd/README.md apcupsd/Makefile.inc
+
diff --git a/collectors/charts.d.plugin/apcupsd/README.md b/collectors/charts.d.plugin/apcupsd/README.md
new file mode 100644
index 0000000..59739ef
--- /dev/null
+++ b/collectors/charts.d.plugin/apcupsd/README.md
@@ -0,0 +1,7 @@
+# apcupsd
+
+*Under construction*
+
+Collects UPS metrics
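+
+The module collects data by running `apcaccess status` against each configured source (by default `127.0.0.1:3551`). A quick way to verify that your apcupsd daemon is reachable is:
+
+```sh
+apcaccess status 127.0.0.1:3551
+```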
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fapcupsd%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/charts.d.plugin/apcupsd/apcupsd.chart.sh b/collectors/charts.d.plugin/apcupsd/apcupsd.chart.sh
new file mode 100644
index 0000000..b4b92cd
--- /dev/null
+++ b/collectors/charts.d.plugin/apcupsd/apcupsd.chart.sh
@@ -0,0 +1,202 @@
+# shellcheck shell=bash
+# no need for shebang - this file is loaded from charts.d.plugin
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2016 Costa Tsaousis <costa@tsaousis.gr>
+#
+
+apcupsd_ip=
+apcupsd_port=
+
+declare -A apcupsd_sources=(
+ ["local"]="127.0.0.1:3551"
+)
+
+# how frequently to collect UPS data
+apcupsd_update_every=10
+
+apcupsd_timeout=3
+
+# the priority of apcupsd related to other charts
+apcupsd_priority=90000
+
+apcupsd_get() {
+ run -t $apcupsd_timeout apcaccess status "$1"
+}
+
+apcupsd_check() {
+
+ # this should return:
+ # - 0 to enable the chart
+ # - 1 to disable the chart
+
+ require_cmd apcaccess || return 1
+
+ # backwards compatibility
+ if [ "${apcupsd_ip}:${apcupsd_port}" != ":" ]; then
+ apcupsd_sources["local"]="${apcupsd_ip}:${apcupsd_port}"
+ fi
+
+ local host working=0 failed=0
+ for host in "${!apcupsd_sources[@]}"; do
+ run apcupsd_get "${apcupsd_sources[${host}]}" >/dev/null
+ # shellcheck disable=2181
+ if [ $? -ne 0 ]; then
+ error "cannot get information for apcupsd server ${host} on ${apcupsd_sources[${host}]}."
+ failed=$((failed + 1))
+ elif [ "$(apcupsd_get "${apcupsd_sources[${host}]}" | awk '/^STATUS.*/{ print $3 }')" != "ONLINE" ]; then
+ error "APC UPS ${host} on ${apcupsd_sources[${host}]} is not online."
+ failed=$((failed + 1))
+ else
+ working=$((working + 1))
+ fi
+ done
+
+ if [ ${working} -eq 0 ]; then
+ error "No APC UPSes found available."
+ return 1
+ fi
+
+ return 0
+}
+
+apcupsd_create() {
+ local host src
+ for host in "${!apcupsd_sources[@]}"; do
+ src=${apcupsd_sources[${host}]}
+
+ # create the charts
+ cat <<EOF
+CHART apcupsd_${host}.charge '' "UPS Charge for ${host} on ${src}" "percentage" ups apcupsd.charge area $((apcupsd_priority + 1)) $apcupsd_update_every
+DIMENSION battery_charge charge absolute 1 100
+
+CHART apcupsd_${host}.battery_voltage '' "UPS Battery Voltage for ${host} on ${src}" "Volts" ups apcupsd.battery.voltage line $((apcupsd_priority + 3)) $apcupsd_update_every
+DIMENSION battery_voltage voltage absolute 1 100
+DIMENSION battery_voltage_nominal nominal absolute 1 100
+
+CHART apcupsd_${host}.input_voltage '' "UPS Input Voltage for ${host} on ${src}" "Volts" input apcupsd.input.voltage line $((apcupsd_priority + 4)) $apcupsd_update_every
+DIMENSION input_voltage voltage absolute 1 100
+DIMENSION input_voltage_min min absolute 1 100
+DIMENSION input_voltage_max max absolute 1 100
+
+CHART apcupsd_${host}.input_frequency '' "UPS Input Frequency for ${host} on ${src}" "Hz" input apcupsd.input.frequency line $((apcupsd_priority + 5)) $apcupsd_update_every
+DIMENSION input_frequency frequency absolute 1 100
+
+CHART apcupsd_${host}.output_voltage '' "UPS Output Voltage for ${host} on ${src}" "Volts" output apcupsd.output.voltage line $((apcupsd_priority + 6)) $apcupsd_update_every
+DIMENSION output_voltage voltage absolute 1 100
+DIMENSION output_voltage_nominal nominal absolute 1 100
+
+CHART apcupsd_${host}.load '' "UPS Load for ${host} on ${src}" "percentage" ups apcupsd.load area $((apcupsd_priority)) $apcupsd_update_every
+DIMENSION load load absolute 1 100
+
+CHART apcupsd_${host}.temp '' "UPS Temperature for ${host} on ${src}" "Celsius" ups apcupsd.temperature line $((apcupsd_priority + 7)) $apcupsd_update_every
+DIMENSION temp temp absolute 1 100
+
+CHART apcupsd_${host}.time '' "UPS Time Remaining for ${host} on ${src}" "Minutes" ups apcupsd.time area $((apcupsd_priority + 2)) $apcupsd_update_every
+DIMENSION time time absolute 1 100
+
+CHART apcupsd_${host}.online '' "UPS ONLINE flag for ${host} on ${src}" "boolean" ups apcupsd.online line $((apcupsd_priority + 8)) $apcupsd_update_every
+DIMENSION online online absolute 0 1
+
+EOF
+ done
+ return 0
+}
+
+apcupsd_update() {
+ # the first argument to this function is the microseconds since last update
+ # pass this parameter to the BEGIN statement (see below).
+
+ # do all the work to collect / calculate the values
+ # for each dimension
+ # remember: KEEP IT SIMPLE AND SHORT
+
+ local host working=0 failed=0
+ for host in "${!apcupsd_sources[@]}"; do
+ apcupsd_get "${apcupsd_sources[${host}]}" | awk "
+
+BEGIN {
+ battery_charge = 0;
+ battery_voltage = 0;
+ battery_voltage_nominal = 0;
+ input_voltage = 0;
+ input_voltage_min = 0;
+ input_voltage_max = 0;
+ input_frequency = 0;
+ output_voltage = 0;
+ output_voltage_nominal = 0;
+ load = 0;
+ temp = 0;
+ time = 0;
+}
+/^BCHARGE.*/ { battery_charge = \$3 * 100 };
+/^BATTV.*/ { battery_voltage = \$3 * 100 };
+/^NOMBATTV.*/ { battery_voltage_nominal = \$3 * 100 };
+/^LINEV.*/ { input_voltage = \$3 * 100 };
+/^MINLINEV.*/ { input_voltage_min = \$3 * 100 };
+/^MAXLINEV.*/ { input_voltage_max = \$3 * 100 };
+/^LINEFREQ.*/ { input_frequency = \$3 * 100 };
+/^OUTPUTV.*/ { output_voltage = \$3 * 100 };
+/^NOMOUTV.*/ { output_voltage_nominal = \$3 * 100 };
+/^LOADPCT.*/ { load = \$3 * 100 };
+/^ITEMP.*/ { temp = \$3 * 100 };
+/^TIMELEFT.*/ { time = \$3 * 100 };
+/^STATUS.*/ { online=(\$3 == \"ONLINE\")?1:0 };
+END {
+ print \"BEGIN apcupsd_${host}.online $1\";
+ print \"SET online = \" online;
+ print \"END\"
+
+ if (online == 1) {
+ print \"BEGIN apcupsd_${host}.charge $1\";
+ print \"SET battery_charge = \" battery_charge;
+ print \"END\"
+
+ print \"BEGIN apcupsd_${host}.battery_voltage $1\";
+ print \"SET battery_voltage = \" battery_voltage;
+ print \"SET battery_voltage_nominal = \" battery_voltage_nominal;
+ print \"END\"
+
+ print \"BEGIN apcupsd_${host}.input_voltage $1\";
+ print \"SET input_voltage = \" input_voltage;
+ print \"SET input_voltage_min = \" input_voltage_min;
+ print \"SET input_voltage_max = \" input_voltage_max;
+ print \"END\"
+
+ print \"BEGIN apcupsd_${host}.input_frequency $1\";
+ print \"SET input_frequency = \" input_frequency;
+ print \"END\"
+
+ print \"BEGIN apcupsd_${host}.output_voltage $1\";
+ print \"SET output_voltage = \" output_voltage;
+ print \"SET output_voltage_nominal = \" output_voltage_nominal;
+ print \"END\"
+
+ print \"BEGIN apcupsd_${host}.load $1\";
+ print \"SET load = \" load;
+ print \"END\"
+
+ print \"BEGIN apcupsd_${host}.temp $1\";
+ print \"SET temp = \" temp;
+ print \"END\"
+
+ print \"BEGIN apcupsd_${host}.time $1\";
+ print \"SET time = \" time;
+ print \"END\"
+ }
+}"
+ # shellcheck disable=SC2181
+ if [ $? -ne 0 ]; then
+ failed=$((failed + 1))
+ error "failed to get values for APC UPS ${host} on ${apcupsd_sources[${host}]}" && return 1
+ else
+ working=$((working + 1))
+ fi
+ done
+
+ [ $working -eq 0 ] && error "failed to get values from all APC UPSes" && return 1
+
+ return 0
+}
diff --git a/collectors/charts.d.plugin/apcupsd/apcupsd.conf b/collectors/charts.d.plugin/apcupsd/apcupsd.conf
new file mode 100644
index 0000000..679c0d6
--- /dev/null
+++ b/collectors/charts.d.plugin/apcupsd/apcupsd.conf
@@ -0,0 +1,25 @@
+# no need for shebang - this file is loaded from charts.d.plugin
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2018 Costa Tsaousis <costa@tsaousis.gr>
+# GPL v3+
+
+# add all your APC UPSes in this array - uncomment it too
+#declare -A apcupsd_sources=(
+# ["local"]="127.0.0.1:3551"
+#)
+
+# how long to wait for apcupsd to respond
+#apcupsd_timeout=3
+
+# the data collection frequency
+# if unset, will inherit the netdata update frequency
+#apcupsd_update_every=10
+
+# the charts priority on the dashboard
+#apcupsd_priority=90000
+
+# the number of retries to do in case of failure
+# before disabling the module
+#apcupsd_retries=10
diff --git a/collectors/charts.d.plugin/charts.d.conf b/collectors/charts.d.plugin/charts.d.conf
new file mode 100644
index 0000000..acb2a6f
--- /dev/null
+++ b/collectors/charts.d.plugin/charts.d.conf
@@ -0,0 +1,63 @@
+# This is the configuration for charts.d.plugin
+
+# Each of its collectors can read configuration either from this file
+# or a NAME.conf file (where NAME is the collector name).
+# The collector-specific file has higher precedence.
+
+# This file is a shell script too.
+
+# -----------------------------------------------------------------------------
+
+# number of seconds to run without restart
+# after this time, charts.d.plugin will exit
+# netdata will restart it, but a small gap
+# will appear in the charts.d.plugin charts.
+#restart_timeout=$((3600 * 4))
+
+# when making iterations, charts.d can loop more frequently
+# to prevent plugins missing iterations.
+# this is a percentage relative to update_every to align its
+# iterations.
+# The minimum is 10%, the maximum 100%.
+# So, if update_every is 1 second and time_divisor is 50,
+# charts.d will iterate every 500ms.
+# Charts will be called to collect data only if the time
+# passed since the last time they collected data is equal to or
+# above their update_every.
+#time_divisor=50
+
+# -----------------------------------------------------------------------------
+
+# the default enable/disable for all charts.d collectors
+# the default is "yes"
+# enable_all_charts="yes"
+
+# BY DEFAULT ENABLED MODULES
+# ap=yes
+# nut=yes
+# opensips=yes
+
+# -----------------------------------------------------------------------------
+# THESE NEED TO BE SET TO "force" TO BE ENABLED
+
+# Nothing useful.
+# Just an example charts.d plugin you can use as a template.
+# example=force
+
+# OLD MODULES THAT ARE NOW SERVED BY python.d.plugin
+# apache=force
+# cpufreq=force
+# exim=force
+# hddtemp=force
+# mysql=force
+# nginx=force
+# phpfpm=force
+# postfix=force
+# sensors=force
+# squid=force
+# tomcat=force
+
+# OLD MODULES THAT ARE NOW SERVED BY NETDATA DAEMON
+# cpu_apps=force
+# mem_apps=force
+# load_average=force
diff --git a/collectors/charts.d.plugin/charts.d.dryrun-helper.sh b/collectors/charts.d.plugin/charts.d.dryrun-helper.sh
new file mode 100755
index 0000000..91af2c5
--- /dev/null
+++ b/collectors/charts.d.plugin/charts.d.dryrun-helper.sh
@@ -0,0 +1,72 @@
+#!/usr/bin/env bash
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# shellcheck disable=SC2181
+
+# will stop the script for any error
+set -e
+
+me="$0"
+name="$1"
+chart="$2"
+conf="$3"
+
+can_diff=1
+
+tmp1="$(mktemp)"
+tmp2="$(mktemp)"
+
+myset() {
+ set | grep -v "^_=" | grep -v "^PIPESTATUS=" | grep -v "^BASH_LINENO="
+}
+
+# save 2 'set'
+myset >"$tmp1"
+myset >"$tmp2"
+
+# make sure they don't differ
+diff "$tmp1" "$tmp2" >/dev/null 2>&1
+if [ $? -ne 0 ]; then
+ # they differ, we cannot do the check
+ echo >&2 "$me: cannot check with diff."
+ can_diff=0
+fi
+
+# do it again, now including the script
+myset >"$tmp1"
+
+# include the plugin and its config
+if [ -f "$conf" ]; then
+ # shellcheck source=/dev/null
+ . "$conf"
+ if [ $? -ne 0 ]; then
+ echo >&2 "$me: cannot load config file $conf"
+ rm "$tmp1" "$tmp2"
+ exit 1
+ fi
+fi
+
+# shellcheck source=/dev/null
+. "$chart"
+if [ $? -ne 0 ]; then
+ echo >&2 "$me: cannot load chart file $chart"
+ rm "$tmp1" "$tmp2"
+ exit 1
+fi
+
+# remove all variables starting with the plugin name
+myset | grep -v "^$name" >"$tmp2"
+
+if [ $can_diff -eq 1 ]; then
+ # check if they are different
+ # make sure they don't differ
+ diff "$tmp1" "$tmp2" >&2
+ if [ $? -ne 0 ]; then
+ # they differ
+ rm "$tmp1" "$tmp2"
+ exit 1
+ fi
+fi
+
+rm "$tmp1" "$tmp2"
+exit 0
diff --git a/collectors/charts.d.plugin/charts.d.plugin.in b/collectors/charts.d.plugin/charts.d.plugin.in
new file mode 100755
index 0000000..05a6387
--- /dev/null
+++ b/collectors/charts.d.plugin/charts.d.plugin.in
@@ -0,0 +1,691 @@
+#!/usr/bin/env bash
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2017 Costa Tsaousis <costa@tsaousis.gr>
+# GPL v3+
+#
+# charts.d.plugin allows easy development of BASH plugins
+#
+# if you need to run parallel charts.d processes, link this file to a different name
+# in the same directory, with a .plugin suffix and netdata will start both of them,
+# each will have a different config file and modules configuration directory.
+#
+
+export PATH="${PATH}:/sbin:/usr/sbin:/usr/local/bin:/usr/local/sbin"
+
+PROGRAM_FILE="$0"
+PROGRAM_NAME="$(basename "$0")"
+PROGRAM_NAME="${PROGRAM_NAME/.plugin/}"
+MODULE_NAME="main"
+
+# -----------------------------------------------------------------------------
+# create temp dir
+
+debug=0
+TMP_DIR=
+chartsd_cleanup() {
+ trap '' EXIT QUIT HUP INT TERM
+
+ if [ ! -z "$TMP_DIR" -a -d "$TMP_DIR" ]; then
+ [ $debug -eq 1 ] && echo >&2 "$PROGRAM_NAME: cleaning up temporary directory $TMP_DIR ..."
+ rm -rf "$TMP_DIR"
+ fi
+ exit 0
+}
+trap chartsd_cleanup EXIT QUIT HUP INT TERM
+
+if [ $UID = "0" ]; then
+ TMP_DIR="$(mktemp -d /var/run/netdata-${PROGRAM_NAME}-XXXXXXXXXX)"
+else
+ TMP_DIR="$(mktemp -d /tmp/.netdata-${PROGRAM_NAME}-XXXXXXXXXX)"
+fi
+
+logdate() {
+ date "+%Y-%m-%d %H:%M:%S"
+}
+
+log() {
+ local status="${1}"
+ shift
+
+ echo >&2 "$(logdate): ${PROGRAM_NAME}: ${status}: ${MODULE_NAME}: ${*}"
+
+}
+
+warning() {
+ log WARNING "${@}"
+}
+
+error() {
+ log ERROR "${@}"
+}
+
+info() {
+ log INFO "${@}"
+}
+
+fatal() {
+ log FATAL "${@}"
+ echo "DISABLE"
+ exit 1
+}
+
+debug() {
+ [ $debug -eq 1 ] && log DEBUG "${@}"
+}
+
+# -----------------------------------------------------------------------------
+# check a few commands
+
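+# require_cmd NAME - look NAME up in the PATH; on success export its full path
+# as NAME_CMD (upper-cased) and return 0, otherwise warn and return 1.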
+require_cmd() {
+ local x=$(which "${1}" 2>/dev/null || command -v "${1}" 2>/dev/null)
+ if [ -z "${x}" -o ! -x "${x}" ]; then
+ warning "command '${1}' is not found in ${PATH}."
+ eval "${1^^}_CMD=\"\""
+ return 1
+ fi
+
+ eval "${1^^}_CMD=\"${x}\""
+ return 0
+}
+
+require_cmd date || exit 1
+require_cmd sed || exit 1
+require_cmd basename || exit 1
+require_cmd dirname || exit 1
+require_cmd cat || exit 1
+require_cmd grep || exit 1
+require_cmd egrep || exit 1
+require_cmd mktemp || exit 1
+require_cmd awk || exit 1
+require_cmd timeout || exit 1
+require_cmd curl || exit 1
+
+# -----------------------------------------------------------------------------
+
+[ $((BASH_VERSINFO[0])) -lt 4 ] && fatal "BASH version 4 or later is required, but found version: ${BASH_VERSION}. Please upgrade."
+
+info "started from '$PROGRAM_FILE' with options: $*"
+
+# -----------------------------------------------------------------------------
+# internal defaults
+# netdata exposes a few environment variables for us
+
+[ -z "${NETDATA_PLUGINS_DIR}" ] && NETDATA_PLUGINS_DIR="$(dirname "${0}")"
+[ -z "${NETDATA_USER_CONFIG_DIR}" ] && NETDATA_USER_CONFIG_DIR="@configdir_POST@"
+[ -z "${NETDATA_STOCK_CONFIG_DIR}" ] && NETDATA_STOCK_CONFIG_DIR="@libconfigdir_POST@"
+
+pluginsd="${NETDATA_PLUGINS_DIR}"
+stockconfd="${NETDATA_STOCK_CONFIG_DIR}/${PROGRAM_NAME}"
+userconfd="${NETDATA_USER_CONFIG_DIR}/${PROGRAM_NAME}"
+olduserconfd="${NETDATA_USER_CONFIG_DIR}"
+chartsd="$pluginsd/../charts.d"
+
+minimum_update_frequency="${NETDATA_UPDATE_EVERY-1}"
+update_every=${minimum_update_frequency} # this will be overwritten by the command line
+
+# work around for non BASH shells
+charts_create="_create"
+charts_update="_update"
+charts_check="_check"
+charts_undescore="_"
+
+# charts.d can loop more frequently than update_every,
+# to prevent plugins from missing iterations.
+# time_divisor is a percentage relative to update_every,
+# controlling how often charts.d iterates.
+# The minimum is 10%, the maximum 100%.
+# So, if update_every is 1 second and time_divisor is 50,
+# charts.d will iterate every 500ms.
+# Charts are asked to collect data only if the time passed
+# since their last collection is equal to or greater than
+# their update_every.
+time_divisor=50
+
+# number of seconds to run without restart
+# after this time, charts.d.plugin will exit
+# netdata will restart it
+restart_timeout=$((3600 * 4))
+
+# check if the charts.d plugins are using global variables
+# (they should not).
+# The dry-run helper does not currently support BASH v4 arrays,
+# so it is disabled.
+dryrunner=0
+
+# check for timeout command
+check_for_timeout=1
+
+# the default enable/disable value for all charts
+enable_all_charts="yes"
+
+# -----------------------------------------------------------------------------
+# parse parameters
+
+check=0
+chart_only=
+while [ ! -z "$1" ]; do
+ if [ "$1" = "check" ]; then
+ check=1
+ shift
+ continue
+ fi
+
+ if [ "$1" = "debug" -o "$1" = "all" ]; then
+ debug=1
+ shift
+ continue
+ fi
+
+ if [ -f "$chartsd/$1.chart.sh" ]; then
+ debug=1
+ chart_only="$(echo $1.chart.sh | sed "s/\.chart\.sh$//g")"
+ shift
+ continue
+ fi
+
+ if [ -f "$chartsd/$1" ]; then
+ debug=1
+ chart_only="$(echo $1 | sed "s/\.chart\.sh$//g")"
+ shift
+ continue
+ fi
+
+ # number check
+ n="$1"
+ x=$((n))
+ if [ "$x" = "$n" ]; then
+ shift
+ update_every=$x
+ [ $update_every -lt $minimum_update_frequency ] && update_every=$minimum_update_frequency
+ continue
+ fi
+
+ fatal "Cannot understand parameter $1. Aborting."
+done
+
+# -----------------------------------------------------------------------------
+# loop control
+
+# default sleep function
+LOOPSLEEPMS_HIGHRES=0
+now_ms=
+current_time_ms_default() {
+ now_ms="$(date +'%s')000"
+}
+current_time_ms="current_time_ms_default"
+current_time_ms_accuracy=1
+mysleep="sleep"
+
+# if found and included, this file overwrites loopsleepms()
+# and current_time_ms() with a high resolution timer function
+# for precise looping.
+source "$pluginsd/loopsleepms.sh.inc"
+[ $? -ne 0 ] && error "Failed to load '$pluginsd/loopsleepms.sh.inc'."
+
+# -----------------------------------------------------------------------------
+# load my configuration
+
+for myconfig in "${NETDATA_STOCK_CONFIG_DIR}/${PROGRAM_NAME}.conf" "${NETDATA_USER_CONFIG_DIR}/${PROGRAM_NAME}.conf"; do
+ if [ -f "$myconfig" ]; then
+ source "$myconfig"
+ if [ $? -ne 0 ]; then
+ error "Config file '$myconfig' loaded with errors."
+ else
+ info "Configuration file '$myconfig' loaded."
+ fi
+ else
+ warning "Configuration file '$myconfig' not found."
+ fi
+done
+
+# make sure time_divisor is right
+time_divisor=$((time_divisor))
+[ $time_divisor -lt 10 ] && time_divisor=10
+[ $time_divisor -gt 100 ] && time_divisor=100
+
+# we check for the timeout command after loading our
+# configuration, so that the user may override the
+# timeout command we use, by providing a function that
+# emulates the timeout command we need:
+# > timeout SECONDS command ...
+if [ $check_for_timeout -eq 1 ]; then
+ require_cmd timeout || exit 1
+fi
+
+# -----------------------------------------------------------------------------
+# internal checks
+
+# netdata passes the requested update frequency as the first argument
+update_every=$((update_every + 1 - 1)) # makes sure it is a number
+test $update_every -eq 0 && update_every=1 # if it is zero, make it 1
+
+# check the charts.d directory
+[ ! -d "$chartsd" ] && fatal "cannot find charts directory '$chartsd'"
+
+# -----------------------------------------------------------------------------
+# library functions
+
+fixid() {
+ echo "$*" |
+ tr -c "[A-Z][a-z][0-9]" "_" |
+ sed -e "s|^_\+||g" -e "s|_\+$||g" -e "s|_\+|_|g" |
+ tr "[A-Z]" "[a-z]"
+}
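+# for example (illustrative): fixid "eth0:1 (LAN)" prints "eth0_1_lan"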
+
+run() {
+ local ret pid="${BASHPID}" t
+
+ if [ "z${1}" = "z-t" -a "${2}" != "0" ]; then
+ t="${2}"
+ shift 2
+ timeout ${t} "${@}" 2>"${TMP_DIR}/run.${pid}"
+ ret=$?
+ else
+ "${@}" 2>"${TMP_DIR}/run.${pid}"
+ ret=$?
+ fi
+
+ if [ ${ret} -ne 0 ]; then
+ {
+ printf "$(logdate): ${PROGRAM_NAME}: ERROR: ${MODULE_NAME}: command '"
+ printf "%q " "${@}"
+ printf "' failed with code ${ret}:\n --- BEGIN TRACE ---\n"
+ cat "${TMP_DIR}/run.${pid}"
+ printf " --- END TRACE ---\n"
+ } >&2
+ fi
+ rm "${TMP_DIR}/run.${pid}"
+
+ return ${ret}
+}
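+# usage examples (illustrative command name):
+#   run some_command arg1 arg2        # log stderr and the exit code on failure
+#   run -t 5 some_command arg1 arg2   # same, but abort the command after 5 seconds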
+
+# convert any floating point number
+# to an integer, given a multiplier
+# the result is stored in ${FLOAT2INT_RESULT}
+# so that no fork is necessary
+# the multiplier must be a power of 10
+float2int() {
+ local f m="$2" a b l v=($1)
+ f=${v[0]}
+
+ # the length of the multiplier - 1
+ l=$((${#m} - 1))
+
+ # check if the number is in scientific notation
+ if [[ ${f} =~ ^[[:space:]]*(-)?[0-9.]+(e|E)(\+|-)[0-9]+ ]]; then
+ # convert it to decimal
+ # unfortunately, this fork cannot be avoided
+ # if you know of a way to avoid it, please let me know
+ f=$(printf "%0.${l}f" ${f})
+ fi
+
+ # split the floating point number
+ # in integer (a) and decimal (b)
+ a=${f/.*/}
+ b=${f/*./}
+
+ # if the integer part is missing
+ # set it to zero
+ [ -z "${a}" ] && a="0"
+
+ # strip leading zeros from the integer part
+ # base 10 conversion
+ a=$((10#$a))
+
+ # check the length of the decimal part
+ # against the length of the multiplier
+ if [ ${#b} -gt ${l} ]; then
+ # too many digits - take the most significant
+ b=${b:0:l}
+
+ elif [ ${#b} -lt ${l} ]; then
+ # too few digits - pad with zero on the right
+ local z="00000000000000000000000" r=$((l - ${#b}))
+ b="${b}${z:0:r}"
+ fi
+
+ # strip leading zeros from the decimal part
+ # base 10 conversion
+ b=$((10#$b))
+
+ # store the result
+ FLOAT2INT_RESULT=$(((a * m) + b))
+}
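+# usage example (illustrative):
+#   float2int "1.2345" 1000   # sets FLOAT2INT_RESULT=1234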
+
+# -----------------------------------------------------------------------------
+# charts check functions
+
+all_charts() {
+ cd "$chartsd"
+ [ $? -ne 0 ] && error "cannot cd to $chartsd" && return 1
+
+ ls *.chart.sh | sed "s/\.chart\.sh$//g"
+}
+
+declare -A charts_enable_keyword=(
+ ['apache']="force"
+ ['cpu_apps']="force"
+ ['cpufreq']="force"
+ ['example']="force"
+ ['exim']="force"
+ ['hddtemp']="force"
+ ['load_average']="force"
+ ['mem_apps']="force"
+ ['mysql']="force"
+ ['nginx']="force"
+ ['phpfpm']="force"
+ ['postfix']="force"
+ ['sensors']="force"
+ ['squid']="force"
+ ['tomcat']="force"
+)
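+# the modules above are auto-detectable but stay disabled by default;
+# to enable one, the user adds a line like the following to charts.d.conf
+# (this is the syntax reported by the info message in all_enabled_charts() below):
+#
+#    apache=force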
+
+all_enabled_charts() {
+ local charts= enabled= required=
+
+ # find all enabled charts
+
+ for chart in $(all_charts); do
+ MODULE_NAME="${chart}"
+
+ eval "enabled=\$$chart"
+ if [ -z "${enabled}" ]; then
+ enabled="${enable_all_charts}"
+ fi
+
+ required="${charts_enable_keyword[${chart}]}"
+ [ -z "${required}" ] && required="yes"
+
+ if [ ! "${enabled}" = "${required}" ]; then
+ info "is disabled. Add a line with $chart=$required in '${NETDATA_USER_CONFIG_DIR}/${PROGRAM_NAME}.conf' to enable it (or remove the line that disables it)."
+ else
+ debug "is enabled for auto-detection."
+ local charts="$charts $chart"
+ fi
+ done
+ MODULE_NAME="main"
+
+ local charts2=
+ for chart in $charts; do
+ MODULE_NAME="${chart}"
+
+ # check the enabled charts
+ local check="$(cat "$chartsd/$chart.chart.sh" | sed "s/^ \+//g" | grep "^$chart$charts_check()")"
+ if [ -z "$check" ]; then
+ error "module '$chart' does not seem to have a $chart$charts_check() function. Disabling it."
+ continue
+ fi
+
+ local create="$(cat "$chartsd/$chart.chart.sh" | sed "s/^ \+//g" | grep "^$chart$charts_create()")"
+ if [ -z "$create" ]; then
+ error "module '$chart' does not seem to have a $chart$charts_create() function. Disabling it."
+ continue
+ fi
+
+ local update="$(cat "$chartsd/$chart.chart.sh" | sed "s/^ \+//g" | grep "^$chart$charts_update()")"
+ if [ -z "$update" ]; then
+ error "module '$chart' does not seem to have a $chart$charts_update() function. Disabling it."
+ continue
+ fi
+
+ # check its config
+ #if [ -f "$userconfd/$chart.conf" ]
+ #then
+ # if [ ! -z "$( cat "$userconfd/$chart.conf" | sed "s/^ \+//g" | grep -v "^$" | grep -v "^#" | grep -v "^$chart$charts_undescore" )" ]
+ # then
+ # error "module's $chart config $userconfd/$chart.conf should only have lines starting with $chart$charts_undescore . Disabling it."
+ # continue
+ # fi
+ #fi
+
+ #if [ $dryrunner -eq 1 ]
+ # then
+ # "$pluginsd/charts.d.dryrun-helper.sh" "$chart" "$chartsd/$chart.chart.sh" "$userconfd/$chart.conf" >/dev/null
+ # if [ $? -ne 0 ]
+ # then
+ # error "module's $chart did not pass the dry run check. This means it uses global variables not starting with $chart. Disabling it."
+ # continue
+ # fi
+ #fi
+
+ local charts2="$charts2 $chart"
+ done
+ MODULE_NAME="main"
+
+ echo $charts2
+ debug "enabled charts: $charts2"
+}
+
+# -----------------------------------------------------------------------------
+# load the charts
+
+suffix_retries="_retries"
+suffix_update_every="_update_every"
+active_charts=
+for chart in $(all_enabled_charts); do
+ MODULE_NAME="${chart}"
+
+ debug "loading module: '$chartsd/$chart.chart.sh'"
+
+ source "$chartsd/$chart.chart.sh"
+ [ $? -ne 0 ] && warning "Module '$chartsd/$chart.chart.sh' loaded with errors."
+
+ # first load the stock config
+ if [ -f "$stockconfd/$chart.conf" ]; then
+ debug "loading module configuration: '$stockconfd/$chart.conf'"
+ source "$stockconfd/$chart.conf"
+ [ $? -ne 0 ] && warning "Config file '$stockconfd/$chart.conf' loaded with errors."
+ else
+ debug "module configuration not found: '$stockconfd/$chart.conf'"
+ fi
+
+ # then load the user config (it overwrites the stock)
+ if [ -f "$userconfd/$chart.conf" ]; then
+ debug "loading module configuration: '$userconfd/$chart.conf'"
+ source "$userconfd/$chart.conf"
+ [ $? -ne 0 ] && warning "Config file '$userconfd/$chart.conf' loaded with errors."
+ else
+ debug "module configuration not found: '$userconfd/$chart.conf'"
+
+ if [ -f "$olduserconfd/$chart.conf" ]; then
+ # support for very old netdata that had the charts.d module configs in /etc/netdata
+ info "loading module configuration from obsolete location: '$olduserconfd/$chart.conf'"
+ source "$olduserconfd/$chart.conf"
+ [ $? -ne 0 ] && warning "Config file '$olduserconfd/$chart.conf' loaded with errors."
+ fi
+ fi
+
+ eval "dt=\$$chart$suffix_update_every"
+ dt=$((dt + 1 - 1)) # make sure it is a number
+ if [ $dt -lt $update_every ]; then
+ eval "$chart$suffix_update_every=$update_every"
+ fi
+
+ $chart$charts_check
+ if [ $? -eq 0 ]; then
+ debug "module '$chart' activated"
+ active_charts="$active_charts $chart"
+ else
+ error "module's '$chart' check() function reports failure."
+ fi
+done
+MODULE_NAME="main"
+debug "activated modules: $active_charts"
+
+# -----------------------------------------------------------------------------
+# check overwrites
+
+# enable work time reporting
+debug_time=
+test $debug -eq 1 && debug_time=tellwork
+
+# if we only need a specific chart, remove all the others
+if [ ! -z "${chart_only}" ]; then
+ debug "requested to run only for: '${chart_only}'"
+ check_charts=
+ for chart in $active_charts; do
+ if [ "$chart" = "$chart_only" ]; then
+ check_charts="$chart"
+ break
+ fi
+ done
+ active_charts="$check_charts"
+fi
+debug "activated charts: $active_charts"
+
+# stop if we just need a pre-check
+if [ $check -eq 1 ]; then
+ info "CHECK RESULT"
+ info "Will run the charts: $active_charts"
+ exit 0
+fi
+
+# -----------------------------------------------------------------------------
+
+cd "${TMP_DIR}" || exit 1
+
+# -----------------------------------------------------------------------------
+# create charts
+
+run_charts=
+for chart in $active_charts; do
+ MODULE_NAME="${chart}"
+
+ debug "calling '$chart$charts_create()'..."
+ $chart$charts_create
+ if [ $? -eq 0 ]; then
+ run_charts="$run_charts $chart"
+ debug "'$chart' initialized."
+ else
+ error "module's '$chart' function '$chart$charts_create()' reports failure."
+ fi
+done
+MODULE_NAME="main"
+debug "run_charts='$run_charts'"
+
+# -----------------------------------------------------------------------------
+# update dimensions
+
+[ -z "$run_charts" ] && fatal "No charts to collect data from."
+
+declare -A charts_last_update=() charts_update_every=() charts_retries=() charts_next_update=() charts_run_counter=() charts_serial_failures=()
+global_update() {
+ local exit_at \
+ c=0 dt ret last_ms exec_start_ms exec_end_ms \
+ chart now_charts=() next_charts=($run_charts) \
+ next_ms x seconds millis
+
+ # return the current time in ms in $now_ms
+ ${current_time_ms}
+
+ exit_at=$((now_ms + (restart_timeout * 1000)))
+
+ for chart in $run_charts; do
+ eval "charts_update_every[$chart]=\$$chart$suffix_update_every"
+ test -z "${charts_update_every[$chart]}" && charts_update_every[$chart]=$update_every
+
+ eval "charts_retries[$chart]=\$$chart$suffix_retries"
+ test -z "${charts_retries[$chart]}" && charts_retries[$chart]=10
+
+ charts_last_update[$chart]=$((now_ms - (now_ms % (charts_update_every[$chart] * 1000))))
+ charts_next_update[$chart]=$((charts_last_update[$chart] + (charts_update_every[$chart] * 1000)))
+ charts_run_counter[$chart]=0
+ charts_serial_failures[$chart]=0
+
+ echo "CHART netdata.plugin_chartsd_$chart '' 'Execution time for $chart plugin' 'milliseconds / run' charts.d netdata.plugin_charts area 145000 ${charts_update_every[$chart]}"
+ echo "DIMENSION run_time 'run time' absolute 1 1"
+ done
+
+ # the main loop
+ while [ "${#next_charts[@]}" -gt 0 ]; do
+ c=$((c + 1))
+ now_charts=("${next_charts[@]}")
+ next_charts=()
+
+ # return the current time in ms in $now_ms
+ ${current_time_ms}
+
+ for chart in "${now_charts[@]}"; do
+ MODULE_NAME="${chart}"
+
+ if [ ${now_ms} -ge ${charts_next_update[$chart]} ]; then
+ last_ms=${charts_last_update[$chart]}
+ dt=$((now_ms - last_ms))
+
+ charts_last_update[$chart]=${now_ms}
+
+ while [ ${charts_next_update[$chart]} -lt ${now_ms} ]; do
+ charts_next_update[$chart]=$((charts_next_update[$chart] + (charts_update_every[$chart] * 1000)))
+ done
+
+ # the first call should not give a duration
+ # so that netdata calibrates to current time
+ dt=$((dt * 1000))
+ charts_run_counter[$chart]=$((charts_run_counter[$chart] + 1))
+ if [ ${charts_run_counter[$chart]} -eq 1 ]; then
+ dt=
+ fi
+
+ exec_start_ms=$now_ms
+ $chart$charts_update $dt
+ ret=$?
+
+ # return the current time in ms in $now_ms
+ ${current_time_ms}
+ exec_end_ms=$now_ms
+
+ echo "BEGIN netdata.plugin_chartsd_$chart $dt"
+ echo "SET run_time = $((exec_end_ms - exec_start_ms))"
+ echo "END"
+
+ if [ $ret -eq 0 ]; then
+ charts_serial_failures[$chart]=0
+ next_charts+=($chart)
+ else
+ charts_serial_failures[$chart]=$((charts_serial_failures[$chart] + 1))
+
+ if [ ${charts_serial_failures[$chart]} -gt ${charts_retries[$chart]} ]; then
+ error "module's '$chart' update() function reported failure ${charts_serial_failures[$chart]} times. Disabling it."
+ else
+ error "module's '$chart' update() function reports failure. Will keep trying for a while."
+ next_charts+=($chart)
+ fi
+ fi
+ else
+ next_charts+=($chart)
+ fi
+ done
+ MODULE_NAME="${chart}"
+
+ # wait the time you are required to
+ next_ms=$((now_ms + (update_every * 1000 * 100)))
+ for x in "${charts_next_update[@]}"; do [ ${x} -lt ${next_ms} ] && next_ms=${x}; done
+ next_ms=$((next_ms - now_ms))
+
+ if [ ${LOOPSLEEPMS_HIGHRES} -eq 1 -a ${next_ms} -gt 0 ]; then
+ next_ms=$((next_ms + current_time_ms_accuracy))
+ seconds=$((next_ms / 1000))
+ millis=$((next_ms % 1000))
+ if [ ${millis} -lt 10 ]; then
+ millis="00${millis}"
+ elif [ ${millis} -lt 100 ]; then
+ millis="0${millis}"
+ fi
+
+ debug "sleeping for ${seconds}.${millis} seconds."
+ ${mysleep} ${seconds}.${millis}
+ else
+ debug "sleeping for ${update_every} seconds."
+ ${mysleep} $update_every
+ fi
+
+ test ${now_ms} -ge ${exit_at} && exit 0
+ done
+
+ fatal "nothing left to do, exiting..."
+}
+
+global_update
diff --git a/collectors/charts.d.plugin/cpu_apps/Makefile.inc b/collectors/charts.d.plugin/cpu_apps/Makefile.inc
new file mode 100644
index 0000000..a35f828
--- /dev/null
+++ b/collectors/charts.d.plugin/cpu_apps/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_charts_DATA += cpu_apps/cpu_apps.chart.sh
+dist_chartsconfig_DATA += cpu_apps/cpu_apps.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += cpu_apps/README.md cpu_apps/Makefile.inc
+
diff --git a/collectors/charts.d.plugin/cpu_apps/README.md b/collectors/charts.d.plugin/cpu_apps/README.md
new file mode 100644
index 0000000..a32a633
--- /dev/null
+++ b/collectors/charts.d.plugin/cpu_apps/README.md
@@ -0,0 +1,6 @@
+# cpu_apps
+
+> THIS MODULE IS OBSOLETE.
+> USE [APPS.PLUGIN](../../apps.plugin).
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fcpu_apps%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/charts.d.plugin/cpu_apps/cpu_apps.chart.sh b/collectors/charts.d.plugin/cpu_apps/cpu_apps.chart.sh
new file mode 100644
index 0000000..e91c46d
--- /dev/null
+++ b/collectors/charts.d.plugin/cpu_apps/cpu_apps.chart.sh
@@ -0,0 +1,70 @@
+# shellcheck shell=bash disable=SC2154,SC1072,SC1073,SC2009,SC2162,SC2006,SC2002,SC2086,SC1117
+# no need for shebang - this file is loaded from charts.d.plugin
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2016 Costa Tsaousis <costa@tsaousis.gr>
+#
+# THIS PLUGIN IS OBSOLETE
+# USE apps.plugin INSTEAD
+
+# a space separated list of commands to monitor
+cpu_apps_apps=
+
+# these are required for computing memory in bytes and cpu in seconds
+#cpu_apps_pagesize="`getconf PAGESIZE`"
+cpu_apps_clockticks="$(getconf CLK_TCK)"
+
+cpu_apps_update_every=60
+
+cpu_apps_check() {
+ # this should return:
+ # - 0 to enable the chart
+ # - 1 to disable the chart
+
+ if [ -z "$cpu_apps_apps" ]; then
+ error "manual configuration required: please set cpu_apps_apps='command1 command2 ...' in charts.d/cpu_apps.conf"
+ return 1
+ fi
+ return 0
+}
+
+cpu_apps_bc_finalze=
+
+cpu_apps_create() {
+
+ echo "CHART chartsd_apps.cpu '' 'Apps CPU' 'milliseconds / $cpu_apps_update_every sec' apps apps stacked 20001 $cpu_apps_update_every"
+
+ local x=
+ for x in $cpu_apps_apps; do
+ echo "DIMENSION $x $x incremental 1000 $cpu_apps_clockticks"
+
+ # this string is needed later in the update() function
+ # to finalize the instructions for the bc command
+ cpu_apps_bc_finalze="$cpu_apps_bc_finalze \"SET $x = \"; $x;"
+ done
+ return 0
+}
+
+cpu_apps_update() {
+ # do all the work to collect / calculate the values
+ # for each dimension
+ # remember: KEEP IT SIMPLE AND SHORT
+
+ echo "BEGIN chartsd_apps.cpu"
+ ps -o pid,comm -C "$cpu_apps_apps" |
+ grep -v "COMMAND" |
+ (
+ while read pid name; do
+ echo "$name+=$(cat /proc/$pid/stat | cut -d ' ' -f 14-15)"
+ done
+ ) |
+ (
+ sed -e "s/ \+/ /g" -e "s/ /+/g"
+ echo "$cpu_apps_bc_finalze"
+ ) | bc
+ echo "END"
+
+ return 0
+}
diff --git a/collectors/charts.d.plugin/cpu_apps/cpu_apps.conf b/collectors/charts.d.plugin/cpu_apps/cpu_apps.conf
new file mode 100644
index 0000000..850cd0c
--- /dev/null
+++ b/collectors/charts.d.plugin/cpu_apps/cpu_apps.conf
@@ -0,0 +1,19 @@
+# no need for shebang - this file is loaded from charts.d.plugin
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2018 Costa Tsaousis <costa@tsaousis.gr>
+# GPL v3+
+
+# THIS PLUGIN IS DEPRECATED
+# apps.plugin can do better
+
+#cpu_apps_apps=
+
+# the data collection frequency
+# if unset, will inherit the netdata update frequency
+#cpu_apps_update_every=2
+
+# the number of retries to do in case of failure
+# before disabling the module
+#cpu_apps_retries=10
diff --git a/collectors/charts.d.plugin/cpufreq/Makefile.inc b/collectors/charts.d.plugin/cpufreq/Makefile.inc
new file mode 100644
index 0000000..6823791
--- /dev/null
+++ b/collectors/charts.d.plugin/cpufreq/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_charts_DATA += cpufreq/cpufreq.chart.sh
+dist_chartsconfig_DATA += cpufreq/cpufreq.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += cpufreq/README.md cpufreq/Makefile.inc
+
diff --git a/collectors/charts.d.plugin/cpufreq/README.md b/collectors/charts.d.plugin/cpufreq/README.md
new file mode 100644
index 0000000..84883f5
--- /dev/null
+++ b/collectors/charts.d.plugin/cpufreq/README.md
@@ -0,0 +1,6 @@
+# cpufreq
+
+> THIS MODULE IS OBSOLETE.
+> USE THE [PROC PLUGIN](../../proc.plugin) - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fcpufreq%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/charts.d.plugin/cpufreq/cpufreq.chart.sh b/collectors/charts.d.plugin/cpufreq/cpufreq.chart.sh
new file mode 100644
index 0000000..68708d9
--- /dev/null
+++ b/collectors/charts.d.plugin/cpufreq/cpufreq.chart.sh
@@ -0,0 +1,88 @@
+# shellcheck shell=bash
+# no need for shebang - this file is loaded from charts.d.plugin
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2016 Costa Tsaousis <costa@tsaousis.gr>
+#
+
+# if this chart is called X.chart.sh, then all functions and global variables
+# must start with X_
+
+cpufreq_sys_dir="${NETDATA_HOST_PREFIX}/sys/devices"
+cpufreq_sys_depth=10
+cpufreq_source_update=1
+
+# _update_every is a special variable - it holds the number of seconds
+# between the calls of the _update() function
+cpufreq_update_every=
+cpufreq_priority=10000
+
+cpufreq_find_all_files() {
+ find "$1" -maxdepth $cpufreq_sys_depth -name scaling_cur_freq 2>/dev/null
+}
+
+# _check is called once, to find out if this chart should be enabled or not
+cpufreq_check() {
+
+ # this should return:
+ # - 0 to enable the chart
+ # - 1 to disable the chart
+
+ [ -z "$(cpufreq_find_all_files "$cpufreq_sys_dir")" ] && return 1
+ return 0
+}
+
+# _create is called once, to create the charts
+cpufreq_create() {
+ local dir file id i
+
+ # we create a script with the source of the
+ # cpufreq_update() function
+ # - the highest speed we can achieve -
+ [ $cpufreq_source_update -eq 1 ] && echo >"$TMP_DIR/cpufreq.sh" "cpufreq_update() {"
+
+ echo "CHART cpu.cpufreq '' 'CPU Clock' 'MHz' 'cpufreq' '' line $((cpufreq_priority + 1)) $cpufreq_update_every"
+ echo >>"$TMP_DIR/cpufreq.sh" "echo \"BEGIN cpu.cpufreq \$1\""
+
+ i=0
+ for file in $(cpufreq_find_all_files "$cpufreq_sys_dir" | sort -u); do
+ i=$((i + 1))
+ dir=$(dirname "$file")
+ cpu=
+
+ [ -f "$dir/affected_cpus" ] && cpu=$(cat "$dir/affected_cpus")
+ [ -z "$cpu" ] && cpu="$i.a"
+
+ id="$(fixid "cpu$cpu")"
+
+ debug "file='$file', dir='$dir', cpu='$cpu', id='$id'"
+
+ echo "DIMENSION $id '$id' absolute 1 1000"
+ echo >>"$TMP_DIR/cpufreq.sh" "echo \"SET $id = \"\$(< $file )"
+ done
+ echo >>"$TMP_DIR/cpufreq.sh" "echo END"
+
+ [ $cpufreq_source_update -eq 1 ] && echo >>"$TMP_DIR/cpufreq.sh" "}"
+
+ # ok, load the function cpufreq_update() we created
+ # shellcheck disable=SC1090
+ [ $cpufreq_source_update -eq 1 ] && . "$TMP_DIR/cpufreq.sh"
+
+ return 0
+}
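+# for reference, the generated $TMP_DIR/cpufreq.sh looks roughly like this
+# (illustrative path and id):
+#
+#   cpufreq_update() {
+#   echo "BEGIN cpu.cpufreq $1"
+#   echo "SET cpu0 = "$(< /sys/devices/system/cpu/cpu0/cpufreq/scaling_cur_freq )
+#   echo END
+#   }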
+
+# _update is called continuously, to collect the values
+cpufreq_update() {
+ # the first argument to this function is the microseconds since last update
+ # pass this parameter to the BEGIN statement (see below).
+
+ # do all the work to collect / calculate the values
+ # for each dimension
+ # remember: KEEP IT SIMPLE AND SHORT
+ # shellcheck disable=SC1090
+ [ $cpufreq_source_update -eq 0 ] && . "$TMP_DIR/cpufreq.sh" "$1"
+
+ return 0
+}
diff --git a/collectors/charts.d.plugin/cpufreq/cpufreq.conf b/collectors/charts.d.plugin/cpufreq/cpufreq.conf
new file mode 100644
index 0000000..7130555
--- /dev/null
+++ b/collectors/charts.d.plugin/cpufreq/cpufreq.conf
@@ -0,0 +1,24 @@
+# no need for shebang - this file is loaded from charts.d.plugin
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2018 Costa Tsaousis <costa@tsaousis.gr>
+# GPL v3+
+
+# THIS PLUGIN IS DEPRECATED
+# USE THE PYTHON.D ONE
+
+#cpufreq_sys_dir="/sys/devices"
+#cpufreq_sys_depth=10
+#cpufreq_source_update=1
+
+# the data collection frequency
+# if unset, will inherit the netdata update frequency
+#cpufreq_update_every=
+
+# the charts priority on the dashboard
+#cpufreq_priority=10000
+
+# the number of retries to do in case of failure
+# before disabling the module
+#cpufreq_retries=10
diff --git a/collectors/charts.d.plugin/example/Makefile.inc b/collectors/charts.d.plugin/example/Makefile.inc
new file mode 100644
index 0000000..e6838fb
--- /dev/null
+++ b/collectors/charts.d.plugin/example/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_charts_DATA += example/example.chart.sh
+dist_chartsconfig_DATA += example/example.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += example/README.md example/Makefile.inc
+
diff --git a/collectors/charts.d.plugin/example/README.md b/collectors/charts.d.plugin/example/README.md
new file mode 100644
index 0000000..e62f767
--- /dev/null
+++ b/collectors/charts.d.plugin/example/README.md
@@ -0,0 +1,6 @@
+# Example
+
+This is just an example charts.d data collector.
+
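+To test it, run the charts.d.plugin orchestrator with the module name as its only
+argument (this also enables debug mode). The path below assumes the default netdata
+plugins directory; the module must first be enabled with `example=force` in
+`charts.d.conf` and `example_magic_number=12345` in `charts.d/example.conf`:
+
+```sh
+/usr/libexec/netdata/plugins.d/charts.d.plugin example
+```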
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fexample%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/charts.d.plugin/example/example.chart.sh b/collectors/charts.d.plugin/example/example.chart.sh
new file mode 100644
index 0000000..8bae570
--- /dev/null
+++ b/collectors/charts.d.plugin/example/example.chart.sh
@@ -0,0 +1,123 @@
+# shellcheck shell=bash
+# no need for shebang - this file is loaded from charts.d.plugin
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2016 Costa Tsaousis <costa@tsaousis.gr>
+#
+
+# if this chart is called X.chart.sh, then all functions and global variables
+# must start with X_
+
+# _update_every is a special variable - it holds the number of seconds
+# between the calls of the _update() function
+example_update_every=
+
+# the priority is used to sort the charts on the dashboard
+# 1 = the first chart
+example_priority=150000
+
+# to enable this chart, you have to set this to 12345
+# (just a demonstration for something that needs to be checked)
+example_magic_number=
+
+# global variables to store our collected data
+# remember: they need to start with the module name example_
+example_value1=
+example_value2=
+example_value3=
+example_value4=
+example_last=0
+example_count=0
+
+example_get() {
+ # do all the work to collect / calculate the values
+ # for each dimension
+ #
+ # Remember:
+ # 1. KEEP IT SIMPLE AND SHORT
+ # 2. AVOID FORKS (avoid piping commands)
+ # 3. AVOID CALLING TOO MANY EXTERNAL PROGRAMS
+ # 4. USE LOCAL VARIABLES (global variables may overlap with other modules)
+
+ example_value1=$RANDOM
+ example_value2=$RANDOM
+ example_value3=$RANDOM
+ example_value4=$((8192 + (RANDOM * 16383 / 32767)))
+
+ if [ $example_count -gt 0 ]; then
+ example_count=$((example_count - 1))
+
+ [ $example_last -gt 16383 ] && example_value4=$((example_last + (RANDOM * ((32767 - example_last) / 2) / 32767)))
+ [ $example_last -le 16383 ] && example_value4=$((example_last - (RANDOM * (example_last / 2) / 32767)))
+ else
+ example_count=$((1 + (RANDOM * 5 / 32767)))
+
+ if [ $example_last -gt 16383 ] && [ $example_value4 -gt 16383 ]; then
+ example_value4=$((example_value4 - 16383))
+ fi
+ if [ $example_last -le 16383 ] && [ $example_value4 -lt 16383 ]; then
+ example_value4=$((example_value4 + 16383))
+ fi
+ fi
+ example_last=$example_value4
+
+ # this should return:
+ # - 0 to send the data to netdata
+ # - 1 to report a failure to collect the data
+
+ return 0
+}
+
+# _check is called once, to find out if this chart should be enabled or not
+example_check() {
+ # this should return:
+ # - 0 to enable the chart
+ # - 1 to disable the chart
+
+ # check something
+ [ "${example_magic_number}" != "12345" ] && error "manual configuration required: you have to set example_magic_number=$example_magic_number in example.conf to start example chart." && return 1
+
+ # check that we can collect data
+ example_get || return 1
+
+ return 0
+}
+
+# _create is called once, to create the charts
+example_create() {
+ # create the chart with 3 dimensions
+ cat <<EOF
+CHART example.random '' "Random Numbers Stacked Chart" "% of random numbers" random random stacked $((example_priority)) $example_update_every
+DIMENSION random1 '' percentage-of-absolute-row 1 1
+DIMENSION random2 '' percentage-of-absolute-row 1 1
+DIMENSION random3 '' percentage-of-absolute-row 1 1
+CHART example.random2 '' "A random number" "random number" random random area $((example_priority + 1)) $example_update_every
+DIMENSION random '' absolute 1 1
+EOF
+
+ return 0
+}
+
+# _update is called continuously, to collect the values
+example_update() {
+ # the first argument to this function is the microseconds since last update
+ # pass this parameter to the BEGIN statement (see below).
+
+ example_get || return 1
+
+ # write the result of the work.
+ cat <<VALUESEOF
+BEGIN example.random $1
+SET random1 = $example_value1
+SET random2 = $example_value2
+SET random3 = $example_value3
+END
+BEGIN example.random2 $1
+SET random = $example_value4
+END
+VALUESEOF
+
+ return 0
+}
diff --git a/collectors/charts.d.plugin/example/example.conf b/collectors/charts.d.plugin/example/example.conf
new file mode 100644
index 0000000..6232ca5
--- /dev/null
+++ b/collectors/charts.d.plugin/example/example.conf
@@ -0,0 +1,21 @@
+# no need for shebang - this file is loaded from charts.d.plugin
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2018 Costa Tsaousis <costa@tsaousis.gr>
+# GPL v3+
+
+# to enable this chart, you have to set this to 12345
+# (just a demonstration for something that needs to be checked)
+#example_magic_number=12345
+
+# the data collection frequency
+# if unset, will inherit the netdata update frequency
+#example_update_every=
+
+# the charts priority on the dashboard
+#example_priority=150000
+
+# the number of retries to do in case of failure
+# before disabling the module
+#example_retries=10
diff --git a/collectors/charts.d.plugin/exim/Makefile.inc b/collectors/charts.d.plugin/exim/Makefile.inc
new file mode 100644
index 0000000..ca2112a
--- /dev/null
+++ b/collectors/charts.d.plugin/exim/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_charts_DATA += exim/exim.chart.sh
+dist_chartsconfig_DATA += exim/exim.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += exim/README.md exim/Makefile.inc
+
diff --git a/collectors/charts.d.plugin/exim/README.md b/collectors/charts.d.plugin/exim/README.md
new file mode 100644
index 0000000..b4c8538
--- /dev/null
+++ b/collectors/charts.d.plugin/exim/README.md
@@ -0,0 +1,6 @@
+# exim
+
+> THIS MODULE IS OBSOLETE.
+> USE [THE PYTHON ONE](../../python.d.plugin/exim) - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fexim%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/charts.d.plugin/exim/exim.chart.sh b/collectors/charts.d.plugin/exim/exim.chart.sh
new file mode 100644
index 0000000..7b0ef70
--- /dev/null
+++ b/collectors/charts.d.plugin/exim/exim.chart.sh
@@ -0,0 +1,46 @@
+# shellcheck shell=bash
+# no need for shebang - this file is loaded from charts.d.plugin
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2016 Costa Tsaousis <costa@tsaousis.gr>
+#
+# Contributed by @jsveiga with PR #480
+
+# the exim command to run
+exim_command=
+
+# how frequently to collect queue size
+exim_update_every=5
+
+exim_priority=60000
+
+exim_check() {
+ if [ -z "${exim_command}" ]; then
+ require_cmd exim || return 1
+ exim_command="${EXIM_CMD}"
+ fi
+
+ if [ "$(${exim_command} -bpc 2>&1 | grep -c denied)" -ne 0 ]; then
+ error "permission denied - please set 'queue_list_requires_admin = false' in your exim options file"
+ return 1
+ fi
+
+ return 0
+}
+
+exim_create() {
+ cat <<EOF
+CHART exim_local.qemails '' "Exim Queue Emails" "emails" queue exim.queued.emails line $((exim_priority + 1)) $exim_update_every
+DIMENSION emails '' absolute 1 1
+EOF
+ return 0
+}
+
+exim_update() {
+ echo "BEGIN exim_local.qemails $1"
+ echo "SET emails = $(run "${exim_command}" -bpc)"
+ echo "END"
+ return 0
+}
diff --git a/collectors/charts.d.plugin/exim/exim.conf b/collectors/charts.d.plugin/exim/exim.conf
new file mode 100644
index 0000000..f96ac4d
--- /dev/null
+++ b/collectors/charts.d.plugin/exim/exim.conf
@@ -0,0 +1,24 @@
+# no need for shebang - this file is loaded from charts.d.plugin
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2018 Costa Tsaousis <costa@tsaousis.gr>
+# GPL v3+
+
+# THIS PLUGIN IS DEPRECATED
+# USE THE PYTHON.D ONE
+
+# the exim command to run
+# if empty, it will use the one found in the system path
+#exim_command=
+
+# the data collection frequency
+# if unset, will inherit the netdata update frequency
+#exim_update_every=5
+
+# the charts priority on the dashboard
+#exim_priority=60000
+
+# the number of retries to do in case of failure
+# before disabling the module
+#exim_retries=10
diff --git a/collectors/charts.d.plugin/hddtemp/Makefile.inc b/collectors/charts.d.plugin/hddtemp/Makefile.inc
new file mode 100644
index 0000000..2bd29e5
--- /dev/null
+++ b/collectors/charts.d.plugin/hddtemp/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_charts_DATA += hddtemp/hddtemp.chart.sh
+dist_chartsconfig_DATA += hddtemp/hddtemp.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += hddtemp/README.md hddtemp/Makefile.inc
+
diff --git a/collectors/charts.d.plugin/hddtemp/README.md b/collectors/charts.d.plugin/hddtemp/README.md
new file mode 100644
index 0000000..86a2e19
--- /dev/null
+++ b/collectors/charts.d.plugin/hddtemp/README.md
@@ -0,0 +1,30 @@
+# hddtemp
+
+> THIS MODULE IS OBSOLETE.
+> USE [THE PYTHON ONE](../../python.d.plugin/hddtemp) - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT
+
+The plugin collects temperatures from disks.
+
+It creates one chart with all active disks:
+
+1. **temperature in Celsius**
+
+### configuration
+
+hddtemp needs to be running in daemonized mode.
+
+```sh
+# host with daemonized hddtemp
+hddtemp_host="localhost"
+
+# port on which hddtemp is showing data
+hddtemp_port="7634"
+
+# array of included disks
+# the default is to include all
+hddtemp_disks=()
+```
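+
+To quickly verify that the daemon is reachable with these settings, you can query
+it directly (host and port as configured above):
+
+```sh
+nc localhost 7634
+```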
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fhddtemp%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/charts.d.plugin/hddtemp/hddtemp.chart.sh b/collectors/charts.d.plugin/hddtemp/hddtemp.chart.sh
new file mode 100644
index 0000000..a4cef3c
--- /dev/null
+++ b/collectors/charts.d.plugin/hddtemp/hddtemp.chart.sh
@@ -0,0 +1,77 @@
+# shellcheck shell=bash
+# no need for shebang - this file is loaded from charts.d.plugin
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2016 Costa Tsaousis <costa@tsaousis.gr>
+#
+# contributed by @paulfantom with PR #511
+
+# if this chart is called X.chart.sh, then all functions and global variables
+# must start with X_
+hddtemp_host="localhost"
+hddtemp_port="7634"
+declare -A hddtemp_disks=()
+
+# _update_every is a special variable - it holds the number of seconds
+# between the calls of the _update() function
+hddtemp_update_every=3
+hddtemp_priority=90000
+
+# _check is called once, to find out if this chart should be enabled or not
+hddtemp_check() {
+ require_cmd nc || return 1
+ run nc $hddtemp_host $hddtemp_port && return 0 || return 1
+}
+
+# _create is called once, to create the charts
+hddtemp_create() {
+ if [ ${#hddtemp_disks[@]} -eq 0 ]; then
+ local all
+ all=$(nc $hddtemp_host $hddtemp_port)
+ unset hddtemp_disks
+ # shellcheck disable=SC2190,SC2207
+ hddtemp_disks=($(grep -Po '/dev/[^|]+' <<<"$all" | cut -c 6-))
+ fi
+ # local disk_names
+ # disk_names=(`sed -e 's/||/\n/g;s/^|//' <<< "$all" | cut -d '|' -f2 | tr ' ' '_'`)
+
+ echo "CHART hddtemp.temperature 'disks_temp' 'temperature' 'Celsius' 'Disks temperature' 'hddtemp.temp' line $((hddtemp_priority)) $hddtemp_update_every"
+ for i in $(seq 0 $((${#hddtemp_disks[@]} - 1))); do
+ # echo "DIMENSION ${hddtemp_disks[i]} ${disk_names[i]} absolute 1 1"
+ echo "DIMENSION ${hddtemp_disks[$i]} '' absolute 1 1"
+ done
+ return 0
+}
+
+# _update is called continuously, to collect the values
+#hddtemp_last=0
+#hddtemp_count=0
+hddtemp_update() {
+ # local all=( `nc $hddtemp_host $hddtemp_port | sed -e 's/||/\n/g;s/^|//' | cut -d '|' -f3` )
+ # local all=( `nc $hddtemp_host $hddtemp_port | awk 'BEGIN { FS="|" };{i=4; while (i <= NF) {print $i+0;i+=5;};}'` )
+ OLD_IFS=$IFS
+ set -f
+ # shellcheck disable=SC2207
+ IFS="|" all=($(nc $hddtemp_host $hddtemp_port 2>/dev/null))
+ set +f
+ IFS=$OLD_IFS
+
+ # check if there is some data
+ if [ -z "${all[3]}" ]; then
+ return 1
+ fi
+
+ # write the result of the work.
+ echo "BEGIN hddtemp.temperature $1"
+ end=${#hddtemp_disks[@]}
+ for ((i = 0; i < end; i++)); do
+ # temperature - this will turn SLP to zero
+ t=$((all[$((i * 5 + 3))]))
+ echo "SET ${hddtemp_disks[$i]} = $t"
+ done
+ echo "END"
+
+ return 0
+}
diff --git a/collectors/charts.d.plugin/hddtemp/hddtemp.conf b/collectors/charts.d.plugin/hddtemp/hddtemp.conf
new file mode 100644
index 0000000..b6037b4
--- /dev/null
+++ b/collectors/charts.d.plugin/hddtemp/hddtemp.conf
@@ -0,0 +1,23 @@
+# no need for shebang - this file is loaded from charts.d.plugin
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2018 Costa Tsaousis <costa@tsaousis.gr>
+# GPL v3+
+
+# THIS PLUGIN IS DEPRECATED
+# USE THE PYTHON.D ONE
+
+#hddtemp_host="localhost"
+#hddtemp_port="7634"
+
+# the data collection frequency
+# if unset, will inherit the netdata update frequency
+#hddtemp_update_every=3
+
+# the charts priority on the dashboard
+#hddtemp_priority=90000
+
+# the number of retries to do in case of failure
+# before disabling the module
+#hddtemp_retries=10
diff --git a/collectors/charts.d.plugin/libreswan/Makefile.inc b/collectors/charts.d.plugin/libreswan/Makefile.inc
new file mode 100644
index 0000000..af767d0
--- /dev/null
+++ b/collectors/charts.d.plugin/libreswan/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_charts_DATA += libreswan/libreswan.chart.sh
+dist_chartsconfig_DATA += libreswan/libreswan.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += libreswan/README.md libreswan/Makefile.inc
+
diff --git a/collectors/charts.d.plugin/libreswan/README.md b/collectors/charts.d.plugin/libreswan/README.md
new file mode 100644
index 0000000..18c6450
--- /dev/null
+++ b/collectors/charts.d.plugin/libreswan/README.md
@@ -0,0 +1,44 @@
+# libreswan
+
+The plugin collects bytes-in, bytes-out and uptime for all established libreswan IPSEC tunnels.
+
+The following charts are created, **per tunnel**:
+
+1. **Uptime**
+
+ * the uptime of the tunnel
+
+2. **Traffic**
+
+ * bytes in
+ * bytes out
+
+### configuration
+
+Its config file is `/etc/netdata/charts.d/libreswan.conf`.
+
+The plugin executes 2 commands to collect all the information it needs:
+
+```sh
+ipsec whack --status
+ipsec whack --trafficstatus
+```
+
+The first command is used to extract the currently established tunnels, their IDs and their names.
+The second command is used to extract the current uptime and traffic.
+
+Most probably user `netdata` will not be able to query libreswan, so the `ipsec` commands will be denied.
+The plugin attempts to run `ipsec` as `sudo ipsec ...`, to get access to libreswan statistics.
+
+To allow user `netdata` to execute `sudo ipsec ...`, create the file `/etc/sudoers.d/netdata` with this content:
+
+```
+netdata ALL = (root) NOPASSWD: /sbin/ipsec whack --status
+netdata ALL = (root) NOPASSWD: /sbin/ipsec whack --trafficstatus
+```
+
+Make sure the path `/sbin/ipsec` matches your setup (execute `which ipsec` to find the right path).
+
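+To verify that the sudoers rules work, you can run (as root) one of the commands the
+plugin uses, impersonating the `netdata` user (adjust the `ipsec` path as above):
+
+```sh
+sudo -u netdata sudo -n /sbin/ipsec whack --trafficstatus
+```
+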
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Flibreswan%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/charts.d.plugin/libreswan/libreswan.chart.sh b/collectors/charts.d.plugin/libreswan/libreswan.chart.sh
new file mode 100644
index 0000000..1a8f90b
--- /dev/null
+++ b/collectors/charts.d.plugin/libreswan/libreswan.chart.sh
@@ -0,0 +1,172 @@
+# shellcheck shell=bash disable=SC1117
+# no need for shebang - this file is loaded from charts.d.plugin
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2018 Costa Tsaousis <costa@tsaousis.gr>
+#
+
+# _update_every is a special variable - it holds the number of seconds
+# between the calls of the _update() function
+libreswan_update_every=1
+
+# the priority is used to sort the charts on the dashboard
+# 1 = the first chart
+libreswan_priority=90000
+
+# set to 1, to run ipsec with sudo
+libreswan_sudo=1
+
+# global variables to store our collected data
+
+# [TUNNELID] = TUNNELNAME
+# here we track the *latest* established tunnels
+# as detected by: ipsec whack --status
+declare -A libreswan_connected_tunnels=()
+
+# [TUNNELID] = VALUE
+# here we track values of all established tunnels (not only the latest)
+# as detected by: ipsec whack --trafficstatus
+declare -A libreswan_traffic_in=()
+declare -A libreswan_traffic_out=()
+declare -A libreswan_established_add_time=()
+
+# [TUNNELNAME] = CHARTID
+# here we remember CHARTIDs of all tunnels
+# we need this to avoid converting tunnel names to chart IDs on every iteration
+declare -A libreswan_tunnel_charts=()
+
+# run the ipsec command
+libreswan_ipsec() {
+ if [ ${libreswan_sudo} -ne 0 ]; then
+ sudo -n "${IPSEC_CMD}" "${@}"
+ return $?
+ else
+ "${IPSEC_CMD}" "${@}"
+ return $?
+ fi
+}
+
+# fetch latest values - fill the arrays
+libreswan_get() {
+ # do all the work to collect / calculate the values
+ # for each dimension
+
+ # empty the variables
+ libreswan_traffic_in=()
+ libreswan_traffic_out=()
+ libreswan_established_add_time=()
+ libreswan_connected_tunnels=()
+
+ # convert the ipsec command output to a shell script
+ # and source it to get the values
+ # shellcheck disable=SC1090
+ source <(
+ {
+ libreswan_ipsec whack --status
+ libreswan_ipsec whack --trafficstatus
+ } | sed -n \
+ -e "s|[0-9]\+ #\([0-9]\+\): \"\(.*\)\".*IPsec SA established.*newest IPSEC.*|libreswan_connected_tunnels[\"\1\"]=\"\2\"|p" \
+ -e "s|[0-9]\+ #\([0-9]\+\): \"\(.*\)\",.* add_time=\([0-9]\+\),.* inBytes=\([0-9]\+\),.* outBytes=\([0-9]\+\).*|libreswan_traffic_in[\"\1\"]=\"\4\"; libreswan_traffic_out[\"\1\"]=\"\5\"; libreswan_established_add_time[\"\1\"]=\"\3\";|p"
+ ) || return 1
+
+ # check we got some data
+ [ ${#libreswan_connected_tunnels[@]} -eq 0 ] && return 1
+
+ return 0
+}
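+# for example, a (hypothetical) --trafficstatus line like:
+#   006 #3: "mytunnel", ... add_time=1515135204, inBytes=2864, outBytes=1254, ...
+# is converted by the sed expression above into:
+#   libreswan_traffic_in["3"]="2864"; libreswan_traffic_out["3"]="1254"; libreswan_established_add_time["3"]="1515135204";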
+
+# _check is called once, to find out if this chart should be enabled or not
+libreswan_check() {
+ # this should return:
+ # - 0 to enable the chart
+ # - 1 to disable the chart
+
+ require_cmd ipsec || return 1
+
+ # make sure it is libreswan
+ # shellcheck disable=SC2143
+ if [ -z "$(ipsec --version | grep -i libreswan)" ]; then
+ error "ipsec command is not Libreswan. Disabling Libreswan plugin."
+ return 1
+ fi
+
+ # check that we can collect data
+ libreswan_get || return 1
+
+ return 0
+}
+
+# create the charts for an ipsec tunnel
+libreswan_create_one() {
+ local n="${1}" name
+
+ name="${libreswan_connected_tunnels[${n}]}"
+
+ [ ! -z "${libreswan_tunnel_charts[${name}]}" ] && return 0
+
+ libreswan_tunnel_charts[${name}]="$(fixid "${name}")"
+
+ cat <<EOF
+CHART libreswan.${libreswan_tunnel_charts[${name}]}_net '${name}_net' "LibreSWAN Tunnel ${name} Traffic" "kilobits/s" "${name}" libreswan.net area $((libreswan_priority)) $libreswan_update_every
+DIMENSION in '' incremental 8 1000
+DIMENSION out '' incremental -8 1000
+CHART libreswan.${libreswan_tunnel_charts[${name}]}_uptime '${name}_uptime' "LibreSWAN Tunnel ${name} Uptime" "seconds" "${name}" libreswan.uptime line $((libreswan_priority + 1)) $libreswan_update_every
+DIMENSION uptime '' absolute 1 1
+EOF
+
+ return 0
+
+}
+
+# _create is called once, to create the charts
+libreswan_create() {
+ local n
+ for n in "${!libreswan_connected_tunnels[@]}"; do
+ libreswan_create_one "${n}"
+ done
+ return 0
+}
+
+libreswan_now=$(date +%s)
+
+# send the values to netdata for an ipsec tunnel
+libreswan_update_one() {
+ local n="${1}" microseconds="${2}" name id uptime
+
+ name="${libreswan_connected_tunnels[${n}]}"
+ id="${libreswan_tunnel_charts[${name}]}"
+
+ [ -z "${id}" ] && libreswan_create_one "${name}"
+
+ uptime=$((libreswan_now - libreswan_established_add_time[${n}]))
+ [ ${uptime} -lt 0 ] && uptime=0
+
+ # write the result of the work.
+ cat <<VALUESEOF
+BEGIN libreswan.${id}_net ${microseconds}
+SET in = ${libreswan_traffic_in[${n}]}
+SET out = ${libreswan_traffic_out[${n}]}
+END
+BEGIN libreswan.${id}_uptime ${microseconds}
+SET uptime = ${uptime}
+END
+VALUESEOF
+}
+
+# _update is called continuously, to collect the values
+libreswan_update() {
+ # the first argument to this function is the microseconds since last update
+ # pass this parameter to the BEGIN statement (see below).
+
+ libreswan_get || return 1
+ libreswan_now=$(date +%s)
+
+ local n
+ for n in "${!libreswan_connected_tunnels[@]}"; do
+ libreswan_update_one "${n}" "${@}"
+ done
+
+ return 0
+}
diff --git a/collectors/charts.d.plugin/libreswan/libreswan.conf b/collectors/charts.d.plugin/libreswan/libreswan.conf
new file mode 100644
index 0000000..9b3ee77
--- /dev/null
+++ b/collectors/charts.d.plugin/libreswan/libreswan.conf
@@ -0,0 +1,29 @@
+# no need for shebang - this file is loaded from charts.d.plugin
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2018 Costa Tsaousis <costa@tsaousis.gr>
+# GPL v3+
+#
+
+# the data collection frequency
+# if unset, will inherit the netdata update frequency
+#libreswan_update_every=1
+
+# the charts priority on the dashboard
+#libreswan_priority=90000
+
+# the number of retries to do in case of failure
+# before disabling the module
+#libreswan_retries=10
+
+# set to 1, to run ipsec with sudo (the default)
+# set to 0, to run ipsec without sudo
+#libreswan_sudo=1
+
+# TO ALLOW NETDATA RUN ipsec AS ROOT
+# CREATE THE FILE: /etc/sudoers.d/netdata
+# WITH THESE 2 LINES (uncommented of course):
+#
+# netdata ALL = (root) NOPASSWD: /sbin/ipsec whack --status
+# netdata ALL = (root) NOPASSWD: /sbin/ipsec whack --trafficstatus
diff --git a/collectors/charts.d.plugin/load_average/Makefile.inc b/collectors/charts.d.plugin/load_average/Makefile.inc
new file mode 100644
index 0000000..e5a481b
--- /dev/null
+++ b/collectors/charts.d.plugin/load_average/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_charts_DATA += load_average/load_average.chart.sh
+dist_chartsconfig_DATA += load_average/load_average.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += load_average/README.md load_average/Makefile.inc
+
diff --git a/collectors/charts.d.plugin/load_average/README.md b/collectors/charts.d.plugin/load_average/README.md
new file mode 100644
index 0000000..ef84b5b
--- /dev/null
+++ b/collectors/charts.d.plugin/load_average/README.md
@@ -0,0 +1,6 @@
+# load_average
+
+> THIS MODULE IS OBSOLETE.
+> THE NETDATA DAEMON COLLECTS LOAD AVERAGE BY ITSELF
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fload_average%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/charts.d.plugin/load_average/load_average.chart.sh b/collectors/charts.d.plugin/load_average/load_average.chart.sh
new file mode 100644
index 0000000..841e3d9
--- /dev/null
+++ b/collectors/charts.d.plugin/load_average/load_average.chart.sh
@@ -0,0 +1,69 @@
+# shellcheck shell=bash disable=SC2154,SC1072,SC1073,SC2009,SC2162,SC2006,SC2002,SC2086,SC1117
+# no need for shebang - this file is loaded from charts.d.plugin
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2016 Costa Tsaousis <costa@tsaousis.gr>
+#
+
+load_average_update_every=5
+load_priority=100
+
+# this is an example charts.d collector
+# it is disabled by default.
+# there is no point in enabling it, since netdata already
+# collects this information using its internal plugins.
+load_average_enabled=0
+
+load_average_check() {
+ # this should return:
+ # - 0 to enable the chart
+ # - 1 to disable the chart
+
+ if [ ${load_average_update_every} -lt 5 ]; then
+ # there is no point in going shorter than 5 seconds
+ # the kernel updates this value every 5 seconds
+ load_average_update_every=5
+ fi
+
+ [ ${load_average_enabled} -eq 0 ] && return 1
+ return 0
+}
+
+load_average_create() {
+ # create a chart with 3 dimensions
+ cat <<EOF
+CHART system.load '' "System Load Average" "load" load system.load line $((load_priority + 1)) $load_average_update_every
+DIMENSION load1 '1 min' absolute 1 100
+DIMENSION load5 '5 mins' absolute 1 100
+DIMENSION load15 '15 mins' absolute 1 100
+EOF
+
+ return 0
+}
+
+load_average_update() {
+ # do all the work to collect / calculate the values
+ # for each dimension
+ # remember: KEEP IT SIMPLE AND SHORT
+
+ # here we parse the system average load
+ # the values are decimal (with 2 decimal digits), so we remove the dot;
+ # the dimensions are defined with divisor = 100, so the chart shows the right value
+ loadavg="$(cat /proc/loadavg | sed -e "s/\.//g")"
+ load1=$(echo $loadavg | cut -d ' ' -f 1)
+ load5=$(echo $loadavg | cut -d ' ' -f 2)
+ load15=$(echo $loadavg | cut -d ' ' -f 3)
+
+ # write the result of the work.
+ cat <<VALUESEOF
+BEGIN system.load
+SET load1 = $load1
+SET load5 = $load5
+SET load15 = $load15
+END
+VALUESEOF
+
+ return 0
+}
diff --git a/collectors/charts.d.plugin/load_average/load_average.conf b/collectors/charts.d.plugin/load_average/load_average.conf
new file mode 100644
index 0000000..6897927
--- /dev/null
+++ b/collectors/charts.d.plugin/load_average/load_average.conf
@@ -0,0 +1,22 @@
+# no need for shebang - this file is loaded from charts.d.plugin
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2018 Costa Tsaousis <costa@tsaousis.gr>
+# GPL v3+
+
+# THIS PLUGIN IS DEPRECATED
+# netdata can collect this metric already
+
+#load_average_enabled=0
+
+# the data collection frequency
+# if unset, will inherit the netdata update frequency
+#load_average_update_every=5
+
+# the charts priority on the dashboard
+#load_average_priority=100
+
+# the number of retries to do in case of failure
+# before disabling the module
+#load_average_retries=10
diff --git a/collectors/charts.d.plugin/loopsleepms.sh.inc b/collectors/charts.d.plugin/loopsleepms.sh.inc
new file mode 100644
index 0000000..e44eff6
--- /dev/null
+++ b/collectors/charts.d.plugin/loopsleepms.sh.inc
@@ -0,0 +1,219 @@
+# no need for shebang - this file is included from other scripts
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+LOOPSLEEP_DATE="$(which date 2>/dev/null || command -v date 2>/dev/null)"
+if [ -z "$LOOPSLEEP_DATE" ]; then
+ echo >&2 "$0: ERROR: Cannot find the command 'date' in the system path."
+ exit 1
+fi
+
+# -----------------------------------------------------------------------------
+# use the date command as a high resolution timer
+
+now_ms=
+LOOPSLEEPMS_HIGHRES=1
+test "$($LOOPSLEEP_DATE +%N)" = "%N" && LOOPSLEEPMS_HIGHRES=0
+test -z "$($LOOPSLEEP_DATE +%N)" && LOOPSLEEPMS_HIGHRES=0
+current_time_ms_from_date() {
+ if [ $LOOPSLEEPMS_HIGHRES -eq 0 ]; then
+ now_ms="$($LOOPSLEEP_DATE +'%s')000"
+ else
+ now_ms="$(($($LOOPSLEEP_DATE +'%s * 1000 + %-N / 1000000')))"
+ fi
+}
+
+# -----------------------------------------------------------------------------
+# use /proc/uptime as a high resolution timer
+
+current_time_ms_from_date
+current_time_ms_from_uptime_started="${now_ms}"
+current_time_ms_from_uptime_last="${now_ms}"
+current_time_ms_from_uptime_first=0
+current_time_ms_from_uptime() {
+ local up rest arr=() n
+
+ read up rest </proc/uptime
+ if [ $? -ne 0 ]; then
+ echo >&2 "$0: Cannot read /proc/uptime - falling back to current_time_ms_from_date()."
+ current_time_ms="current_time_ms_from_date"
+ current_time_ms_from_date
+ current_time_ms_accuracy=1
+ return
+ fi
+
+ arr=(${up//./ })
+
+ if [ ${#arr[1]} -lt 1 ]; then
+ n="${arr[0]}000"
+ elif [ ${#arr[1]} -lt 2 ]; then
+ n="${arr[0]}${arr[1]}00"
+ elif [ ${#arr[1]} -lt 3 ]; then
+ n="${arr[0]}${arr[1]}0"
+ else
+ n="${arr[0]}${arr[1]}"
+ fi
+
+ now_ms=$((current_time_ms_from_uptime_started - current_time_ms_from_uptime_first + n))
+
+ if [ "${now_ms}" -lt "${current_time_ms_from_uptime_last}" ]; then
+ echo >&2 "$0: Cannot use current_time_ms_from_uptime() - new time ${now_ms} is older than the last ${current_time_ms_from_uptime_last} - falling back to current_time_ms_from_date()."
+ current_time_ms="current_time_ms_from_date"
+ current_time_ms_from_date
+ current_time_ms_accuracy=1
+ fi
+
+ current_time_ms_from_uptime_last="${now_ms}"
+}
+current_time_ms_from_uptime
+current_time_ms_from_uptime_first="$((now_ms - current_time_ms_from_uptime_started))"
+current_time_ms_from_uptime_last="${current_time_ms_from_uptime_first}"
+current_time_ms="current_time_ms_from_uptime"
+current_time_ms_accuracy=10
+if [ "${current_time_ms_from_uptime_first}" -eq 0 ]; then
+ echo >&2 "$0: Invalid setup for current_time_ms_from_uptime() - falling back to current_time_ms_from_date()."
+ current_time_ms="current_time_ms_from_date"
+ current_time_ms_accuracy=1
+fi
+
+# -----------------------------------------------------------------------------
+# use read with timeout for sleep
+
+mysleep=""
+
+mysleep_fifo="${NETDATA_CACHE_DIR-/tmp}/.netdata_bash_sleep_timer_fifo"
+[ -f "${mysleep_fifo}" ] && rm "${mysleep_fifo}"
+[ ! -p "${mysleep_fifo}" ] && mkfifo "${mysleep_fifo}"
+[ -p "${mysleep_fifo}" ] && mysleep="mysleep_read"
+
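+# reading from an otherwise idle fifo with 'read -t' blocks until the timeout
+# expires (return code > 128), giving a sub-second sleep without forking an
+# external command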
+mysleep_read() {
+ read -t "${1}" <>"${mysleep_fifo}"
+ ret=$?
+ if [ $ret -le 128 ]; then
+ echo >&2 "$0: Cannot use read for sleeping (return code ${ret})."
+ mysleep="sleep"
+ ${mysleep} "${1}"
+ fi
+}
+
+# -----------------------------------------------------------------------------
+# use bash loadable module for sleep
+
+mysleep_builtin() {
+ builtin sleep "${1}"
+ ret=$?
+ if [ $ret -ne 0 ]; then
+ echo >&2 "$0: Cannot use builtin sleep for sleeping (return code ${ret})."
+ mysleep="sleep"
+ ${mysleep} "${1}"
+ fi
+}
+
+if [ -z "${mysleep}" -a "$((BASH_VERSINFO[0] + 0))" -ge 3 -a "${NETDATA_BASH_LOADABLES}" != "DISABLE" ]; then
+ # enable modules only for bash version 3+
+
+ for bash_modules_path in ${BASH_LOADABLES_PATH//:/ } "$(pkg-config bash --variable=loadablesdir 2>/dev/null)" "/usr/lib/bash" "/lib/bash" "/lib64/bash" "/usr/local/lib/bash" "/usr/local/lib64/bash"; do
+ [ -z "${bash_modules_path}" -o ! -d "${bash_modules_path}" ] && continue
+
+ # check for sleep
+ for bash_module_sleep in "sleep" "sleep.so"; do
+ if [ -f "${bash_modules_path}/${bash_module_sleep}" ]; then
+ if enable -f "${bash_modules_path}/${bash_module_sleep}" sleep 2>/dev/null; then
+ mysleep="mysleep_builtin"
+ # echo >&2 "$0: Using bash loadable ${bash_modules_path}/${bash_module_sleep} for sleep"
+ break
+ fi
+ fi
+
+ done
+
+ [ ! -z "${mysleep}" ] && break
+ done
+fi
+
+# -----------------------------------------------------------------------------
+# fallback to external sleep
+
+[ -z "${mysleep}" ] && mysleep="sleep"
+
+# -----------------------------------------------------------------------------
+# this function is used to sleep for a fraction of a second
+# it calculates the time spent between successive calls
+# and adjusts the sleep time, so that you get exactly the
+# loop interval you asked for.
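+#
+# a minimal usage sketch (hypothetical collector loop):
+#   while true; do
+#     collect_and_print_values   # the actual work
+#     loopsleepms "${update_every}"
+#   done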
+
+LOOPSLEEPMS_LASTRUN=0
+LOOPSLEEPMS_NEXTRUN=0
+LOOPSLEEPMS_LASTSLEEP=0
+LOOPSLEEPMS_LASTWORK=0
+
+loopsleepms() {
+ local tellwork=0 t="${1}" div s m now mstosleep
+
+ if [ "${t}" = "tellwork" ]; then
+ tellwork=1
+ shift
+ t="${1}"
+ fi
+
+ # $t = the time in seconds to wait
+
+ # if high resolution is not supported
+ # just sleep the time requested, in seconds
+ if [ ${LOOPSLEEPMS_HIGHRES} -eq 0 ]; then
+ sleep ${t}
+ return
+ fi
+
+ # get the current time, in ms in ${now_ms}
+ ${current_time_ms}
+
+ # calculate ms since last run
+ [ ${LOOPSLEEPMS_LASTRUN} -gt 0 ] &&
+ LOOPSLEEPMS_LASTWORK=$((now_ms - LOOPSLEEPMS_LASTRUN - LOOPSLEEPMS_LASTSLEEP + current_time_ms_accuracy))
+ # echo "# last loop's work took $LOOPSLEEPMS_LASTWORK ms"
+
+ # remember this run
+ LOOPSLEEPMS_LASTRUN=${now_ms}
+
+ # calculate the next run
+ LOOPSLEEPMS_NEXTRUN=$(((now_ms - (now_ms % (t * 1000))) + (t * 1000)))
+
+ # calculate ms to sleep
+ mstosleep=$((LOOPSLEEPMS_NEXTRUN - now_ms + current_time_ms_accuracy))
+ # echo "# mstosleep is $mstosleep ms"
+
+ # enforce a minimum sleep of 200 ms, even when we are running behind
+ test ${mstosleep} -lt 200 && mstosleep=200
+
+ s=$((mstosleep / 1000))
+ m=$((mstosleep - (s * 1000)))
+ [ "${m}" -lt 100 ] && m="0${m}"
+ [ "${m}" -lt 10 ] && m="0${m}"
+
+ test $tellwork -eq 1 && echo >&2 " >>> PERFORMANCE >>> WORK TOOK ${LOOPSLEEPMS_LASTWORK} ms ( $((LOOPSLEEPMS_LASTWORK * 100 / 1000)).$((LOOPSLEEPMS_LASTWORK % 10))% cpu ) >>> SLEEPING ${mstosleep} ms"
+
+ # echo "# sleeping ${s}.${m}"
+ # echo
+ ${mysleep} ${s}.${m}
+
+ # keep the values we need
+ # for our next run
+ LOOPSLEEPMS_LASTSLEEP=$mstosleep
+}
+
+# test it
+#while [ 1 ]
+#do
+# r=$(( (RANDOM * 2000 / 32767) ))
+# s=$((r / 1000))
+# m=$((r - (s * 1000)))
+# [ "${m}" -lt 100 ] && m="0${m}"
+# [ "${m}" -lt 10 ] && m="0${m}"
+# echo "${r} = ${s}.${m}"
+#
+# # the work
+# ${mysleep} ${s}.${m}
+#
+# # the alignment loop
+# loopsleepms tellwork 1
+#done
diff --git a/collectors/charts.d.plugin/mem_apps/Makefile.inc b/collectors/charts.d.plugin/mem_apps/Makefile.inc
new file mode 100644
index 0000000..ea546fb
--- /dev/null
+++ b/collectors/charts.d.plugin/mem_apps/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_charts_DATA += mem_apps/mem_apps.chart.sh
+dist_chartsconfig_DATA += mem_apps/mem_apps.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += mem_apps/README.md mem_apps/Makefile.inc
+
diff --git a/collectors/charts.d.plugin/mem_apps/README.md b/collectors/charts.d.plugin/mem_apps/README.md
new file mode 100644
index 0000000..a9513e9
--- /dev/null
+++ b/collectors/charts.d.plugin/mem_apps/README.md
@@ -0,0 +1,6 @@
+# mem_apps
+
+> THIS MODULE IS OBSOLETE.
+> USE [APPS.PLUGIN](../../apps.plugin).
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fmem_apps%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/charts.d.plugin/mem_apps/mem_apps.chart.sh b/collectors/charts.d.plugin/mem_apps/mem_apps.chart.sh
new file mode 100644
index 0000000..b9b84a4
--- /dev/null
+++ b/collectors/charts.d.plugin/mem_apps/mem_apps.chart.sh
@@ -0,0 +1,62 @@
+# shellcheck shell=bash disable=SC2154,SC1072,SC1073,SC2009,SC2162,SC2006,SC2002,SC2086,SC1117
+# no need for shebang - this file is loaded from charts.d.plugin
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2016 Costa Tsaousis <costa@tsaousis.gr>
+#
+
+mem_apps_apps=
+
+# these are required for computing memory in bytes and cpu in seconds
+#mem_apps_pagesize="`getconf PAGESIZE`"
+#mem_apps_clockticks="`getconf CLK_TCK`"
+
+mem_apps_update_every=
+
+mem_apps_check() {
+ # this should return:
+ # - 0 to enable the chart
+ # - 1 to disable the chart
+
+ if [ -z "$mem_apps_apps" ]; then
+ error "manual configuration required: please set mem_apps_apps='command1 command2 ...' in $confd/mem_apps_apps.conf"
+ return 1
+ fi
+ return 0
+}
+
+mem_apps_bc_finalze=
+
+mem_apps_create() {
+
+ echo "CHART chartsd_apps.mem '' 'Apps Memory' MB apps apps.mem stacked 20000 $mem_apps_update_every"
+
+ local x=
+ for x in $mem_apps_apps; do
+ echo "DIMENSION $x $x absolute 1 1024"
+
+ # this string is needed later in the update() function
+ # to finalize the instructions for the bc command
+ mem_apps_bc_finalze="$mem_apps_bc_finalze \"SET $x = \"; $x;"
+ done
+ return 0
+}
+
+mem_apps_update() {
+ # do all the work to collect / calculate the values
+ # for each dimension
+ # remember: KEEP IT SIMPLE AND SHORT
+
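+ # how this works: 'ps -o comm,rss' prints one "command rss_in_kb" line per
+ # process; sed turns each into a bc statement like "command+=rss", summing
+ # the RSS of all processes with the same name, and $mem_apps_bc_finalze then
+ # makes bc print a "SET command = total" line for every configured app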
+ echo "BEGIN chartsd_apps.mem"
+ ps -o comm,rss -C "$mem_apps_apps" |
+ grep -v "^COMMAND" |
+ (
+ sed -e "s/ \+/ /g" -e "s/ /+=/g"
+ echo "$mem_apps_bc_finalze"
+ ) | bc
+ echo "END"
+
+ return 0
+}
diff --git a/collectors/charts.d.plugin/mem_apps/mem_apps.conf b/collectors/charts.d.plugin/mem_apps/mem_apps.conf
new file mode 100644
index 0000000..75d24dc
--- /dev/null
+++ b/collectors/charts.d.plugin/mem_apps/mem_apps.conf
@@ -0,0 +1,19 @@
+# no need for shebang - this file is loaded from charts.d.plugin
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2018 Costa Tsaousis <costa@tsaousis.gr>
+# GPL v3+
+
+# THIS PLUGIN IS DEPRECATED
+# apps.plugin can do better
+
+#mem_apps_apps=
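+# for example, a space separated list of process names (hypothetical values):
+#mem_apps_apps="netdata mysqld sshd"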
+
+# the data collection frequency
+# if unset, will inherit the netdata update frequency
+#mem_apps_update_every=2
+
+# the number of retries to do in case of failure
+# before disabling the module
+#mem_apps_retries=10
diff --git a/collectors/charts.d.plugin/mysql/Makefile.inc b/collectors/charts.d.plugin/mysql/Makefile.inc
new file mode 100644
index 0000000..ca02fd0
--- /dev/null
+++ b/collectors/charts.d.plugin/mysql/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_charts_DATA += mysql/mysql.chart.sh
+dist_chartsconfig_DATA += mysql/mysql.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += mysql/README.md mysql/Makefile.inc
+
diff --git a/collectors/charts.d.plugin/mysql/README.md b/collectors/charts.d.plugin/mysql/README.md
new file mode 100644
index 0000000..e52449a
--- /dev/null
+++ b/collectors/charts.d.plugin/mysql/README.md
@@ -0,0 +1,83 @@
+# mysql
+
+> THIS MODULE IS OBSOLETE.
+> USE [THE PYTHON ONE](../../python.d.plugin/mysql) - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT
+
+The plugin will monitor one or more mysql servers.
+
+It will produce the following charts:
+
+1. **Bandwidth** in kbps
+ * in
+ * out
+
+2. **Queries** in queries/sec
+ * queries
+ * questions
+ * slow queries
+
+3. **Operations** in operations/sec
+ * opened tables
+ * flush
+ * commit
+ * delete
+ * prepare
+ * read first
+ * read key
+ * read next
+ * read prev
+ * read random
+ * read random next
+ * rollback
+ * save point
+ * update
+ * write
+
+4. **Table Locks** in locks/sec
+ * immediate
+ * waited
+
+5. **Select Issues** in issues/sec
+ * full join
+ * full range join
+ * range
+ * range check
+ * scan
+
+6. **Sort Issues** in issues/sec
+ * merge passes
+ * range
+ * scan
+
+### configuration
+
+You can configure multiple database servers.
+
+For each server, you can provide:
+
+1. a name, anything you like, but keep it short
+2. the mysql command to connect to the server
+3. the mysql command line options to be used for connecting to the server
+
+Here is an example for 2 servers:
+
+```sh
+mysql_opts[server1]="-h server1.example.com"
+mysql_opts[server2]="-h server2.example.com --connect_timeout 2"
+```
+
+The above will use the `mysql` command found in the system path.
+You can also provide a custom mysql command per server, like this:
+
+```sh
+mysql_cmds[server2]="/opt/mysql/bin/mysql"
+```
+
+The above sets the mysql command only for `server2`; `server1` will use the `mysql` command found in the system path.
+
+If no configuration is given, the plugin will attempt to connect to a mysql server on localhost.
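+
+If your server requires credentials, you can pass them through the standard
+mysql client options. For example (a sketch, assuming the credentials are kept
+in a file readable by the netdata user):
+
+```sh
+mysql_opts[server1]="--defaults-extra-file=/etc/mysql/netdata.cnf -h server1.example.com"
+```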
+
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fmysql%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/charts.d.plugin/mysql/mysql.chart.sh b/collectors/charts.d.plugin/mysql/mysql.chart.sh
new file mode 100644
index 0000000..e1207dc
--- /dev/null
+++ b/collectors/charts.d.plugin/mysql/mysql.chart.sh
@@ -0,0 +1,511 @@
+# shellcheck shell=bash
+# no need for shebang - this file is loaded from charts.d.plugin
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2016 Costa Tsaousis <costa@tsaousis.gr>
+#
+
+# http://dev.mysql.com/doc/refman/5.0/en/server-status-variables.html
+#
+# https://dev.mysql.com/doc/refman/5.1/en/show-status.html
+# SHOW STATUS provides server status information (see Section 5.1.6, “Server Status Variables”).
+# This statement does not require any privilege.
+# It requires only the ability to connect to the server.
+
+mysql_update_every=2
+mysql_priority=60000
+
+declare -A mysql_cmds=() mysql_opts=() mysql_ids=() mysql_data=()
+
+mysql_get() {
+ local arr
+ local oIFS="${IFS}"
+ mysql_data=()
+ IFS=$'\t'$'\n'
+ #arr=($(run "${@}" -e "SHOW GLOBAL STATUS WHERE value REGEXP '^[0-9]';" | egrep "^(Bytes|Slow_|Que|Handl|Table|Selec|Sort_|Creat|Conne|Abort|Binlo|Threa|Innod|Qcach|Key_|Open)" ))
+ #arr=($(run "${@}" -N -e "SHOW GLOBAL STATUS;" | egrep "^(Bytes|Slow_|Que|Handl|Table|Selec|Sort_|Creat|Conne|Abort|Binlo|Threa|Innod|Qcach|Key_|Open)[^ ]+\s[0-9]" ))
+ # shellcheck disable=SC2207
+ arr=($(run "${@}" -N -e "SHOW GLOBAL STATUS;" | grep -E "^(Bytes|Slow_|Que|Handl|Table|Selec|Sort_|Creat|Conne|Abort|Binlo|Threa|Innod|Qcach|Key_|Open)[^[:space:]]+[[:space:]]+[0-9]+"))
+ IFS="${oIFS}"
+
+ [ "${#arr[@]}" -lt 3 ] && return 1
+ local end=${#arr[@]}
+ for ((i = 2; i < end; i += 2)); do
+ mysql_data["${arr[$i]}"]=${arr[i + 1]}
+ done
+
+ [ -z "${mysql_data[Connections]}" ] && return 1
+
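+ # express thread cache misses as a percentage of connections:
+ # Threads_created / Connections * 100, multiplied by another 100 so the
+ # chart dimension (divisor 100) can display two decimal places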
+ mysql_data[Thread_cache_misses]=0
+ [ $((mysql_data[Connections] + 1 - 1)) -gt 0 ] && mysql_data[Thread_cache_misses]=$((mysql_data[Threads_created] * 10000 / mysql_data[Connections]))
+
+ return 0
+}
+
+mysql_check() {
+ # this should return:
+ # - 0 to enable the chart
+ # - 1 to disable the chart
+
+ local x m mysql_cmd tryroot=0 unconfigured=0
+
+ if [ "${1}" = "tryroot" ]; then
+ tryroot=1
+ shift
+ fi
+
+ # shellcheck disable=SC2230
+ [ -z "${mysql_cmd}" ] && mysql_cmd="$(which mysql 2>/dev/null || command -v mysql 2>/dev/null)"
+
+ if [ ${#mysql_opts[@]} -eq 0 ]; then
+ unconfigured=1
+
+ mysql_cmds[local]="$mysql_cmd"
+
+ if [ $tryroot -eq 1 ]; then
+ # the user has not configured us for mysql access
+ # if the root user is passwordless in mysql, we can
+ # attempt to connect to mysql as root
+ mysql_opts[local]="-u root"
+ else
+ mysql_opts[local]=
+ fi
+ fi
+
+ # check once if the url works
+ for m in "${!mysql_opts[@]}"; do
+ [ -z "${mysql_cmds[$m]}" ] && mysql_cmds[$m]="$mysql_cmd"
+ if [ -z "${mysql_cmds[$m]}" ]; then
+ # shellcheck disable=SC2154
+ error "cannot get mysql command for '${m}'. Please set mysql_cmds[$m]='/path/to/mysql', in $confd/mysql.conf"
+ fi
+
+ mysql_get "${mysql_cmds[$m]}" ${mysql_opts[$m]}
+ # shellcheck disable=SC2181
+ if [ ! $? -eq 0 ]; then
+ error "cannot get global status for '$m'. Please set mysql_opts[$m]='options' to whatever needed to get connected to the mysql server, in $confd/mysql.conf"
+ unset "mysql_cmds[$m]"
+ unset "mysql_opts[$m]"
+ unset "mysql_ids[$m]"
+ continue
+ fi
+
+ mysql_ids[$m]="$(fixid "$m")"
+ done
+
+ if [ ${#mysql_opts[@]} -eq 0 ]; then
+ if [ ${unconfigured} -eq 1 ] && [ ${tryroot} -eq 0 ]; then
+ mysql_check tryroot "${@}"
+ return $?
+ else
+ error "no mysql servers found. Please set mysql_opts[name]='options' to whatever needed to get connected to the mysql server, in $confd/mysql.conf"
+ return 1
+ fi
+ fi
+
+ return 0
+}
+
+mysql_create() {
+ local x
+
+ # create the charts
+ for x in "${mysql_ids[@]}"; do
+ cat <<EOF
+CHART mysql_$x.net '' "mysql Bandwidth" "kilobits/s" bandwidth mysql.net area $((mysql_priority + 1)) $mysql_update_every
+DIMENSION Bytes_received in incremental 8 1024
+DIMENSION Bytes_sent out incremental -8 1024
+
+CHART mysql_$x.queries '' "mysql Queries" "queries/s" queries mysql.queries line $((mysql_priority + 2)) $mysql_update_every
+DIMENSION Queries queries incremental 1 1
+DIMENSION Questions questions incremental 1 1
+DIMENSION Slow_queries slow_queries incremental -1 1
+
+CHART mysql_$x.handlers '' "mysql Handlers" "handlers/s" handlers mysql.handlers line $((mysql_priority + 3)) $mysql_update_every
+DIMENSION Handler_commit commit incremental 1 1
+DIMENSION Handler_delete delete incremental 1 1
+DIMENSION Handler_prepare prepare incremental 1 1
+DIMENSION Handler_read_first read_first incremental 1 1
+DIMENSION Handler_read_key read_key incremental 1 1
+DIMENSION Handler_read_next read_next incremental 1 1
+DIMENSION Handler_read_prev read_prev incremental 1 1
+DIMENSION Handler_read_rnd read_rnd incremental 1 1
+DIMENSION Handler_read_rnd_next read_rnd_next incremental 1 1
+DIMENSION Handler_rollback rollback incremental 1 1
+DIMENSION Handler_savepoint savepoint incremental 1 1
+DIMENSION Handler_savepoint_rollback savepoint_rollback incremental 1 1
+DIMENSION Handler_update update incremental 1 1
+DIMENSION Handler_write write incremental 1 1
+
+CHART mysql_$x.table_locks '' "mysql Tables Locks" "locks/s" locks mysql.table_locks line $((mysql_priority + 4)) $mysql_update_every
+DIMENSION Table_locks_immediate immediate incremental 1 1
+DIMENSION Table_locks_waited waited incremental -1 1
+
+CHART mysql_$x.join_issues '' "mysql Select Join Issues" "joins/s" issues mysql.join_issues line $((mysql_priority + 5)) $mysql_update_every
+DIMENSION Select_full_join full_join incremental 1 1
+DIMENSION Select_full_range_join full_range_join incremental 1 1
+DIMENSION Select_range range incremental 1 1
+DIMENSION Select_range_check range_check incremental 1 1
+DIMENSION Select_scan scan incremental 1 1
+
+CHART mysql_$x.sort_issues '' "mysql Sort Issues" "issues/s" issues mysql.sort.issues line $((mysql_priority + 6)) $mysql_update_every
+DIMENSION Sort_merge_passes merge_passes incremental 1 1
+DIMENSION Sort_range range incremental 1 1
+DIMENSION Sort_scan scan incremental 1 1
+
+CHART mysql_$x.tmp '' "mysql Tmp Operations" "counter" temporaries mysql.tmp line $((mysql_priority + 7)) $mysql_update_every
+DIMENSION Created_tmp_disk_tables disk_tables incremental 1 1
+DIMENSION Created_tmp_files files incremental 1 1
+DIMENSION Created_tmp_tables tables incremental 1 1
+
+CHART mysql_$x.connections '' "mysql Connections" "connections/s" connections mysql.connections line $((mysql_priority + 8)) $mysql_update_every
+DIMENSION Connections all incremental 1 1
+DIMENSION Aborted_connects aborded incremental 1 1
+
+CHART mysql_$x.binlog_cache '' "mysql Binlog Cache" "transactions/s" binlog mysql.binlog_cache line $((mysql_priority + 9)) $mysql_update_every
+DIMENSION Binlog_cache_disk_use disk incremental 1 1
+DIMENSION Binlog_cache_use all incremental 1 1
+
+CHART mysql_$x.threads '' "mysql Threads" "threads" threads mysql.threads line $((mysql_priority + 10)) $mysql_update_every
+DIMENSION Threads_connected connected absolute 1 1
+DIMENSION Threads_created created incremental 1 1
+DIMENSION Threads_cached cached absolute -1 1
+DIMENSION Threads_running running absolute 1 1
+
+CHART mysql_$x.thread_cache_misses '' "mysql Threads Cache Misses" "misses" threads mysql.thread_cache_misses area $((mysql_priority + 11)) $mysql_update_every
+DIMENSION misses misses absolute 1 100
+
+CHART mysql_$x.innodb_io '' "mysql InnoDB I/O Bandwidth" "kilobytes/s" innodb mysql.innodb_io area $((mysql_priority + 12)) $mysql_update_every
+DIMENSION Innodb_data_read read incremental 1 1024
+DIMENSION Innodb_data_written write incremental -1 1024
+
+CHART mysql_$x.innodb_io_ops '' "mysql InnoDB I/O Operations" "operations/s" innodb mysql.innodb_io_ops line $((mysql_priority + 13)) $mysql_update_every
+DIMENSION Innodb_data_reads reads incremental 1 1
+DIMENSION Innodb_data_writes writes incremental -1 1
+DIMENSION Innodb_data_fsyncs fsyncs incremental 1 1
+
+CHART mysql_$x.innodb_io_pending_ops '' "mysql InnoDB Pending I/O Operations" "operations" innodb mysql.innodb_io_pending_ops line $((mysql_priority + 14)) $mysql_update_every
+DIMENSION Innodb_data_pending_reads reads absolute 1 1
+DIMENSION Innodb_data_pending_writes writes absolute -1 1
+DIMENSION Innodb_data_pending_fsyncs fsyncs absolute 1 1
+
+CHART mysql_$x.innodb_log '' "mysql InnoDB Log Operations" "operations/s" innodb mysql.innodb_log line $((mysql_priority + 15)) $mysql_update_every
+DIMENSION Innodb_log_waits waits incremental 1 1
+DIMENSION Innodb_log_write_requests write_requests incremental -1 1
+DIMENSION Innodb_log_writes writes incremental -1 1
+
+CHART mysql_$x.innodb_os_log '' "mysql InnoDB OS Log Operations" "operations" innodb mysql.innodb_os_log line $((mysql_priority + 16)) $mysql_update_every
+DIMENSION Innodb_os_log_fsyncs fsyncs incremental 1 1
+DIMENSION Innodb_os_log_pending_fsyncs pending_fsyncs absolute 1 1
+DIMENSION Innodb_os_log_pending_writes pending_writes absolute -1 1
+
+CHART mysql_$x.innodb_os_log_io '' "mysql InnoDB OS Log Bandwidth" "kilobytes/s" innodb mysql.innodb_os_log_io area $((mysql_priority + 17)) $mysql_update_every
+DIMENSION Innodb_os_log_written write incremental -1 1024
+
+CHART mysql_$x.innodb_cur_row_lock '' "mysql InnoDB Current Row Locks" "operations" innodb mysql.innodb_cur_row_lock area $((mysql_priority + 18)) $mysql_update_every
+DIMENSION Innodb_row_lock_current_waits current_waits absolute 1 1
+
+CHART mysql_$x.innodb_rows '' "mysql InnoDB Row Operations" "operations/s" innodb mysql.innodb_rows area $((mysql_priority + 19)) $mysql_update_every
+DIMENSION Innodb_rows_read read incremental 1 1
+DIMENSION Innodb_rows_deleted deleted incremental -1 1
+DIMENSION Innodb_rows_inserted inserted incremental 1 1
+DIMENSION Innodb_rows_updated updated incremental -1 1
+
+CHART mysql_$x.innodb_buffer_pool_pages '' "mysql InnoDB Buffer Pool Pages" "pages" innodb mysql.innodb_buffer_pool_pages line $((mysql_priority + 20)) $mysql_update_every
+DIMENSION Innodb_buffer_pool_pages_data data absolute 1 1
+DIMENSION Innodb_buffer_pool_pages_dirty dirty absolute -1 1
+DIMENSION Innodb_buffer_pool_pages_free free absolute 1 1
+DIMENSION Innodb_buffer_pool_pages_flushed flushed incremental -1 1
+DIMENSION Innodb_buffer_pool_pages_misc misc absolute -1 1
+DIMENSION Innodb_buffer_pool_pages_total total absolute 1 1
+
+CHART mysql_$x.innodb_buffer_pool_bytes '' "mysql InnoDB Buffer Pool Bytes" "MiB" innodb mysql.innodb_buffer_pool_bytes area $((mysql_priority + 21)) $mysql_update_every
+DIMENSION Innodb_buffer_pool_bytes_data data absolute 1 $((1024 * 1024))
+DIMENSION Innodb_buffer_pool_bytes_dirty dirty absolute -1 $((1024 * 1024))
+
+CHART mysql_$x.innodb_buffer_pool_read_ahead '' "mysql InnoDB Buffer Pool Read Ahead" "operations/s" innodb mysql.innodb_buffer_pool_read_ahead area $((mysql_priority + 22)) $mysql_update_every
+DIMENSION Innodb_buffer_pool_read_ahead all incremental 1 1
+DIMENSION Innodb_buffer_pool_read_ahead_evicted evicted incremental -1 1
+DIMENSION Innodb_buffer_pool_read_ahead_rnd random incremental 1 1
+
+CHART mysql_$x.innodb_buffer_pool_reqs '' "mysql InnoDB Buffer Pool Requests" "requests/s" innodb mysql.innodb_buffer_pool_reqs area $((mysql_priority + 23)) $mysql_update_every
+DIMENSION Innodb_buffer_pool_read_requests reads incremental 1 1
+DIMENSION Innodb_buffer_pool_write_requests writes incremental -1 1
+
+CHART mysql_$x.innodb_buffer_pool_ops '' "mysql InnoDB Buffer Pool Operations" "operations/s" innodb mysql.innodb_buffer_pool_ops area $((mysql_priority + 24)) $mysql_update_every
+DIMENSION Innodb_buffer_pool_reads 'disk reads' incremental 1 1
+DIMENSION Innodb_buffer_pool_wait_free 'wait free' incremental -1 1
+
+CHART mysql_$x.qcache_ops '' "mysql QCache Operations" "queries/s" qcache mysql.qcache_ops line $((mysql_priority + 25)) $mysql_update_every
+DIMENSION Qcache_hits hits incremental 1 1
+DIMENSION Qcache_lowmem_prunes 'lowmem prunes' incremental -1 1
+DIMENSION Qcache_inserts inserts incremental 1 1
+DIMENSION Qcache_not_cached 'not cached' incremental -1 1
+
+CHART mysql_$x.qcache '' "mysql QCache Queries in Cache" "queries" qcache mysql.qcache line $((mysql_priority + 26)) $mysql_update_every
+DIMENSION Qcache_queries_in_cache queries absolute 1 1
+
+CHART mysql_$x.qcache_freemem '' "mysql QCache Free Memory" "MiB" qcache mysql.qcache_freemem area $((mysql_priority + 27)) $mysql_update_every
+DIMENSION Qcache_free_memory free absolute 1 $((1024 * 1024))
+
+CHART mysql_$x.qcache_memblocks '' "mysql QCache Memory Blocks" "blocks" qcache mysql.qcache_memblocks line $((mysql_priority + 28)) $mysql_update_every
+DIMENSION Qcache_free_blocks free absolute 1 1
+DIMENSION Qcache_total_blocks total absolute 1 1
+
+CHART mysql_$x.key_blocks '' "mysql MyISAM Key Cache Blocks" "blocks" myisam mysql.key_blocks line $((mysql_priority + 29)) $mysql_update_every
+DIMENSION Key_blocks_unused unused absolute 1 1
+DIMENSION Key_blocks_used used absolute -1 1
+DIMENSION Key_blocks_not_flushed 'not flushed' absolute 1 1
+
+CHART mysql_$x.key_requests '' "mysql MyISAM Key Cache Requests" "requests/s" myisam mysql.key_requests area $((mysql_priority + 30)) $mysql_update_every
+DIMENSION Key_read_requests reads incremental 1 1
+DIMENSION Key_write_requests writes incremental -1 1
+
+CHART mysql_$x.key_disk_ops '' "mysql MyISAM Key Cache Disk Operations" "operations/s" myisam mysql.key_disk_ops area $((mysql_priority + 31)) $mysql_update_every
+DIMENSION Key_reads reads incremental 1 1
+DIMENSION Key_writes writes incremental -1 1
+
+CHART mysql_$x.files '' "mysql Open Files" "files" files mysql.files line $((mysql_priority + 32)) $mysql_update_every
+DIMENSION Open_files files absolute 1 1
+
+CHART mysql_$x.files_rate '' "mysql Opened Files Rate" "files/s" files mysql.files_rate line $((mysql_priority + 33)) $mysql_update_every
+DIMENSION Opened_files files incremental 1 1
+EOF
+
+ if [ ! -z "${mysql_data[Binlog_stmt_cache_disk_use]}" ]; then
+ cat <<EOF
+CHART mysql_$x.binlog_stmt_cache '' "mysql Binlog Statement Cache" "statements/s" binlog mysql.binlog_stmt_cache line $((mysql_priority + 50)) $mysql_update_every
+DIMENSION Binlog_stmt_cache_disk_use disk incremental 1 1
+DIMENSION Binlog_stmt_cache_use all incremental 1 1
+EOF
+ fi
+
+ if [ ! -z "${mysql_data[Connection_errors_accept]}" ]; then
+ cat <<EOF
+CHART mysql_$x.connection_errors '' "mysql Connection Errors" "connections/s" connections mysql.connection_errors line $((mysql_priority + 51)) $mysql_update_every
+DIMENSION Connection_errors_accept accept incremental 1 1
+DIMENSION Connection_errors_internal internal incremental 1 1
+DIMENSION Connection_errors_max_connections max incremental 1 1
+DIMENSION Connection_errors_peer_addr peer_addr incremental 1 1
+DIMENSION Connection_errors_select select incremental 1 1
+DIMENSION Connection_errors_tcpwrap tcpwrap incremental 1 1
+EOF
+ fi
+
+ done
+ return 0
+}
+
+mysql_update() {
+ # the first argument to this function is the microseconds since last update
+ # pass this parameter to the BEGIN statement (see below).
+
+ # do all the work to collect / calculate the values
+ # for each dimension
+ # remember: KEEP IT SIMPLE AND SHORT
+
+ local m x
+ for m in "${!mysql_ids[@]}"; do
+ x="${mysql_ids[$m]}"
+ mysql_get "${mysql_cmds[$m]}" ${mysql_opts[$m]}
+
+ # shellcheck disable=SC2181
+ if [ $? -ne 0 ]; then
+ unset "mysql_ids[$m]"
+ unset "mysql_opts[$m]"
+ unset "mysql_cmds[$m]"
+ error "failed to get values for '${m}', disabling it."
+ continue
+ fi
+
+ # write the result of the work.
+ cat <<VALUESEOF
+BEGIN mysql_$x.net $1
+SET Bytes_received = ${mysql_data[Bytes_received]}
+SET Bytes_sent = ${mysql_data[Bytes_sent]}
+END
+BEGIN mysql_$x.queries $1
+SET Queries = ${mysql_data[Queries]}
+SET Questions = ${mysql_data[Questions]}
+SET Slow_queries = ${mysql_data[Slow_queries]}
+END
+BEGIN mysql_$x.handlers $1
+SET Handler_commit = ${mysql_data[Handler_commit]}
+SET Handler_delete = ${mysql_data[Handler_delete]}
+SET Handler_prepare = ${mysql_data[Handler_prepare]}
+SET Handler_read_first = ${mysql_data[Handler_read_first]}
+SET Handler_read_key = ${mysql_data[Handler_read_key]}
+SET Handler_read_next = ${mysql_data[Handler_read_next]}
+SET Handler_read_prev = ${mysql_data[Handler_read_prev]}
+SET Handler_read_rnd = ${mysql_data[Handler_read_rnd]}
+SET Handler_read_rnd_next = ${mysql_data[Handler_read_rnd_next]}
+SET Handler_rollback = ${mysql_data[Handler_rollback]}
+SET Handler_savepoint = ${mysql_data[Handler_savepoint]}
+SET Handler_savepoint_rollback = ${mysql_data[Handler_savepoint_rollback]}
+SET Handler_update = ${mysql_data[Handler_update]}
+SET Handler_write = ${mysql_data[Handler_write]}
+END
+BEGIN mysql_$x.table_locks $1
+SET Table_locks_immediate = ${mysql_data[Table_locks_immediate]}
+SET Table_locks_waited = ${mysql_data[Table_locks_waited]}
+END
+BEGIN mysql_$x.join_issues $1
+SET Select_full_join = ${mysql_data[Select_full_join]}
+SET Select_full_range_join = ${mysql_data[Select_full_range_join]}
+SET Select_range = ${mysql_data[Select_range]}
+SET Select_range_check = ${mysql_data[Select_range_check]}
+SET Select_scan = ${mysql_data[Select_scan]}
+END
+BEGIN mysql_$x.sort_issues $1
+SET Sort_merge_passes = ${mysql_data[Sort_merge_passes]}
+SET Sort_range = ${mysql_data[Sort_range]}
+SET Sort_scan = ${mysql_data[Sort_scan]}
+END
+BEGIN mysql_$x.tmp $1
+SET Created_tmp_disk_tables = ${mysql_data[Created_tmp_disk_tables]}
+SET Created_tmp_files = ${mysql_data[Created_tmp_files]}
+SET Created_tmp_tables = ${mysql_data[Created_tmp_tables]}
+END
+BEGIN mysql_$x.connections $1
+SET Connections = ${mysql_data[Connections]}
+SET Aborted_connects = ${mysql_data[Aborted_connects]}
+END
+BEGIN mysql_$x.binlog_cache $1
+SET Binlog_cache_disk_use = ${mysql_data[Binlog_cache_disk_use]}
+SET Binlog_cache_use = ${mysql_data[Binlog_cache_use]}
+END
+BEGIN mysql_$x.threads $1
+SET Threads_connected = ${mysql_data[Threads_connected]}
+SET Threads_created = ${mysql_data[Threads_created]}
+SET Threads_cached = ${mysql_data[Threads_cached]}
+SET Threads_running = ${mysql_data[Threads_running]}
+END
+BEGIN mysql_$x.thread_cache_misses $1
+SET misses = ${mysql_data[Thread_cache_misses]}
+END
+BEGIN mysql_$x.innodb_io $1
+SET Innodb_data_read = ${mysql_data[Innodb_data_read]}
+SET Innodb_data_written = ${mysql_data[Innodb_data_written]}
+END
+BEGIN mysql_$x.innodb_io_ops $1
+SET Innodb_data_reads = ${mysql_data[Innodb_data_reads]}
+SET Innodb_data_writes = ${mysql_data[Innodb_data_writes]}
+SET Innodb_data_fsyncs = ${mysql_data[Innodb_data_fsyncs]}
+END
+BEGIN mysql_$x.innodb_io_pending_ops $1
+SET Innodb_data_pending_reads = ${mysql_data[Innodb_data_pending_reads]}
+SET Innodb_data_pending_writes = ${mysql_data[Innodb_data_pending_writes]}
+SET Innodb_data_pending_fsyncs = ${mysql_data[Innodb_data_pending_fsyncs]}
+END
+BEGIN mysql_$x.innodb_log $1
+SET Innodb_log_waits = ${mysql_data[Innodb_log_waits]}
+SET Innodb_log_write_requests = ${mysql_data[Innodb_log_write_requests]}
+SET Innodb_log_writes = ${mysql_data[Innodb_log_writes]}
+END
+BEGIN mysql_$x.innodb_os_log $1
+SET Innodb_os_log_fsyncs = ${mysql_data[Innodb_os_log_fsyncs]}
+SET Innodb_os_log_pending_fsyncs = ${mysql_data[Innodb_os_log_pending_fsyncs]}
+SET Innodb_os_log_pending_writes = ${mysql_data[Innodb_os_log_pending_writes]}
+END
+BEGIN mysql_$x.innodb_os_log_io $1
+SET Innodb_os_log_written = ${mysql_data[Innodb_os_log_written]}
+END
+BEGIN mysql_$x.innodb_cur_row_lock $1
+SET Innodb_row_lock_current_waits = ${mysql_data[Innodb_row_lock_current_waits]}
+END
+BEGIN mysql_$x.innodb_rows $1
+SET Innodb_rows_inserted = ${mysql_data[Innodb_rows_inserted]}
+SET Innodb_rows_read = ${mysql_data[Innodb_rows_read]}
+SET Innodb_rows_updated = ${mysql_data[Innodb_rows_updated]}
+SET Innodb_rows_deleted = ${mysql_data[Innodb_rows_deleted]}
+END
+BEGIN mysql_$x.innodb_buffer_pool_pages $1
+SET Innodb_buffer_pool_pages_data = ${mysql_data[Innodb_buffer_pool_pages_data]}
+SET Innodb_buffer_pool_pages_dirty = ${mysql_data[Innodb_buffer_pool_pages_dirty]}
+SET Innodb_buffer_pool_pages_free = ${mysql_data[Innodb_buffer_pool_pages_free]}
+SET Innodb_buffer_pool_pages_flushed = ${mysql_data[Innodb_buffer_pool_pages_flushed]}
+SET Innodb_buffer_pool_pages_misc = ${mysql_data[Innodb_buffer_pool_pages_misc]}
+SET Innodb_buffer_pool_pages_total = ${mysql_data[Innodb_buffer_pool_pages_total]}
+END
+BEGIN mysql_$x.innodb_buffer_pool_bytes $1
+SET Innodb_buffer_pool_bytes_data = ${mysql_data[Innodb_buffer_pool_bytes_data]}
+SET Innodb_buffer_pool_bytes_dirty = ${mysql_data[Innodb_buffer_pool_bytes_dirty]}
+END
+BEGIN mysql_$x.innodb_buffer_pool_read_ahead $1
+SET Innodb_buffer_pool_read_ahead = ${mysql_data[Innodb_buffer_pool_read_ahead]}
+SET Innodb_buffer_pool_read_ahead_evicted = ${mysql_data[Innodb_buffer_pool_read_ahead_evicted]}
+SET Innodb_buffer_pool_read_ahead_rnd = ${mysql_data[Innodb_buffer_pool_read_ahead_rnd]}
+END
+BEGIN mysql_$x.innodb_buffer_pool_reqs $1
+SET Innodb_buffer_pool_read_requests = ${mysql_data[Innodb_buffer_pool_read_requests]}
+SET Innodb_buffer_pool_write_requests = ${mysql_data[Innodb_buffer_pool_write_requests]}
+END
+BEGIN mysql_$x.innodb_buffer_pool_ops $1
+SET Innodb_buffer_pool_reads = ${mysql_data[Innodb_buffer_pool_reads]}
+SET Innodb_buffer_pool_wait_free = ${mysql_data[Innodb_buffer_pool_wait_free]}
+END
+BEGIN mysql_$x.qcache_ops $1
+SET Qcache_hits = ${mysql_data[Qcache_hits]}
+SET Qcache_lowmem_prunes = ${mysql_data[Qcache_lowmem_prunes]}
+SET Qcache_inserts = ${mysql_data[Qcache_inserts]}
+SET Qcache_not_cached = ${mysql_data[Qcache_not_cached]}
+END
+BEGIN mysql_$x.qcache $1
+SET Qcache_queries_in_cache = ${mysql_data[Qcache_queries_in_cache]}
+END
+BEGIN mysql_$x.qcache_freemem $1
+SET Qcache_free_memory = ${mysql_data[Qcache_free_memory]}
+END
+BEGIN mysql_$x.qcache_memblocks $1
+SET Qcache_free_blocks = ${mysql_data[Qcache_free_blocks]}
+SET Qcache_total_blocks = ${mysql_data[Qcache_total_blocks]}
+END
+BEGIN mysql_$x.key_blocks $1
+SET Key_blocks_unused = ${mysql_data[Key_blocks_unused]}
+SET Key_blocks_used = ${mysql_data[Key_blocks_used]}
+SET Key_blocks_not_flushed = ${mysql_data[Key_blocks_not_flushed]}
+END
+BEGIN mysql_$x.key_requests $1
+SET Key_read_requests = ${mysql_data[Key_read_requests]}
+SET Key_write_requests = ${mysql_data[Key_write_requests]}
+END
+BEGIN mysql_$x.key_disk_ops $1
+SET Key_reads = ${mysql_data[Key_reads]}
+SET Key_writes = ${mysql_data[Key_writes]}
+END
+BEGIN mysql_$x.files $1
+SET Open_files = ${mysql_data[Open_files]}
+END
+BEGIN mysql_$x.files_rate $1
+SET Opened_files = ${mysql_data[Opened_files]}
+END
+VALUESEOF
+
+ if [ ! -z "${mysql_data[Binlog_stmt_cache_disk_use]}" ]; then
+ cat <<VALUESEOF
+BEGIN mysql_$x.binlog_stmt_cache $1
+SET Binlog_stmt_cache_disk_use = ${mysql_data[Binlog_stmt_cache_disk_use]}
+SET Binlog_stmt_cache_use = ${mysql_data[Binlog_stmt_cache_use]}
+END
+VALUESEOF
+ fi
+
+ if [ ! -z "${mysql_data[Connection_errors_accept]}" ]; then
+ cat <<VALUESEOF
+BEGIN mysql_$x.connection_errors $1
+SET Connection_errors_accept = ${mysql_data[Connection_errors_accept]}
+SET Connection_errors_internal = ${mysql_data[Connection_errors_internal]}
+SET Connection_errors_max_connections = ${mysql_data[Connection_errors_max_connections]}
+SET Connection_errors_peer_addr = ${mysql_data[Connection_errors_peer_addr]}
+SET Connection_errors_select = ${mysql_data[Connection_errors_select]}
+SET Connection_errors_tcpwrap = ${mysql_data[Connection_errors_tcpwrap]}
+END
+VALUESEOF
+ fi
+ done
+
+ [ ${#mysql_ids[@]} -eq 0 ] && error "no mysql servers left active." && return 1
+ return 0
+}
diff --git a/collectors/charts.d.plugin/mysql/mysql.conf b/collectors/charts.d.plugin/mysql/mysql.conf
new file mode 100644
index 0000000..683e4af
--- /dev/null
+++ b/collectors/charts.d.plugin/mysql/mysql.conf
@@ -0,0 +1,23 @@
+# no need for shebang - this file is loaded from charts.d.plugin
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2018 Costa Tsaousis <costa@tsaousis.gr>
+# GPL v3+
+
+# THIS PLUGIN IS DEPRECATED
+# USE THE PYTHON.D ONE
+
+#mysql_cmds[name]=""
+#mysql_opts[name]=""
+
+# the data collection frequency
+# if unset, will inherit the netdata update frequency
+#mysql_update_every=2
+
+# the charts priority on the dashboard
+#mysql_priority=60000
+
+# the number of retries to do in case of failure
+# before disabling the module
+#mysql_retries=10
diff --git a/collectors/charts.d.plugin/nginx/Makefile.inc b/collectors/charts.d.plugin/nginx/Makefile.inc
new file mode 100644
index 0000000..c9d31aa
--- /dev/null
+++ b/collectors/charts.d.plugin/nginx/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_charts_DATA += nginx/nginx.chart.sh
+dist_chartsconfig_DATA += nginx/nginx.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += nginx/README.md nginx/Makefile.inc
+
diff --git a/collectors/charts.d.plugin/nginx/README.md b/collectors/charts.d.plugin/nginx/README.md
new file mode 100644
index 0000000..42a4f81
--- /dev/null
+++ b/collectors/charts.d.plugin/nginx/README.md
@@ -0,0 +1,6 @@
+# nginx
+
+> THIS MODULE IS OBSOLETE.
+> USE [THE PYTHON ONE](../../python.d.plugin/nginx) - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fnginx%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/charts.d.plugin/nginx/nginx.chart.sh b/collectors/charts.d.plugin/nginx/nginx.chart.sh
new file mode 100644
index 0000000..812de2c
--- /dev/null
+++ b/collectors/charts.d.plugin/nginx/nginx.chart.sh
@@ -0,0 +1,141 @@
+# shellcheck shell=bash
+# no need for shebang - this file is loaded from charts.d.plugin
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2016 Costa Tsaousis <costa@tsaousis.gr>
+#
+
+# if this chart is called X.chart.sh, then all functions and global variables
+# must start with X_
+
+nginx_url="http://127.0.0.1:80/stub_status"
+nginx_curl_opts=""
+
+# _update_every is a special variable - it holds the number of seconds
+# between the calls of the _update() function
+nginx_update_every=
+nginx_priority=60000
+
+declare -a nginx_response=()
+nginx_active_connections=0
+nginx_accepts=0
+nginx_handled=0
+nginx_requests=0
+nginx_reading=0
+nginx_writing=0
+nginx_waiting=0
+nginx_get() {
+ # shellcheck disable=SC2207
+ nginx_response=($(run curl -Ss ${nginx_curl_opts} "${nginx_url}"))
+ # shellcheck disable=SC2181
+ if [ $? -ne 0 ] || [ "${#nginx_response[@]}" -eq 0 ]; then return 1; fi
+
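+ # the expected stub_status output is:
+ #   Active connections: 291
+ #   server accepts handled requests
+ #    16630948 16630948 31070465
+ #   Reading: 6 Writing: 179 Waiting: 106
+ # it is word-split into nginx_response[] above, so validate the fixed
+ # words and pick the counters by their position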
+ if [ "${nginx_response[0]}" != "Active" ] ||
+ [ "${nginx_response[1]}" != "connections:" ] ||
+ [ "${nginx_response[3]}" != "server" ] ||
+ [ "${nginx_response[4]}" != "accepts" ] ||
+ [ "${nginx_response[5]}" != "handled" ] ||
+ [ "${nginx_response[6]}" != "requests" ] ||
+ [ "${nginx_response[10]}" != "Reading:" ] ||
+ [ "${nginx_response[12]}" != "Writing:" ] ||
+ [ "${nginx_response[14]}" != "Waiting:" ]; then
+ error "Invalid response from nginx server: ${nginx_response[*]}"
+ return 1
+ fi
+
+ nginx_active_connections="${nginx_response[2]}"
+ nginx_accepts="${nginx_response[7]}"
+ nginx_handled="${nginx_response[8]}"
+ nginx_requests="${nginx_response[9]}"
+ nginx_reading="${nginx_response[11]}"
+ nginx_writing="${nginx_response[13]}"
+ nginx_waiting="${nginx_response[15]}"
+
+ if [ -z "${nginx_active_connections}" ] ||
+ [ -z "${nginx_accepts}" ] ||
+ [ -z "${nginx_handled}" ] ||
+ [ -z "${nginx_requests}" ] ||
+ [ -z "${nginx_reading}" ] ||
+ [ -z "${nginx_writing}" ] ||
+ [ -z "${nginx_waiting}" ]; then
+ error "empty values got from nginx server: ${nginx_response[*]}"
+ return 1
+ fi
+
+ return 0
+}
+
+# _check is called once, to find out if this chart should be enabled or not
+nginx_check() {
+
+ nginx_get
+ # shellcheck disable=2181
+ if [ $? -ne 0 ]; then
+ # shellcheck disable=SC2154
+ error "cannot find stub_status on URL '${nginx_url}'. Please set nginx_url='http://nginx.server/stub_status' in $confd/nginx.conf"
+ return 1
+ fi
+
+ # this should return:
+ # - 0 to enable the chart
+ # - 1 to disable the chart
+
+ return 0
+}
+
+# _create is called once, to create the charts
+nginx_create() {
+ cat <<EOF
+CHART nginx_local.connections '' "nginx Active Connections" "connections" nginx nginx.connections line $((nginx_priority + 1)) $nginx_update_every
+DIMENSION active '' absolute 1 1
+
+CHART nginx_local.requests '' "nginx Requests" "requests/s" nginx nginx.requests line $((nginx_priority + 2)) $nginx_update_every
+DIMENSION requests '' incremental 1 1
+
+CHART nginx_local.connections_status '' "nginx Active Connections by Status" "connections" nginx nginx.connections.status line $((nginx_priority + 3)) $nginx_update_every
+DIMENSION reading '' absolute 1 1
+DIMENSION writing '' absolute 1 1
+DIMENSION waiting idle absolute 1 1
+
+CHART nginx_local.connect_rate '' "nginx Connections Rate" "connections/s" nginx nginx.connections.rate line $((nginx_priority + 4)) $nginx_update_every
+DIMENSION accepts accepted incremental 1 1
+DIMENSION handled '' incremental 1 1
+EOF
+
+ return 0
+}
+
+# _update is called continuously, to collect the values
+nginx_update() {
+ # the first argument to this function is the microseconds since last update
+ # pass this parameter to the BEGIN statement (see below).
+
+ # do all the work to collect / calculate the values
+ # for each dimension
+ # remember: KEEP IT SIMPLE AND SHORT
+
+ nginx_get || return 1
+
+ # write the result of the work.
+ cat <<VALUESEOF
+BEGIN nginx_local.connections $1
+SET active = $((nginx_active_connections))
+END
+BEGIN nginx_local.requests $1
+SET requests = $((nginx_requests))
+END
+BEGIN nginx_local.connections_status $1
+SET reading = $((nginx_reading))
+SET writing = $((nginx_writing))
+SET waiting = $((nginx_waiting))
+END
+BEGIN nginx_local.connect_rate $1
+SET accepts = $((nginx_accepts))
+SET handled = $((nginx_handled))
+END
+VALUESEOF
+
+ return 0
+}
diff --git a/collectors/charts.d.plugin/nginx/nginx.conf b/collectors/charts.d.plugin/nginx/nginx.conf
new file mode 100644
index 0000000..c46100a
--- /dev/null
+++ b/collectors/charts.d.plugin/nginx/nginx.conf
@@ -0,0 +1,23 @@
+# no need for shebang - this file is loaded from charts.d.plugin
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2018 Costa Tsaousis <costa@tsaousis.gr>
+# GPL v3+
+
+# THIS PLUGIN IS DEPRECATED
+# USE THE PYTHON.D ONE
+
+#nginx_url="http://127.0.0.1:80/stub_status"
+#nginx_curl_opts=""
+
+# the data collection frequency
+# if unset, will inherit the netdata update frequency
+#nginx_update_every=
+
+# the charts priority on the dashboard
+#nginx_priority=60000
+
+# the number of retries to do in case of failure
+# before disabling the module
+#nginx_retries=10
diff --git a/collectors/charts.d.plugin/nut/Makefile.inc b/collectors/charts.d.plugin/nut/Makefile.inc
new file mode 100644
index 0000000..4fb4714
--- /dev/null
+++ b/collectors/charts.d.plugin/nut/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_charts_DATA += nut/nut.chart.sh
+dist_chartsconfig_DATA += nut/nut.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += nut/README.md nut/Makefile.inc
+
diff --git a/collectors/charts.d.plugin/nut/README.md b/collectors/charts.d.plugin/nut/README.md
new file mode 100644
index 0000000..3e16993
--- /dev/null
+++ b/collectors/charts.d.plugin/nut/README.md
@@ -0,0 +1,61 @@
+# nut
+
+The plugin will collect UPS data for all UPSes configured in the system.
+
+The following charts will be created:
+
+1. **UPS Charge**
+
+ * charge percentage
+
+2. **UPS Battery Voltage**
+
+ * current voltage
+ * high voltage
+ * low voltage
+ * nominal voltage
+
+3. **UPS Input Voltage**
+
+ * current voltage
+ * fault voltage
+ * nominal voltage
+
+4. **UPS Input Current**
+
+ * nominal current
+
+5. **UPS Input Frequency**
+
+ * current frequency
+ * nominal frequency
+
+6. **UPS Output Voltage**
+
+ * current voltage
+
+7. **UPS Load**
+
+ * current load
+
+8. **UPS Temperature**
+
+ * current temperature
+
+
+### configuration
+
+This is the internal default for `/etc/netdata/nut.conf`
+
+```sh
+# a space separated list of UPS names
+# if empty, the list returned by 'upsc -l' will be used
+nut_ups=
+
+# how frequently to collect UPS data
+nut_update_every=2
+```
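+
+As a sketch, a configuration that monitors two UPSes (hypothetical names, as
+reported by `upsc -l`) and gives them friendly chart names could look like:
+
+```sh
+nut_ups="apc_local apc_rack2"
+nut_names["apc_local"]="server-room"
+nut_names["apc_rack2"]="rack-2"
+```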
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fnut%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/charts.d.plugin/nut/nut.chart.sh b/collectors/charts.d.plugin/nut/nut.chart.sh
new file mode 100644
index 0000000..933d356
--- /dev/null
+++ b/collectors/charts.d.plugin/nut/nut.chart.sh
@@ -0,0 +1,232 @@
+# shellcheck shell=bash
+# no need for shebang - this file is loaded from charts.d.plugin
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2016-2017 Costa Tsaousis <costa@tsaousis.gr>
+#
+
+# a space separated list of UPS names
+# if empty, the list returned by 'upsc -l' will be used
+nut_ups=
+
+# how frequently to collect UPS data
+nut_update_every=2
+
+# how much time in seconds, to wait for nut to respond
+nut_timeout=2
+
+# set this to 1, to enable another chart showing the number
+# of UPS clients connected to upsd
+nut_clients_chart=0
+
+# the priority of nut related to other charts
+nut_priority=90000
+
+declare -A nut_ids=()
+declare -A nut_names=()
+
+nut_get_all() {
+ run -t $nut_timeout upsc -l
+}
+
+nut_get() {
+ run -t $nut_timeout upsc "$1"
+
+ if [ "${nut_clients_chart}" -eq "1" ]; then
+ printf "ups.connected_clients: "
+ run -t $nut_timeout upsc -c "$1" | wc -l
+ fi
+}
+
+nut_check() {
+
+ # this should return:
+ # - 0 to enable the chart
+ # - 1 to disable the chart
+
+ local x
+
+ require_cmd upsc || return 1
+
+ [ -z "$nut_ups" ] && nut_ups="$(nut_get_all)"
+
+ for x in $nut_ups; do
+ nut_get "$x" >/dev/null
+ # shellcheck disable=SC2181
+ if [ $? -eq 0 ]; then
+ if [ ! -z "${nut_names[${x}]}" ]; then
+ nut_ids[$x]="$(fixid "${nut_names[${x}]}")"
+ else
+ nut_ids[$x]="$(fixid "$x")"
+ fi
+ continue
+ fi
+ error "cannot get information for NUT UPS '$x'."
+ done
+
+ if [ ${#nut_ids[@]} -eq 0 ]; then
+ # shellcheck disable=SC2154
+ error "Cannot find UPSes - please set nut_ups='ups_name' in $confd/nut.conf"
+ return 1
+ fi
+
+ return 0
+}
+
+nut_create() {
+ # create the charts
+ local x
+
+ for x in "${nut_ids[@]}"; do
+ cat <<EOF
+CHART nut_$x.charge '' "UPS Charge" "percentage" ups nut.charge area $((nut_priority + 1)) $nut_update_every
+DIMENSION battery_charge charge absolute 1 100
+
+CHART nut_$x.runtime '' "UPS Runtime" "seconds" ups nut.runtime area $((nut_priority + 2)) $nut_update_every
+DIMENSION battery_runtime runtime absolute 1 100
+
+CHART nut_$x.battery_voltage '' "UPS Battery Voltage" "Volts" ups nut.battery.voltage line $((nut_priority + 3)) $nut_update_every
+DIMENSION battery_voltage voltage absolute 1 100
+DIMENSION battery_voltage_high high absolute 1 100
+DIMENSION battery_voltage_low low absolute 1 100
+DIMENSION battery_voltage_nominal nominal absolute 1 100
+
+CHART nut_$x.input_voltage '' "UPS Input Voltage" "Volts" input nut.input.voltage line $((nut_priority + 4)) $nut_update_every
+DIMENSION input_voltage voltage absolute 1 100
+DIMENSION input_voltage_fault fault absolute 1 100
+DIMENSION input_voltage_nominal nominal absolute 1 100
+
+CHART nut_$x.input_current '' "UPS Input Current" "Ampere" input nut.input.current line $((nut_priority + 5)) $nut_update_every
+DIMENSION input_current_nominal nominal absolute 1 100
+
+CHART nut_$x.input_frequency '' "UPS Input Frequency" "Hz" input nut.input.frequency line $((nut_priority + 6)) $nut_update_every
+DIMENSION input_frequency frequency absolute 1 100
+DIMENSION input_frequency_nominal nominal absolute 1 100
+
+CHART nut_$x.output_voltage '' "UPS Output Voltage" "Volts" output nut.output.voltage line $((nut_priority + 7)) $nut_update_every
+DIMENSION output_voltage voltage absolute 1 100
+
+CHART nut_$x.load '' "UPS Load" "percentage" ups nut.load area $((nut_priority)) $nut_update_every
+DIMENSION load load absolute 1 100
+
+CHART nut_$x.temp '' "UPS Temperature" "temperature" ups nut.temperature line $((nut_priority + 8)) $nut_update_every
+DIMENSION temp temp absolute 1 100
+EOF
+
+ if [ "${nut_clients_chart}" = "1" ]; then
+ cat <<EOF2
+CHART nut_$x.clients '' "UPS Connected Clients" "clients" ups nut.clients area $((nut_priority + 9)) $nut_update_every
+DIMENSION clients '' absolute 1 1
+EOF2
+ fi
+
+ done
+
+ return 0
+}
+
+nut_update() {
+ # the first argument to this function is the microseconds since last update
+ # pass this parameter to the BEGIN statement (see below).
+
+ # do all the work to collect / calculate the values
+ # for each dimension
+ # remember: KEEP IT SIMPLE AND SHORT
+
+ local i x
+ for i in "${!nut_ids[@]}"; do
+ x="${nut_ids[$i]}"
+ nut_get "$i" | awk "
+BEGIN {
+ battery_charge = 0;
+ battery_runtime = 0;
+ battery_voltage = 0;
+ battery_voltage_high = 0;
+ battery_voltage_low = 0;
+ battery_voltage_nominal = 0;
+ input_voltage = 0;
+ input_voltage_fault = 0;
+ input_voltage_nominal = 0;
+ input_current_nominal = 0;
+ input_frequency = 0;
+ input_frequency_nominal = 0;
+ output_voltage = 0;
+ load = 0;
+ temp = 0;
+ clients = 0;
+ do_clients = ${nut_clients_chart};
+}
+/^battery.charge: .*/ { battery_charge = \$2 * 100 };
+/^battery.runtime: .*/ { battery_runtime = \$2 * 100 };
+/^battery.voltage: .*/ { battery_voltage = \$2 * 100 };
+/^battery.voltage.high: .*/ { battery_voltage_high = \$2 * 100 };
+/^battery.voltage.low: .*/ { battery_voltage_low = \$2 * 100 };
+/^battery.voltage.nominal: .*/ { battery_voltage_nominal = \$2 * 100 };
+/^input.voltage: .*/ { input_voltage = \$2 * 100 };
+/^input.voltage.fault: .*/ { input_voltage_fault = \$2 * 100 };
+/^input.voltage.nominal: .*/ { input_voltage_nominal = \$2 * 100 };
+/^input.current.nominal: .*/ { input_current_nominal = \$2 * 100 };
+/^input.frequency: .*/ { input_frequency = \$2 * 100 };
+/^input.frequency.nominal: .*/ { input_frequency_nominal = \$2 * 100 };
+/^output.voltage: .*/ { output_voltage = \$2 * 100 };
+/^ups.load: .*/ { load = \$2 * 100 };
+/^ups.temperature: .*/ { temp = \$2 * 100 };
+/^ups.connected_clients: .*/ { clients = \$2 };
+END {
+ print \"BEGIN nut_$x.charge $1\";
+ print \"SET battery_charge = \" battery_charge;
+ print \"END\"
+
+ print \"BEGIN nut_$x.runtime $1\";
+ print \"SET battery_runtime = \" battery_runtime;
+ print \"END\"
+
+ print \"BEGIN nut_$x.battery_voltage $1\";
+ print \"SET battery_voltage = \" battery_voltage;
+ print \"SET battery_voltage_high = \" battery_voltage_high;
+ print \"SET battery_voltage_low = \" battery_voltage_low;
+ print \"SET battery_voltage_nominal = \" battery_voltage_nominal;
+ print \"END\"
+
+ print \"BEGIN nut_$x.input_voltage $1\";
+ print \"SET input_voltage = \" input_voltage;
+ print \"SET input_voltage_fault = \" input_voltage_fault;
+ print \"SET input_voltage_nominal = \" input_voltage_nominal;
+ print \"END\"
+
+ print \"BEGIN nut_$x.input_current $1\";
+ print \"SET input_current_nominal = \" input_current_nominal;
+ print \"END\"
+
+ print \"BEGIN nut_$x.input_frequency $1\";
+ print \"SET input_frequency = \" input_frequency;
+ print \"SET input_frequency_nominal = \" input_frequency_nominal;
+ print \"END\"
+
+ print \"BEGIN nut_$x.output_voltage $1\";
+ print \"SET output_voltage = \" output_voltage;
+ print \"END\"
+
+ print \"BEGIN nut_$x.load $1\";
+ print \"SET load = \" load;
+ print \"END\"
+
+ print \"BEGIN nut_$x.temp $1\";
+ print \"SET temp = \" temp;
+ print \"END\"
+
+ if(do_clients) {
+ print \"BEGIN nut_$x.clients $1\";
+ print \"SET clients = \" clients;
+ print \"END\"
+ }
+}"
+ # shellcheck disable=2181
+ [ $? -ne 0 ] && unset "nut_ids[$i]" && error "failed to get values for '$i', disabling it."
+ done
+
+ [ ${#nut_ids[@]} -eq 0 ] && error "no UPSes left active." && return 1
+ return 0
+}
diff --git a/collectors/charts.d.plugin/nut/nut.conf b/collectors/charts.d.plugin/nut/nut.conf
new file mode 100644
index 0000000..b95ad90
--- /dev/null
+++ b/collectors/charts.d.plugin/nut/nut.conf
@@ -0,0 +1,33 @@
+# no need for shebang - this file is loaded from charts.d.plugin
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2018 Costa Tsaousis <costa@tsaousis.gr>
+# GPL v3+
+
+# a space separated list of UPS names
+# if empty, the list returned by 'upsc -l' will be used
+#nut_ups=
+
+# each line represents an alias for one UPS
+# if empty, the FQDN will be used
+#nut_names["FQDN1"]="alias"
+#nut_names["FQDN2"]="alias"
+
+# how much time in seconds, to wait for nut to respond
+#nut_timeout=2
+
+# set this to 1, to enable another chart showing the number
+# of UPS clients connected to upsd
+#nut_clients_chart=1
+
+# the data collection frequency
+# if unset, will inherit the netdata update frequency
+#nut_update_every=2
+
+# the charts priority on the dashboard
+#nut_priority=90000
+
+# the number of retries to do in case of failure
+# before disabling the module
+#nut_retries=10
diff --git a/collectors/charts.d.plugin/opensips/Makefile.inc b/collectors/charts.d.plugin/opensips/Makefile.inc
new file mode 100644
index 0000000..a7b5d3a
--- /dev/null
+++ b/collectors/charts.d.plugin/opensips/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_charts_DATA += opensips/opensips.chart.sh
+dist_chartsconfig_DATA += opensips/opensips.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += opensips/README.md opensips/Makefile.inc
+
diff --git a/collectors/charts.d.plugin/opensips/README.md b/collectors/charts.d.plugin/opensips/README.md
new file mode 100644
index 0000000..cb056da
--- /dev/null
+++ b/collectors/charts.d.plugin/opensips/README.md
@@ -0,0 +1,7 @@
+# OpenSIPS
+
+*Under construction*
+
+Collects OpenSIPS metrics.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fopensips%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/charts.d.plugin/opensips/opensips.chart.sh b/collectors/charts.d.plugin/opensips/opensips.chart.sh
new file mode 100644
index 0000000..b42462d
--- /dev/null
+++ b/collectors/charts.d.plugin/opensips/opensips.chart.sh
@@ -0,0 +1,324 @@
+# shellcheck shell=bash disable=SC1117,SC2154,SC2086
+# no need for shebang - this file is loaded from charts.d.plugin
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2016 Costa Tsaousis <costa@tsaousis.gr>
+#
+
+opensips_opts="fifo get_statistics all"
+opensips_cmd=
+opensips_update_every=5
+opensips_timeout=2
+opensips_priority=80000
+
+opensips_get_stats() {
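+ # 'opensipsctl fifo get_statistics all' prints lines like
+ #   core:rcv_requests = 123
+ # keep only the numeric statistics of the known groups and rewrite each
+ # line into a shell-style assignment like opensips_core_rcv_requests=123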
+ run -t $opensips_timeout "$opensips_cmd" $opensips_opts |
+ grep "^\(core\|dialog\|net\|registrar\|shmem\|siptrace\|sl\|tm\|uri\|usrloc\):[a-zA-Z0-9_-]\+[[:space:]]*[=:]\+[[:space:]]*[0-9]\+[[:space:]]*$" |
+ sed \
+ -e "s|[[:space:]]*[=:]\+[[:space:]]*\([0-9]\+\)[[:space:]]*$|=\1|g" \
+ -e "s|[[:space:]:-]\+|_|g" \
+ -e "s|^|opensips_|g"
+
+ local ret=$?
+ [ $ret -ne 0 ] && echo "opensips_command_failed=1"
+ return $ret
+}
+
+opensips_check() {
+ # if the user did not provide an opensips_cmd
+ # try to find it in the system
+ if [ -z "$opensips_cmd" ]; then
+ require_cmd opensipsctl || return 1
+ opensips_cmd="opensipsctl"
+ fi
+
+ # check once if the command works
+ local x
+ x="$(opensips_get_stats | grep "^opensips_core_")"
+ # shellcheck disable=SC2181
+ if [ ! $? -eq 0 ] || [ -z "$x" ]; then
+ error "cannot get global status. Please set opensips_opts='options' whatever needed to get connected to opensips server, in $confd/opensips.conf"
+ return 1
+ fi
+
+ return 0
+}
+
+opensips_create() {
+ # create the charts
+ cat <<EOF
+CHART opensips.dialogs_active '' "OpenSIPS Active Dialogs" "dialogs" dialogs '' area $((opensips_priority + 1)) $opensips_update_every
+DIMENSION dialog_active_dialogs active absolute 1 1
+DIMENSION dialog_early_dialogs early absolute -1 1
+
+CHART opensips.users '' "OpenSIPS Users" "users" users '' line $((opensips_priority + 2)) $opensips_update_every
+DIMENSION usrloc_registered_users registered absolute 1 1
+DIMENSION usrloc_location_users location absolute 1 1
+DIMENSION usrloc_location_contacts contacts absolute 1 1
+DIMENSION usrloc_location_expires expires incremental -1 1
+
+CHART opensips.registrar '' "OpenSIPS Registrar" "registrations/s" registrar '' line $((opensips_priority + 3)) $opensips_update_every
+DIMENSION registrar_accepted_regs accepted incremental 1 1
+DIMENSION registrar_rejected_regs rejected incremental -1 1
+
+CHART opensips.transactions '' "OpenSIPS Transactions" "transactions/s" transactions '' line $((opensips_priority + 4)) $opensips_update_every
+DIMENSION tm_UAS_transactions UAS incremental 1 1
+DIMENSION tm_UAC_transactions UAC incremental -1 1
+
+CHART opensips.core_rcv '' "OpenSIPS Core Receives" "queries/s" core '' line $((opensips_priority + 5)) $opensips_update_every
+DIMENSION core_rcv_requests requests incremental 1 1
+DIMENSION core_rcv_replies replies incremental -1 1
+
+CHART opensips.core_fwd '' "OpenSIPS Core Forwards" "queries/s" core '' line $((opensips_priority + 6)) $opensips_update_every
+DIMENSION core_fwd_requests requests incremental 1 1
+DIMENSION core_fwd_replies replies incremental -1 1
+
+CHART opensips.core_drop '' "OpenSIPS Core Drops" "queries/s" core '' line $((opensips_priority + 7)) $opensips_update_every
+DIMENSION core_drop_requests requests incremental 1 1
+DIMENSION core_drop_replies replies incremental -1 1
+
+CHART opensips.core_err '' "OpenSIPS Core Errors" "queries/s" core '' line $((opensips_priority + 8)) $opensips_update_every
+DIMENSION core_err_requests requests incremental 1 1
+DIMENSION core_err_replies replies incremental -1 1
+
+CHART opensips.core_bad '' "OpenSIPS Core Bad" "queries/s" core '' line $((opensips_priority + 9)) $opensips_update_every
+DIMENSION core_bad_URIs_rcvd bad_URIs_rcvd incremental 1 1
+DIMENSION core_unsupported_methods unsupported_methods incremental 1 1
+DIMENSION core_bad_msg_hdr bad_msg_hdr incremental 1 1
+
+CHART opensips.tm_replies '' "OpenSIPS TM Replies" "replies/s" transactions '' line $((opensips_priority + 10)) $opensips_update_every
+DIMENSION tm_received_replies received incremental 1 1
+DIMENSION tm_relayed_replies relayed incremental 1 1
+DIMENSION tm_local_replies local incremental 1 1
+
+CHART opensips.transactions_status '' "OpenSIPS Transactions Status" "transactions/s" transactions '' line $((opensips_priority + 11)) $opensips_update_every
+DIMENSION tm_2xx_transactions 2xx incremental 1 1
+DIMENSION tm_3xx_transactions 3xx incremental 1 1
+DIMENSION tm_4xx_transactions 4xx incremental 1 1
+DIMENSION tm_5xx_transactions 5xx incremental 1 1
+DIMENSION tm_6xx_transactions 6xx incremental 1 1
+
+CHART opensips.transactions_inuse '' "OpenSIPS InUse Transactions" "transactions" transactions '' line $((opensips_priority + 12)) $opensips_update_every
+DIMENSION tm_inuse_transactions inuse absolute 1 1
+
+CHART opensips.sl_replies '' "OpenSIPS SL Replies" "replies/s" core '' line $((opensips_priority + 13)) $opensips_update_every
+DIMENSION sl_1xx_replies 1xx incremental 1 1
+DIMENSION sl_2xx_replies 2xx incremental 1 1
+DIMENSION sl_3xx_replies 3xx incremental 1 1
+DIMENSION sl_4xx_replies 4xx incremental 1 1
+DIMENSION sl_5xx_replies 5xx incremental 1 1
+DIMENSION sl_6xx_replies 6xx incremental 1 1
+DIMENSION sl_sent_replies sent incremental 1 1
+DIMENSION sl_sent_err_replies error incremental 1 1
+DIMENSION sl_received_ACKs ACKed incremental 1 1
+
+CHART opensips.dialogs '' "OpenSIPS Dialogs" "dialogs/s" dialogs '' line $((opensips_priority + 14)) $opensips_update_every
+DIMENSION dialog_processed_dialogs processed incremental 1 1
+DIMENSION dialog_expired_dialogs expired incremental 1 1
+DIMENSION dialog_failed_dialogs failed incremental -1 1
+
+CHART opensips.net_waiting '' "OpenSIPS Network Waiting" "kilobytes" net '' line $((opensips_priority + 15)) $opensips_update_every
+DIMENSION net_waiting_udp UDP absolute 1 1024
+DIMENSION net_waiting_tcp TCP absolute 1 1024
+
+CHART opensips.uri_checks '' "OpenSIPS URI Checks" "checks / sec" uri '' line $((opensips_priority + 16)) $opensips_update_every
+DIMENSION uri_positive_checks positive incremental 1 1
+DIMENSION uri_negative_checks negative incremental -1 1
+
+CHART opensips.traces '' "OpenSIPS Traces" "traces / sec" traces '' line $((opensips_priority + 17)) $opensips_update_every
+DIMENSION siptrace_traced_requests requests incremental 1 1
+DIMENSION siptrace_traced_replies replies incremental -1 1
+
+CHART opensips.shmem '' "OpenSIPS Shared Memory" "kilobytes" mem '' line $((opensips_priority + 18)) $opensips_update_every
+DIMENSION shmem_total_size total absolute 1 1024
+DIMENSION shmem_used_size used absolute 1 1024
+DIMENSION shmem_real_used_size real_used absolute 1 1024
+DIMENSION shmem_max_used_size max_used absolute 1 1024
+DIMENSION shmem_free_size free absolute 1 1024
+
+CHART opensips.shmem_fragments '' "OpenSIPS Shared Memory Fragmentation" "fragments" mem '' line $((opensips_priority + 19)) $opensips_update_every
+DIMENSION shmem_fragments fragments absolute 1 1
+EOF
+
+ return 0
+}
+
+opensips_update() {
+ # the first argument to this function is the microseconds since last update
+ # pass this parameter to the BEGIN statement (see below).
+
+ # do all the work to collect / calculate the values
+ # for each dimension
+
+ # 1. get the statistics from opensips (via opensips_get_stats above)
+ # 2. grep only the lines of the modules we are interested in
+ #    (core, dialog, net, registrar, shmem, siptrace, sl, tm, uri, usrloc)
+ # 3. sed them into shell assignments like: opensips_core_rcv_requests=1234
+ # 4. then execute this as a script with eval
+ #    be very careful with eval:
+ #    prepare the script and always grep at the end the lines that are useful, so that
+ #    even if something goes wrong, no other code can be executed
+
+ unset \
+ opensips_dialog_active_dialogs \
+ opensips_dialog_early_dialogs \
+ opensips_usrloc_registered_users \
+ opensips_usrloc_location_users \
+ opensips_usrloc_location_contacts \
+ opensips_usrloc_location_expires \
+ opensips_registrar_accepted_regs \
+ opensips_registrar_rejected_regs \
+ opensips_tm_UAS_transactions \
+ opensips_tm_UAC_transactions \
+ opensips_core_rcv_requests \
+ opensips_core_rcv_replies \
+ opensips_core_fwd_requests \
+ opensips_core_fwd_replies \
+ opensips_core_drop_requests \
+ opensips_core_drop_replies \
+ opensips_core_err_requests \
+ opensips_core_err_replies \
+ opensips_core_bad_URIs_rcvd \
+ opensips_core_unsupported_methods \
+ opensips_core_bad_msg_hdr \
+ opensips_tm_received_replies \
+ opensips_tm_relayed_replies \
+ opensips_tm_local_replies \
+ opensips_tm_2xx_transactions \
+ opensips_tm_3xx_transactions \
+ opensips_tm_4xx_transactions \
+ opensips_tm_5xx_transactions \
+ opensips_tm_6xx_transactions \
+ opensips_tm_inuse_transactions \
+ opensips_sl_1xx_replies \
+ opensips_sl_2xx_replies \
+ opensips_sl_3xx_replies \
+ opensips_sl_4xx_replies \
+ opensips_sl_5xx_replies \
+ opensips_sl_6xx_replies \
+ opensips_sl_sent_replies \
+ opensips_sl_sent_err_replies \
+ opensips_sl_received_ACKs \
+ opensips_dialog_processed_dialogs \
+ opensips_dialog_expired_dialogs \
+ opensips_dialog_failed_dialogs \
+ opensips_net_waiting_udp \
+ opensips_net_waiting_tcp \
+ opensips_uri_positive_checks \
+ opensips_uri_negative_checks \
+ opensips_siptrace_traced_requests \
+ opensips_siptrace_traced_replies \
+ opensips_shmem_total_size \
+ opensips_shmem_used_size \
+ opensips_shmem_real_used_size \
+ opensips_shmem_max_used_size \
+ opensips_shmem_free_size \
+ opensips_shmem_fragments
+
+ opensips_command_failed=0
+ eval "local $(opensips_get_stats)"
+ # shellcheck disable=SC2181
+ [ $? -ne 0 ] && return 1
+
+ [ $opensips_command_failed -eq 1 ] && error "failed to get values, disabling." && return 1
+
+ # write the result of the work.
+ cat <<VALUESEOF
+BEGIN opensips.dialogs_active $1
+SET dialog_active_dialogs = $opensips_dialog_active_dialogs
+SET dialog_early_dialogs = $opensips_dialog_early_dialogs
+END
+BEGIN opensips.users $1
+SET usrloc_registered_users = $opensips_usrloc_registered_users
+SET usrloc_location_users = $opensips_usrloc_location_users
+SET usrloc_location_contacts = $opensips_usrloc_location_contacts
+SET usrloc_location_expires = $opensips_usrloc_location_expires
+END
+BEGIN opensips.registrar $1
+SET registrar_accepted_regs = $opensips_registrar_accepted_regs
+SET registrar_rejected_regs = $opensips_registrar_rejected_regs
+END
+BEGIN opensips.transactions $1
+SET tm_UAS_transactions = $opensips_tm_UAS_transactions
+SET tm_UAC_transactions = $opensips_tm_UAC_transactions
+END
+BEGIN opensips.core_rcv $1
+SET core_rcv_requests = $opensips_core_rcv_requests
+SET core_rcv_replies = $opensips_core_rcv_replies
+END
+BEGIN opensips.core_fwd $1
+SET core_fwd_requests = $opensips_core_fwd_requests
+SET core_fwd_replies = $opensips_core_fwd_replies
+END
+BEGIN opensips.core_drop $1
+SET core_drop_requests = $opensips_core_drop_requests
+SET core_drop_replies = $opensips_core_drop_replies
+END
+BEGIN opensips.core_err $1
+SET core_err_requests = $opensips_core_err_requests
+SET core_err_replies = $opensips_core_err_replies
+END
+BEGIN opensips.core_bad $1
+SET core_bad_URIs_rcvd = $opensips_core_bad_URIs_rcvd
+SET core_unsupported_methods = $opensips_core_unsupported_methods
+SET core_bad_msg_hdr = $opensips_core_bad_msg_hdr
+END
+BEGIN opensips.tm_replies $1
+SET tm_received_replies = $opensips_tm_received_replies
+SET tm_relayed_replies = $opensips_tm_relayed_replies
+SET tm_local_replies = $opensips_tm_local_replies
+END
+BEGIN opensips.transactions_status $1
+SET tm_2xx_transactions = $opensips_tm_2xx_transactions
+SET tm_3xx_transactions = $opensips_tm_3xx_transactions
+SET tm_4xx_transactions = $opensips_tm_4xx_transactions
+SET tm_5xx_transactions = $opensips_tm_5xx_transactions
+SET tm_6xx_transactions = $opensips_tm_6xx_transactions
+END
+BEGIN opensips.transactions_inuse $1
+SET tm_inuse_transactions = $opensips_tm_inuse_transactions
+END
+BEGIN opensips.sl_replies $1
+SET sl_1xx_replies = $opensips_sl_1xx_replies
+SET sl_2xx_replies = $opensips_sl_2xx_replies
+SET sl_3xx_replies = $opensips_sl_3xx_replies
+SET sl_4xx_replies = $opensips_sl_4xx_replies
+SET sl_5xx_replies = $opensips_sl_5xx_replies
+SET sl_6xx_replies = $opensips_sl_6xx_replies
+SET sl_sent_replies = $opensips_sl_sent_replies
+SET sl_sent_err_replies = $opensips_sl_sent_err_replies
+SET sl_received_ACKs = $opensips_sl_received_ACKs
+END
+BEGIN opensips.dialogs $1
+SET dialog_processed_dialogs = $opensips_dialog_processed_dialogs
+SET dialog_expired_dialogs = $opensips_dialog_expired_dialogs
+SET dialog_failed_dialogs = $opensips_dialog_failed_dialogs
+END
+BEGIN opensips.net_waiting $1
+SET net_waiting_udp = $opensips_net_waiting_udp
+SET net_waiting_tcp = $opensips_net_waiting_tcp
+END
+BEGIN opensips.uri_checks $1
+SET uri_positive_checks = $opensips_uri_positive_checks
+SET uri_negative_checks = $opensips_uri_negative_checks
+END
+BEGIN opensips.traces $1
+SET siptrace_traced_requests = $opensips_siptrace_traced_requests
+SET siptrace_traced_replies = $opensips_siptrace_traced_replies
+END
+BEGIN opensips.shmem $1
+SET shmem_total_size = $opensips_shmem_total_size
+SET shmem_used_size = $opensips_shmem_used_size
+SET shmem_real_used_size = $opensips_shmem_real_used_size
+SET shmem_max_used_size = $opensips_shmem_max_used_size
+SET shmem_free_size = $opensips_shmem_free_size
+END
+BEGIN opensips.shmem_fragments $1
+SET shmem_fragments = $opensips_shmem_fragments
+END
+VALUESEOF
+
+ return 0
+}
diff --git a/collectors/charts.d.plugin/opensips/opensips.conf b/collectors/charts.d.plugin/opensips/opensips.conf
new file mode 100644
index 0000000..e25111d
--- /dev/null
+++ b/collectors/charts.d.plugin/opensips/opensips.conf
@@ -0,0 +1,21 @@
+# no need for shebang - this file is loaded from charts.d.plugin
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2018 Costa Tsaousis <costa@tsaousis.gr>
+# GPL v3+
+
+#opensips_opts="fifo get_statistics all"
+#opensips_cmd=
+#opensips_timeout=2
+
+# the data collection frequency
+# if unset, will inherit the netdata update frequency
+#opensips_update_every=5
+
+# the charts priority on the dashboard
+#opensips_priority=80000
+
+# the number of retries to do in case of failure
+# before disabling the module
+#opensips_retries=10
diff --git a/collectors/charts.d.plugin/phpfpm/Makefile.inc b/collectors/charts.d.plugin/phpfpm/Makefile.inc
new file mode 100644
index 0000000..56bff61
--- /dev/null
+++ b/collectors/charts.d.plugin/phpfpm/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_charts_DATA += phpfpm/phpfpm.chart.sh
+dist_chartsconfig_DATA += phpfpm/phpfpm.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += phpfpm/README.md phpfpm/Makefile.inc
+
diff --git a/collectors/charts.d.plugin/phpfpm/README.md b/collectors/charts.d.plugin/phpfpm/README.md
new file mode 100644
index 0000000..36462ba
--- /dev/null
+++ b/collectors/charts.d.plugin/phpfpm/README.md
@@ -0,0 +1,6 @@
+# phpfpm
+
+> THIS MODULE IS OBSOLETE.
+> USE [THE PYTHON ONE](../../python.d.plugin/phpfpm) - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fphpfpm%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/charts.d.plugin/phpfpm/phpfpm.chart.sh b/collectors/charts.d.plugin/phpfpm/phpfpm.chart.sh
new file mode 100644
index 0000000..b1edb23
--- /dev/null
+++ b/collectors/charts.d.plugin/phpfpm/phpfpm.chart.sh
@@ -0,0 +1,169 @@
+# shellcheck shell=bash
+# no need for shebang - this file is loaded from charts.d.plugin
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2016 Costa Tsaousis <costa@tsaousis.gr>
+#
+# Contributed by @safeie with PR #276
+
+# first, you need to enable the php-fpm status page in php-fpm.conf
+# second, you need to add a status location in nginx.conf
+# see https://easyengine.io/tutorials/php/fpm-status-page/
+
+declare -A phpfpm_urls=()
+declare -A phpfpm_curl_opts=()
+
+# _update_every is a special variable - it holds the number of seconds
+# between the calls of the _update() function
+phpfpm_update_every=
+phpfpm_priority=60000
+
+declare -a phpfpm_response=()
+phpfpm_pool=""
+phpfpm_start_time=""
+phpfpm_start_since=0
+phpfpm_accepted_conn=0
+phpfpm_listen_queue=0
+phpfpm_max_listen_queue=0
+phpfpm_listen_queue_len=0
+phpfpm_idle_processes=0
+phpfpm_active_processes=0
+phpfpm_total_processes=0
+phpfpm_max_active_processes=0
+phpfpm_max_children_reached=0
+phpfpm_slow_requests=0
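+
+# phpfpm_get fetches the plain-text status page with curl and splits it into
+# whitespace-separated words; the values are then read from fixed word
+# positions, which the index checks below validate first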
+phpfpm_get() {
+ local opts="${1}" url="${2}"
+
+ # shellcheck disable=SC2207,2086
+ phpfpm_response=($(run curl -Ss ${opts} "${url}"))
+ # shellcheck disable=SC2181
+ if [ $? -ne 0 ] || [ "${#phpfpm_response[@]}" -eq 0 ]; then
+ return 1
+ fi
+
+ if [[ ${phpfpm_response[0]} != "pool:" || ${phpfpm_response[2]} != "process" || ${phpfpm_response[5]} != "start" || ${phpfpm_response[12]} != "accepted" || ${phpfpm_response[15]} != "listen" || ${phpfpm_response[16]} != "queue:" || ${phpfpm_response[26]} != "idle" || ${phpfpm_response[29]} != "active" || ${phpfpm_response[32]} != "total" ]]; then
+ error "invalid response from phpfpm status server: ${phpfpm_response[*]}"
+ return 1
+ fi
+
+ phpfpm_pool="${phpfpm_response[1]}"
+ phpfpm_start_time="${phpfpm_response[7]} ${phpfpm_response[8]}"
+ phpfpm_start_since="${phpfpm_response[11]}"
+ phpfpm_accepted_conn="${phpfpm_response[14]}"
+ phpfpm_listen_queue="${phpfpm_response[17]}"
+ phpfpm_max_listen_queue="${phpfpm_response[21]}"
+ phpfpm_listen_queue_len="${phpfpm_response[25]}"
+ phpfpm_idle_processes="${phpfpm_response[28]}"
+ phpfpm_active_processes="${phpfpm_response[31]}"
+ phpfpm_total_processes="${phpfpm_response[34]}"
+ phpfpm_max_active_processes="${phpfpm_response[38]}"
+ phpfpm_max_children_reached="${phpfpm_response[42]}"
+ if [ "${phpfpm_response[43]}" == "slow" ]; then
+ phpfpm_slow_requests="${phpfpm_response[45]}"
+ else
+ phpfpm_slow_requests="-1"
+ fi
+
+ if [[ -z ${phpfpm_pool} || -z ${phpfpm_start_time} || -z ${phpfpm_start_since} || -z ${phpfpm_accepted_conn} || -z ${phpfpm_listen_queue} || -z ${phpfpm_max_listen_queue} || -z ${phpfpm_listen_queue_len} || -z ${phpfpm_idle_processes} || -z ${phpfpm_active_processes} || -z ${phpfpm_total_processes} || -z ${phpfpm_max_active_processes} || -z ${phpfpm_max_children_reached} ]]; then
+ error "empty values got from phpfpm status server: ${phpfpm_response[*]}"
+ return 1
+ fi
+
+ return 0
+}
+
+# _check is called once, to find out if this chart should be enabled or not
+phpfpm_check() {
+ if [ ${#phpfpm_urls[@]} -eq 0 ]; then
+ phpfpm_urls[local]="http://localhost/status"
+ fi
+
+ local m
+ for m in "${!phpfpm_urls[@]}"; do
+ phpfpm_get "${phpfpm_curl_opts[$m]}" "${phpfpm_urls[$m]}"
+ # shellcheck disable=SC2181
+ if [ $? -ne 0 ]; then
+ # shellcheck disable=SC2154
+ error "cannot find status on URL '${phpfpm_urls[$m]}'. Please set phpfpm_urls[$m]='http://localhost/status' in $confd/phpfpm.conf"
+ unset "phpfpm_urls[$m]"
+ continue
+ fi
+ done
+
+ if [ ${#phpfpm_urls[@]} -eq 0 ]; then
+ error "no phpfpm servers found. Please set phpfpm_urls[name]='url' to whatever is needed to get the status page of the php-fpm server, in $confd/phpfpm.conf"
+ return 1
+ fi
+
+ # this should return:
+ # - 0 to enable the chart
+ # - 1 to disable the chart
+
+ return 0
+}
+
+# _create is called once, to create the charts
+phpfpm_create() {
+ local m
+ for m in "${!phpfpm_urls[@]}"; do
+ cat <<EOF
+CHART phpfpm_$m.connections '' "PHP-FPM Active Connections" "connections" phpfpm phpfpm.connections line $((phpfpm_priority + 1)) $phpfpm_update_every
+DIMENSION active '' absolute 1 1
+DIMENSION maxActive 'max active' absolute 1 1
+DIMENSION idle '' absolute 1 1
+
+CHART phpfpm_$m.requests '' "PHP-FPM Requests" "requests/s" phpfpm phpfpm.requests line $((phpfpm_priority + 2)) $phpfpm_update_every
+DIMENSION requests '' incremental 1 1
+
+CHART phpfpm_$m.performance '' "PHP-FPM Performance" "status" phpfpm phpfpm.performance line $((phpfpm_priority + 3)) $phpfpm_update_every
+DIMENSION reached 'max children reached' absolute 1 1
+EOF
+ if [ $((phpfpm_slow_requests)) -ne -1 ]; then
+ echo "DIMENSION slow 'slow requests' absolute 1 1"
+ fi
+ done
+
+ return 0
+}
+
+# _update is called continuously, to collect the values
+phpfpm_update() {
+ # the first argument to this function is the microseconds since last update
+ # pass this parameter to the BEGIN statement (see below).
+
+ # do all the work to collect / calculate the values
+ # for each dimension
+ # remember: KEEP IT SIMPLE AND SHORT
+
+ local m
+ for m in "${!phpfpm_urls[@]}"; do
+ phpfpm_get "${phpfpm_curl_opts[$m]}" "${phpfpm_urls[$m]}"
+ # shellcheck disable=SC2181
+ if [ $? -ne 0 ]; then
+ continue
+ fi
+
+ # write the result of the work.
+ cat <<EOF
+BEGIN phpfpm_$m.connections $1
+SET active = $((phpfpm_active_processes))
+SET maxActive = $((phpfpm_max_active_processes))
+SET idle = $((phpfpm_idle_processes))
+END
+BEGIN phpfpm_$m.requests $1
+SET requests = $((phpfpm_accepted_conn))
+END
+BEGIN phpfpm_$m.performance $1
+SET reached = $((phpfpm_max_children_reached))
+EOF
+ if [ $((phpfpm_slow_requests)) -ne -1 ]; then
+ echo "SET slow = $((phpfpm_slow_requests))"
+ fi
+ echo "END"
+ done
+
+ return 0
+}
diff --git a/collectors/charts.d.plugin/phpfpm/phpfpm.conf b/collectors/charts.d.plugin/phpfpm/phpfpm.conf
new file mode 100644
index 0000000..e4dd023
--- /dev/null
+++ b/collectors/charts.d.plugin/phpfpm/phpfpm.conf
@@ -0,0 +1,27 @@
+# no need for shebang - this file is loaded from charts.d.plugin
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2018 Costa Tsaousis <costa@tsaousis.gr>
+# GPL v3+
+
+# THIS PLUGIN IS DEPRECATED
+# USE THE PYTHON.D ONE
+
+# first, you need to enable the php-fpm status page in php-fpm.conf
+# second, you need to add a status location in nginx.conf
+# see https://easyengine.io/tutorials/php/fpm-status-page/
+#phpfpm_urls[name]=""
+#phpfpm_curl_opts[name]=""
+
+# the data collection frequency
+# if unset, will inherit the netdata update frequency
+#phpfpm_update_every=
+
+# the charts priority on the dashboard
+#phpfpm_priority=60000
+
+# the number of retries to do in case of failure
+# before disabling the module
+#phpfpm_retries=10
+
diff --git a/collectors/charts.d.plugin/postfix/Makefile.inc b/collectors/charts.d.plugin/postfix/Makefile.inc
new file mode 100644
index 0000000..6e14835
--- /dev/null
+++ b/collectors/charts.d.plugin/postfix/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_charts_DATA += postfix/postfix.chart.sh
+dist_chartsconfig_DATA += postfix/postfix.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += postfix/README.md postfix/Makefile.inc
+
diff --git a/collectors/charts.d.plugin/postfix/README.md b/collectors/charts.d.plugin/postfix/README.md
new file mode 100644
index 0000000..e0dc633
--- /dev/null
+++ b/collectors/charts.d.plugin/postfix/README.md
@@ -0,0 +1,28 @@
+# postfix
+
+> THIS MODULE IS OBSOLETE.
+> USE [THE PYTHON ONE](../../python.d.plugin/postfix) - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT
+
+The plugin will collect the postfix queue size.
+
+It will create two charts:
+
+1. **queue size in emails**
+2. **queue size in KB**
+
+### configuration
+
+This is the internal default for `/etc/netdata/postfix.conf`
+
+```sh
+# the postqueue command
+# if empty, it will use the one found in the system path
+postfix_postqueue=
+
+# how frequently to collect queue size
+postfix_update_every=15
+```
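+
+For reference, the plugin parses the summary line at the end of the
+`postqueue -p` output, which looks like this (numbers will vary):
+
+```sh
+-- 3 Kbytes in 5 Requests.
+```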
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fpostfix%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/charts.d.plugin/postfix/postfix.chart.sh b/collectors/charts.d.plugin/postfix/postfix.chart.sh
new file mode 100644
index 0000000..ff59db9
--- /dev/null
+++ b/collectors/charts.d.plugin/postfix/postfix.chart.sh
@@ -0,0 +1,87 @@
+# shellcheck shell=bash disable=SC1117
+# no need for shebang - this file is loaded from charts.d.plugin
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2016 Costa Tsaousis <costa@tsaousis.gr>
+#
+
+# the postqueue command
+# if empty, it will use the one found in the system path
+postfix_postqueue=
+
+# how frequently to collect queue size
+postfix_update_every=15
+
+postfix_priority=60000
+
+postfix_check() {
+ # this should return:
+ # - 0 to enable the chart
+ # - 1 to disable the chart
+
+ # try to find the postqueue executable
+ if [ -z "$postfix_postqueue" ] || [ ! -x "$postfix_postqueue" ]; then
+ # shellcheck disable=SC2230
+ postfix_postqueue="$(which postqueue 2>/dev/null || command -v postqueue 2>/dev/null)"
+ fi
+
+ if [ -z "$postfix_postqueue" ] || [ ! -x "$postfix_postqueue" ]; then
+ # shellcheck disable=SC2154
+ error "cannot find postqueue. Please set 'postfix_postqueue=/path/to/postqueue' in $confd/postfix.conf"
+ return 1
+ fi
+
+ return 0
+}
+
+postfix_create() {
+ cat <<EOF
+CHART postfix_local.qemails '' "Postfix Queue Emails" "emails" queue postfix.queued.emails line $((postfix_priority + 1)) $postfix_update_every
+DIMENSION emails '' absolute 1 1
+CHART postfix_local.qsize '' "Postfix Queue Emails Size" "emails size in KB" queue postfix.queued.size area $((postfix_priority + 2)) $postfix_update_every
+DIMENSION size '' absolute 1 1
+EOF
+
+ return 0
+}
+
+postfix_update() {
+ # the first argument to this function is the microseconds since last update
+ # pass this parameter to the BEGIN statement (see below).
+
+ # do all the work to collect / calculate the values
+ # for each dimension
+ # remember: KEEP IT SIMPLE AND SHORT
+
+ # 1. execute postqueue -p
+ # 2. get the line that begins with --
+ # 3. match the 2 numbers on the line and output 2 lines like these:
+ # local postfix_q_size=NUMBER
+ # local postfix_q_emails=NUMBER
+ # 4. then execute this as a script with eval
+ #
+ # be very careful with eval:
+ # prepare the script and always egrep at the end the lines that are useful, so that
+ # even if something goes wrong, no other code can be executed
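+ # (the summary line parsed in step 2 looks like: -- 3 Kbytes in 5 Requests.)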
+ postfix_q_emails=0
+ postfix_q_size=0
+
+ eval "$(run "$postfix_postqueue" -p |
+ grep "^--" |
+ sed -e "s/-- \([0-9]\+\) Kbytes in \([0-9]\+\) Requests.$/local postfix_q_size=\1\nlocal postfix_q_emails=\2/g" |
+ grep -E "^local postfix_q_(emails|size)=[0-9]+$")"
+
+ # write the result of the work.
+ cat <<VALUESEOF
+BEGIN postfix_local.qemails $1
+SET emails = $postfix_q_emails
+END
+BEGIN postfix_local.qsize $1
+SET size = $postfix_q_size
+END
+VALUESEOF
+
+ return 0
+}
diff --git a/collectors/charts.d.plugin/postfix/postfix.conf b/collectors/charts.d.plugin/postfix/postfix.conf
new file mode 100644
index 0000000..b77817b
--- /dev/null
+++ b/collectors/charts.d.plugin/postfix/postfix.conf
@@ -0,0 +1,25 @@
+# no need for shebang - this file is loaded from charts.d.plugin
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2018 Costa Tsaousis <costa@tsaousis.gr>
+# GPL v3+
+
+# THIS PLUGIN IS DEPRECATED
+# USE THE PYTHON.D ONE
+
+# the postqueue command
+# if empty, it will use the one found in the system path
+#postfix_postqueue=
+
+# the data collection frequency
+# if unset, will inherit the netdata update frequency
+#postfix_update_every=15
+
+# the charts priority on the dashboard
+#postfix_priority=60000
+
+# the number of retries to do in case of failure
+# before disabling the module
+#postfix_retries=10
+
diff --git a/collectors/charts.d.plugin/sensors/Makefile.inc b/collectors/charts.d.plugin/sensors/Makefile.inc
new file mode 100644
index 0000000..f466a1b
--- /dev/null
+++ b/collectors/charts.d.plugin/sensors/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_charts_DATA += sensors/sensors.chart.sh
+dist_chartsconfig_DATA += sensors/sensors.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += sensors/README.md sensors/Makefile.inc
+
diff --git a/collectors/charts.d.plugin/sensors/README.md b/collectors/charts.d.plugin/sensors/README.md
new file mode 100644
index 0000000..4f3e46d
--- /dev/null
+++ b/collectors/charts.d.plugin/sensors/README.md
@@ -0,0 +1,55 @@
+# sensors
+
+> THIS MODULE IS OBSOLETE.
+> USE [THE PYTHON ONE](../../python.d.plugin/sensors) - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT
+
+> Unlike the python one, this module can collect temperature on RPi.
+
+The plugin will provide charts for all configured system sensors.
+
+> This plugin reads sensors directly from the kernel.
+> The `lm-sensors` package can perform calculations on the
+> kernel-provided values, which this plugin does not do.
+> So, the values graphed are the raw hardware values reported by the sensors.
+
+The plugin will create netdata charts for:
+
+1. **Temperature**
+2. **Voltage**
+3. **Current**
+4. **Power**
+5. **Fans Speed**
+6. **Energy**
+7. **Humidity**
+
+One chart will be created for every sensor chip found, for each of the types above.
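+
+For example, a temperature sensor is typically exposed as a `*_input` file under
+`/sys/devices`, holding the raw value in millidegrees Celsius (the exact path
+varies per system):
+
+```sh
+# raw value in millidegrees Celsius; the chart divides it by 1000
+cat /sys/devices/platform/coretemp.0/hwmon/hwmon1/temp1_input
+# 42000
+```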
+
+### configuration
+
+This is the internal default for `/etc/netdata/sensors.conf`
+
+```sh
+# the directory the kernel keeps sensor data
+sensors_sys_dir="${NETDATA_HOST_PREFIX}/sys/devices"
+
+# how deep in the tree to check for sensor data
+sensors_sys_depth=10
+
+# if set to 1, the script will overwrite internal
+# script functions with code generated ones
+# leave it to 1, it is faster
+sensors_source_update=1
+
+# how frequently to collect sensor data
+# the default is to collect it at every iteration of charts.d
+sensors_update_every=
+
+# array of sensors which are excluded
+# the default is to include all
+sensors_excluded=()
+```
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fsensors%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/charts.d.plugin/sensors/sensors.chart.sh b/collectors/charts.d.plugin/sensors/sensors.chart.sh
new file mode 100644
index 0000000..b921877
--- /dev/null
+++ b/collectors/charts.d.plugin/sensors/sensors.chart.sh
@@ -0,0 +1,250 @@
+# shellcheck shell=bash
+# no need for shebang - this file is loaded from charts.d.plugin
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2016 Costa Tsaousis <costa@tsaousis.gr>
+#
+
+# sensors docs
+# https://www.kernel.org/doc/Documentation/hwmon/sysfs-interface
+
+# if this chart is called X.chart.sh, then all functions and global variables
+# must start with X_
+
+# the directory the kernel keeps sensor data
+sensors_sys_dir="${NETDATA_HOST_PREFIX}/sys/devices"
+
+# how deep in the tree to check for sensor data
+sensors_sys_depth=10
+
+# if set to 1, the script will overwrite internal
+# script functions with code generated ones
+# leave it to 1, it is faster
+sensors_source_update=1
+
+# how frequently to collect sensor data
+# the default is to collect it at every iteration of charts.d
+sensors_update_every=
+
+sensors_priority=90000
+
+declare -A sensors_excluded=()
+
+sensors_find_all_files() {
+ find "$1" -maxdepth $sensors_sys_depth -name \*_input -o -name temp 2>/dev/null
+}
+
+sensors_find_all_dirs() {
+ # shellcheck disable=SC2162
+ sensors_find_all_files "$1" | while read; do
+ dirname "$REPLY"
+ done | sort -u
+}
+
+# _check is called once, to find out if this chart should be enabled or not
+sensors_check() {
+
+ # this should return:
+ # - 0 to enable the chart
+ # - 1 to disable the chart
+
+ [ -z "$(sensors_find_all_files "$sensors_sys_dir")" ] && error "no sensors found in '$sensors_sys_dir'." && return 1
+ return 0
+}
+
+sensors_check_files() {
+ # we only need sensors that report a non-zero value
+ # also remove the sensors listed in sensors_excluded
+
+ local f v excluded
+ for f in "$@"; do
+ [ ! -f "$f" ] && continue
+ for ex in "${sensors_excluded[@]}"; do
+ [[ $f =~ .*$ex$ ]] && excluded='1' && break
+ done
+
+ [ "$excluded" != "1" ] && v="$(cat "$f")" || v=0
+ v=$((v + 1 - 1))
+ [ $v -ne 0 ] && echo "$f" && continue
+ excluded=
+
+ error "$f gives zero values"
+ done
+}
+
+sensors_check_temp_type() {
+ # valid temp types are 1 to 6
+ # disabled sensors have the value 0
+
+ local f t v
+ for f in "$@"; do
+ # shellcheck disable=SC2001
+ t=$(echo "$f" | sed "s|_input$|_type|g")
+ [ "$f" = "$t" ] && echo "$f" && continue
+ [ ! -f "$t" ] && echo "$f" && continue
+
+ v="$(cat "$t")"
+ v=$((v + 1 - 1))
+ [ $v -ne 0 ] && echo "$f" && continue
+
+ error "$f is disabled"
+ done
+}
+
+# _create is called once, to create the charts
+sensors_create() {
+ local path dir name x file lfile labelname device subsystem id type mode files multiplier divisor
+
+ # we create a script with the source of the
+ # sensors_update() function
+ # - the highest speed we can achieve -
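+ # the generated script emits, for every chart, an 'echo "BEGIN ..."' line,
+ # one 'echo "SET ..."' per sensor file (read with $(< file)) and an 'echo END',
+ # so each update avoids re-scanning sysfs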
+ [ $sensors_source_update -eq 1 ] && echo >"$TMP_DIR/sensors.sh" "sensors_update() {"
+
+ for path in $(sensors_find_all_dirs "$sensors_sys_dir" | sort -u); do
+ dir=$(basename "$path")
+ device=
+ subsystem=
+ id=
+ type=
+ name=
+
+ [ -h "$path/device" ] && device=$(readlink -f "$path/device")
+ [ ! -z "$device" ] && device=$(basename "$device")
+ [ -z "$device" ] && device="$dir"
+
+ [ -h "$path/subsystem" ] && subsystem=$(readlink -f "$path/subsystem")
+ [ ! -z "$subsystem" ] && subsystem=$(basename "$subsystem")
+ [ -z "$subsystem" ] && subsystem="$dir"
+
+ [ -f "$path/name" ] && name=$(cat "$path/name")
+ [ -z "$name" ] && name="$dir"
+
+ [ -f "$path/type" ] && type=$(cat "$path/type")
+ [ -z "$type" ] && type="$dir"
+
+ id="$(fixid "$device.$subsystem.$dir")"
+
+ debug "path='$path', dir='$dir', device='$device', subsystem='$subsystem', id='$id', name='$name'"
+
+ for mode in temperature voltage fans power current energy humidity; do
+ files=
+ multiplier=1
+ divisor=1
+ algorithm="absolute"
+
+ case $mode in
+ temperature)
+ files="$(
+ ls "$path"/temp*_input 2>/dev/null
+ ls "$path/temp" 2>/dev/null
+ )"
+ files="$(sensors_check_files "$files")"
+ files="$(sensors_check_temp_type "$files")"
+ [ -z "$files" ] && continue
+ echo "CHART sensors.temp_$id '' '$name Temperature' 'Celsius' 'temperature' 'sensors.temp' line $((sensors_priority + 1)) $sensors_update_every"
+ echo >>"$TMP_DIR/sensors.sh" "echo \"BEGIN sensors.temp_$id \$1\""
+ divisor=1000
+ ;;
+
+ voltage)
+ files="$(ls "$path"/in*_input 2>/dev/null)"
+ files="$(sensors_check_files "$files")"
+ [ -z "$files" ] && continue
+ echo "CHART sensors.volt_$id '' '$name Voltage' 'Volts' 'voltage' 'sensors.volt' line $((sensors_priority + 2)) $sensors_update_every"
+ echo >>"$TMP_DIR/sensors.sh" "echo \"BEGIN sensors.volt_$id \$1\""
+ divisor=1000
+ ;;
+
+ current)
+ files="$(ls "$path"/curr*_input 2>/dev/null)"
+ files="$(sensors_check_files "$files")"
+ [ -z "$files" ] && continue
+ echo "CHART sensors.curr_$id '' '$name Current' 'Ampere' 'current' 'sensors.curr' line $((sensors_priority + 3)) $sensors_update_every"
+ echo >>"$TMP_DIR/sensors.sh" "echo \"BEGIN sensors.curr_$id \$1\""
+ divisor=1000
+ ;;
+
+ power)
+ files="$(ls "$path"/power*_input 2>/dev/null)"
+ files="$(sensors_check_files "$files")"
+ [ -z "$files" ] && continue
+ echo "CHART sensors.power_$id '' '$name Power' 'Watt' 'power' 'sensors.power' line $((sensors_priority + 4)) $sensors_update_every"
+ echo >>"$TMP_DIR/sensors.sh" "echo \"BEGIN sensors.power_$id \$1\""
+ divisor=1000000
+ ;;
+
+ fans)
+ files="$(ls "$path"/fan*_input 2>/dev/null)"
+ files="$(sensors_check_files "$files")"
+ [ -z "$files" ] && continue
+ echo "CHART sensors.fan_$id '' '$name Fans Speed' 'Rotations / Minute' 'fans' 'sensors.fans' line $((sensors_priority + 5)) $sensors_update_every"
+ echo >>"$TMP_DIR/sensors.sh" "echo \"BEGIN sensors.fan_$id \$1\""
+ ;;
+
+ energy)
+ files="$(ls "$path"/energy*_input 2>/dev/null)"
+ files="$(sensors_check_files "$files")"
+ [ -z "$files" ] && continue
+ echo "CHART sensors.energy_$id '' '$name Energy' 'Joule' 'energy' 'sensors.energy' areastack $((sensors_priority + 6)) $sensors_update_every"
+ echo >>"$TMP_DIR/sensors.sh" "echo \"BEGIN sensors.energy_$id \$1\""
+ algorithm="incremental"
+ divisor=1000000
+ ;;
+
+ humidity)
+ files="$(ls "$path"/humidity*_input 2>/dev/null)"
+ files="$(sensors_check_files "$files")"
+ [ -z "$files" ] && continue
+ echo "CHART sensors.humidity_$id '' '$name Humidity' 'Percent' 'humidity' 'sensors.humidity' line $((sensors_priority + 7)) $sensors_update_every"
+ echo >>"$TMP_DIR/sensors.sh" "echo \"BEGIN sensors.humidity_$id \$1\""
+ divisor=1000
+ ;;
+
+ *)
+ continue
+ ;;
+ esac
+
+ for x in $files; do
+ file="$x"
+ fid="$(fixid "$file")"
+ lfile="$(basename "$file" | sed "s|_input$|_label|g")"
+ labelname="$(basename "$file" | sed "s|_input$||g")"
+
+ if [ ! "$path/$lfile" = "$file" ] && [ -f "$path/$lfile" ]; then
+ labelname="$(cat "$path/$lfile")"
+ fi
+
+ echo "DIMENSION $fid '$labelname' $algorithm $multiplier $divisor"
+ echo >>"$TMP_DIR/sensors.sh" "echo \"SET $fid = \"\$(< $file )"
+ done
+
+ echo >>"$TMP_DIR/sensors.sh" "echo END"
+ done
+ done
+
+ [ $sensors_source_update -eq 1 ] && echo >>"$TMP_DIR/sensors.sh" "}"
+
+ # ok, load the function sensors_update() we created
+ # shellcheck source=/dev/null
+ [ $sensors_source_update -eq 1 ] && . "$TMP_DIR/sensors.sh"
+
+ return 0
+}
+
+# _update is called continuously, to collect the values
+sensors_update() {
+ # the first argument to this function is the microseconds since last update
+ # pass this parameter to the BEGIN statement (see below).
+
+ # do all the work to collect / calculate the values
+ # for each dimension
+ # remember: KEEP IT SIMPLE AND SHORT
+
+ # shellcheck source=/dev/null
+ [ $sensors_source_update -eq 0 ] && . "$TMP_DIR/sensors.sh" "$1"
+
+ return 0
+}
diff --git a/collectors/charts.d.plugin/sensors/sensors.conf b/collectors/charts.d.plugin/sensors/sensors.conf
new file mode 100644
index 0000000..bcb2880
--- /dev/null
+++ b/collectors/charts.d.plugin/sensors/sensors.conf
@@ -0,0 +1,32 @@
+# no need for shebang - this file is loaded from charts.d.plugin
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2018 Costa Tsaousis <costa@tsaousis.gr>
+# GPL v3+
+
+# THIS PLUGIN IS DEPRECATED
+# USE THE PYTHON.D ONE
+
+# the directory the kernel keeps sensor data
+#sensors_sys_dir="/sys/devices"
+
+# how deep in the tree to check for sensor data
+#sensors_sys_depth=10
+
+# if set to 1, the script will overwrite internal
+# script functions with code generated ones
+# leave it to 1, it is faster
+#sensors_source_update=1
+
+# the data collection frequency
+# if unset, will inherit the netdata update frequency
+#sensors_update_every=
+
+# the charts priority on the dashboard
+#sensors_priority=90000
+
+# the number of retries to do in case of failure
+# before disabling the module
+#sensors_retries=10
+
diff --git a/collectors/charts.d.plugin/squid/Makefile.inc b/collectors/charts.d.plugin/squid/Makefile.inc
new file mode 100644
index 0000000..ad470d8
--- /dev/null
+++ b/collectors/charts.d.plugin/squid/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_charts_DATA += squid/squid.chart.sh
+dist_chartsconfig_DATA += squid/squid.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += squid/README.md squid/Makefile.inc
+
diff --git a/collectors/charts.d.plugin/squid/README.md b/collectors/charts.d.plugin/squid/README.md
new file mode 100644
index 0000000..cfb6179
--- /dev/null
+++ b/collectors/charts.d.plugin/squid/README.md
@@ -0,0 +1,67 @@
+# squid
+
+> THIS MODULE IS OBSOLETE.
+> USE [THE PYTHON ONE](../../python.d.plugin/squid) - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT
+
+The plugin will monitor a squid server.
+
+It will produce 4 charts:
+
+1. **Squid Client Bandwidth** in kbps
+
+ * in
+ * out
+ * hits
+
+2. **Squid Client Requests** in requests/sec
+
+ * requests
+ * hits
+ * errors
+
+3. **Squid Server Bandwidth** in kbps
+
+ * in
+ * out
+
+4. **Squid Server Requests** in requests/sec
+
+ * requests
+ * errors
+
+### autoconfig
+
+The plugin will automatically detect squid servers running on
+localhost, on ports 3128 or 8080.
+
+It will attempt to download URLs in the form:
+
+- `cache_object://HOST:PORT/counters`
+- `/squid-internal-mgr/counters`
+
+If any of them succeeds, it will use it.
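+
+For reference, the counters page consists of simple `name = value` lines; a small
+sample (the full page contains many more counters):
+
+```sh
+client_http.requests = 12345
+client_http.hits = 6789
+server.all.kbytes_in = 4321
+```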
+
+### configuration
+
+If you need to configure it by hand, create the file
+`/etc/netdata/squid.conf` with the following variables:
+
+- `squid_host=IP` the IP of the squid host
+- `squid_port=PORT` the port squid is listening on
+- `squid_url="URL"` the URL with the statistics to be fetched from squid
+- `squid_timeout=SECONDS` how much time we should wait for squid to respond
+- `squid_update_every=SECONDS` the frequency of the data collection
+
+Example `/etc/netdata/squid.conf`:
+
+```sh
+squid_host=127.0.0.1
+squid_port=3128
+squid_url="cache_object://127.0.0.1:3128/counters"
+squid_timeout=2
+squid_update_every=5
+```
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fsquid%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/charts.d.plugin/squid/squid.chart.sh b/collectors/charts.d.plugin/squid/squid.chart.sh
new file mode 100644
index 0000000..ebddb32
--- /dev/null
+++ b/collectors/charts.d.plugin/squid/squid.chart.sh
@@ -0,0 +1,141 @@
+# shellcheck shell=bash disable=SC2154
+# no need for shebang - this file is loaded from charts.d.plugin
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2016 Costa Tsaousis <costa@tsaousis.gr>
+#
+
+squid_host=
+squid_port=
+squid_url=
+squid_update_every=2
+squid_priority=60000
+
+squid_get_stats_internal() {
+ local host="$1" port="$2" url="$3"
+ run squidclient -h "$host" -p "$port" "$url"
+}
+
+squid_get_stats() {
+ squid_get_stats_internal "$squid_host" "$squid_port" "$squid_url"
+}
+
+squid_autodetect() {
+ local host="127.0.0.1" port url x
+
+ for port in 3128 8080; do
+ for url in "cache_object://$host:$port/counters" "/squid-internal-mgr/counters"; do
+ x=$(squid_get_stats_internal "$host" "$port" "$url" | grep client_http.requests)
+ if [ ! -z "$x" ]; then
+ squid_host="$host"
+ squid_port="$port"
+ squid_url="$url"
+ debug "found squid at '$host:$port' with url '$url'"
+ return 0
+ fi
+ done
+ done
+
+ error "cannot find squid running on localhost. Please set squid_url='url' and squid_host='IP' and squid_port='PORT' in $confd/squid.conf"
+ return 1
+}
+
+squid_check() {
+ require_cmd squidclient || return 1
+ require_cmd sed || return 1
+ require_cmd egrep || return 1
+
+ if [ -z "$squid_host" ] || [ -z "$squid_port" ] || [ -z "$squid_url" ]; then
+ squid_autodetect || return 1
+ fi
+
+ # check once if the url works
+ local x
+ x="$(squid_get_stats | grep client_http.requests)"
+ # shellcheck disable=SC2181
+ if [ ! $? -eq 0 ] || [ -z "$x" ]; then
+ error "cannot fetch URL '$squid_url' by connecting to $squid_host:$squid_port. Please set squid_url='url' and squid_host='host' and squid_port='port' in $confd/squid.conf"
+ return 1
+ fi
+
+ return 0
+}
+
+squid_create() {
+ # create the charts
+ cat <<EOF
+CHART squid_local.clients_net '' "Squid Client Bandwidth" "kilobits / sec" clients squid.clients.net area $((squid_priority + 1)) $squid_update_every
+DIMENSION client_http_kbytes_in in incremental 8 1
+DIMENSION client_http_kbytes_out out incremental -8 1
+DIMENSION client_http_hit_kbytes_out hits incremental -8 1
+
+CHART squid_local.clients_requests '' "Squid Client Requests" "requests / sec" clients squid.clients.requests line $((squid_priority + 3)) $squid_update_every
+DIMENSION client_http_requests requests incremental 1 1
+DIMENSION client_http_hits hits incremental 1 1
+DIMENSION client_http_errors errors incremental -1 1
+
+CHART squid_local.servers_net '' "Squid Server Bandwidth" "kilobits / sec" servers squid.servers.net area $((squid_priority + 2)) $squid_update_every
+DIMENSION server_all_kbytes_in in incremental 8 1
+DIMENSION server_all_kbytes_out out incremental -8 1
+
+CHART squid_local.servers_requests '' "Squid Server Requests" "requests / sec" servers squid.servers.requests line $((squid_priority + 4)) $squid_update_every
+DIMENSION server_all_requests requests incremental 1 1
+DIMENSION server_all_errors errors incremental -1 1
+EOF
+
+ return 0
+}
+
+squid_update() {
+ # the first argument to this function is the microseconds since last update
+ # pass this parameter to the BEGIN statement (see below).
+
+ # do all the work to collect / calculate the values
+ # for each dimension
+ # remember: KEEP IT SIMPLE AND SHORT
+
+ # 1. get the counters page from squid
+ # 2. sed to remove spaces; replace . with _; remove spaces around =; prepend each line with: local squid_
+ # 3. egrep lines starting with:
+ # local squid_client_http_ then one or more of these a-z 0-9 _ then = and one or more of 0-9
+ # local squid_server_all_ then one or more of these a-z 0-9 _ then = and one or more of 0-9
+ # 4. then execute this as a script with eval
+ #
+ # be very careful with eval:
+ # prepare the script and always grep at the end the lines that are useful, so that
+ # even if something goes wrong, no other code can be executed
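+ # e.g. 'client_http.requests = 12345' becomes 'local squid_client_http_requests=12345'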
+
+ # shellcheck disable=SC1117
+ eval "$(squid_get_stats |
+ sed -e "s/ \+/ /g" -e "s/\./_/g" -e "s/^\([a-z0-9_]\+\) *= *\([0-9]\+\)$/local squid_\1=\2/g" |
+ grep -E "^local squid_(client_http|server_all)_[a-z0-9_]+=[0-9]+$")"
+
+ # write the result of the work.
+ cat <<VALUESEOF
+BEGIN squid_local.clients_net $1
+SET client_http_kbytes_in = $squid_client_http_kbytes_in
+SET client_http_kbytes_out = $squid_client_http_kbytes_out
+SET client_http_hit_kbytes_out = $squid_client_http_hit_kbytes_out
+END
+
+BEGIN squid_local.clients_requests $1
+SET client_http_requests = $squid_client_http_requests
+SET client_http_hits = $squid_client_http_hits
+SET client_http_errors = $squid_client_http_errors
+END
+
+BEGIN squid_local.servers_net $1
+SET server_all_kbytes_in = $squid_server_all_kbytes_in
+SET server_all_kbytes_out = $squid_server_all_kbytes_out
+END
+
+BEGIN squid_local.servers_requests $1
+SET server_all_requests = $squid_server_all_requests
+SET server_all_errors = $squid_server_all_errors
+END
+VALUESEOF
+
+ return 0
+}
diff --git a/collectors/charts.d.plugin/squid/squid.conf b/collectors/charts.d.plugin/squid/squid.conf
new file mode 100644
index 0000000..19e928f
--- /dev/null
+++ b/collectors/charts.d.plugin/squid/squid.conf
@@ -0,0 +1,26 @@
+# no need for shebang - this file is loaded from charts.d.plugin
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2018 Costa Tsaousis <costa@tsaousis.gr>
+# GPL v3+
+
+# THIS PLUGIN IS DEPRECATED
+# USE THE PYTHON.D ONE
+
+#squid_host=
+#squid_port=
+#squid_url=
+#squid_timeout=2
+
+# the data collection frequency
+# if unset, will inherit the netdata update frequency
+#squid_update_every=2
+
+# the charts priority on the dashboard
+#squid_priority=60000
+
+# the number of retries to do in case of failure
+# before disabling the module
+#squid_retries=10
+
diff --git a/collectors/charts.d.plugin/tomcat/Makefile.inc b/collectors/charts.d.plugin/tomcat/Makefile.inc
new file mode 100644
index 0000000..ef05b19
--- /dev/null
+++ b/collectors/charts.d.plugin/tomcat/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_charts_DATA += tomcat/tomcat.chart.sh
+dist_chartsconfig_DATA += tomcat/tomcat.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += tomcat/README.md tomcat/Makefile.inc
+
diff --git a/collectors/charts.d.plugin/tomcat/README.md b/collectors/charts.d.plugin/tomcat/README.md
new file mode 100644
index 0000000..8433786
--- /dev/null
+++ b/collectors/charts.d.plugin/tomcat/README.md
@@ -0,0 +1,6 @@
+# tomcat
+
+> THIS MODULE IS OBSOLETE.
+> USE [THE PYTHON ONE](../../python.d.plugin/tomcat) - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Ftomcat%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/charts.d.plugin/tomcat/tomcat.chart.sh b/collectors/charts.d.plugin/tomcat/tomcat.chart.sh
new file mode 100644
index 0000000..9ca75e6
--- /dev/null
+++ b/collectors/charts.d.plugin/tomcat/tomcat.chart.sh
@@ -0,0 +1,152 @@
+# shellcheck shell=bash
+# no need for shebang - this file is loaded from charts.d.plugin
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2016 Costa Tsaousis <costa@tsaousis.gr>
+#
+# Contributed by @jgeromero with PR #277
+
+# Description: Tomcat netdata charts.d plugin
+# Author: Jorge Romero
+
+# the URL to download tomcat status info
+# usually http://localhost:8080/manager/status?XML=true
+tomcat_url=""
+tomcat_curl_opts=""
+
+# set tomcat username/password here
+tomcat_user=""
+tomcat_password=""
+
+# _update_every is a special variable - it holds the number of seconds
+# between the calls of the _update() function
+tomcat_update_every=
+
+tomcat_priority=60000
+
+# convert tomcat floating point values
+# to integer using this multiplier
+# this only affects precision - the values
+# will be in the proper units
+tomcat_decimal_detail=1000000
+
+# used by volume chart to convert bytes to kB
+tomcat_decimal_kB_detail=1000
+
+tomcat_check() {
+
+ require_cmd xmlstarlet || return 1
+
+ # check if url, username, passwords are set
+ if [ -z "${tomcat_url}" ]; then
+ error "tomcat url is unset or set to the empty string"
+ return 1
+ fi
+ if [ -z "${tomcat_user}" ]; then
+ # check backwards compatibility
+ # shellcheck disable=SC2154
+ if [ -z "${tomcatUser}" ]; then
+ error "tomcat user is unset or set to the empty string"
+ return 1
+ else
+ tomcat_user="${tomcatUser}"
+ fi
+ fi
+ if [ -z "${tomcat_password}" ]; then
+ # check backwards compatibility
+ # shellcheck disable=SC2154
+ if [ -z "${tomcatPassword}" ]; then
+ error "tomcat password is unset or set to the empty string"
+ return 1
+ else
+ tomcat_password="${tomcatPassword}"
+ fi
+ fi
+
+ # check if we can get to tomcat's status page
+ tomcat_get
+ # shellcheck disable=2181
+ if [ $? -ne 0 ]; then
+ error "cannot get to status page on URL '${tomcat_url}'. Please make sure tomcat url, username and password are correct."
+ return 1
+ fi
+
+ # this should return:
+ # - 0 to enable the chart
+ # - 1 to disable the chart
+
+ return 0
+}
+
+tomcat_get() {
+ # collect tomcat values
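+ # the port is extracted from tomcat_url, so that the connector named
+ # "http-bio-PORT" can be selected from the status XML below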
+ tomcat_port="$(
+ IFS=/ read -ra a <<<"$tomcat_url"
+ hostport=${a[2]}
+ echo "${hostport#*:}"
+ )"
+ mapfile -t lines < <(run curl -u "$tomcat_user":"$tomcat_password" -Ss ${tomcat_curl_opts} "$tomcat_url" |
+ run xmlstarlet sel \
+ -t -m "/status/jvm/memory" -v @free \
+ -n -m "/status/connector[@name='\"http-bio-$tomcat_port\"']/threadInfo" -v @currentThreadCount \
+ -n -v @currentThreadsBusy \
+ -n -m "/status/connector[@name='\"http-bio-$tomcat_port\"']/requestInfo" -v @requestCount \
+ -n -v @bytesSent -n -)
+
+ tomcat_jvm_freememory="${lines[0]}"
+ tomcat_threads="${lines[1]}"
+ tomcat_threads_busy="${lines[2]}"
+ tomcat_accesses="${lines[3]}"
+ tomcat_volume="${lines[4]}"
+
+ return 0
+}
+
+# _create is called once, to create the charts
+tomcat_create() {
+ cat <<EOF
+CHART tomcat.accesses '' "tomcat requests" "requests/s" statistics tomcat.accesses area $((tomcat_priority + 8)) $tomcat_update_every
+DIMENSION accesses '' incremental
+CHART tomcat.volume '' "tomcat volume" "kB/s" volume tomcat.volume area $((tomcat_priority + 5)) $tomcat_update_every
+DIMENSION volume '' incremental 1 ${tomcat_decimal_kB_detail}
+CHART tomcat.threads '' "tomcat threads" "current threads" statistics tomcat.threads line $((tomcat_priority + 6)) $tomcat_update_every
+DIMENSION current '' absolute 1
+DIMENSION busy '' absolute 1
+CHART tomcat.jvm '' "JVM Free Memory" "MB" statistics tomcat.jvm area $((tomcat_priority + 8)) $tomcat_update_every
+DIMENSION jvm '' absolute 1 ${tomcat_decimal_detail}
+EOF
+ return 0
+}
+
+# _update is called continuously, to collect the values
+tomcat_update() {
+ # the first argument to this function is the microseconds since last update
+ # pass this parameter to the BEGIN statement (see below).
+
+ # do all the work to collect / calculate the values
+ # for each dimension
+ # remember: KEEP IT SIMPLE AND SHORT
+
+ tomcat_get || return 1
+
+ # write the result of the work.
+ cat <<VALUESEOF
+BEGIN tomcat.accesses $1
+SET accesses = $((tomcat_accesses))
+END
+BEGIN tomcat.volume $1
+SET volume = $((tomcat_volume))
+END
+BEGIN tomcat.threads $1
+SET current = $((tomcat_threads))
+SET busy = $((tomcat_threads_busy))
+END
+BEGIN tomcat.jvm $1
+SET jvm = $((tomcat_jvm_freememory))
+END
+VALUESEOF
+
+ return 0
+}
diff --git a/collectors/charts.d.plugin/tomcat/tomcat.conf b/collectors/charts.d.plugin/tomcat/tomcat.conf
new file mode 100644
index 0000000..e9f3eef
--- /dev/null
+++ b/collectors/charts.d.plugin/tomcat/tomcat.conf
@@ -0,0 +1,38 @@
+# no need for shebang - this file is loaded from charts.d.plugin
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2018 Costa Tsaousis <costa@tsaousis.gr>
+# GPL v3+
+
+# THIS PLUGIN IS DEPRECATED
+# USE THE PYTHON.D ONE
+
+# the URL to download tomcat status info
+# usually http://localhost:8080/manager/status?XML=true
+#tomcat_url=""
+#tomcat_curl_opts=""
+
+# set tomcat username/password here
+#tomcat_user=""
+#tomcat_password=""
+
+# the data collection frequency
+# if unset, will inherit the netdata update frequency
+#tomcat_update_every=1
+
+# the charts priority on the dashboard
+#tomcat_priority=60000
+
+# the number of retries to do in case of failure
+# before disabling the module
+#tomcat_retries=10
+
+# convert tomcat floating point values
+# to integer using this multiplier
+# this only affects precision - the values
+# will be in the proper units
+#tomcat_decimal_detail=1000000
+
+# used by volume chart to convert bytes to KB
+#tomcat_decimal_kB_detail=1000