Diffstat
-rw-r--r-- [-rwxr-xr-x]  collectors/cgroups.plugin/cgroup-name.sh (renamed from plugins.d/cgroup-name.sh)                        |  35
-rwxr-xr-x               collectors/cgroups.plugin/cgroup-network-helper.sh (renamed from plugins.d/cgroup-network-helper.sh)    |  29
-rwxr-xr-x               collectors/charts.d.plugin/charts.d.dryrun-helper.sh (renamed from plugins.d/charts.d.dryrun-helper.sh) |   9
-rw-r--r-- [-rwxr-xr-x]  collectors/charts.d.plugin/charts.d.plugin (renamed from plugins.d/charts.d.plugin)                     |  86
-rw-r--r--               collectors/charts.d.plugin/loopsleepms.sh.inc (renamed from plugins.d/loopsleepms.sh.inc)               |  54
-rw-r--r--               collectors/fping.plugin/Makefile.in (renamed from plugins.d/Makefile.in)                                | 120
-rw-r--r-- [-rwxr-xr-x]  collectors/fping.plugin/fping.plugin (renamed from plugins.d/fping.plugin)                              |  34
-rw-r--r-- [-rwxr-xr-x]  collectors/node.d.plugin/node.d.plugin (renamed from plugins.d/node.d.plugin)                           |  97
-rw-r--r-- [-rwxr-xr-x]  collectors/python.d.plugin/python.d.plugin (renamed from plugins.d/python.d.plugin)                     | 121
-rw-r--r-- [-rwxr-xr-x]  collectors/tc.plugin/tc-qos-helper.sh (renamed from plugins.d/tc-qos-helper.sh)                         | 110
-rwxr-xr-x               health/notifications/alarm-email.sh (renamed from plugins.d/alarm-email.sh)                             |   1
-rw-r--r-- [-rwxr-xr-x]  health/notifications/alarm-notify.sh (renamed from plugins.d/alarm-notify.sh)                           | 521
-rwxr-xr-x               health/notifications/alarm-test.sh (renamed from plugins.d/alarm-test.sh)                               |   4
-rw-r--r--               plugins.d/Makefile.am                                                                                   |  23
-rw-r--r--               plugins.d/README.md                                                                                     | 236
15 files changed, 940 insertions(+), 540 deletions(-)
diff --git a/plugins.d/cgroup-name.sh b/collectors/cgroups.plugin/cgroup-name.sh
index 3c8ad7205..6bf8b8b03 100755..100644
--- a/plugins.d/cgroup-name.sh
+++ b/collectors/cgroups.plugin/cgroup-name.sh
@@ -3,7 +3,7 @@
# netdata
# real-time performance and health monitoring, done right!
# (C) 2016 Costa Tsaousis <costa@tsaousis.gr>
-# GPL v3+
+# SPDX-License-Identifier: GPL-3.0-or-later
#
# Script to find a better name for cgroups
#
@@ -51,8 +51,10 @@ debug() {
# -----------------------------------------------------------------------------
-[ -z "${NETDATA_CONFIG_DIR}" ] && NETDATA_CONFIG_DIR="$(dirname "${0}")/../../../../etc/netdata"
-CONFIG="${NETDATA_CONFIG_DIR}/cgroups-names.conf"
+[ -z "${NETDATA_USER_CONFIG_DIR}" ] && NETDATA_USER_CONFIG_DIR="/usr/local/etc/netdata"
+[ -z "${NETDATA_STOCK_CONFIG_DIR}" ] && NETDATA_STOCK_CONFIG_DIR="/usr/local/lib/netdata/conf.d"
+
+DOCKER_HOST="${DOCKER_HOST:=/var/run/docker.sock}"
CGROUP="${1}"
NAME=
@@ -63,16 +65,21 @@ if [ -z "${CGROUP}" ]
fatal "called without a cgroup name. Nothing to do."
fi
-if [ -f "${CONFIG}" ]
+for CONFIG in "${NETDATA_USER_CONFIG_DIR}/cgroups-names.conf" "${NETDATA_STOCK_CONFIG_DIR}/cgroups-names.conf"
+do
+ if [ -f "${CONFIG}" ]
then
- NAME="$(grep "^${CGROUP} " "${CONFIG}" | sed "s/[[:space:]]\+/ /g" | cut -d ' ' -f 2)"
- if [ -z "${NAME}" ]
- then
- info "cannot find cgroup '${CGROUP}' in '${CONFIG}'."
+ NAME="$(grep "^${CGROUP} " "${CONFIG}" | sed "s/[[:space:]]\+/ /g" | cut -d ' ' -f 2)"
+ if [ -z "${NAME}" ]
+ then
+ info "cannot find cgroup '${CGROUP}' in '${CONFIG}'."
+ else
+ break
+ fi
+ #else
+ # info "configuration file '${CONFIG}' is not available."
fi
-#else
-# info "configuration file '${CONFIG}' is not available."
-fi
+done
function docker_get_name_classic {
local id="${1}"
@@ -83,13 +90,13 @@ function docker_get_name_classic {
function docker_get_name_api {
local id="${1}"
- if [ ! -S "/var/run/docker.sock" ]
+ if [ ! -S "${DOCKER_HOST}" ]
then
- warning "Can't find /var/run/docker.sock"
+ warning "Can't find ${DOCKER_HOST}"
return 1
fi
info "Running API command: /containers/${id}/json"
- JSON=$(echo -e "GET /containers/${id}/json HTTP/1.0\r\n" | nc -U /var/run/docker.sock | grep '^{.*')
+ JSON=$(echo -e "GET /containers/${id}/json HTTP/1.0\r\n" | nc -U ${DOCKER_HOST} | grep '^{.*')
NAME=$(echo $JSON | jq -r .Name,.Config.Hostname | grep -v null | head -n1 | sed 's|^/||')
return 0
}
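
Note: the renamed helper resolves container names by speaking plain HTTP to the Docker socket. A
minimal standalone sketch of the same idea (requires a netcat with UNIX-socket support and jq; the
container id below is hypothetical):

    #!/usr/bin/env bash
    DOCKER_HOST="${DOCKER_HOST:-/var/run/docker.sock}"
    id="4dcbed2b1f4d"   # hypothetical container id, as cgroup-name.sh receives it
    # one-shot HTTP/1.0 request over the UNIX socket; keep only the JSON body line
    json=$(echo -e "GET /containers/${id}/json HTTP/1.0\r\n" | nc -U "${DOCKER_HOST}" | grep '^{.*')
    # prefer .Name, fall back to .Config.Hostname, strip the leading slash
    echo "${json}" | jq -r '.Name, .Config.Hostname' | grep -v null | head -n1 | sed 's|^/||'
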
diff --git a/plugins.d/cgroup-network-helper.sh b/collectors/cgroups.plugin/cgroup-network-helper.sh
index f07059986..666f02fc8 100755
--- a/plugins.d/cgroup-network-helper.sh
+++ b/collectors/cgroups.plugin/cgroup-network-helper.sh
@@ -1,10 +1,11 @@
#!/usr/bin/env bash
+# shellcheck disable=SC1117
# cgroup-network-helper.sh
# detect container and virtual machine interfaces
#
# (C) 2017 Costa Tsaousis
-# GPL v3+
+# SPDX-License-Identifier: GPL-3.0-or-later
#
# This script is called as root (by cgroup-network), with either a pid, or a cgroup path.
# It tries to find all the network interfaces that belong to the same cgroup.
@@ -23,6 +24,7 @@
# -----------------------------------------------------------------------------
# the system path is cleared by cgroup-network
+# shellcheck source=/dev/null
[ -f /etc/profile ] && source /etc/profile
export LC_ALL=C
@@ -66,7 +68,7 @@ debug() {
# -----------------------------------------------------------------------------
# check for BASH v4+ (required for associative arrays)
-[ $(( ${BASH_VERSINFO[0]} )) -lt 4 ] && \
+[ $(( BASH_VERSINFO[0] )) -lt 4 ] && \
fatal "BASH version 4 or later is required (this is ${BASH_VERSION})."
# -----------------------------------------------------------------------------
@@ -86,7 +88,7 @@ do
shift
done
-if [ -z "${pid}" -a -z "${cgroup}" ]
+if [ -z "${pid}" ] && [ -z "${cgroup}" ]
then
fatal "Either --pid or --cgroup is required"
fi
@@ -103,7 +105,7 @@ set_source() {
# cgroup-network can detect veth interfaces by itself (written in C).
# If you seek for a shell version of what it does, check this:
-# https://github.com/firehol/netdata/issues/474#issuecomment-317866709
+# https://github.com/netdata/netdata/issues/474#issuecomment-317866709
# -----------------------------------------------------------------------------
@@ -115,7 +117,7 @@ proc_pid_fdinfo_iff() {
debug "Searching for tun/tap interfaces for pid ${p}..."
set_source "fdinfo"
- grep ^iff:.* "${NETDATA_HOST_PREFIX}/proc/${p}/fdinfo"/* 2>/dev/null | cut -f 2
+ grep "^iff:.*" "${NETDATA_HOST_PREFIX}/proc/${p}/fdinfo"/* 2>/dev/null | cut -f 2
}
find_tun_tap_interfaces_for_cgroup() {
@@ -128,7 +130,7 @@ find_tun_tap_interfaces_for_cgroup() {
local p
for p in $(< "${c}/emulator/cgroup.procs" )
do
- proc_pid_fdinfo_iff ${p}
+ proc_pid_fdinfo_iff "${p}"
done
fi
}
@@ -154,11 +156,14 @@ virsh_find_all_interfaces_for_cgroup() {
local c="${1}" # the cgroup path
# the virsh command
- local virsh="$(which virsh 2>/dev/null || command -v virsh 2>/dev/null)"
+ local virsh
+ # shellcheck disable=SC2230
+ virsh="$(which virsh 2>/dev/null || command -v virsh 2>/dev/null)"
if [ ! -z "${virsh}" ]
then
- local d="$(virsh_cgroup_to_domain_name "${c}")"
+ local d
+ d="$(virsh_cgroup_to_domain_name "${c}")"
if [ ! -z "${d}" ]
then
@@ -167,7 +172,7 @@ virsh_find_all_interfaces_for_cgroup() {
# match only 'network' interfaces from virsh output
set_source "virsh"
- "${virsh}" -r domiflist ${d} |\
+ "${virsh}" -r domiflist "${d}" |\
sed -n \
-e "s|^\([^[:space:]]\+\)[[:space:]]\+network[[:space:]]\+\([^[:space:]]\+\)[[:space:]]\+[^[:space:]]\+[[:space:]]\+[^[:space:]]\+$|\1 \1_\2|p" \
-e "s|^\([^[:space:]]\+\)[[:space:]]\+bridge[[:space:]]\+\([^[:space:]]\+\)[[:space:]]\+[^[:space:]]\+[[:space:]]\+[^[:space:]]\+$|\1 \1_\2|p"
@@ -188,7 +193,7 @@ find_all_interfaces_of_pid_or_cgroup() {
then
# we have been called with a pid
- proc_pid_fdinfo_iff ${p}
+ proc_pid_fdinfo_iff "${p}"
elif [ ! -z "${c}" ]
then
@@ -219,6 +224,7 @@ declare -A devs=()
# store all interfaces found in the associative array
# this will also give the unique devices, as seen by the host
last_src=
+# shellcheck disable=SC2162
while read host_device guest_device
do
[ -z "${host_device}" ] && continue
@@ -231,8 +237,9 @@ do
# when we run in debug, show the source
debug "Found host device '${host_device}', guest device '${guest_device}', detected via '${last_src}'"
- [ -z "${devs[${host_device}]}" -o "${devs[${host_device}]}" = "${host_device}" ] && \
+ if [ -z "${devs[${host_device}]}" ] || [ "${devs[${host_device}]}" = "${host_device}" ]; then
devs[${host_device}]="${guest_device}"
+ fi
done < <( find_all_interfaces_of_pid_or_cgroup "${pid}" "${cgroup}" )
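
Note: the BASH 4 requirement exists because discovered interfaces are deduplicated in an
associative array, keeping the first real guest-side name seen for each host-side device. A small
self-contained sketch of that pattern (the sample device pairs are made up):

    #!/usr/bin/env bash
    declare -A devs=()
    # each input line is: <host_device> <guest_device>
    while read -r host_device guest_device
    do
        [ -z "${host_device}" ] && continue
        [ -z "${guest_device}" ] && guest_device="${host_device}"
        # only (re)write entries that are empty or still map a device to itself
        if [ -z "${devs[${host_device}]}" ] || [ "${devs[${host_device}]}" = "${host_device}" ]; then
            devs[${host_device}]="${guest_device}"
        fi
    done < <(printf '%s\n' "veth1234 veth1234" "veth1234 eth0")
    for d in "${!devs[@]}"; do echo "${d} ${devs[${d}]}"; done   # prints: veth1234 eth0
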
diff --git a/plugins.d/charts.d.dryrun-helper.sh b/collectors/charts.d.plugin/charts.d.dryrun-helper.sh
index 8142f9882..67496c1bd 100755
--- a/plugins.d/charts.d.dryrun-helper.sh
+++ b/collectors/charts.d.plugin/charts.d.dryrun-helper.sh
@@ -1,4 +1,7 @@
#!/usr/bin/env bash
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# shellcheck disable=SC2181
# will stop the script for any error
set -e
@@ -10,8 +13,8 @@ conf="$3"
can_diff=1
-tmp1="`mktemp`"
-tmp2="`mktemp`"
+tmp1="$(mktemp)"
+tmp2="$(mktemp)"
myset() {
set | grep -v "^_=" | grep -v "^PIPESTATUS=" | grep -v "^BASH_LINENO="
@@ -36,6 +39,7 @@ myset >"$tmp1"
# include the plugin and its config
if [ -f "$conf" ]
then
+ # shellcheck source=/dev/null
. "$conf"
if [ $? -ne 0 ]
then
@@ -45,6 +49,7 @@ then
fi
fi
+# shellcheck source=/dev/null
. "$chart"
if [ $? -ne 0 ]
then
diff --git a/plugins.d/charts.d.plugin b/collectors/charts.d.plugin/charts.d.plugin
index 9bd03fd47..1c6e8c5c9 100755..100644
--- a/plugins.d/charts.d.plugin
+++ b/collectors/charts.d.plugin/charts.d.plugin
@@ -1,4 +1,5 @@
#!/usr/bin/env bash
+# SPDX-License-Identifier: GPL-3.0-or-later
# netdata
# real-time performance and health monitoring, done right!
@@ -116,14 +117,15 @@ info "started from '$PROGRAM_FILE' with options: $*"
# netdata exposes a few environment variables for us
[ -z "${NETDATA_PLUGINS_DIR}" ] && NETDATA_PLUGINS_DIR="$(dirname "${0}")"
-[ -z "${NETDATA_CONFIG_DIR}" ] && NETDATA_CONFIG_DIR="$(dirname "${0}")/../../../../etc/netdata"
+[ -z "${NETDATA_USER_CONFIG_DIR}" ] && NETDATA_USER_CONFIG_DIR="/usr/local/etc/netdata"
+[ -z "${NETDATA_STOCK_CONFIG_DIR}" ] && NETDATA_STOCK_CONFIG_DIR="/usr/local/lib/netdata/conf.d"
pluginsd="${NETDATA_PLUGINS_DIR}"
-confd="${NETDATA_CONFIG_DIR}"
+stockconfd="${NETDATA_STOCK_CONFIG_DIR}/${PROGRAM_NAME}"
+userconfd="${NETDATA_USER_CONFIG_DIR}/${PROGRAM_NAME}"
+olduserconfd="${NETDATA_USER_CONFIG_DIR}"
chartsd="$pluginsd/../charts.d"
-myconfig="$confd/$PROGRAM_NAME.conf"
-
minimum_update_frequency="${NETDATA_UPDATE_EVERY-1}"
update_every=${minimum_update_frequency} # this will be overwritten by the command line
@@ -230,22 +232,33 @@ mysleep="sleep"
# if found and included, this file overwrites loopsleepms()
# and current_time_ms() with a high resolution timer function
# for precise looping.
-. "$pluginsd/loopsleepms.sh.inc"
+source "$pluginsd/loopsleepms.sh.inc"
+[ $? -ne 0 ] && error "Failed to load '$pluginsd/loopsleepms.sh.inc'."
# -----------------------------------------------------------------------------
# load my configuration
-if [ -f "$myconfig" ]
+for myconfig in "${NETDATA_STOCK_CONFIG_DIR}/${PROGRAM_NAME}.conf" "${NETDATA_USER_CONFIG_DIR}/${PROGRAM_NAME}.conf"
+do
+ if [ -f "$myconfig" ]
then
- . "$myconfig"
- [ $? -ne 0 ] && fatal "cannot load $myconfig"
+ source "$myconfig"
+ if [ $? -ne 0 ]
+ then
+ error "Config file '$myconfig' loaded with errors."
+ else
+ info "Configuration file '$myconfig' loaded."
+ fi
+ else
+ warning "Configuration file '$myconfig' not found."
+ fi
+done
+
+# make sure time_divisor is right
+time_divisor=$((time_divisor))
+[ $time_divisor -lt 10 ] && time_divisor=10
+[ $time_divisor -gt 100 ] && time_divisor=100
- time_divisor=$((time_divisor))
- [ $time_divisor -lt 10 ] && time_divisor=10
- [ $time_divisor -gt 100 ] && time_divisor=100
-else
- info "configuration file '$myconfig' not found. Using defaults."
-fi
# we check for the timeout command, after we load our
# configuration, so that the user may overwrite the
@@ -296,7 +309,7 @@ run() {
{
printf "$(logdate): ${PROGRAM_NAME}: ${status}: ${MODULE_NAME}: command '"
printf "%q " "${@}"
- printf "' failed:\n --- BEGIN TRACE ---\n"
+ printf "' failed with code ${ret}:\n --- BEGIN TRACE ---\n"
cat "${TMP_DIR}/run.${pid}"
printf " --- END TRACE ---\n"
} >&2
@@ -411,7 +424,7 @@ all_enabled_charts() {
if [ ! "${enabled}" = "${required}" ]
then
- info "is disabled. Add a line with $chart=$required in $myconfig to enable it (or remove the line that disables it)."
+ info "is disabled. Add a line with $chart=$required in '${NETDATA_USER_CONFIG_DIR}/${PROGRAM_NAME}.conf' to enable it (or remove the line that disables it)."
else
debug "is enabled for auto-detection."
local charts="$charts $chart"
@@ -447,18 +460,18 @@ all_enabled_charts() {
fi
# check its config
- #if [ -f "$confd/$chart.conf" ]
+ #if [ -f "$userconfd/$chart.conf" ]
#then
- # if [ ! -z "$( cat "$confd/$chart.conf" | sed "s/^ \+//g" | grep -v "^$" | grep -v "^#" | grep -v "^$chart$charts_undescore" )" ]
+ # if [ ! -z "$( cat "$userconfd/$chart.conf" | sed "s/^ \+//g" | grep -v "^$" | grep -v "^#" | grep -v "^$chart$charts_undescore" )" ]
# then
- # error "module's $chart config $confd/$chart.conf should only have lines starting with $chart$charts_undescore . Disabling it."
+ # error "module's $chart config $userconfd/$chart.conf should only have lines starting with $chart$charts_undescore . Disabling it."
# continue
# fi
#fi
#if [ $dryrunner -eq 1 ]
# then
- # "$pluginsd/charts.d.dryrun-helper.sh" "$chart" "$chartsd/$chart.chart.sh" "$confd/$chart.conf" >/dev/null
+ # "$pluginsd/charts.d.dryrun-helper.sh" "$chart" "$chartsd/$chart.chart.sh" "$userconfd/$chart.conf" >/dev/null
# if [ $? -ne 0 ]
# then
# error "module's $chart did not pass the dry run check. This means it uses global variables not starting with $chart. Disabling it."
@@ -486,18 +499,35 @@ do
debug "loading module: '$chartsd/$chart.chart.sh'"
- . "$chartsd/$chart.chart.sh"
+ source "$chartsd/$chart.chart.sh"
+ [ $? -ne 0 ] && warning "Module '$chartsd/$chart.chart.sh' loaded with errors."
- if [ -f "$confd/$PROGRAM_NAME/$chart.conf" ]
+ # first load the stock config
+ if [ -f "$stockconfd/$chart.conf" ]
then
- debug "loading module configuration: '$confd/$PROGRAM_NAME/$chart.conf'"
- . "$confd/$PROGRAM_NAME/$chart.conf"
- elif [ -f "$confd/$chart.conf" ]
+ debug "loading module configuration: '$stockconfd/$chart.conf'"
+ source "$stockconfd/$chart.conf"
+ [ $? -ne 0 ] && warning "Config file '$stockconfd/$chart.conf' loaded with errors."
+ else
+ debug "not found module configuration: '$stockconfd/$chart.conf'"
+ fi
+
+ # then load the user config (it overwrites the stock)
+ if [ -f "$userconfd/$chart.conf" ]
then
- debug "loading module configuration: '$confd/$chart.conf'"
- . "$confd/$chart.conf"
+ debug "loading module configuration: '$userconfd/$chart.conf'"
+ source "$userconfd/$chart.conf"
+ [ $? -ne 0 ] && warning "Config file '$userconfd/$chart.conf' loaded with errors."
else
- warning "configuration file '$confd/$PROGRAM_NAME/$chart.conf' not found. Using defaults."
+ debug "not found module configuration: '$userconfd/$chart.conf'"
+
+ if [ -f "$olduserconfd/$chart.conf" ]
+ then
+ # support for very old netdata that had the charts.d module configs in /etc/netdata
+ info "loading module configuration from obsolete location: '$olduserconfd/$chart.conf'"
+ source "$olduserconfd/$chart.conf"
+ [ $? -ne 0 ] && warning "Config file '$olduserconfd/$chart.conf' loaded with errors."
+ fi
fi
eval "dt=\$$chart$suffix_update_every"
diff --git a/plugins.d/loopsleepms.sh.inc b/collectors/charts.d.plugin/loopsleepms.sh.inc
index ef3db192d..bdc032b99 100644
--- a/plugins.d/loopsleepms.sh.inc
+++ b/collectors/charts.d.plugin/loopsleepms.sh.inc
@@ -1,4 +1,5 @@
# no need for shebang - this file is included from other scripts
+# SPDX-License-Identifier: GPL-3.0-or-later
LOOPSLEEP_DATE="$(which date 2>/dev/null || command -v date 2>/dev/null)"
if [ -z "$LOOPSLEEP_DATE" ]
@@ -85,11 +86,12 @@ fi
# -----------------------------------------------------------------------------
# use read with timeout for sleep
-mysleep="mysleep_read"
+mysleep=""
mysleep_fifo="${NETDATA_CACHE_DIR-/tmp}/.netdata_bash_sleep_timer_fifo"
-[ ! -e "${mysleep_fifo}" ] && mkfifo "${mysleep_fifo}"
-[ ! -e "${mysleep_fifo}" ] && mysleep="sleep"
+[ -f "${mysleep_fifo}" ] && rm "${mysleep_fifo}"
+[ ! -p "${mysleep_fifo}" ] && mkfifo "${mysleep_fifo}"
+[ -p "${mysleep_fifo}" ] && mysleep="mysleep_read"
mysleep_read() {
read -t "${1}" <>"${mysleep_fifo}"
@@ -102,6 +104,52 @@ mysleep_read() {
fi
}
+# -----------------------------------------------------------------------------
+# use bash loadable module for sleep
+
+mysleep_builtin() {
+ builtin sleep "${1}"
+ ret=$?
+ if [ $ret -ne 0 ]
+ then
+ echo >&2 "$0: Cannot use builtin sleep for sleeping (return code ${ret})."
+ mysleep="sleep"
+ ${mysleep} "${1}"
+ fi
+}
+
+if [ -z "${mysleep}" -a "$((BASH_VERSINFO[0] +0))" -ge 3 -a "${NETDATA_BASH_LOADABLES}" != "DISABLE" ]
+ then
+ # enable modules only for bash version 3+
+
+ for bash_modules_path in ${BASH_LOADABLES_PATH//:/ } "$(pkg-config bash --variable=loadablesdir 2>/dev/null)" "/usr/lib/bash" "/lib/bash" "/lib64/bash" "/usr/local/lib/bash" "/usr/local/lib64/bash"
+ do
+ [ -z "${bash_modules_path}" -o ! -d "${bash_modules_path}" ] && continue
+
+ # check for sleep
+ for bash_module_sleep in "sleep" "sleep.so"
+ do
+ if [ -f "${bash_modules_path}/${bash_module_sleep}" ]
+ then
+ if enable -f "${bash_modules_path}/${bash_module_sleep}" sleep 2>/dev/null
+ then
+ mysleep="mysleep_builtin"
+ # echo >&2 "$0: Using bash loadable ${bash_modules_path}/${bash_module_sleep} for sleep"
+ break
+ fi
+ fi
+
+ done
+
+ [ ! -z "${mysleep}" ] && break
+ done
+fi
+
+# -----------------------------------------------------------------------------
+# fallback to external sleep
+
+[ -z "${mysleep}" ] && mysleep="sleep"
+
# -----------------------------------------------------------------------------
# this function is used to sleep a fraction of a second
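
Note: the sleep selection now prefers a loadable bash sleep builtin, then read -t on a private
FIFO, and only then the external sleep binary. A tiny sketch of the FIFO trick, which gives
sub-second sleeps without forking (the FIFO path is illustrative; bash 4+ accepts fractional
timeouts):

    #!/usr/bin/env bash
    fifo="/tmp/.sleep_timer_fifo.$$"
    [ -p "${fifo}" ] || mkfifo "${fifo}"
    # read with a timeout from a FIFO nobody writes to: it simply blocks for the timeout
    mysleep_read() { read -t "${1}" <>"${fifo}"; }
    mysleep_read 0.25    # sleeps ~250ms without spawning /bin/sleep
    rm -f "${fifo}"
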
diff --git a/plugins.d/Makefile.in b/collectors/fping.plugin/Makefile.in
index 059d68f6a..67b9699b7 100644
--- a/plugins.d/Makefile.in
+++ b/collectors/fping.plugin/Makefile.in
@@ -14,6 +14,8 @@
@SET_MAKE@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
VPATH = @srcdir@
am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
@@ -79,18 +81,21 @@ PRE_UNINSTALL = :
POST_UNINSTALL = :
build_triplet = @build@
host_triplet = @host@
-subdir = plugins.d
-DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \
- $(dist_plugins_SCRIPTS) $(dist_plugins_DATA)
+DIST_COMMON = $(top_srcdir)/build/subst.inc $(srcdir)/Makefile.in \
+ $(srcdir)/Makefile.am $(dist_plugins_SCRIPTS) \
+ $(dist_libconfig_DATA) $(dist_noinst_DATA)
+subdir = collectors/fping.plugin
ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
-am__aclocal_m4_deps = $(top_srcdir)/m4/ax_c___atomic.m4 \
- $(top_srcdir)/m4/ax_c__generic.m4 $(top_srcdir)/m4/ax_c_lto.m4 \
- $(top_srcdir)/m4/ax_c_mallinfo.m4 \
- $(top_srcdir)/m4/ax_c_mallopt.m4 \
- $(top_srcdir)/m4/ax_check_compile_flag.m4 \
- $(top_srcdir)/m4/ax_gcc_func_attribute.m4 \
- $(top_srcdir)/m4/ax_pthread.m4 $(top_srcdir)/m4/jemalloc.m4 \
- $(top_srcdir)/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
$(ACLOCAL_M4)
mkinstalldirs = $(install_sh) -d
@@ -124,7 +129,8 @@ am__uninstall_files_from_dir = { \
|| { echo " ( cd '$$dir' && rm -f" $$files ")"; \
$(am__cd) "$$dir" && rm -f $$files; }; \
}
-am__installdirs = "$(DESTDIR)$(pluginsdir)" "$(DESTDIR)$(pluginsdir)"
+am__installdirs = "$(DESTDIR)$(pluginsdir)" \
+ "$(DESTDIR)$(libconfigdir)"
SCRIPTS = $(dist_plugins_SCRIPTS)
AM_V_P = $(am__v_P_@AM_V@)
am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
@@ -145,7 +151,7 @@ am__can_run_installinfo = \
n|no|NO) false;; \
*) (install-info --version) >/dev/null 2>&1;; \
esac
-DATA = $(dist_plugins_DATA)
+DATA = $(dist_libconfig_DATA) $(dist_noinst_DATA)
am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
@@ -245,6 +251,7 @@ build = @build@
build_alias = @build_alias@
build_cpu = @build_cpu@
build_os = @build_os@
+build_target = @build_target@
build_vendor = @build_vendor@
builddir = @builddir@
cachedir = @cachedir@
@@ -266,6 +273,7 @@ htmldir = @htmldir@
includedir = @includedir@
infodir = @infodir@
install_sh = @install_sh@
+libconfigdir = @libconfigdir@
libdir = @libdir@
libexecdir = @libexecdir@
localedir = @localedir@
@@ -292,34 +300,31 @@ top_builddir = @top_builddir@
top_srcdir = @top_srcdir@
varlibdir = @varlibdir@
webdir = @webdir@
-
-#
-# Copyright (C) 2015 Alon Bar-Lev <alon.barlev@gmail.com>
-#
+AUTOMAKE_OPTIONS = subdir-objects
MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-dist_plugins_DATA = \
- README.md \
+CLEANFILES = \
+ fping.plugin \
$(NULL)
+SUFFIXES = .in
dist_plugins_SCRIPTS = \
- alarm-email.sh \
- alarm-notify.sh \
- alarm-test.sh \
- cgroup-name.sh \
- cgroup-network-helper.sh \
- charts.d.dryrun-helper.sh \
- charts.d.plugin \
fping.plugin \
- node.d.plugin \
- python.d.plugin \
- tc-qos-helper.sh \
- loopsleepms.sh.inc \
$(NULL)
+dist_noinst_DATA = \
+ fping.plugin.in \
+ README.md \
+ $(NULL)
+
+dist_libconfig_DATA = \
+ fping.conf \
+ $(NULL)
+
all: all-am
.SUFFIXES:
-$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+.SUFFIXES: .in
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(top_srcdir)/build/subst.inc $(am__configure_deps)
@for dep in $?; do \
case '$(am__configure_deps)' in \
*$$dep*) \
@@ -328,9 +333,9 @@ $(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__confi
exit 1;; \
esac; \
done; \
- echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu plugins.d/Makefile'; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/fping.plugin/Makefile'; \
$(am__cd) $(top_srcdir) && \
- $(AUTOMAKE) --gnu plugins.d/Makefile
+ $(AUTOMAKE) --gnu collectors/fping.plugin/Makefile
.PRECIOUS: Makefile
Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
@case '$?' in \
@@ -340,6 +345,7 @@ Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
esac;
+$(top_srcdir)/build/subst.inc:
$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
@@ -384,27 +390,27 @@ uninstall-dist_pluginsSCRIPTS:
files=`for p in $$list; do echo "$$p"; done | \
sed -e 's,.*/,,;$(transform)'`; \
dir='$(DESTDIR)$(pluginsdir)'; $(am__uninstall_files_from_dir)
-install-dist_pluginsDATA: $(dist_plugins_DATA)
+install-dist_libconfigDATA: $(dist_libconfig_DATA)
@$(NORMAL_INSTALL)
- @list='$(dist_plugins_DATA)'; test -n "$(pluginsdir)" || list=; \
+ @list='$(dist_libconfig_DATA)'; test -n "$(libconfigdir)" || list=; \
if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(pluginsdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(pluginsdir)" || exit 1; \
+ echo " $(MKDIR_P) '$(DESTDIR)$(libconfigdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(libconfigdir)" || exit 1; \
fi; \
for p in $$list; do \
if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
echo "$$d$$p"; \
done | $(am__base_list) | \
while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pluginsdir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(pluginsdir)" || exit $$?; \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(libconfigdir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(libconfigdir)" || exit $$?; \
done
-uninstall-dist_pluginsDATA:
+uninstall-dist_libconfigDATA:
@$(NORMAL_UNINSTALL)
- @list='$(dist_plugins_DATA)'; test -n "$(pluginsdir)" || list=; \
+ @list='$(dist_libconfig_DATA)'; test -n "$(libconfigdir)" || list=; \
files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(pluginsdir)'; $(am__uninstall_files_from_dir)
+ dir='$(DESTDIR)$(libconfigdir)'; $(am__uninstall_files_from_dir)
tags TAGS:
ctags CTAGS:
@@ -446,7 +452,7 @@ check-am: all-am
check: check-am
all-am: Makefile $(SCRIPTS) $(DATA)
installdirs:
- for dir in "$(DESTDIR)$(pluginsdir)" "$(DESTDIR)$(pluginsdir)"; do \
+ for dir in "$(DESTDIR)$(pluginsdir)" "$(DESTDIR)$(libconfigdir)"; do \
test -z "$$dir" || $(MKDIR_P) "$$dir"; \
done
install: install-am
@@ -471,6 +477,7 @@ install-strip:
mostlyclean-generic:
clean-generic:
+ -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES)
distclean-generic:
-test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
@@ -500,7 +507,8 @@ info: info-am
info-am:
-install-data-am: install-dist_pluginsDATA install-dist_pluginsSCRIPTS
+install-data-am: install-dist_libconfigDATA \
+ install-dist_pluginsSCRIPTS
install-dvi: install-dvi-am
@@ -544,14 +552,15 @@ ps: ps-am
ps-am:
-uninstall-am: uninstall-dist_pluginsDATA uninstall-dist_pluginsSCRIPTS
+uninstall-am: uninstall-dist_libconfigDATA \
+ uninstall-dist_pluginsSCRIPTS
.MAKE: install-am install-strip
.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
ctags-am distclean distclean-generic distdir dvi dvi-am html \
html-am info info-am install install-am install-data \
- install-data-am install-dist_pluginsDATA \
+ install-data-am install-dist_libconfigDATA \
install-dist_pluginsSCRIPTS install-dvi install-dvi-am \
install-exec install-exec-am install-html install-html-am \
install-info install-info-am install-man install-pdf \
@@ -559,8 +568,23 @@ uninstall-am: uninstall-dist_pluginsDATA uninstall-dist_pluginsSCRIPTS
installcheck installcheck-am installdirs maintainer-clean \
maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
pdf-am ps ps-am tags-am uninstall uninstall-am \
- uninstall-dist_pluginsDATA uninstall-dist_pluginsSCRIPTS
-
+ uninstall-dist_libconfigDATA uninstall-dist_pluginsSCRIPTS
+
+.in:
+ if sed \
+ -e 's#[@]localstatedir_POST@#$(localstatedir)#g' \
+ -e 's#[@]sbindir_POST@#$(sbindir)#g' \
+ -e 's#[@]sysconfdir_POST@#$(sysconfdir)#g' \
+ -e 's#[@]pythondir_POST@#$(pythondir)#g' \
+ -e 's#[@]configdir_POST@#$(configdir)#g' \
+ -e 's#[@]libconfigdir_POST@#$(libconfigdir)#g' \
+ -e 's#[@]cachedir_POST@#$(cachedir)#g' \
+ $< > $@.tmp; then \
+ mv "$@.tmp" "$@"; \
+ else \
+ rm -f "$@.tmp"; \
+ false; \
+ fi
# Tell versions [3.59,3.63) of GNU make to not export all variables.
# Otherwise a system limit (for SysV at least) may be exceeded.
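
Note: fping.plugin is now generated from fping.plugin.in by the new .in suffix rule, which
rewrites @..._POST@ placeholders with the configured install paths. Roughly, the rule boils down
to a sed pass like the following (the placeholder line and the substituted path are illustrative):

    # a template line in fping.plugin.in might read:
    #   NETDATA_USER_CONFIG_DIR="@configdir_POST@"
    # and the suffix rule turns it into the installed path:
    sed -e 's#[@]configdir_POST@#/usr/local/etc/netdata#g' fping.plugin.in > fping.plugin
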
diff --git a/plugins.d/fping.plugin b/collectors/fping.plugin/fping.plugin
index f38a8dde0..cf8f17e9a 100755..100644
--- a/plugins.d/fping.plugin
+++ b/collectors/fping.plugin/fping.plugin
@@ -1,4 +1,5 @@
#!/usr/bin/env bash
+# SPDX-License-Identifier: GPL-3.0-or-later
# netdata
# real-time performance and health monitoring, done right!
@@ -129,7 +130,8 @@ update_every="${1-1}"
# the netdata configuration directory
# passed by netdata as an environment variable
-[ -z "${NETDATA_CONFIG_DIR}" ] && NETDATA_CONFIG_DIR="$(dirname "${0}")/../../../../etc/netdata"
+[ -z "${NETDATA_USER_CONFIG_DIR}" ] && NETDATA_USER_CONFIG_DIR="/usr/local/etc/netdata"
+[ -z "${NETDATA_STOCK_CONFIG_DIR}" ] && NETDATA_STOCK_CONFIG_DIR="/usr/local/lib/netdata/conf.d"
# -----------------------------------------------------------------------------
# configuration options
@@ -152,23 +154,33 @@ ping_every="$((update_every * 1000 / 5))"
fping_opts="-R -b 56 -i 1 -r 0 -t 5000"
# -----------------------------------------------------------------------------
-# load the configuration file
+# load the configuration files
-if [ ! -f "${NETDATA_CONFIG_DIR}/${plugin}.conf" ]
+for CONFIG in "${NETDATA_STOCK_CONFIG_DIR}/${plugin}.conf" "${NETDATA_USER_CONFIG_DIR}/${plugin}.conf"
+do
+ if [ -f "${CONFIG}" ]
+ then
+ info "Loading config file '${CONFIG}'..."
+ source "${CONFIG}"
+ [ $? -ne 0 ] && error "Failed to load config file '${CONFIG}'."
+ else
+ warning "Cannot find file '${CONFIG}'."
+ fi
+done
+
+if [ -z "${hosts}" ]
then
- fatal "configuration file '${NETDATA_CONFIG_DIR}/${plugin}.conf' not found - nothing to do."
+ fatal "no hosts configured - nothing to do."
fi
-source "${NETDATA_CONFIG_DIR}/${plugin}.conf"
-
-if [ -z "${hosts}" ]
+if [ -z "${fping}" ]
then
- fatal "no hosts configured in '${NETDATA_CONFIG_DIR}/${plugin}.conf' - nothing to do."
+ fatal "fping command is not found. Please set its full path in '${NETDATA_USER_CONFIG_DIR}/${plugin}.conf'"
fi
-if [ -z "${fping}" -o ! -x "${fping}" ]
+if [ ! -x "${fping}" ]
then
- fatal "command '${fping}' is not found or is not executable - cannot proceed."
+ fatal "fping command '${fping}' is not executable - cannot proceed."
fi
if [ ${ping_every} -lt 20 ]
@@ -185,4 +197,4 @@ info "starting fping: ${fping} ${options[*]}"
exec "${fping}" "${options[@]}"
# if we cannot execute fping, stop
-fatal "command '${fping} ${options[@]}' failed to be executed."
+fatal "command '${fping} ${options[*]}' failed to be executed (returned code $?)."
diff --git a/plugins.d/node.d.plugin b/collectors/node.d.plugin/node.d.plugin
index b16203912..2570220c2 100755..100644
--- a/plugins.d/node.d.plugin
+++ b/collectors/node.d.plugin/node.d.plugin
@@ -1,5 +1,5 @@
#!/usr/bin/env bash
-':' //; exec "$(command -v nodejs || command -v node || command -v js || echo "ERROR node.js IS NOT AVAILABLE IN THIS SYSTEM")" "$0" "$@"
+':' //; exec "$(command -v nodejs || command -v node || echo "ERROR node IS NOT AVAILABLE IN THIS SYSTEM")" "$0" "$@"
// shebang hack from:
// http://unix.stackexchange.com/questions/65235/universal-node-js-shebang
@@ -11,7 +11,7 @@
// netdata
// real-time performance and health monitoring, done right!
// (C) 2017 Costa Tsaousis <costa@tsaousis.gr>
-// GPL v3+
+// SPDX-License-Identifier: GPL-3.0-or-later
// --------------------------------------------------------------------------------------------------------------------
@@ -21,7 +21,8 @@
// get NETDATA environment variables
var NETDATA_PLUGINS_DIR = process.env.NETDATA_PLUGINS_DIR || __dirname;
-var NETDATA_CONFIG_DIR = process.env.NETDATA_CONFIG_DIR || __dirname + '/../../../../etc/netdata';
+var NETDATA_USER_CONFIG_DIR = process.env.NETDATA_USER_CONFIG_DIR || '/usr/local/etc/netdata';
+var NETDATA_STOCK_CONFIG_DIR = process.env.NETDATA_STOCK_CONFIG_DIR || '/usr/local/lib/netdata/conf.d';
var NETDATA_UPDATE_EVERY = process.env.NETDATA_UPDATE_EVERY || 1;
var NODE_D_DIR = NETDATA_PLUGINS_DIR + '/../node.d';
@@ -45,20 +46,42 @@ var netdata = require('netdata');
// --------------------------------------------------------------------------------------------------------------------
// configuration
-function pluginConfig(filename) {
- var f = path.basename(filename);
+function netdata_read_json_config_file(module_filename) {
+ var f = path.basename(module_filename);
+
+ var ufilename, sfilename;
- // node.d.plugin configuration
var m = f.match('.plugin' + '$');
- if(m !== null)
- return netdata.options.paths.config + '/' + f.substring(0, m.index) + '.conf';
+ if(m !== null) {
+ ufilename = netdata.options.paths.config + '/' + f.substring(0, m.index) + '.conf';
+ sfilename = netdata.options.paths.stock_config + '/' + f.substring(0, m.index) + '.conf';
+ }
- // node.d modules configuration
m = f.match('.node.js' + '$');
- if(m !== null)
- return netdata.options.paths.config + '/node.d/' + f.substring(0, m.index) + '.conf';
+ if(m !== null) {
+ ufilename = netdata.options.paths.config + '/node.d/' + f.substring(0, m.index) + '.conf';
+ sfilename = netdata.options.paths.stock_config + '/node.d/' + f.substring(0, m.index) + '.conf';
+ }
+
+ try {
+ netdata.debug('loading module\'s ' + module_filename + ' user-config ' + ufilename);
+ return JSON.parse(fs.readFileSync(ufilename, 'utf8'));
+ }
+ catch(e) {
+ netdata.error('Cannot read user-configuration file ' + ufilename + ': ' + e.message + '.');
+ dumpError(e);
+ }
- return netdata.options.paths.config + '/node.d/' + f + '.conf';
+ try {
+ netdata.debug('loading module\'s ' + module_filename + ' stock-config ' + sfilename);
+ return JSON.parse(fs.readFileSync(sfilename, 'utf8'));
+ }
+ catch(e) {
+ netdata.error('Cannot read stock-configuration file ' + sfilename + ': ' + e.message + ', using internal defaults.');
+ dumpError(e);
+ }
+
+ return {};
}
// internal defaults
@@ -69,35 +92,31 @@ extend(true, netdata.options, {
paths: {
plugins: NETDATA_PLUGINS_DIR,
- config: NETDATA_CONFIG_DIR,
- modules: [],
+ config: NETDATA_USER_CONFIG_DIR,
+ stock_config: NETDATA_STOCK_CONFIG_DIR,
+ modules: []
},
modules_enable_autodetect: true,
modules_enable_all: true,
- modules: {},
+ modules: {}
});
-netdata.options.config_filename = pluginConfig(__filename);
// load configuration file
-try {
- netdata.options_loaded = JSON.parse(fs.readFileSync(netdata.options.config_filename, 'utf8'));
- extend(true, netdata.options, netdata.options_loaded);
+netdata.options_loaded = netdata_read_json_config_file(__filename);
+extend(true, netdata.options, netdata.options_loaded);
- if(!netdata.options.paths.plugins)
- netdata.options.paths.plugins = NETDATA_PLUGINS_DIR;
+if(!netdata.options.paths.plugins)
+ netdata.options.paths.plugins = NETDATA_PLUGINS_DIR;
- if(!netdata.options.paths.config)
- netdata.options.paths.config = NETDATA_CONFIG_DIR;
+if(!netdata.options.paths.config)
+ netdata.options.paths.config = NETDATA_USER_CONFIG_DIR;
- // console.error('merged netdata object:');
- // console.error(util.inspect(netdata, {depth: 10}));
-}
-catch(e) {
- netdata.error('Cannot read configuration file ' + netdata.options.config_filename + ': ' + e.message + ', using internal defaults.');
- netdata.options_loaded = undefined;
- dumpError(e);
-}
+if(!netdata.options.paths.stock_config)
+ netdata.options.paths.stock_config = NETDATA_STOCK_CONFIG_DIR;
+
+// console.error('merged netdata object:');
+// console.error(util.inspect(netdata, {depth: 10}));
// apply module paths to node.js process
@@ -206,9 +225,6 @@ function findModules() {
netdata.options.modules[n].filename = NODE_D_DIR + '/' + files[len];
netdata.options.modules[n].loaded = false;
- if(typeof(netdata.options.modules[n].config_filename) !== 'string')
- netdata.options.modules[n].config_filename = pluginConfig(files[len]);
-
// load the module
try {
netdata.debug('loading module ' + netdata.options.modules[n].filename);
@@ -228,16 +244,9 @@ function findModules() {
enable_autodetect: netdata.options.modules_enable_autodetect,
update_every: netdata.options.update_every
};
- try {
- netdata.debug('loading module\'s ' + netdata.options.modules[n].name + ' config ' + netdata.options.modules[n].config_filename);
- var c2 = JSON.parse(fs.readFileSync(netdata.options.modules[n].config_filename, 'utf8'));
- extend(true, c, c2);
- netdata.debug('loaded module\'s ' + netdata.options.modules[n].name + ' config ' + netdata.options.modules[n].config_filename);
- }
- catch(e) {
- netdata.error('Cannot load module\'s ' + netdata.options.modules[n].name + ' config from ' + netdata.options.modules[n].config_filename + ' exception: ' + e + ', using internal defaults.');
- dumpError(e);
- }
+
+ var c2 = netdata_read_json_config_file(files[len]);
+ extend(true, c, c2);
// call module auto-detection / configuration
try {
diff --git a/plugins.d/python.d.plugin b/collectors/python.d.plugin/python.d.plugin
index c9b260164..264c3383d 100755..100644
--- a/plugins.d/python.d.plugin
+++ b/collectors/python.d.plugin/python.d.plugin
@@ -6,7 +6,9 @@ echo "ERROR python IS NOT AVAILABLE IN THIS SYSTEM")" "$0" "$@" # '''
# Description:
# Author: Pawel Krupa (paulfantom)
# Author: Ilya Mashchenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
+import gc
import os
import sys
import threading
@@ -15,22 +17,30 @@ from re import sub
from sys import version_info, argv
from time import sleep
-try:
- from time import monotonic as time
-except ImportError:
- from time import time
+GC_RUN = True
+GC_COLLECT_EVERY = 300
PY_VERSION = version_info[:2]
-PLUGIN_CONFIG_DIR = os.getenv('NETDATA_CONFIG_DIR', os.path.dirname(__file__) + '/../../../../etc/netdata') + '/'
-CHARTS_PY_DIR = os.path.abspath(os.getenv('NETDATA_PLUGINS_DIR', os.path.dirname(__file__)) + '/../python.d') + '/'
-CHARTS_PY_CONFIG_DIR = PLUGIN_CONFIG_DIR + 'python.d/'
-PYTHON_MODULES_DIR = CHARTS_PY_DIR + 'python_modules'
+
+USER_CONFIG_DIR = os.getenv('NETDATA_USER_CONFIG_DIR', '/usr/local/etc/netdata')
+STOCK_CONFIG_DIR = os.getenv('NETDATA_STOCK_CONFIG_DIR', '/usr/local/lib/netdata/conf.d')
+
+PLUGINS_USER_CONFIG_DIR = os.path.join(USER_CONFIG_DIR, 'python.d')
+PLUGINS_STOCK_CONFIG_DIR = os.path.join(STOCK_CONFIG_DIR, 'python.d')
+
+
+PLUGINS_DIR = os.path.abspath(os.getenv(
+ 'NETDATA_PLUGINS_DIR',
+ os.path.dirname(__file__)) + '/../python.d')
+
+
+PYTHON_MODULES_DIR = os.path.join(PLUGINS_DIR, 'python_modules')
sys.path.append(PYTHON_MODULES_DIR)
-from bases.loaders import ModuleAndConfigLoader
-from bases.loggers import PythonDLogger
-from bases.collection import setdefault_values, run_and_exit
+from bases.loaders import ModuleAndConfigLoader # noqa: E402
+from bases.loggers import PythonDLogger # noqa: E402
+from bases.collection import setdefault_values, run_and_exit # noqa: E402
try:
from collections import OrderedDict
@@ -53,7 +63,7 @@ def module_ok(m):
return m.endswith(MODULE_EXTENSION) and m[:-len(MODULE_EXTENSION)] not in OBSOLETE_MODULES
-ALL_MODULES = [m for m in sorted(os.listdir(CHARTS_PY_DIR)) if module_ok(m)]
+ALL_MODULES = [m for m in sorted(os.listdir(PLUGINS_DIR)) if module_ok(m)]
def parse_cmd():
@@ -68,6 +78,13 @@ def multi_job_check(config):
return next((True for key in config if isinstance(config[key], dict)), False)
+class RawModule:
+ def __init__(self, name, path, explicitly_enabled=True):
+ self.name = name
+ self.path = path
+ self.explicitly_enabled = explicitly_enabled
+
+
class Job(object):
def __init__(self, initialized_job, job_id):
"""
@@ -80,7 +97,7 @@ class Job(object):
self.recheck_every = self.job.configuration.pop('autodetection_retry')
self.checked = False # used in Plugin.check_job()
self.created = False # used in Plugin.create_job_charts()
- if OVERRIDE_UPDATE_EVERY:
+ if self.job.update_every < int(OVERRIDE_UPDATE_EVERY):
self.job.update_every = int(OVERRIDE_UPDATE_EVERY)
def __getattr__(self, item):
@@ -194,9 +211,22 @@ class Plugin(object):
self.modules = OrderedDict()
self.sleep_time = 1
self.runs_counter = 0
- self.config, error = self.loader.load_config_from_file(PLUGIN_CONFIG_DIR + 'python.d.conf')
+
+ user_config = os.path.join(USER_CONFIG_DIR, 'python.d.conf')
+ stock_config = os.path.join(STOCK_CONFIG_DIR, 'python.d.conf')
+
+ Logger.debug("loading '{0}'".format(user_config))
+ self.config, error = self.loader.load_config_from_file(user_config)
+
+ if error:
+ Logger.error("cannot load '{0}': {1}. Will try stock version.".format(user_config, error))
+ Logger.debug("loading '{0}'".format(stock_config))
+ self.config, error = self.loader.load_config_from_file(stock_config)
if error:
- Logger.error('"python.d.conf" configuration file not found. Using defaults.')
+ Logger.error("cannot load '{0}': {1}".format(stock_config, error))
+
+ self.do_gc = self.config.get("gc_run", GC_RUN)
+ self.gc_interval = self.config.get("gc_interval", GC_COLLECT_EVERY)
if not self.config.get('enabled', True):
run_and_exit(Logger.info)('DISABLED in configuration file.')
@@ -223,47 +253,57 @@ class Plugin(object):
def enabled_modules(self):
for mod in MODULES_TO_RUN:
mod_name = mod[:-len(MODULE_EXTENSION)]
- mod_path = CHARTS_PY_DIR + mod
- conf_path = ''.join([CHARTS_PY_CONFIG_DIR, mod_name, '.conf'])
-
- if DEBUG:
- yield mod, mod_name, mod_path, conf_path
- else:
- if all([self.config.get('default_run', True),
- self.config.get(mod_name, True)]):
- yield mod, mod_name, mod_path, conf_path
-
- elif all([not self.config.get('default_run'),
- self.config.get(mod_name)]):
- yield mod, mod_name, mod_path, conf_path
+ mod_path = os.path.join(PLUGINS_DIR, mod)
+ if any(
+ [
+ self.config.get('default_run', True) and self.config.get(mod_name, True),
+ (not self.config.get('default_run')) and self.config.get(mod_name),
+ ]
+ ):
+ yield RawModule(
+ name=mod_name,
+ path=mod_path,
+ explicitly_enabled=self.config.get(mod_name),
+ )
def load_and_initialize_modules(self):
- for mod, mod_name, mod_path, conf_path in self.enabled_modules():
+ for mod in self.enabled_modules():
# Load module from file ------------------------------------------------------------
- loaded_module, error = self.loader.load_module_from_file(mod_name, mod_path)
+ loaded_module, error = self.loader.load_module_from_file(mod.name, mod.path)
log = Logger.error if error else Logger.debug
log("module load source: '{module_name}' => [{status}]".format(status='FAILED' if error else 'OK',
- module_name=mod_name))
+ module_name=mod.name))
if error:
Logger.error("load source error : {0}".format(error))
continue
# Load module config from file ------------------------------------------------------
- loaded_config, error = self.loader.load_config_from_file(conf_path)
- log = Logger.error if error else Logger.debug
- log("module load config: '{module_name}' => [{status}]".format(status='FAILED' if error else 'OK',
- module_name=mod_name))
+ user_config = os.path.join(PLUGINS_USER_CONFIG_DIR, mod.name + '.conf')
+ stock_config = os.path.join(PLUGINS_STOCK_CONFIG_DIR, mod.name + '.conf')
+
+ Logger.debug("loading '{0}'".format(user_config))
+ loaded_config, error = self.loader.load_config_from_file(user_config)
if error:
- Logger.error('load config error : {0}'.format(error))
+ Logger.error("cannot load '{0}' : {1}. Will try stock version.".format(user_config, error))
+ Logger.debug("loading '{0}'".format(stock_config))
+ loaded_config, error = self.loader.load_config_from_file(stock_config)
+
+ if error:
+ Logger.error("cannot load '{0}': {1}".format(stock_config, error))
+
+ # Skip disabled modules
+ if getattr(loaded_module, 'disabled_by_default', False) and not mod.explicitly_enabled:
+ Logger.info("module '{0}' disabled by default".format(loaded_module.__name__))
+ continue
+
+ # Module initialization ---------------------------------------------------
- # Service instance initialization ---------------------------------------------------
initialized_module = Module(service=loaded_module, config=loaded_config)
Logger.debug("module status: '{module_name}' => [{status}] "
"(jobs: {jobs_number})".format(status='OK' if initialized_module else 'FAILED',
module_name=initialized_module.name,
jobs_number=len(initialized_module)))
-
if initialized_module:
self.modules[initialized_module.name] = initialized_module
@@ -349,6 +389,11 @@ class Plugin(object):
self.cleanup()
self.autodetect_retry()
+ # FIXME: https://github.com/netdata/netdata/issues/3817
+ if self.do_gc and self.runs_counter % self.gc_interval == 0:
+ v = gc.collect()
+ Logger.debug("GC full collection run result: {0}".format(v))
+
def cleanup(self):
for job in self.dead_jobs:
self.delete_job(job)
diff --git a/plugins.d/tc-qos-helper.sh b/collectors/tc.plugin/tc-qos-helper.sh
index 9153f22e2..b49d1f509 100755..100644
--- a/plugins.d/tc-qos-helper.sh
+++ b/collectors/tc.plugin/tc-qos-helper.sh
@@ -3,7 +3,7 @@
# netdata
# real-time performance and health monitoring, done right!
# (C) 2017 Costa Tsaousis <costa@tsaousis.gr>
-# GPL v3+
+# SPDX-License-Identifier: GPL-3.0-or-later
#
# This script is a helper to allow netdata collect tc data.
# tc output parsing has been implemented in C, inside netdata
@@ -14,6 +14,47 @@ export LC_ALL=C
# -----------------------------------------------------------------------------
+# logging functions
+
+PROGRAM_FILE="$0"
+PROGRAM_NAME="$(basename $0)"
+PROGRAM_NAME="${PROGRAM_NAME/.plugin}"
+
+logdate() {
+ date "+%Y-%m-%d %H:%M:%S"
+}
+
+log() {
+ local status="${1}"
+ shift
+
+ echo >&2 "$(logdate): ${PROGRAM_NAME}: ${status}: ${*}"
+
+}
+
+warning() {
+ log WARNING "${@}"
+}
+
+error() {
+ log ERROR "${@}"
+}
+
+info() {
+ log INFO "${@}"
+}
+
+fatal() {
+ log FATAL "${@}"
+ exit 1
+}
+
+debug=0
+debug() {
+ [ $debug -eq 1 ] && log DEBUG "${@}"
+}
+
+# -----------------------------------------------------------------------------
# find /var/run/fireqos
# the default
@@ -52,60 +93,24 @@ if [ ! -d "${fireqos_run_dir}" ]
if [ -d "${LOCALSTATEDIR}/run/fireqos" ]
then
fireqos_run_dir="${LOCALSTATEDIR}/run/fireqos"
+ else
+ warning "FireQoS is installed as '${fireqos}', its installation config at '${fireqos_exec_dir}/install.config' specifies local state data at '${LOCALSTATEDIR}/run/fireqos', but this directory is not found or is not readable (check the permissions of its parents)."
fi
+ else
+ warning "Although FireQoS is installed on this system as '${fireqos}', I cannot find/read its installation configuration at '${fireqos_exec_dir}/install.config'."
fi
+ else
+ warning "FireQoS is not installed on this system. Use FireQoS to apply traffic QoS and expose the class names to netdata. Check https://github.com/netdata/netdata/wiki/You-should-install-QoS-on-all-your-servers"
fi
fi
# -----------------------------------------------------------------------------
-# logging functions
-
-PROGRAM_FILE="$0"
-PROGRAM_NAME="$(basename $0)"
-PROGRAM_NAME="${PROGRAM_NAME/.plugin}"
-
-logdate() {
- date "+%Y-%m-%d %H:%M:%S"
-}
-
-log() {
- local status="${1}"
- shift
-
- echo >&2 "$(logdate): ${PROGRAM_NAME}: ${status}: ${*}"
-
-}
-
-warning() {
- log WARNING "${@}"
-}
-
-error() {
- log ERROR "${@}"
-}
-
-info() {
- log INFO "${@}"
-}
-
-fatal() {
- log FATAL "${@}"
- exit 1
-}
-
-debug=0
-debug() {
- [ $debug -eq 1 ] && log DEBUG "${@}"
-}
-
-
-# -----------------------------------------------------------------------------
[ -z "${NETDATA_PLUGINS_DIR}" ] && NETDATA_PLUGINS_DIR="$(dirname "${0}")"
-[ -z "${NETDATA_CONFIG_DIR}" ] && NETDATA_CONFIG_DIR="$(dirname "${0}")/../../../../etc/netdata"
+[ -z "${NETDATA_USER_CONFIG_DIR}" ] && NETDATA_USER_CONFIG_DIR="/usr/local/etc/netdata"
+[ -z "${NETDATA_STOCK_CONFIG_DIR}" ] && NETDATA_STOCK_CONFIG_DIR="/usr/local/lib/netdata/conf.d"
plugins_dir="${NETDATA_PLUGINS_DIR}"
-config_dir="${NETDATA_CONFIG_DIR}"
tc="$(which tc 2>/dev/null || command -v tc 2>/dev/null)"
@@ -134,10 +139,17 @@ update_every=$((t))
# -----------------------------------------------------------------------------
# allow the user to override our defaults
-if [ -f "${config_dir}/tc-qos-helper.conf" ]
- then
- source "${config_dir}/tc-qos-helper.conf"
-fi
+for CONFIG in "${NETDATA_STOCK_CONFIG_DIR}/tc-qos-helper.conf" "${NETDATA_USER_CONFIG_DIR}/tc-qos-helper.conf"
+do
+ if [ -f "${CONFIG}" ]
+ then
+ info "Loading config file '${CONFIG}'..."
+ source "${CONFIG}"
+ [ $? -ne 0 ] && error "Failed to load config file '${CONFIG}'."
+ else
+ warning "Cannot find file '${CONFIG}'."
+ fi
+done
case "${tc_show}" in
qdisc|class)
diff --git a/plugins.d/alarm-email.sh b/health/notifications/alarm-email.sh
index df083c655..69c4c3f8d 100755
--- a/plugins.d/alarm-email.sh
+++ b/health/notifications/alarm-email.sh
@@ -1,4 +1,5 @@
#!/usr/bin/env bash
+# SPDX-License-Identifier: GPL-3.0-or-later
# OBSOLETE - REPLACED WITH
# alarm-notify.sh
diff --git a/plugins.d/alarm-notify.sh b/health/notifications/alarm-notify.sh
index 3e23a164f..33a59590e 100755..100644
--- a/plugins.d/alarm-notify.sh
+++ b/health/notifications/alarm-notify.sh
@@ -3,7 +3,7 @@
# netdata
# real-time performance and health monitoring, done right!
# (C) 2017 Costa Tsaousis <costa@tsaousis.gr>
-# GPL v3+
+# SPDX-License-Identifier: GPL-3.0-or-later
#
# Script to send alarm notifications for netdata
#
@@ -26,11 +26,15 @@
# - pagerduty.com notifications by Jim Cooley @jimcooley #1373
# - messagebird.com notifications by @tech_no_logical #1453
# - hipchat notifications by @ktsaou #1561
+# - fleep notifications by @Ferroin
# - custom notifications by @ktsaou
+# - syslog messages by @Ferroin
+# - Microsoft Team notification by @tioumen
# -----------------------------------------------------------------------------
# testing notifications
+
if [ \( "${1}" = "test" -o "${2}" = "test" \) -a "${#}" -le 2 ]
then
if [ "${2}" = "test" ]
@@ -44,6 +48,7 @@ then
id=1
last="CLEAR"
+ test_res=0
for x in "WARNING" "CRITICAL" "CLEAR"
do
echo >&2
@@ -53,6 +58,7 @@ then
if [ $? -ne 0 ]
then
echo >&2 "# FAILED"
+ test_res=1
else
echo >&2 "# OK"
fi
@@ -61,7 +67,7 @@ then
id=$((id + 1))
done
- exit 1
+ exit $test_res
fi
export PATH="${PATH}:/sbin:/usr/sbin:/usr/local/sbin"
@@ -153,9 +159,10 @@ custom_sender() {
# -----------------------------------------------------------------------------
# defaults to allow running this script by hand
-[ -z "${NETDATA_CONFIG_DIR}" ] && NETDATA_CONFIG_DIR="$(dirname "${0}")/../../../../etc/netdata"
-[ -z "${NETDATA_CACHE_DIR}" ] && NETDATA_CACHE_DIR="$(dirname "${0}")/../../../../var/cache/netdata"
-[ -z "${NETDATA_REGISTRY_URL}" ] && NETDATA_REGISTRY_URL="https://registry.my-netdata.io"
+[ -z "${NETDATA_USER_CONFIG_DIR}" ] && NETDATA_USER_CONFIG_DIR="/usr/local/etc/netdata"
+[ -z "${NETDATA_STOCK_CONFIG_DIR}" ] && NETDATA_STOCK_CONFIG_DIR="/usr/local/lib/netdata/conf.d"
+[ -z "${NETDATA_CACHE_DIR}" ] && NETDATA_CACHE_DIR="/usr/local/var/cache/netdata"
+[ -z "${NETDATA_REGISTRY_URL}" ] && NETDATA_REGISTRY_URL="https://registry.my-netdata.io"
# -----------------------------------------------------------------------------
# parse command line parameters
@@ -214,7 +221,7 @@ fi
images_base_url="https://registry.my-netdata.io"
# curl options to use
-curl_options=
+curl_options=""
# needed commands
# if empty they will be searched in the system path
@@ -223,6 +230,7 @@ sendmail=
# enable / disable features
SEND_SLACK="YES"
+SEND_MSTEAM="YES"
SEND_ALERTA="YES"
SEND_FLOCK="YES"
SEND_DISCORD="YES"
@@ -236,7 +244,10 @@ SEND_EMAIL="YES"
SEND_PUSHBULLET="YES"
SEND_KAFKA="YES"
SEND_PD="YES"
+SEND_FLEEP="YES"
SEND_IRC="YES"
+SEND_AWSSNS="YES"
+SEND_SYSLOG="NO"
SEND_CUSTOM="YES"
# slack configs
@@ -244,6 +255,16 @@ SLACK_WEBHOOK_URL=
DEFAULT_RECIPIENT_SLACK=
declare -A role_recipients_slack=()
+# Microsoft Team configs
+MSTEAM_WEBHOOK_URL=
+DEFAULT_RECIPIENT_MSTEAM=
+declare -A role_recipients_msteam=()
+
+# rocketchat configs
+ROCKETCHAT_WEBHOOK_URL=
+DEFAULT_RECIPIENT_ROCKETCHAT=
+declare -A role_recipients_rocketchat=()
+
# alerta configs
ALERTA_WEBHOOK_URL=
ALERTA_API_KEY=
@@ -310,6 +331,20 @@ PD_SERVICE_KEY=
DEFAULT_RECIPIENT_PD=
declare -A role_recipients_pd=()
+# fleep.io configs
+FLEEP_SENDER="${host}"
+DEFAULT_RECIPIENT_FLEEP=
+declare -A role_recipients_fleep=()
+
+# Amazon SNS configs
+DEFAULT_RECIPIENT_AWSSNS=
+AWSSNS_MESSAGE_FORMAT=
+declare -A role_recipients_awssns=()
+
+# syslog configs
+SYSLOG_FACILITY=
+declare -A role_recipients_syslog=()
+
# custom configs
DEFAULT_RECIPIENT_CUSTOM=
declare -A role_recipients_custom=()
@@ -318,6 +353,7 @@ declare -A role_recipients_custom=()
EMAIL_SENDER=
DEFAULT_RECIPIENT_EMAIL="root"
EMAIL_CHARSET=$(locale charmap 2>/dev/null)
+EMAIL_THREADING=
declare -A role_recipients_email=()
# irc configs
@@ -327,14 +363,20 @@ DEFAULT_RECIPIENT_IRC=
IRC_NETWORK=
declare -A role_recipients_irc=()
-# load the user configuration
-# this will overwrite the variables above
-if [ -f "${NETDATA_CONFIG_DIR}/health_alarm_notify.conf" ]
- then
- source "${NETDATA_CONFIG_DIR}/health_alarm_notify.conf"
-else
- error "Cannot find file ${NETDATA_CONFIG_DIR}/health_alarm_notify.conf. Using internal defaults."
-fi
+# load the stock and user configuration files
+# these will overwrite the variables above
+
+for CONFIG in "${NETDATA_STOCK_CONFIG_DIR}/health_alarm_notify.conf" "${NETDATA_USER_CONFIG_DIR}/health_alarm_notify.conf"
+do
+ if [ -f "${CONFIG}" ]
+ then
+ debug "Loading config file '${CONFIG}'..."
+ source "${CONFIG}"
+ [ $? -ne 0 ] && error "Failed to load config file '${CONFIG}'."
+ else
+ warning "Cannot find file '${CONFIG}'."
+ fi
+done
# If we didn't autodetect the character set for e-mail and it wasn't
# set by the user, we need to set it to a reasonable default. UTF-8
@@ -405,6 +447,8 @@ filter_recipient_by_criticality() {
# find the recipients' addresses per method
declare -A arr_slack=()
+declare -A arr_msteam=()
+declare -A arr_rocketchat=()
declare -A arr_alerta=()
declare -A arr_flock=()
declare -A arr_discord=()
@@ -418,7 +462,10 @@ declare -A arr_email=()
declare -A arr_custom=()
declare -A arr_messagebird=()
declare -A arr_kavenegar=()
+declare -A arr_fleep=()
declare -A arr_irc=()
+declare -A arr_syslog=()
+declare -A arr_awssns=()
# netdata may call us with multiple roles, and roles may have multiple but
# overlapping recipients - so, here we find the unique recipients.
@@ -500,6 +547,22 @@ do
[ "${r}" != "disabled" ] && filter_recipient_by_criticality slack "${r}" && arr_slack[${r/|*/}]="1"
done
+ # Microsoft Team
+ a="${role_recipients_msteam[${x}]}"
+ [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_MSTEAM}"
+ for r in ${a//,/ }
+ do
+ [ "${r}" != "disabled" ] && filter_recipient_by_criticality msteam "${r}" && arr_msteam[${r/|*/}]="1"
+ done
+
+ # rocketchat
+ a="${role_recipients_rocketchat[${x}]}"
+ [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_ROCKETCHAT}"
+ for r in ${a//,/ }
+ do
+ [ "${r}" != "disabled" ] && filter_recipient_by_criticality rocketchat "${r}" && arr_rocketchat[${r/|*/}]="1"
+ done
+
# alerta
a="${role_recipients_alerta[${x}]}"
[ -z "${a}" ] && a="${DEFAULT_RECIPIENT_ALERTA}"
@@ -531,7 +594,15 @@ do
do
[ "${r}" != "disabled" ] && filter_recipient_by_criticality pd "${r}" && arr_pd[${r/|*/}]="1"
done
-
+
+ # fleep.io
+ a="${role_recipients_fleep[${x}]}"
+ [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_FLEEP}"
+ for r in ${a//,/ }
+ do
+ [ "${r}" != "disabled" ] && filter_recipient_by_criticality fleep "${r}" && arr_fleep[${r/|*/}]="1"
+ done
+
# irc
a="${role_recipients_irc[${x}]}"
[ -z "${a}" ] && a="${DEFAULT_RECIPIENT_IRC}"
@@ -540,6 +611,22 @@ do
[ "${r}" != "disabled" ] && filter_recipient_by_criticality irc "${r}" && arr_irc[${r/|*/}]="1"
done
+ # amazon sns
+ a="${role_recipients_awssns[${x}]}"
+ [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_AWSSNS}"
+ for r in ${a//,/ }
+ do
+ [ "${r}" != "disabled" ] && filter_recipient_by_criticality awssns "${r}" && arr_awssns[${r/|*/}]="1"
+ done
+
+ # syslog
+ a="${role_recipients_syslog[${x}]}"
+ [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_SYSLOG}"
+ for r in ${a//,/ }
+ do
+ [ "${r}" != "disabled" ] && filter_recipient_by_criticality syslog "${r}" && arr_syslog[${r/|*/}]="1"
+ done
+
# custom
a="${role_recipients_custom[${x}]}"
[ -z "${a}" ] && a="${DEFAULT_RECIPIENT_CUSTOM}"
@@ -554,6 +641,14 @@ done
to_slack="${!arr_slack[*]}"
[ -z "${to_slack}" ] && SEND_SLACK="NO"
+# build the list of Microsoft team recipients (channels)
+to_msteam="${!arr_msteam[*]}"
+[ -z "${to_msteam}" ] && SEND_MSTEAM="NO"
+
+# build the list of rocketchat recipients (channels)
+to_rocketchat="${!arr_rocketchat[*]}"
+[ -z "${to_rocketchat}" ] && SEND_ROCKETCHAT="NO"
+
# build the list of alerta recipients (channels)
to_alerta="${!arr_alerta[*]}"
[ -z "${to_alerta}" ] && SEND_ALERTA="NO"
@@ -598,6 +693,10 @@ to_telegram="${!arr_telegram[*]}"
to_pd="${!arr_pd[*]}"
[ -z "${to_pd}" ] && SEND_PD="NO"
+# build the list of fleep recipients (conversation webhooks)
+to_fleep="${!arr_fleep[*]}"
+[ -z "${to_fleep}" ] && SEND_FLEEP="NO"
+
# build the list of custom recipients
to_custom="${!arr_custom[*]}"
[ -z "${to_custom}" ] && SEND_CUSTOM="NO"
@@ -615,12 +714,23 @@ done
to_irc="${!arr_irc[*]}"
[ -z "${to_irc}" ] && SEND_IRC="NO"
+# build the list of awssns recipients (facilities, servers, and prefixes)
+to_awssns="${!arr_awssns[*]}"
+[ -z "${to_awssns}" ] && SEND_AWSSNS="NO"
+
+# build the list of syslog recipients (facilities, servers, and prefixes)
+to_syslog="${!arr_syslog[*]}"
+[ -z "${to_syslog}" ] && SEND_SYSLOG="NO"
+
# -----------------------------------------------------------------------------
# verify the delivery methods supported
# check slack
[ -z "${SLACK_WEBHOOK_URL}" ] && SEND_SLACK="NO"
+# check rocketchat
+[ -z "${ROCKETCHAT_WEBHOOK_URL}" ] && SEND_ROCKETCHAT="NO"
+
# check alerta
[ -z "${ALERTA_WEBHOOK_URL}" ] && SEND_ALERTA="NO"
@@ -657,6 +767,9 @@ to_irc="${!arr_irc[*]}"
# check irc
[ -z "${IRC_NETWORK}" ] && SEND_IRC="NO"
+# check fleep
+[ -z "${FLEEP_SERVER}" -o -z "${FLEEP_SENDER}" ] && SEND_FLEEP="NO"
+
# check pagerduty.com
# if we need pd-send, check for the pd-send command
# https://www.pagerduty.com/docs/guides/agent-install-guide/
@@ -674,6 +787,7 @@ fi
if [ \( \
"${SEND_PUSHOVER}" = "YES" \
-o "${SEND_SLACK}" = "YES" \
+ -o "${SEND_ROCKETCHAT}" = "YES" \
-o "${SEND_ALERTA}" = "YES" \
-o "${SEND_FLOCK}" = "YES" \
-o "${SEND_DISCORD}" = "YES" \
@@ -684,7 +798,9 @@ if [ \( \
-o "${SEND_TELEGRAM}" = "YES" \
-o "${SEND_PUSHBULLET}" = "YES" \
-o "${SEND_KAFKA}" = "YES" \
+ -o "${SEND_FLEEP}" = "YES" \
-o "${SEND_CUSTOM}" = "YES" \
+ -o "${SEND_MSTEAM}" = "YES" \
\) -a -z "${curl}" ]
then
curl="$(which curl 2>/dev/null || command -v curl 2>/dev/null)"
@@ -695,6 +811,8 @@ if [ \( \
SEND_PUSHBULLET="NO"
SEND_TELEGRAM="NO"
SEND_SLACK="NO"
+ SEND_MSTEAM="NO"
+ SEND_ROCKETCHAT="NO"
SEND_ALERTA="NO"
SEND_FLOCK="NO"
SEND_DISCORD="NO"
@@ -703,6 +821,7 @@ if [ \( \
SEND_MESSAGEBIRD="NO"
SEND_KAVENEGAR="NO"
SEND_KAFKA="NO"
+ SEND_FLEEP="NO"
SEND_CUSTOM="NO"
fi
fi
@@ -718,11 +837,34 @@ if [ "${SEND_EMAIL}" = "YES" -a -z "${sendmail}" ]
fi
fi
+# if we need logger, check for the logger command
+if [ "${SEND_SYSLOG}" = "YES" -a -z "${logger}" ]
+ then
+ logger="$(which logger 2>/dev/null || command -v logger 2>/dev/null)"
+ if [ -z "${logger}" ]
+ then
+ debug "Cannot find logger command in the system path. Disabling syslog notifications."
+ SEND_SYSLOG="NO"
+ fi
+fi
+
+# if we need aws, check for the aws command
+if [ "${SEND_AWSSNS}" = "YES" -a -z "${aws}" ]
+ then
+ aws="$(which aws 2>/dev/null || command -v aws 2>/dev/null)"
+ if [ -z "${aws}" ]
+ then
+ debug "Cannot find aws command in the system path. Disabling Amazon SNS notifications."
+ SEND_AWSSNS="NO"
+ fi
+fi
+
# check that we have at least a method enabled
if [ "${SEND_EMAIL}" != "YES" \
-a "${SEND_PUSHOVER}" != "YES" \
-a "${SEND_TELEGRAM}" != "YES" \
-a "${SEND_SLACK}" != "YES" \
+ -a "${SEND_ROCKETCHAT}" != "YES" \
-a "${SEND_ALERTA}" != "YES" \
-a "${SEND_FLOCK}" != "YES" \
-a "${SEND_DISCORD}" != "YES" \
@@ -733,8 +875,12 @@ if [ "${SEND_EMAIL}" != "YES" \
-a "${SEND_PUSHBULLET}" != "YES" \
-a "${SEND_KAFKA}" != "YES" \
-a "${SEND_PD}" != "YES" \
+ -a "${SEND_FLEEP}" != "YES" \
-a "${SEND_CUSTOM}" != "YES" \
-a "${SEND_IRC}" != "YES" \
+ -a "${SEND_AWSSNS}" != "YES" \
+ -a "${SEND_SYSLOG}" != "YES" \
+ -a "${SEND_MSTEAM}" != "YES" \
]
then
fatal "All notification methods are disabled. Not sending notification for host '${host}', chart '${chart}' to '${roles}' for '${name}' = '${value}' for status '${status}'."
@@ -743,8 +889,18 @@ fi
# -----------------------------------------------------------------------------
# get the date the alarm happened
-date="$(date --date=@${when} 2>/dev/null)"
-[ -z "${date}" ] && date="$(date 2>/dev/null)"
+date=$(date --date=@${when} "${date_format}" 2>/dev/null)
+[ -z "${date}" ] && date=$(date "${date_format}" 2>/dev/null)
+[ -z "${date}" ] && date=$(date --date=@${when} 2>/dev/null)
+[ -z "${date}" ] && date=$(date 2>/dev/null)
+
+# ----------------------------------------------------------------------------
+# prepare some extra headers if we've been asked to thread e-mails
+if [ "${SEND_EMAIL}" == "YES" -a "${EMAIL_THREADING}" == "YES" ] ; then
+ email_thread_headers="In-Reply-To: <${chart}-${name}@${host}>\nReferences: <${chart}-${name}@${host}>"
+else
+ email_thread_headers=
+fi
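+
+# For example, with chart "system.cpu", alarm name "10min_cpu_usage" and host "myhost"
+# (illustrative values), every notification for that alarm would carry:
+#     In-Reply-To: <system.cpu-10min_cpu_usage@myhost>
+#     References: <system.cpu-10min_cpu_usage@myhost>
+# so mail clients thread the WARNING, CRITICAL and CLEAR e-mails of the same alarm together.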
# -----------------------------------------------------------------------------
# function to URL encode a string
@@ -830,39 +986,42 @@ duration4human() {
# email sender
send_email() {
- local ret= opts=
+ local ret= opts=() sender_email="${EMAIL_SENDER}" sender_name=
if [ "${SEND_EMAIL}" = "YES" ]
then
if [ ! -z "${EMAIL_SENDER}" ]
then
- if [[ "${EMAIL_SENDER}" =~ \".*\"\ \<.*\> ]]
- then
- # the name includes single quotes
- opts=" -f $(echo "${EMAIL_SENDER}" | cut -d '<' -f 2 | cut -d '>' -f 1) -F $(echo "${EMAIL_SENDER}" | cut -d '<' -f 1)"
- elif [[ "${EMAIL_SENDER}" =~ \'.*\'\ \<.*\> ]]
+ if [[ "${EMAIL_SENDER}" =~ ^\".*\"\ \<.*\>$ ]]
then
# the name includes double quotes
- opts=" -f $(echo "${EMAIL_SENDER}" | cut -d '<' -f 2 | cut -d '>' -f 1) -F $(echo "${EMAIL_SENDER}" | cut -d '<' -f 1)"
- elif [[ "${EMAIL_SENDER}" =~ .*\ \<.*\> ]]
+ sender_email="$(echo "${EMAIL_SENDER}" | cut -d '<' -f 2 | cut -d '>' -f 1)"
+ sender_name="$(echo "${EMAIL_SENDER}" | cut -d '"' -f 2)"
+ elif [[ "${EMAIL_SENDER}" =~ ^\'.*\'\ \<.*\>$ ]]
+ then
+ # the name includes single quotes
+ sender_email="$(echo "${EMAIL_SENDER}" | cut -d '<' -f 2 | cut -d '>' -f 1)"
+ sender_name="$(echo "${EMAIL_SENDER}" | cut -d "'" -f 2)"
+ elif [[ "${EMAIL_SENDER}" =~ ^.*\ \<.*\>$ ]]
then
# the name does not have any quotes
- opts=" -f $(echo "${EMAIL_SENDER}" | cut -d '<' -f 2 | cut -d '>' -f 1) -F '$(echo "${EMAIL_SENDER}" | cut -d '<' -f 1)'"
- else
- # no name at all
- opts=" -f ${EMAIL_SENDER}"
+ sender_email="$(echo "${EMAIL_SENDER}" | cut -d '<' -f 2 | cut -d '>' -f 1)"
+ sender_name="$(echo "${EMAIL_SENDER}" | cut -d '<' -f 1)"
fi
fi
+ [ ! -z "${sender_email}" ] && opts+=(-f "${sender_email}")
+ [ ! -z "${sender_name}" ] && opts+=(-F "${sender_name}")
+
if [ "${debug}" = "1" ]
then
echo >&2 "--- BEGIN sendmail command ---"
- printf >&2 "%q " "${sendmail}" -t ${opts}
+ printf >&2 "%q " "${sendmail}" -t "${opts[@]}"
echo >&2
echo >&2 "--- END sendmail command ---"
fi
- "${sendmail}" -t ${opts}
+ "${sendmail}" -t "${opts[@]}"
ret=$?
if [ ${ret} -eq 0 ]
@@ -1242,6 +1401,65 @@ send_telegram() {
}
# -----------------------------------------------------------------------------
+# Microsoft Teams sender
+
+send_msteam() {
+
+ local webhook="${1}" channels="${2}" httpcode sent=0 channel color payload
+
+ [ "${SEND_MSTEAM}" != "YES" ] && return 1
+
+ case "${status}" in
+ WARNING) icon="${MSTEAM_ICON_WARNING}" && color="${MSTEAM_COLOR_WARNING}";;
+ CRITICAL) icon="${MSTEAM_ICON_CRITICAL}" && color="${MSTEAM_COLOR_CRITICAL}";;
+ CLEAR) icon="${MSTEAM_ICON_CLEAR}" && color="${MSTEAM_COLOR_CLEAR}";;
+ *) icon="${MSTEAM_ICON_DEFAULT}" && color="${MSTEAM_COLOR_DEFAULT}";;
+ esac
+
+ for channel in ${channels}
+ do
+ ## More details on the MessageCard payload syntax: https://docs.microsoft.com/en-us/outlook/actionable-messages/message-card-reference
+ ## Online designer: https://acdesignerbeta.azurewebsites.net/
+ payload="$(cat <<EOF
+ {
+ "@context": "http://schema.org/extensions",
+ "@type": "MessageCard",
+ "themeColor": "${color}",
+ "title": "$icon Alert ${status} from netdata for ${host}",
+ "text": "${host} ${status_message}, ${chart} (_${family}_), *${alarm}*",
+ "potentialAction": [
+ {
+ "@type": "OpenUri",
+ "name": "Netdata",
+ "targets": [
+ { "os": "default", "uri": "${goto_url}" }
+ ]
+ }
+ ]
+ }
+EOF
+ )"
+
+ # Replace the CHANNEL placeholder in the webhook URL with the MS Teams channel name from the config file.
+ # A separate variable is used, so the placeholder is still available for the next channel in the loop.
+ channel_webhook="${webhook//CHANNEL/${channel}}"
+
+ httpcode=$(docurl -H "Content-Type: application/json" -d "${payload}" "${channel_webhook}")
+
+ if [ "${httpcode}" = "200" ]
+ then
+ info "sent Microsoft team notification for: ${host} ${chart}.${name} is ${status} to '${webhook}'"
+ sent=$((sent + 1))
+ else
+ error "failed to send Microsoft team notification for: ${host} ${chart}.${name} is ${status} to '${webhook}', with HTTP error code ${httpcode}."
+ fi
+ done
+
+ [ ${sent} -gt 0 ] && return 0
+
+ return 1
+}
+
+
# slack sender
send_slack() {
@@ -1305,6 +1523,71 @@ EOF
return 1
}
+
+# -----------------------------------------------------------------------------
+# rocketchat sender
+
+send_rocketchat() {
+ local webhook="${1}" channels="${2}" httpcode sent=0 channel color payload
+
+ [ "${SEND_ROCKETCHAT}" != "YES" ] && return 1
+
+ case "${status}" in
+ WARNING) color="warning" ;;
+ CRITICAL) color="danger" ;;
+ CLEAR) color="good" ;;
+ *) color="#777777" ;;
+ esac
+
+ for channel in ${channels}
+ do
+ payload="$(cat <<EOF
+ {
+ "channel": "#${channel}",
+ "alias": "netdata on ${host}",
+ "avatar": "${images_base_url}/images/seo-performance-128.png",
+ "text": "${host} ${status_message}, \`${chart}\` (_${family}_), *${alarm}*",
+ "attachments": [
+ {
+ "color": "${color}",
+ "title": "${alarm}",
+ "title_link": "${goto_url}",
+ "text": "${info}",
+ "fields": [
+ {
+ "title": "${chart}",
+ "short": true,
+ "value": "chart"
+ },
+ {
+ "title": "${family}",
+ "short": true,
+ "value": "family"
+ }
+ ],
+ "thumb_url": "${image}",
+ "ts": "${when}"
+ }
+ ]
+ }
+EOF
+ )"
+
+ httpcode=$(docurl -X POST --data-urlencode "payload=${payload}" "${webhook}")
+ if [ "${httpcode}" = "200" ]
+ then
+ info "sent rocketchat notification for: ${host} ${chart}.${name} is ${status} to '${channel}'"
+ sent=$((sent + 1))
+ else
+ error "failed to send rocketchat notification for: ${host} ${chart}.${name} is ${status} to '${channel}', with HTTP error code ${httpcode}."
+ fi
+ done
+
+ [ ${sent} -gt 0 ] && return 0
+
+ return 1
+}
+
# -----------------------------------------------------------------------------
# alerta sender
@@ -1471,6 +1754,36 @@ EOF
}
# -----------------------------------------------------------------------------
+# fleep sender
+
+send_fleep() {
+ local httpcode sent=0 webhooks="${1}" data message
+ if [ "${SEND_FLEEP}" = "YES" ] ; then
+ message="${host} ${status_message}, \`${chart}\` (${family}), *${alarm}*\\n${info}"
+
+ for hook in "${webhooks}" ; do
+ data="{ "
+ data="${data} 'message': '${message}', "
+ data="${data} 'user': '${FLEEP_SENDER}' "
+ data="${data} }"
+
+ httpcode=$(docurl -X POST --data "${data}" "https://fleep.io/hook/${hook}")
+
+ if [ "${httpcode}" = "200" ] ; then
+ info "sent fleep data for: ${host} ${chart}.${name} is ${status} and user '${FLEEP_SENDER}'"
+ sent=$((sent + 1))
+ else
+ error "failed to send fleep data for: ${host} ${chart}.${name} is ${status} and user '${FLEEP_SENDER}' with HTTP error code ${httpcode}."
+ fi
+ done
+
+ [ ${sent} -gt 0 ] && return 0
+ fi
+
+ return 1
+}
+
+# -----------------------------------------------------------------------------
# irc sender
send_irc() {
@@ -1510,6 +1823,106 @@ send_irc() {
return 1
}
+# -----------------------------------------------------------------------------
+# Amazon SNS sender
+
+send_awssns() {
+ local targets="${1}" message='' sent=0 region=''
+ local default_format="${status} on ${host} at ${date}: ${chart} ${value_string}"
+
+ [ "${SEND_AWSSNS}" = "YES" ] || return 1
+
+ message=${AWSSNS_MESSAGE_FORMAT:-${default_format}}
+
+ for target in ${targets} ; do
+ # Extract the region from the target ARN. We need to explicitly specify the region so that it matches up correctly.
+ region="$(echo ${target} | cut -f 4 -d ':')"
+ ${aws} sns publish --region "${region}" --subject "${host} ${status_message} - ${name//_/ } - ${chart}" --message "${message}" --target-arn ${target} &>/dev/null
+ if [ $? = 0 ]; then
+ info "sent Amazon SNS notification for: ${host} ${chart}.${name} is ${status} to '${target}'"
+ sent=$((sent + 1))
+ else
+ error "failed to send Amazon SNS notification for: ${host} ${chart}.${name} is ${status} to '${target}'"
+ fi
+ done
+
+ [ ${sent} -gt 0 ] && return 0
+
+ return 1
+}
+
+# -----------------------------------------------------------------------------
+# syslog sender
+
+send_syslog() {
+ local facility=${SYSLOG_FACILITY:-"local6"} level='info' targets="${1}"
+ local priority='' message='' host='' port='' prefix=''
+ local temp1='' temp2=''
+
+ [ "${SEND_SYSLOG}" = "YES" ] || return 1
+
+ if [ "${status}" = "CRITICAL" ] ; then
+ level='crit'
+ elif [ "${status}" = "WARNING" ] ; then
+ level='warning'
+ fi
+
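+ # Each target is assumed (from the parsing below) to look like:
+ #     [[facility.level][@host[:port]]/]prefix
+ # e.g. "netdata", "local6.notice/netdata" or "local6.notice@loghost:514/netdata"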
+ for target in ${targets} ; do
+ priority="${facility}.${level}"
+ message=''
+ host=''
+ port=''
+ prefix=''
+ temp1=''
+ temp2=''
+ # also reset the logger options, so host/port options from the previous target do not leak into this one
+ logger_options=''
+
+ prefix=$(echo ${target} | cut -d '/' -f 2)
+ temp1=$(echo ${target} | cut -d '/' -f 1)
+
+ if [ ${prefix} != ${temp1} ] ; then
+ if (echo ${temp1} | grep -q '@' ) ; then
+ temp2=$(echo ${temp1} | cut -d '@' -f 1)
+ host=$(echo ${temp1} | cut -d '@' -f 2)
+
+ if [ ${temp2} != ${host} ] ; then
+ priority=${temp2}
+ fi
+
+ port=$(echo ${host} | rev | cut -d ':' -f 1 | rev)
+
+ if ( echo ${host} | grep -E -q '\[.*\]' ) ; then
+ if ( echo ${port} | grep -q ']' ) ; then
+ port=''
+ else
+ host=$(echo ${host} | rev | cut -d ':' -f 2- | rev)
+ fi
+ else
+ if [ ${port} = ${host} ] ; then
+ port=''
+ else
+ host=$(echo ${host} | cut -d ':' -f 1)
+ fi
+ fi
+ else
+ priority=${temp1}
+ fi
+ fi
+
+ message="${prefix} ${status} on ${host} at ${date}: ${chart} ${value_string}"
+
+ if [ ${host} ] ; then
+ logger_options="${logger_options} -n ${host}"
+ if [ ${port} ] ; then
+ logger_options="${logger_options} -P ${port}"
+ fi
+ fi
+
+ ${logger} -p ${priority} ${logger_options} "${message}"
+ done
+
+ return $?
+}
+
# -----------------------------------------------------------------------------
# prepare the content of the notification
@@ -1611,6 +2024,24 @@ send_slack "${SLACK_WEBHOOK_URL}" "${to_slack}"
SENT_SLACK=$?
# -----------------------------------------------------------------------------
+# send the Microsoft Teams notification
+
+# Microsoft Teams aggregates posts from the same sender,
+# so the alert status and the host name are included in the card title, to keep notifications distinct
+
+send_msteam "${MSTEAM_WEBHOOK_URL}" "${to_msteam}"
+SENT_MSTEAM=$?
+
+# -----------------------------------------------------------------------------
+# send the rocketchat notification
+
+# rocketchat aggregates posts from the same username,
+# so we set the alias to "netdata on ${host}", to keep messages from different hosts distinct
+
+send_rocketchat "${ROCKETCHAT_WEBHOOK_URL}" "${to_rocketchat}"
+SENT_ROCKETCHAT=$?
+
+# -----------------------------------------------------------------------------
# send the alerta notification
# alerta aggregates posts from the same username
@@ -1725,6 +2156,12 @@ send_pd "${to_pd}"
SENT_PD=$?
# -----------------------------------------------------------------------------
+# send the fleep message
+
+send_fleep "${to_fleep}"
+SENT_FLEEP=$?
+
+# -----------------------------------------------------------------------------
# send the irc message
send_irc "${IRC_NICKNAME}" "${IRC_REALNAME}" "${to_irc}" "${IRC_NETWORK}" "${host}" "${host} ${status_message} - ${name//_/ } - ${chart} ----- ${alarm}
@@ -1769,6 +2206,22 @@ SENT_HIPCHAT=$?
# -----------------------------------------------------------------------------
+# send the Amazon SNS message
+
+send_awssns "${to_awssns}"
+SENT_AWSSNS=$?
+
+# -----------------------------------------------------------------------------
+# send the syslog message
+
+send_syslog "${to_syslog}"
+SENT_SYSLOG=$?
+
+# -----------------------------------------------------------------------------
# send the email
send_email <<EOF
@@ -1776,6 +2229,7 @@ To: ${to_email}
Subject: ${host} ${status_message} - ${name//_/ } - ${chart}
MIME-Version: 1.0
Content-Type: multipart/alternative; boundary="multipart-boundary"
+${email_thread_headers}
This is a MIME-encoded multipart message
@@ -1897,6 +2351,8 @@ if [ ${SENT_EMAIL} -eq 0 \
-o ${SENT_PUSHOVER} -eq 0 \
-o ${SENT_TELEGRAM} -eq 0 \
-o ${SENT_SLACK} -eq 0 \
+ -o ${SENT_MSTEAM} -eq 0 \
+ -o ${SENT_ROCKETCHAT} -eq 0 \
-o ${SENT_ALERTA} -eq 0 \
-o ${SENT_FLOCK} -eq 0 \
-o ${SENT_DISCORD} -eq 0 \
@@ -1907,8 +2363,11 @@ if [ ${SENT_EMAIL} -eq 0 \
-o ${SENT_PUSHBULLET} -eq 0 \
-o ${SENT_KAFKA} -eq 0 \
-o ${SENT_PD} -eq 0 \
+ -o ${SENT_FLEEP} -eq 0 \
-o ${SENT_IRC} -eq 0 \
+ -o ${SENT_AWSSNS} -eq 0 \
-o ${SENT_CUSTOM} -eq 0 \
+ -o ${SENT_SYSLOG} -eq 0 \
]
then
# we did send something
diff --git a/plugins.d/alarm-test.sh b/health/notifications/alarm-test.sh
index 9df5361a9..828aa756b 100755
--- a/plugins.d/alarm-test.sh
+++ b/health/notifications/alarm-test.sh
@@ -3,10 +3,10 @@
# netdata
# real-time performance and health monitoring, done right!
# (C) 2017 Costa Tsaousis <costa@tsaousis.gr>
-# GPL v3+
+# SPDX-License-Identifier: GPL-3.0-or-later
#
# Script to test alarm notifications for netdata
dir="$(dirname "${0}")"
-${dir}/alarm-notify.sh test "${1}"
+"${dir}/alarm-notify.sh" test "${1}"
exit $?
diff --git a/plugins.d/Makefile.am b/plugins.d/Makefile.am
deleted file mode 100644
index 41e6d5366..000000000
--- a/plugins.d/Makefile.am
+++ /dev/null
@@ -1,23 +0,0 @@
-#
-# Copyright (C) 2015 Alon Bar-Lev <alon.barlev@gmail.com>
-#
-MAINTAINERCLEANFILES= $(srcdir)/Makefile.in
-
-dist_plugins_DATA = \
- README.md \
- $(NULL)
-
-dist_plugins_SCRIPTS = \
- alarm-email.sh \
- alarm-notify.sh \
- alarm-test.sh \
- cgroup-name.sh \
- cgroup-network-helper.sh \
- charts.d.dryrun-helper.sh \
- charts.d.plugin \
- fping.plugin \
- node.d.plugin \
- python.d.plugin \
- tc-qos-helper.sh \
- loopsleepms.sh.inc \
- $(NULL)
diff --git a/plugins.d/README.md b/plugins.d/README.md
deleted file mode 100644
index 35b9a2d99..000000000
--- a/plugins.d/README.md
+++ /dev/null
@@ -1,236 +0,0 @@
-netdata plugins
-===============
-
-Any program that can print a few values to its standard output can become
-a netdata plugin.
-
-There are 5 lines netdata parses, lines starting with:
-
-- `CHART` - create a new chart
-- `DIMENSION` - add a dimension to the chart just created
-- `BEGIN` - initialize data collection for a chart
-- `SET` - set the value of a dimension for the initialized chart
-- `END` - complete data collection for the initialized chart
-
-A single program can produce any number of charts, with any number of dimensions
-each.
-
-Charts can also be added at any time (not just at the beginning).
-
-### command line parameters
-
-The plugin should accept just **one** parameter: **the number of seconds it is
-expected to update the values for its charts**. The value passed by netdata
-to the plugin is controlled via its configuration file (so there is no need
-for the plugin to handle this configuration option).
-
-The script can overwrite the update frequency. For example, the server may
-request per second updates, but the script may overwrite this to one update
-every 5 seconds.
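-
-For example, if the configured update frequency for the plugin is 5 seconds,
-netdata will start it roughly like this (the installation path is only illustrative):
-
-> /usr/libexec/netdata/plugins.d/example.plugin 5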
-
-### environment variables
-
-There are a few environment variables that are set by `netdata` and are
-available for the plugin to use.
-
-variable|description
-:------:|:----------
-`NETDATA_CONFIG_DIR`|The directory where all netdata related configuration should be stored. If the plugin requires custom configuration, this is the place to save it.
-`NETDATA_PLUGINS_DIR`|The directory where all netdata plugins are stored.
-`NETDATA_WEB_DIR`|The directory where the web files of netdata are saved.
-`NETDATA_CACHE_DIR`|The directory where the cache files of netdata are stored. Use this directory if the plugin requires a place to store data. A new directory should be created for the plugin for this purpose, inside this directory.
-`NETDATA_LOG_DIR`|The directory where the log files are stored. By default the `stderr` output of the plugin will be saved in the `error.log` file of netdata.
-`NETDATA_HOST_PREFIX`|This is used in environments where system directories like `/sys` and `/proc` have to be accessed at a different path.
-`NETDATA_DEBUG_FLAGS`|This is a number (probably in hex, starting with `0x`) that enables certain netdata debugging features.
-`NETDATA_UPDATE_EVERY`|The minimum number of seconds between chart refreshes. This is like the **internal clock** of netdata (it is user configurable, defaulting to `1`). There is no point in a plugin updating its values more frequently than this number of seconds.
-
-
-# the output of the plugin
-
-The plugin should output instructions for netdata to its output (`stdout`).
-
-## CHART
-
-`CHART` defines a new chart.
-
-the template is:
-
-> CHART type.id name title units [family [category [charttype [priority [update_every]]]]]
-
- where:
- - `type.id`
-
- uniquely identifies the chart,
- this is what will be needed to add values to the chart
-
- - `name`
-
- is the name that will be presented to the user for this chart
-
- - `title`
-
- the text above the chart
-
- - `units`
-
- the label of the vertical axis of the chart,
- all dimensions added to a chart should have the same units
- of measurement
-
- - `family`
-
- is used to group charts together
- (for example all eth0 charts should say: eth0),
- if empty or missing, the `id` part of `type.id` will be used
-
- - `category`
-
- the section under which the chart will appear
- (for example mem.ram should appear in the 'system' section),
- the special word 'none' means: do not show this chart on the home page,
- if empty or missing, the `type` part of `type.id` will be used
-
- - `charttype`
-
- one of `line`, `area` or `stacked`,
- if empty or missing, `line` will be used
-
- - `priority`
-
- is the relative priority of the charts as rendered on the web page,
- lower numbers make the charts appear before the ones with higher numbers,
- if empty or missing, `1000` will be used
-
- - `update_every`
-
- overwrite the update frequency set by the server,
- if empty or missing, the user configured value will be used
-
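-
-For example, a plugin monitoring a fictional "myapp" service (all identifiers below
-are made up) could create a requests chart with:
-
-> CHART myapp.requests requests "Requests served by myapp" "requests/s" myapp myapp line 95000 1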
-
-## DIMENSION
-
-`DIMENSION` defines a new dimension for the chart
-
-the template is:
-
-> DIMENSION id [name [algorithm [multiplier [divisor [hidden]]]]]
-
- where:
-
- - `id`
-
- the `id` of this dimension (it is a text value, not numeric),
- this will be needed later to add values to the dimension
-
- - `name`
-
- the name of the dimension as it will appear at the legend of the chart,
- if empty or missing the `id` will be used
-
- - `algorithm`
-
- one of:
-
- * `absolute`
-
- the value is drawn as-is (interpolated to the second boundary),
- if `algorithm` is empty, invalid or missing, `absolute` is used
-
- * `incremental`
-
- the value increases over time,
- the difference from the last value is presented in the chart,
- the server interpolates the value and calculates a per second figure
-
- * `percentage-of-absolute-row`
-
- the % of this value compared to the total of all dimensions
-
- * `percentage-of-incremental-row`
-
- the % of this value compared to the incremental total of
- all dimensions
-
- - `multiplier`
-
- an integer value to multiply the collected value,
- if empty or missing, `1` is used
-
- - `divisor`
-
- an integer value to divide the collected value,
- if empty or missing, `1` is used
-
- - `hidden`
-
- giving the keyword `hidden` will make this dimension hidden,
- it will take part in the calculations but will not be presented in the chart
-
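-
-Continuing the made-up "myapp" example above, two counters for that chart could be
-declared as:
-
-> DIMENSION served served incremental 1 1
-> DIMENSION failed failed incremental 1 1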
-
-## data collection
-
-data collection is defined as a series of `BEGIN` -> `SET` -> `END` lines
-
-> BEGIN type.id [microseconds]
-
- - `type.id`
-
- is the unique identification of the chart (as given in `CHART`)
-
- - `microseconds`
-
- is the number of microseconds since the last update of the chart,
- it is optional.
-
- Under heavy system load, the system may have some latency transferring
- data from the plugins to netdata via the pipe. This number improves
- accuracy significantly, since the plugin is able to calculate the
- duration between its iterations better than netdata.
-
- The first time the plugin is started, no microseconds should be given
- to netdata.
-
-> SET id = value
-
- - `id`
-
- is the unique identification of the dimension (of the chart just began)
-
- - `value`
-
- is the collected value
-
-> END
-
- END does not take any parameters; it commits the collected values to the chart.
-
-More `SET` lines may appear to update all the dimensions of the chart.
-All of them in one `BEGIN` -> `END` block.
-
-All `SET` lines within a single `BEGIN` -> `END` block have to refer to the
-same chart.
-
-If more charts need to be updated, each chart should have its own
-`BEGIN` -> `SET` -> `END` block.
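-
-A complete collection iteration for the made-up chart above could therefore look
-like this (the values are illustrative):
-
-> BEGIN myapp.requests 1000125
-> SET served = 34567
-> SET failed = 12
-> END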
-
-If, for any reason, a plugin has issued a `BEGIN` but wants to cancel it,
-it can issue a `FLUSH`. The `FLUSH` command will instruct netdata to ignore
-the last `BEGIN` command.
-
-If a plugin does not behave properly (outputs invalid lines, or does not
-follow these guidelines), it will be disabled by netdata.
-
-
-### collected values
-
-netdata will collect any **signed** value in the 64bit range:
-`-9,223,372,036,854,775,808` to `+9,223,372,036,854,775,807`
-
-Internally, all calculations are made using 128 bit double precision and are
-stored in 30 bits as floating point.
-
-If a value is not collected, leave it empty, like this:
-
-`SET id = `
-
-or do not output the line at all.
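-
-Putting it all together, a minimal shell plugin could look like the sketch below.
-It only illustrates the protocol described above - the chart, dimensions and values
-are made up, and a real plugin would collect real data:
-
-    #!/usr/bin/env bash
-
-    # netdata passes the requested update frequency (in seconds) as the first argument
-    update_every="${1:-1}"
-
-    # define the chart and its dimensions once, at startup
-    echo "CHART example.random random 'Random Numbers' 'numbers' example example line 90000 ${update_every}"
-    echo "DIMENSION number1 number1 absolute 1 1"
-    echo "DIMENSION number2 number2 absolute 1 1"
-
-    # then send values forever, once every update_every seconds
-    while true
-    do
-        echo "BEGIN example.random"
-        echo "SET number1 = $((RANDOM))"
-        echo "SET number2 = $((RANDOM))"
-        echo "END"
-        sleep "${update_every}"
-    done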