Diffstat (limited to 'plugins.d')
-rw-r--r--  plugins.d/Makefile.am                  23
-rw-r--r--  plugins.d/Makefile.in                 567
-rw-r--r--  plugins.d/README.md                   236
-rwxr-xr-x  plugins.d/alarm-email.sh                6
-rwxr-xr-x  plugins.d/alarm-notify.sh            1919
-rwxr-xr-x  plugins.d/alarm-test.sh                12
-rwxr-xr-x  plugins.d/cgroup-name.sh              189
-rwxr-xr-x  plugins.d/cgroup-network-helper.sh    251
-rwxr-xr-x  plugins.d/charts.d.dryrun-helper.sh    73
-rwxr-xr-x  plugins.d/charts.d.plugin             713
-rwxr-xr-x  plugins.d/fping.plugin                188
-rw-r--r--  plugins.d/loopsleepms.sh.inc          189
-rwxr-xr-x  plugins.d/node.d.plugin               294
-rwxr-xr-x  plugins.d/python.d.plugin             382
-rwxr-xr-x  plugins.d/tc-qos-helper.sh            303
15 files changed, 0 insertions, 5345 deletions
diff --git a/plugins.d/Makefile.am b/plugins.d/Makefile.am
deleted file mode 100644
index 41e6d536..00000000
--- a/plugins.d/Makefile.am
+++ /dev/null
@@ -1,23 +0,0 @@
-#
-# Copyright (C) 2015 Alon Bar-Lev <alon.barlev@gmail.com>
-#
-MAINTAINERCLEANFILES= $(srcdir)/Makefile.in
-
-dist_plugins_DATA = \
- README.md \
- $(NULL)
-
-dist_plugins_SCRIPTS = \
- alarm-email.sh \
- alarm-notify.sh \
- alarm-test.sh \
- cgroup-name.sh \
- cgroup-network-helper.sh \
- charts.d.dryrun-helper.sh \
- charts.d.plugin \
- fping.plugin \
- node.d.plugin \
- python.d.plugin \
- tc-qos-helper.sh \
- loopsleepms.sh.inc \
- $(NULL)
diff --git a/plugins.d/Makefile.in b/plugins.d/Makefile.in
deleted file mode 100644
index 059d68f6..00000000
--- a/plugins.d/Makefile.in
+++ /dev/null
@@ -1,567 +0,0 @@
-# Makefile.in generated by automake 1.14.1 from Makefile.am.
-# @configure_input@
-
-# Copyright (C) 1994-2013 Free Software Foundation, Inc.
-
-# This Makefile.in is free software; the Free Software Foundation
-# gives unlimited permission to copy and/or distribute it,
-# with or without modifications, as long as this notice is preserved.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
-# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
-# PARTICULAR PURPOSE.
-
-@SET_MAKE@
-
-
-VPATH = @srcdir@
-am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
-am__make_running_with_option = \
- case $${target_option-} in \
- ?) ;; \
- *) echo "am__make_running_with_option: internal error: invalid" \
- "target option '$${target_option-}' specified" >&2; \
- exit 1;; \
- esac; \
- has_opt=no; \
- sane_makeflags=$$MAKEFLAGS; \
- if $(am__is_gnu_make); then \
- sane_makeflags=$$MFLAGS; \
- else \
- case $$MAKEFLAGS in \
- *\\[\ \ ]*) \
- bs=\\; \
- sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
- | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
- esac; \
- fi; \
- skip_next=no; \
- strip_trailopt () \
- { \
- flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
- }; \
- for flg in $$sane_makeflags; do \
- test $$skip_next = yes && { skip_next=no; continue; }; \
- case $$flg in \
- *=*|--*) continue;; \
- -*I) strip_trailopt 'I'; skip_next=yes;; \
- -*I?*) strip_trailopt 'I';; \
- -*O) strip_trailopt 'O'; skip_next=yes;; \
- -*O?*) strip_trailopt 'O';; \
- -*l) strip_trailopt 'l'; skip_next=yes;; \
- -*l?*) strip_trailopt 'l';; \
- -[dEDm]) skip_next=yes;; \
- -[JT]) skip_next=yes;; \
- esac; \
- case $$flg in \
- *$$target_option*) has_opt=yes; break;; \
- esac; \
- done; \
- test $$has_opt = yes
-am__make_dryrun = (target_option=n; $(am__make_running_with_option))
-am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
-pkgdatadir = $(datadir)/@PACKAGE@
-pkgincludedir = $(includedir)/@PACKAGE@
-pkglibdir = $(libdir)/@PACKAGE@
-pkglibexecdir = $(libexecdir)/@PACKAGE@
-am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
-install_sh_DATA = $(install_sh) -c -m 644
-install_sh_PROGRAM = $(install_sh) -c
-install_sh_SCRIPT = $(install_sh) -c
-INSTALL_HEADER = $(INSTALL_DATA)
-transform = $(program_transform_name)
-NORMAL_INSTALL = :
-PRE_INSTALL = :
-POST_INSTALL = :
-NORMAL_UNINSTALL = :
-PRE_UNINSTALL = :
-POST_UNINSTALL = :
-build_triplet = @build@
-host_triplet = @host@
-subdir = plugins.d
-DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \
- $(dist_plugins_SCRIPTS) $(dist_plugins_DATA)
-ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
-am__aclocal_m4_deps = $(top_srcdir)/m4/ax_c___atomic.m4 \
- $(top_srcdir)/m4/ax_c__generic.m4 $(top_srcdir)/m4/ax_c_lto.m4 \
- $(top_srcdir)/m4/ax_c_mallinfo.m4 \
- $(top_srcdir)/m4/ax_c_mallopt.m4 \
- $(top_srcdir)/m4/ax_check_compile_flag.m4 \
- $(top_srcdir)/m4/ax_gcc_func_attribute.m4 \
- $(top_srcdir)/m4/ax_pthread.m4 $(top_srcdir)/m4/jemalloc.m4 \
- $(top_srcdir)/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
-am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
- $(ACLOCAL_M4)
-mkinstalldirs = $(install_sh) -d
-CONFIG_HEADER = $(top_builddir)/config.h
-CONFIG_CLEAN_FILES =
-CONFIG_CLEAN_VPATH_FILES =
-am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
-am__vpath_adj = case $$p in \
- $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
- *) f=$$p;; \
- esac;
-am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
-am__install_max = 40
-am__nobase_strip_setup = \
- srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
-am__nobase_strip = \
- for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
-am__nobase_list = $(am__nobase_strip_setup); \
- for p in $$list; do echo "$$p $$p"; done | \
- sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
- $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
- if (++n[$$2] == $(am__install_max)) \
- { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
- END { for (dir in files) print dir, files[dir] }'
-am__base_list = \
- sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
- sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
-am__uninstall_files_from_dir = { \
- test -z "$$files" \
- || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \
- || { echo " ( cd '$$dir' && rm -f" $$files ")"; \
- $(am__cd) "$$dir" && rm -f $$files; }; \
- }
-am__installdirs = "$(DESTDIR)$(pluginsdir)" "$(DESTDIR)$(pluginsdir)"
-SCRIPTS = $(dist_plugins_SCRIPTS)
-AM_V_P = $(am__v_P_@AM_V@)
-am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
-am__v_P_0 = false
-am__v_P_1 = :
-AM_V_GEN = $(am__v_GEN_@AM_V@)
-am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
-am__v_GEN_0 = @echo " GEN " $@;
-am__v_GEN_1 =
-AM_V_at = $(am__v_at_@AM_V@)
-am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
-am__v_at_0 = @
-am__v_at_1 =
-SOURCES =
-DIST_SOURCES =
-am__can_run_installinfo = \
- case $$AM_UPDATE_INFO_DIR in \
- n|no|NO) false;; \
- *) (install-info --version) >/dev/null 2>&1;; \
- esac
-DATA = $(dist_plugins_DATA)
-am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
-DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
-ACLOCAL = @ACLOCAL@
-AMTAR = @AMTAR@
-AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
-AUTOCONF = @AUTOCONF@
-AUTOHEADER = @AUTOHEADER@
-AUTOMAKE = @AUTOMAKE@
-AWK = @AWK@
-CC = @CC@
-CCDEPMODE = @CCDEPMODE@
-CFLAGS = @CFLAGS@
-CPP = @CPP@
-CPPFLAGS = @CPPFLAGS@
-CYGPATH_W = @CYGPATH_W@
-DEFS = @DEFS@
-DEPDIR = @DEPDIR@
-ECHO_C = @ECHO_C@
-ECHO_N = @ECHO_N@
-ECHO_T = @ECHO_T@
-EGREP = @EGREP@
-EXEEXT = @EXEEXT@
-GREP = @GREP@
-INSTALL = @INSTALL@
-INSTALL_DATA = @INSTALL_DATA@
-INSTALL_PROGRAM = @INSTALL_PROGRAM@
-INSTALL_SCRIPT = @INSTALL_SCRIPT@
-INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
-IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
-IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
-LDFLAGS = @LDFLAGS@
-LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
-LIBCAP_LIBS = @LIBCAP_LIBS@
-LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
-LIBMNL_LIBS = @LIBMNL_LIBS@
-LIBOBJS = @LIBOBJS@
-LIBS = @LIBS@
-LTLIBOBJS = @LTLIBOBJS@
-MAINT = @MAINT@
-MAKEINFO = @MAKEINFO@
-MATH_CFLAGS = @MATH_CFLAGS@
-MATH_LIBS = @MATH_LIBS@
-MKDIR_P = @MKDIR_P@
-NFACCT_CFLAGS = @NFACCT_CFLAGS@
-NFACCT_LIBS = @NFACCT_LIBS@
-OBJEXT = @OBJEXT@
-OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
-OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
-OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
-OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
-OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@
-OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
-OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@
-OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
-OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@
-OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
-OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@
-OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
-PACKAGE = @PACKAGE@
-PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
-PACKAGE_NAME = @PACKAGE_NAME@
-PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@
-PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
-PACKAGE_STRING = @PACKAGE_STRING@
-PACKAGE_TARNAME = @PACKAGE_TARNAME@
-PACKAGE_URL = @PACKAGE_URL@
-PACKAGE_VERSION = @PACKAGE_VERSION@
-PATH_SEPARATOR = @PATH_SEPARATOR@
-PKG_CONFIG = @PKG_CONFIG@
-PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
-PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
-PTHREAD_CC = @PTHREAD_CC@
-PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
-PTHREAD_LIBS = @PTHREAD_LIBS@
-SET_MAKE = @SET_MAKE@
-SHELL = @SHELL@
-SSE_CANDIDATE = @SSE_CANDIDATE@
-STRIP = @STRIP@
-UUID_CFLAGS = @UUID_CFLAGS@
-UUID_LIBS = @UUID_LIBS@
-VERSION = @VERSION@
-ZLIB_CFLAGS = @ZLIB_CFLAGS@
-ZLIB_LIBS = @ZLIB_LIBS@
-abs_builddir = @abs_builddir@
-abs_srcdir = @abs_srcdir@
-abs_top_builddir = @abs_top_builddir@
-abs_top_srcdir = @abs_top_srcdir@
-ac_ct_CC = @ac_ct_CC@
-am__include = @am__include@
-am__leading_dot = @am__leading_dot@
-am__quote = @am__quote@
-am__tar = @am__tar@
-am__untar = @am__untar@
-ax_pthread_config = @ax_pthread_config@
-bindir = @bindir@
-build = @build@
-build_alias = @build_alias@
-build_cpu = @build_cpu@
-build_os = @build_os@
-build_vendor = @build_vendor@
-builddir = @builddir@
-cachedir = @cachedir@
-chartsdir = @chartsdir@
-configdir = @configdir@
-datadir = @datadir@
-datarootdir = @datarootdir@
-docdir = @docdir@
-dvidir = @dvidir@
-exec_prefix = @exec_prefix@
-has_jemalloc = @has_jemalloc@
-has_tcmalloc = @has_tcmalloc@
-host = @host@
-host_alias = @host_alias@
-host_cpu = @host_cpu@
-host_os = @host_os@
-host_vendor = @host_vendor@
-htmldir = @htmldir@
-includedir = @includedir@
-infodir = @infodir@
-install_sh = @install_sh@
-libdir = @libdir@
-libexecdir = @libexecdir@
-localedir = @localedir@
-localstatedir = @localstatedir@
-logdir = @logdir@
-mandir = @mandir@
-mkdir_p = @mkdir_p@
-nodedir = @nodedir@
-oldincludedir = @oldincludedir@
-pdfdir = @pdfdir@
-pluginsdir = @pluginsdir@
-prefix = @prefix@
-program_transform_name = @program_transform_name@
-psdir = @psdir@
-pythondir = @pythondir@
-registrydir = @registrydir@
-sbindir = @sbindir@
-sharedstatedir = @sharedstatedir@
-srcdir = @srcdir@
-sysconfdir = @sysconfdir@
-target_alias = @target_alias@
-top_build_prefix = @top_build_prefix@
-top_builddir = @top_builddir@
-top_srcdir = @top_srcdir@
-varlibdir = @varlibdir@
-webdir = @webdir@
-
-#
-# Copyright (C) 2015 Alon Bar-Lev <alon.barlev@gmail.com>
-#
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-dist_plugins_DATA = \
- README.md \
- $(NULL)
-
-dist_plugins_SCRIPTS = \
- alarm-email.sh \
- alarm-notify.sh \
- alarm-test.sh \
- cgroup-name.sh \
- cgroup-network-helper.sh \
- charts.d.dryrun-helper.sh \
- charts.d.plugin \
- fping.plugin \
- node.d.plugin \
- python.d.plugin \
- tc-qos-helper.sh \
- loopsleepms.sh.inc \
- $(NULL)
-
-all: all-am
-
-.SUFFIXES:
-$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
- @for dep in $?; do \
- case '$(am__configure_deps)' in \
- *$$dep*) \
- ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
- && { if test -f $@; then exit 0; else break; fi; }; \
- exit 1;; \
- esac; \
- done; \
- echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu plugins.d/Makefile'; \
- $(am__cd) $(top_srcdir) && \
- $(AUTOMAKE) --gnu plugins.d/Makefile
-.PRECIOUS: Makefile
-Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
- @case '$?' in \
- *config.status*) \
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
- *) \
- echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
- cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
- esac;
-
-$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-
-$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(am__aclocal_m4_deps):
-install-dist_pluginsSCRIPTS: $(dist_plugins_SCRIPTS)
- @$(NORMAL_INSTALL)
- @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(pluginsdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(pluginsdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \
- done | \
- sed -e 'p;s,.*/,,;n' \
- -e 'h;s|.*|.|' \
- -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \
- $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \
- { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \
- if ($$2 == $$4) { files[d] = files[d] " " $$1; \
- if (++n[d] == $(am__install_max)) { \
- print "f", d, files[d]; n[d] = 0; files[d] = "" } } \
- else { print "f", d "/" $$4, $$1 } } \
- END { for (d in files) print "f", d, files[d] }' | \
- while read type dir files; do \
- if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \
- test -z "$$files" || { \
- echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pluginsdir)$$dir'"; \
- $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pluginsdir)$$dir" || exit $$?; \
- } \
- ; done
-
-uninstall-dist_pluginsSCRIPTS:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || exit 0; \
- files=`for p in $$list; do echo "$$p"; done | \
- sed -e 's,.*/,,;$(transform)'`; \
- dir='$(DESTDIR)$(pluginsdir)'; $(am__uninstall_files_from_dir)
-install-dist_pluginsDATA: $(dist_plugins_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_plugins_DATA)'; test -n "$(pluginsdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(pluginsdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(pluginsdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pluginsdir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(pluginsdir)" || exit $$?; \
- done
-
-uninstall-dist_pluginsDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_plugins_DATA)'; test -n "$(pluginsdir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(pluginsdir)'; $(am__uninstall_files_from_dir)
-tags TAGS:
-
-ctags CTAGS:
-
-cscope cscopelist:
-
-
-distdir: $(DISTFILES)
- @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- list='$(DISTFILES)'; \
- dist_files=`for file in $$list; do echo $$file; done | \
- sed -e "s|^$$srcdirstrip/||;t" \
- -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
- case $$dist_files in \
- */*) $(MKDIR_P) `echo "$$dist_files" | \
- sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
- sort -u` ;; \
- esac; \
- for file in $$dist_files; do \
- if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
- if test -d $$d/$$file; then \
- dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
- if test -d "$(distdir)/$$file"; then \
- find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
- fi; \
- if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
- cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
- find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
- fi; \
- cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
- else \
- test -f "$(distdir)/$$file" \
- || cp -p $$d/$$file "$(distdir)/$$file" \
- || exit 1; \
- fi; \
- done
-check-am: all-am
-check: check-am
-all-am: Makefile $(SCRIPTS) $(DATA)
-installdirs:
- for dir in "$(DESTDIR)$(pluginsdir)" "$(DESTDIR)$(pluginsdir)"; do \
- test -z "$$dir" || $(MKDIR_P) "$$dir"; \
- done
-install: install-am
-install-exec: install-exec-am
-install-data: install-data-am
-uninstall: uninstall-am
-
-install-am: all-am
- @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
-
-installcheck: installcheck-am
-install-strip:
- if test -z '$(STRIP)'; then \
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- install; \
- else \
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
- fi
-mostlyclean-generic:
-
-clean-generic:
-
-distclean-generic:
- -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
- -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
-
-maintainer-clean-generic:
- @echo "This command is intended for maintainers to use"
- @echo "it deletes files that may require special tools to rebuild."
- -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
-clean: clean-am
-
-clean-am: clean-generic mostlyclean-am
-
-distclean: distclean-am
- -rm -f Makefile
-distclean-am: clean-am distclean-generic
-
-dvi: dvi-am
-
-dvi-am:
-
-html: html-am
-
-html-am:
-
-info: info-am
-
-info-am:
-
-install-data-am: install-dist_pluginsDATA install-dist_pluginsSCRIPTS
-
-install-dvi: install-dvi-am
-
-install-dvi-am:
-
-install-exec-am:
-
-install-html: install-html-am
-
-install-html-am:
-
-install-info: install-info-am
-
-install-info-am:
-
-install-man:
-
-install-pdf: install-pdf-am
-
-install-pdf-am:
-
-install-ps: install-ps-am
-
-install-ps-am:
-
-installcheck-am:
-
-maintainer-clean: maintainer-clean-am
- -rm -f Makefile
-maintainer-clean-am: distclean-am maintainer-clean-generic
-
-mostlyclean: mostlyclean-am
-
-mostlyclean-am: mostlyclean-generic
-
-pdf: pdf-am
-
-pdf-am:
-
-ps: ps-am
-
-ps-am:
-
-uninstall-am: uninstall-dist_pluginsDATA uninstall-dist_pluginsSCRIPTS
-
-.MAKE: install-am install-strip
-
-.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
- ctags-am distclean distclean-generic distdir dvi dvi-am html \
- html-am info info-am install install-am install-data \
- install-data-am install-dist_pluginsDATA \
- install-dist_pluginsSCRIPTS install-dvi install-dvi-am \
- install-exec install-exec-am install-html install-html-am \
- install-info install-info-am install-man install-pdf \
- install-pdf-am install-ps install-ps-am install-strip \
- installcheck installcheck-am installdirs maintainer-clean \
- maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
- pdf-am ps ps-am tags-am uninstall uninstall-am \
- uninstall-dist_pluginsDATA uninstall-dist_pluginsSCRIPTS
-
-
-# Tell versions [3.59,3.63) of GNU make to not export all variables.
-# Otherwise a system limit (for SysV at least) may be exceeded.
-.NOEXPORT:
diff --git a/plugins.d/README.md b/plugins.d/README.md
deleted file mode 100644
index 35b9a2d9..00000000
--- a/plugins.d/README.md
+++ /dev/null
@@ -1,236 +0,0 @@
-netdata plugins
-===============
-
-Any program that can print a few values to its standard output can become
-a netdata plugin.
-
-There are 5 kinds of lines netdata parses. These are lines starting with:
-
-- `CHART` - create a new chart
-- `DIMENSION` - add a dimension to the chart just created
-- `BEGIN` - initialize data collection for a chart
-- `SET` - set the value of a dimension for the initialized chart
-- `END` - complete data collection for the initialized chart
-
-A single program can produce any number of charts, with any number of dimensions
-each.
-
-Charts can also be added at any time (not just at the beginning).
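-
-As an illustration (the chart name `example.random` and all values below are
-hypothetical), a plugin could print the following to define a chart and send
-its first value, repeating the `BEGIN` -> `SET` -> `END` part on every
-iteration:
-
-    CHART example.random 'random' 'A random number' 'value'
-    DIMENSION random '' absolute 1 1
-    BEGIN example.random
-    SET random = 42
-    END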
-
-### command line parameters
-
-The plugin should accept just **one** parameter: **the number of seconds it is
-expected to wait between updates of the values for its charts**. The value passed
-by netdata to the plugin is controlled via its configuration file (so there is
-no need for the plugin to handle this configuration option).
-
-The plugin can override this update frequency. For example, the server may
-request per-second updates, but the plugin may override this to one update
-every 5 seconds.
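-
-As a minimal sketch (assuming a bash plugin; the 5 second minimum is only an
-illustration), handling this parameter could look like:
-
-    # netdata passes the requested update frequency as the first argument
-    update_every="${1}"
-    [ -z "${update_every}" ] && update_every=1
-    # this plugin chooses to never update more often than every 5 seconds
-    [ "${update_every}" -lt 5 ] && update_every=5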
-
-### environment variables
-
-There are a few environment variables that are set by `netdata` and are
-available for the plugin to use.
-
-variable|description
-:------:|:----------
-`NETDATA_CONFIG_DIR`|The directory where all netdata related configuration should be stored. If the plugin requires custom configuration, this is the place to save it.
-`NETDATA_PLUGINS_DIR`|The directory where all netdata plugins are stored.
-`NETDATA_WEB_DIR`|The directory where the web files of netdata are saved.
-`NETDATA_CACHE_DIR`|The directory where the cache files of netdata are stored. Use this directory if the plugin requires a place to store data. A new directory should be created for the plugin for this purpose, inside this directory.
-`NETDATA_LOG_DIR`|The directory where the log files are stored. By default the `stderr` output of the plugin will be saved in the `error.log` file of netdata.
-`NETDATA_HOST_PREFIX`|This is used in environments where system directories like `/sys` and `/proc` have to be accessed at a different path.
-`NETDATA_DEBUG_FLAGS`|This is a number (probably in hex, starting with `0x`) that enables certain netdata debugging features.
-`NETDATA_UPDATE_EVERY`|The minimum number of seconds between chart refreshes. This is like the **internal clock** of netdata (it is user configurable, defaulting to `1`). There is no point in a plugin updating its values more frequently than this number of seconds.
-
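-A short sketch of how a bash plugin might use these variables (the fallback
-paths and the `example.plugin` sub-directory are assumptions, for illustration
-only):
-
-    config_dir="${NETDATA_CONFIG_DIR:-/etc/netdata}"
-    cache_dir="${NETDATA_CACHE_DIR:-/var/cache/netdata}"
-    # keep this plugin's state in its own sub-directory of the cache
-    mkdir -p "${cache_dir}/example.plugin"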
-
-# the output of the plugin
-
-The plugin should output instructions for netdata to its output (`stdout`).
-
-## CHART
-
-`CHART` defines a new chart.
-
-the template is:
-
-> CHART type.id name title units [family [category [charttype [priority [update_every]]]]]
-
- where:
- - `type.id`
-
- uniquely identifies the chart,
- this is what will be needed to add values to the chart
-
- - `name`
-
-     is the name that will be presented to the user for this chart
-
- - `title`
-
- the text above the chart
-
- - `units`
-
- the label of the vertical axis of the chart,
- all dimensions added to a chart should have the same units
- of measurement
-
- - `family`
-
- is used to group charts together
- (for example all eth0 charts should say: eth0),
- if empty or missing, the `id` part of `type.id` will be used
-
- - `category`
-
- the section under which the chart will appear
- (for example mem.ram should appear in the 'system' section),
- the special word 'none' means: do not show this chart on the home page,
- if empty or missing, the `type` part of `type.id` will be used
-
- - `charttype`
-
- one of `line`, `area` or `stacked`,
- if empty or missing, the `line` will be used
-
- - `priority`
-
- is the relative priority of the charts as rendered on the web page,
- lower numbers make the charts appear before the ones with higher numbers,
- if empty or missing, `1000` will be used
-
- - `update_every`
-
- overwrite the update frequency set by the server,
- if empty or missing, the user configured value will be used
-
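-For example, a hypothetical disk chart could be defined with:
-
-    CHART disk.sda 'sda' 'Disk I/O' 'kilobytes/s' sda disk area 2000 1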
-
-## DIMENSION
-
-`DIMENSION` defines a new dimension for the chart
-
-the template is:
-
-> DIMENSION id [name [algorithm [multiplier [divisor [hidden]]]]]
-
- where:
-
- - `id`
-
- the `id` of this dimension (it is a text value, not numeric),
- this will be needed later to add values to the dimension
-
- - `name`
-
- the name of the dimension as it will appear at the legend of the chart,
- if empty or missing the `id` will be used
-
- - `algorithm`
-
- one of:
-
- * `absolute`
-
-     the value is to be drawn as-is (interpolated to the second boundary),
-     if `algorithm` is empty, invalid or missing, `absolute` is used
-
- * `incremental`
-
- the value increases over time,
- the difference from the last value is presented in the chart,
- the server interpolates the value and calculates a per second figure
-
- * `percentage-of-absolute-row`
-
- the % of this value compared to the total of all dimensions
-
- * `percentage-of-incremental-row`
-
- the % of this value compared to the incremental total of
- all dimensions
-
- - `multiplier`
-
- an integer value to multiply the collected value,
- if empty or missing, `1` is used
-
- - `divisor`
-
- an integer value to divide the collected value,
- if empty or missing, `1` is used
-
- - `hidden`
-
- giving the keyword `hidden` will make this dimension hidden,
- it will take part in the calculations but will not be presented in the chart
-
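-For example, two hypothetical dimensions for the disk chart above could be:
-
-    DIMENSION reads '' incremental 1 1
-    DIMENSION writes '' incremental 1 1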
-
-## data collection
-
-data collection is defined as a series of `BEGIN` -> `SET` -> `END` lines
-
-> BEGIN type.id [microseconds]
-
- - `type.id`
-
- is the unique identification of the chart (as given in `CHART`)
-
- - `microseconds`
-
- is the number of microseconds since the last update of the chart,
- it is optional.
-
-   Under heavy system load, the system may have some latency transferring
- data from the plugins to netdata via the pipe. This number improves
- accuracy significantly, since the plugin is able to calculate the
- duration between its iterations better than netdata.
-
- The first time the plugin is started, no microseconds should be given
- to netdata.
-
-> SET id = value
-
- - `id`
-
-   is the unique identification of the dimension (of the chart that was just begun)
-
- - `value`
-
- is the collected value
-
-> END
-
-  END does not take any parameters; it commits the collected values to the chart.
-
-More `SET` lines may appear, to update all the dimensions of the chart;
-all of them belong to one `BEGIN` -> `END` block.
-
-All `SET` lines within a single `BEGIN` -> `END` block have to refer to the
-same chart.
-
-If more charts need to be updated, each chart should have its own
-`BEGIN` -> `SET` -> `END` block.
-
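-For example, one data collection iteration for the hypothetical disk chart
-above could be:
-
-    BEGIN disk.sda
-    SET reads = 4096
-    SET writes = 512
-    END
-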
-If, for any reason, a plugin has issued a `BEGIN` but wants to cancel it,
-it can issue a `FLUSH`. The `FLUSH` command will instruct netdata to ignore
-the last `BEGIN` command.
-
-If a plugin does not behave properly (outputs invalid lines, or does not
-follow these guidelines), it will be disabled by netdata.
-
-
-### collected values
-
-netdata will collect any **signed** value in the 64-bit range:
-`-9,223,372,036,854,775,808` to `+9,223,372,036,854,775,807`
-
-Internally, all calculations are made using 128 bit double precision and are
-stored in 30 bits as floating point.
-
-If a value is not collected, leave it empty, like this:
-
-`SET id = `
-
-or do not output the line at all.
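-
-### a complete example
-
-Putting the above together, a minimal bash plugin could look like the sketch
-below (the chart name `example.random`, the priority `90000` and the use of
-bash's `${RANDOM}` are illustrative assumptions, not netdata requirements):
-
-    #!/usr/bin/env bash
-
-    # netdata passes the requested update frequency as the first argument
-    update_every="${1:-1}"
-
-    # define the chart and its single dimension, once
-    echo "CHART example.random 'random' 'A random number' 'value' random example line 90000 ${update_every}"
-    echo "DIMENSION random '' absolute 1 1"
-
-    # collect and send a value on every iteration
-    while true
-    do
-        echo "BEGIN example.random"
-        echo "SET random = ${RANDOM}"
-        echo "END"
-        sleep "${update_every}"
-    done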
diff --git a/plugins.d/alarm-email.sh b/plugins.d/alarm-email.sh
deleted file mode 100755
index df083c65..00000000
--- a/plugins.d/alarm-email.sh
+++ /dev/null
@@ -1,6 +0,0 @@
-#!/usr/bin/env bash
-
-# OBSOLETE - REPLACED WITH
-# alarm-notify.sh
-
-${0/alarm-email.sh/alarm-notify.sh} "${@}"
diff --git a/plugins.d/alarm-notify.sh b/plugins.d/alarm-notify.sh
deleted file mode 100755
index 3e23a164..00000000
--- a/plugins.d/alarm-notify.sh
+++ /dev/null
@@ -1,1919 +0,0 @@
-#!/usr/bin/env bash
-
-# netdata
-# real-time performance and health monitoring, done right!
-# (C) 2017 Costa Tsaousis <costa@tsaousis.gr>
-# GPL v3+
-#
-# Script to send alarm notifications for netdata
-#
-# Features:
-# - multiple notification methods
-# - multiple roles per alarm
-# - multiple recipients per role
-# - severity filtering per recipient
-#
-# Supported notification methods:
-# - emails by @ktsaou
-# - slack.com notifications by @ktsaou
-# - alerta.io notifications by @kattunga
-# - discordapp.com notifications by @lowfive
-# - pushover.net notifications by @ktsaou
-# - pushbullet.com push notifications by Tiago Peralta @tperalta82 #1070
-# - telegram.org notifications by @hashworks #1002
-# - twilio.com notifications by Levi Blaney @shadycuz #1211
-# - kafka notifications by @ktsaou #1342
-# - pagerduty.com notifications by Jim Cooley @jimcooley #1373
-# - messagebird.com notifications by @tech_no_logical #1453
-# - hipchat notifications by @ktsaou #1561
-# - custom notifications by @ktsaou
-
-# -----------------------------------------------------------------------------
-# testing notifications
-
-if [ \( "${1}" = "test" -o "${2}" = "test" \) -a "${#}" -le 2 ]
-then
- if [ "${2}" = "test" ]
- then
- recipient="${1}"
- else
- recipient="${2}"
- fi
-
- [ -z "${recipient}" ] && recipient="sysadmin"
-
- id=1
- last="CLEAR"
- for x in "WARNING" "CRITICAL" "CLEAR"
- do
- echo >&2
- echo >&2 "# SENDING TEST ${x} ALARM TO ROLE: ${recipient}"
-
- "${0}" "${recipient}" "$(hostname)" 1 1 "${id}" "$(date +%s)" "test_alarm" "test.chart" "test.family" "${x}" "${last}" 100 90 "${0}" 1 $((0 + id)) "units" "this is a test alarm to verify notifications work" "new value" "old value"
- if [ $? -ne 0 ]
- then
- echo >&2 "# FAILED"
- else
- echo >&2 "# OK"
- fi
-
- last="${x}"
- id=$((id + 1))
- done
-
- exit 1
-fi
-
-export PATH="${PATH}:/sbin:/usr/sbin:/usr/local/sbin"
-export LC_ALL=C
-
-# -----------------------------------------------------------------------------
-
-PROGRAM_NAME="$(basename "${0}")"
-
-logdate() {
- date "+%Y-%m-%d %H:%M:%S"
-}
-
-log() {
- local status="${1}"
- shift
-
- echo >&2 "$(logdate): ${PROGRAM_NAME}: ${status}: ${*}"
-
-}
-
-warning() {
- log WARNING "${@}"
-}
-
-error() {
- log ERROR "${@}"
-}
-
-info() {
- log INFO "${@}"
-}
-
-fatal() {
- log FATAL "${@}"
- exit 1
-}
-
-debug=${NETDATA_ALARM_NOTIFY_DEBUG-0}
-debug() {
- [ "${debug}" = "1" ] && log DEBUG "${@}"
-}
-
-docurl() {
- if [ -z "${curl}" ]
- then
- error "\${curl} is unset."
- return 1
- fi
-
- if [ "${debug}" = "1" ]
- then
- echo >&2 "--- BEGIN curl command ---"
- printf >&2 "%q " ${curl} "${@}"
- echo >&2
- echo >&2 "--- END curl command ---"
-
- local out=$(mktemp /tmp/netdata-health-alarm-notify-XXXXXXXX)
- local code=$(${curl} ${curl_options} --write-out %{http_code} --output "${out}" --silent --show-error "${@}")
- local ret=$?
- echo >&2 "--- BEGIN received response ---"
- cat >&2 "${out}"
- echo >&2
- echo >&2 "--- END received response ---"
- echo >&2 "RECEIVED HTTP RESPONSE CODE: ${code}"
- rm "${out}"
- echo "${code}"
- return ${ret}
- fi
-
- ${curl} ${curl_options} --write-out %{http_code} --output /dev/null --silent --show-error "${@}"
- return $?
-}
-
-# -----------------------------------------------------------------------------
-# this is to be overwritten by the config file
-
-custom_sender() {
- info "not sending custom notification for ${status} of '${host}.${chart}.${name}'"
-}
-
-
-# -----------------------------------------------------------------------------
-
-# check for BASH v4+ (required for associative arrays)
-[ $(( ${BASH_VERSINFO[0]} )) -lt 4 ] && \
- fatal "BASH version 4 or later is required (this is ${BASH_VERSION})."
-
-# -----------------------------------------------------------------------------
-# defaults to allow running this script by hand
-
-[ -z "${NETDATA_CONFIG_DIR}" ] && NETDATA_CONFIG_DIR="$(dirname "${0}")/../../../../etc/netdata"
-[ -z "${NETDATA_CACHE_DIR}" ] && NETDATA_CACHE_DIR="$(dirname "${0}")/../../../../var/cache/netdata"
-[ -z "${NETDATA_REGISTRY_URL}" ] && NETDATA_REGISTRY_URL="https://registry.my-netdata.io"
-
-# -----------------------------------------------------------------------------
-# parse command line parameters
-
-roles="${1}" # the roles that should be notified for this event
-host="${2}"                   # the host that generated this event
-unique_id="${3}" # the unique id of this event
-alarm_id="${4}" # the unique id of the alarm that generated this event
-event_id="${5}" # the incremental id of the event, for this alarm id
-when="${6}" # the timestamp this event occurred
-name="${7}" # the name of the alarm, as given in netdata health.d entries
-chart="${8}" # the name of the chart (type.id)
-family="${9}" # the family of the chart
-status="${10}" # the current status : REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL
-old_status="${11}" # the previous status: REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL
-value="${12}" # the current value of the alarm
-old_value="${13}" # the previous value of the alarm
-src="${14}"                   # the line number and file where the alarm has been configured
-duration="${15}" # the duration in seconds of the previous alarm state
-non_clear_duration="${16}" # the total duration in seconds this is/was non-clear
-units="${17}" # the units of the value
-info="${18}" # a short description of the alarm
-value_string="${19}" # friendly value (with units)
-old_value_string="${20}" # friendly old value (with units)
-
-# -----------------------------------------------------------------------------
-# find a suitable hostname to use, if netdata did not supply a hostname
-
-this_host=$(hostname -s 2>/dev/null)
-[ -z "${host}" ] && host="${this_host}"
-
-# -----------------------------------------------------------------------------
-# screen out statuses for which we don't need to send a notification
-
-# don't do anything if this is not WARNING, CRITICAL or CLEAR
-if [ "${status}" != "WARNING" -a "${status}" != "CRITICAL" -a "${status}" != "CLEAR" ]
-then
- info "not sending notification for ${status} of '${host}.${chart}.${name}'"
- exit 1
-fi
-
-# don't do anything if this is CLEAR, but it was not WARNING or CRITICAL
-if [ "${old_status}" != "WARNING" -a "${old_status}" != "CRITICAL" -a "${status}" = "CLEAR" ]
-then
- info "not sending notification for ${status} of '${host}.${chart}.${name}' (last status was ${old_status})"
- exit 1
-fi
-
-# -----------------------------------------------------------------------------
-# load configuration
-
-# By default fetch images from the global public registry.
-# This is required by default, since all notification methods need to download
-# images via the Internet, and private registries might not be reachable.
-# This can be overridden in the configuration file.
-images_base_url="https://registry.my-netdata.io"
-
-# curl options to use
-curl_options=
-
-# needed commands
-# if empty they will be searched in the system path
-curl=
-sendmail=
-
-# enable / disable features
-SEND_SLACK="YES"
-SEND_ALERTA="YES"
-SEND_FLOCK="YES"
-SEND_DISCORD="YES"
-SEND_PUSHOVER="YES"
-SEND_TWILIO="YES"
-SEND_HIPCHAT="YES"
-SEND_MESSAGEBIRD="YES"
-SEND_KAVENEGAR="YES"
-SEND_TELEGRAM="YES"
-SEND_EMAIL="YES"
-SEND_PUSHBULLET="YES"
-SEND_KAFKA="YES"
-SEND_PD="YES"
-SEND_IRC="YES"
-SEND_CUSTOM="YES"
-
-# slack configs
-SLACK_WEBHOOK_URL=
-DEFAULT_RECIPIENT_SLACK=
-declare -A role_recipients_slack=()
-
-# alerta configs
-ALERTA_WEBHOOK_URL=
-ALERTA_API_KEY=
-DEFAULT_RECIPIENT_ALERTA=
-declare -A role_recipients_alerta=()
-
-# flock configs
-FLOCK_WEBHOOK_URL=
-DEFAULT_RECIPIENT_FLOCK=
-declare -A role_recipients_flock=()
-
-# discord configs
-DISCORD_WEBHOOK_URL=
-DEFAULT_RECIPIENT_DISCORD=
-declare -A role_recipients_discord=()
-
-# pushover configs
-PUSHOVER_APP_TOKEN=
-DEFAULT_RECIPIENT_PUSHOVER=
-declare -A role_recipients_pushover=()
-
-# pushbullet configs
-PUSHBULLET_ACCESS_TOKEN=
-PUSHBULLET_SOURCE_DEVICE=
-DEFAULT_RECIPIENT_PUSHBULLET=
-declare -A role_recipients_pushbullet=()
-
-# twilio configs
-TWILIO_ACCOUNT_SID=
-TWILIO_ACCOUNT_TOKEN=
-TWILIO_NUMBER=
-DEFAULT_RECIPIENT_TWILIO=
-declare -A role_recipients_twilio=()
-
-# hipchat configs
-HIPCHAT_SERVER=
-HIPCHAT_AUTH_TOKEN=
-DEFAULT_RECIPIENT_HIPCHAT=
-declare -A role_recipients_hipchat=()
-
-# messagebird configs
-MESSAGEBIRD_ACCESS_KEY=
-MESSAGEBIRD_NUMBER=
-DEFAULT_RECIPIENT_MESSAGEBIRD=
-declare -A role_recipients_messagebird=()
-
-# kavenegar configs
-KAVENEGAR_API_KEY=""
-KAVENEGAR_SENDER=""
-DEFAULT_RECIPIENT_KAVENEGAR=""
-declare -A role_recipients_kavenegar=()
-
-# telegram configs
-TELEGRAM_BOT_TOKEN=
-DEFAULT_RECIPIENT_TELEGRAM=
-declare -A role_recipients_telegram=()
-
-# kafka configs
-KAFKA_URL=
-KAFKA_SENDER_IP=
-
-# pagerduty.com configs
-PD_SERVICE_KEY=
-DEFAULT_RECIPIENT_PD=
-declare -A role_recipients_pd=()
-
-# custom configs
-DEFAULT_RECIPIENT_CUSTOM=
-declare -A role_recipients_custom=()
-
-# email configs
-EMAIL_SENDER=
-DEFAULT_RECIPIENT_EMAIL="root"
-EMAIL_CHARSET=$(locale charmap 2>/dev/null)
-declare -A role_recipients_email=()
-
-# irc configs
-IRC_NICKNAME=
-IRC_REALNAME=
-DEFAULT_RECIPIENT_IRC=
-IRC_NETWORK=
-declare -A role_recipients_irc=()
-
-# load the user configuration
-# this will overwrite the variables above
-if [ -f "${NETDATA_CONFIG_DIR}/health_alarm_notify.conf" ]
- then
- source "${NETDATA_CONFIG_DIR}/health_alarm_notify.conf"
-else
- error "Cannot find file ${NETDATA_CONFIG_DIR}/health_alarm_notify.conf. Using internal defaults."
-fi
-
-# If we didn't autodetect the character set for e-mail and it wasn't
-# set by the user, we need to set it to a reasonable default. UTF-8
-# should be correct for almost all modern UNIX systems.
-if [ -z "${EMAIL_CHARSET}" ]
- then
- EMAIL_CHARSET="UTF-8"
-fi
-
-# -----------------------------------------------------------------------------
-# filter a recipient based on alarm event severity
-
-filter_recipient_by_criticality() {
- local method="${1}" x="${2}" r s
- shift
-
- r="${x/|*/}" # the recipient
- s="${x/*|/}" # the severity required for notifying this recipient
-
- # no severity filtering for this person
- [ "${r}" = "${s}" ] && return 0
-
- # the severity is invalid
- s="${s^^}"
- if [ "${s}" != "CRITICAL" ]
- then
- error "SEVERITY FILTERING for ${x} VIA ${method}: invalid severity '${s,,}', only 'critical' is supported."
- return 0
- fi
-
- # create the status tracking directory for this user
- [ ! -d "${NETDATA_CACHE_DIR}/alarm-notify/${method}/${r}" ] && \
- mkdir -p "${NETDATA_CACHE_DIR}/alarm-notify/${method}/${r}"
-
- case "${status}" in
- CRITICAL)
- # make sure he will get future notifications for this alarm too
- touch "${NETDATA_CACHE_DIR}/alarm-notify/${method}/${r}/${alarm_id}"
- debug "SEVERITY FILTERING for ${x} VIA ${method}: ALLOW: the alarm is CRITICAL (will now receive next status change)"
- return 0
- ;;
-
- WARNING)
- if [ -f "${NETDATA_CACHE_DIR}/alarm-notify/${method}/${r}/${alarm_id}" ]
- then
- # we do not remove the file, so that he will get future notifications of this alarm
- debug "SEVERITY FILTERING for ${x} VIA ${method}: ALLOW: recipient has been notified for this alarm in the past (will still receive next status change)"
- return 0
- fi
- ;;
-
- *)
- if [ -f "${NETDATA_CACHE_DIR}/alarm-notify/${method}/${r}/${alarm_id}" ]
- then
- # remove the file, so that he will only receive notifications for CRITICAL states for this alarm
- rm "${NETDATA_CACHE_DIR}/alarm-notify/${method}/${r}/${alarm_id}"
- debug "SEVERITY FILTERING for ${x} VIA ${method}: ALLOW: recipient has been notified for this alarm (will only receive CRITICAL notifications from now on)"
- return 0
- fi
- ;;
- esac
-
- debug "SEVERITY FILTERING for ${x} VIA ${method}: BLOCK: recipient should not receive this notification"
- return 1
-}
-
-# -----------------------------------------------------------------------------
-# find the recipients' addresses per method
-
-declare -A arr_slack=()
-declare -A arr_alerta=()
-declare -A arr_flock=()
-declare -A arr_discord=()
-declare -A arr_pushover=()
-declare -A arr_pushbullet=()
-declare -A arr_twilio=()
-declare -A arr_hipchat=()
-declare -A arr_telegram=()
-declare -A arr_pd=()
-declare -A arr_email=()
-declare -A arr_custom=()
-declare -A arr_messagebird=()
-declare -A arr_kavenegar=()
-declare -A arr_irc=()
-
-# netdata may call us with multiple roles, and roles may have multiple but
-# overlapping recipients - so, here we find the unique recipients.
-for x in ${roles//,/ }
-do
- # the roles 'silent' and 'disabled' mean:
- # don't send a notification for this role
- [ "${x}" = "silent" -o "${x}" = "disabled" ] && continue
-
- # email
- a="${role_recipients_email[${x}]}"
- [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_EMAIL}"
- for r in ${a//,/ }
- do
- [ "${r}" != "disabled" ] && filter_recipient_by_criticality email "${r}" && arr_email[${r/|*/}]="1"
- done
-
- # pushover
- a="${role_recipients_pushover[${x}]}"
- [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_PUSHOVER}"
- for r in ${a//,/ }
- do
- [ "${r}" != "disabled" ] && filter_recipient_by_criticality pushover "${r}" && arr_pushover[${r/|*/}]="1"
- done
-
- # pushbullet
- a="${role_recipients_pushbullet[${x}]}"
- [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_PUSHBULLET}"
- for r in ${a//,/ }
- do
- [ "${r}" != "disabled" ] && filter_recipient_by_criticality pushbullet "${r}" && arr_pushbullet[${r/|*/}]="1"
- done
-
- # twilio
- a="${role_recipients_twilio[${x}]}"
- [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_TWILIO}"
- for r in ${a//,/ }
- do
- [ "${r}" != "disabled" ] && filter_recipient_by_criticality twilio "${r}" && arr_twilio[${r/|*/}]="1"
- done
-
- # hipchat
- a="${role_recipients_hipchat[${x}]}"
- [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_HIPCHAT}"
- for r in ${a//,/ }
- do
- [ "${r}" != "disabled" ] && filter_recipient_by_criticality hipchat "${r}" && arr_hipchat[${r/|*/}]="1"
- done
-
- # messagebird
- a="${role_recipients_messagebird[${x}]}"
- [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_MESSAGEBIRD}"
- for r in ${a//,/ }
- do
- [ "${r}" != "disabled" ] && filter_recipient_by_criticality messagebird "${r}" && arr_messagebird[${r/|*/}]="1"
- done
-
- # kavenegar
- a="${role_recipients_kavenegar[${x}]}"
- [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_KAVENEGAR}"
- for r in ${a//,/ }
- do
- [ "${r}" != "disabled" ] && filter_recipient_by_criticality kavenegar "${r}" && arr_kavenegar[${r/|*/}]="1"
- done
-
- # telegram
- a="${role_recipients_telegram[${x}]}"
- [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_TELEGRAM}"
- for r in ${a//,/ }
- do
- [ "${r}" != "disabled" ] && filter_recipient_by_criticality telegram "${r}" && arr_telegram[${r/|*/}]="1"
- done
-
- # slack
- a="${role_recipients_slack[${x}]}"
- [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_SLACK}"
- for r in ${a//,/ }
- do
- [ "${r}" != "disabled" ] && filter_recipient_by_criticality slack "${r}" && arr_slack[${r/|*/}]="1"
- done
-
- # alerta
- a="${role_recipients_alerta[${x}]}"
- [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_ALERTA}"
- for r in ${a//,/ }
- do
- [ "${r}" != "disabled" ] && filter_recipient_by_criticality alerta "${r}" && arr_alerta[${r/|*/}]="1"
- done
-
- # flock
- a="${role_recipients_flock[${x}]}"
- [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_FLOCK}"
- for r in ${a//,/ }
- do
- [ "${r}" != "disabled" ] && filter_recipient_by_criticality flock "${r}" && arr_flock[${r/|*/}]="1"
- done
-
- # discord
- a="${role_recipients_discord[${x}]}"
- [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_DISCORD}"
- for r in ${a//,/ }
- do
- [ "${r}" != "disabled" ] && filter_recipient_by_criticality discord "${r}" && arr_discord[${r/|*/}]="1"
- done
-
- # pagerduty.com
- a="${role_recipients_pd[${x}]}"
- [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_PD}"
- for r in ${a//,/ }
- do
- [ "${r}" != "disabled" ] && filter_recipient_by_criticality pd "${r}" && arr_pd[${r/|*/}]="1"
- done
-
- # irc
- a="${role_recipients_irc[${x}]}"
- [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_IRC}"
- for r in ${a//,/ }
- do
- [ "${r}" != "disabled" ] && filter_recipient_by_criticality irc "${r}" && arr_irc[${r/|*/}]="1"
- done
-
- # custom
- a="${role_recipients_custom[${x}]}"
- [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_CUSTOM}"
- for r in ${a//,/ }
- do
- [ "${r}" != "disabled" ] && filter_recipient_by_criticality custom "${r}" && arr_custom[${r/|*/}]="1"
- done
-
-done
-
-# build the list of slack recipients (channels)
-to_slack="${!arr_slack[*]}"
-[ -z "${to_slack}" ] && SEND_SLACK="NO"
-
-# build the list of alerta recipients (channels)
-to_alerta="${!arr_alerta[*]}"
-[ -z "${to_alerta}" ] && SEND_ALERTA="NO"
-
-# build the list of flock recipients (channels)
-to_flock="${!arr_flock[*]}"
-[ -z "${to_flock}" ] && SEND_FLOCK="NO"
-
-# build the list of discord recipients (channels)
-to_discord="${!arr_discord[*]}"
-[ -z "${to_discord}" ] && SEND_DISCORD="NO"
-
-# build the list of pushover recipients (user tokens)
-to_pushover="${!arr_pushover[*]}"
-[ -z "${to_pushover}" ] && SEND_PUSHOVER="NO"
-
-# build the list of pushbullet recipients (user tokens)
-to_pushbullet="${!arr_pushbullet[*]}"
-[ -z "${to_pushbullet}" ] && SEND_PUSHBULLET="NO"
-
-# build the list of twilio recipients (phone numbers)
-to_twilio="${!arr_twilio[*]}"
-[ -z "${to_twilio}" ] && SEND_TWILIO="NO"
-
-# build the list of hipchat recipients (rooms)
-to_hipchat="${!arr_hipchat[*]}"
-[ -z "${to_hipchat}" ] && SEND_HIPCHAT="NO"
-
-# build the list of messagebird recipients (phone numbers)
-to_messagebird="${!arr_messagebird[*]}"
-[ -z "${to_messagebird}" ] && SEND_MESSAGEBIRD="NO"
-
-# build the list of kavenegar recipients (phone numbers)
-to_kavenegar="${!arr_kavenegar[*]}"
-[ -z "${to_kavenegar}" ] && SEND_KAVENEGAR="NO"
-
-# check array of telegram recipients (chat ids)
-to_telegram="${!arr_telegram[*]}"
-[ -z "${to_telegram}" ] && SEND_TELEGRAM="NO"
-
-# build the list of pagerduty recipients (service keys)
-to_pd="${!arr_pd[*]}"
-[ -z "${to_pd}" ] && SEND_PD="NO"
-
-# build the list of custom recipients
-to_custom="${!arr_custom[*]}"
-[ -z "${to_custom}" ] && SEND_CUSTOM="NO"
-
-# build the list of email recipients (email addresses)
-to_email=
-for x in "${!arr_email[@]}"
-do
- [ ! -z "${to_email}" ] && to_email="${to_email}, "
- to_email="${to_email}${x}"
-done
-[ -z "${to_email}" ] && SEND_EMAIL="NO"
-
-# build the list of irc recipients (channels)
-to_irc="${!arr_irc[*]}"
-[ -z "${to_irc}" ] && SEND_IRC="NO"
-
-# -----------------------------------------------------------------------------
-# verify the delivery methods supported
-
-# check slack
-[ -z "${SLACK_WEBHOOK_URL}" ] && SEND_SLACK="NO"
-
-# check alerta
-[ -z "${ALERTA_WEBHOOK_URL}" ] && SEND_ALERTA="NO"
-
-# check flock
-[ -z "${FLOCK_WEBHOOK_URL}" ] && SEND_FLOCK="NO"
-
-# check discord
-[ -z "${DISCORD_WEBHOOK_URL}" ] && SEND_DISCORD="NO"
-
-# check pushover
-[ -z "${PUSHOVER_APP_TOKEN}" ] && SEND_PUSHOVER="NO"
-
-# check pushbullet
-[ -z "${PUSHBULLET_ACCESS_TOKEN}" ] && SEND_PUSHBULLET="NO"
-
-# check twilio
-[ -z "${TWILIO_ACCOUNT_TOKEN}" -o -z "${TWILIO_ACCOUNT_SID}" -o -z "${TWILIO_NUMBER}" ] && SEND_TWILIO="NO"
-
-# check hipchat
-[ -z "${HIPCHAT_AUTH_TOKEN}" ] && SEND_HIPCHAT="NO"
-
-# check messagebird
-[ -z "${MESSAGEBIRD_ACCESS_KEY}" -o -z "${MESSAGEBIRD_NUMBER}" ] && SEND_MESSAGEBIRD="NO"
-
-# check kavenegar
-[ -z "${KAVENEGAR_API_KEY}" -o -z "${KAVENEGAR_SENDER}" ] && SEND_KAVENEGAR="NO"
-
-# check telegram
-[ -z "${TELEGRAM_BOT_TOKEN}" ] && SEND_TELEGRAM="NO"
-
-# check kafka
-[ -z "${KAFKA_URL}" -o -z "${KAFKA_SENDER_IP}" ] && SEND_KAFKA="NO"
-
-# check irc
-[ -z "${IRC_NETWORK}" ] && SEND_IRC="NO"
-
-# check pagerduty.com
-# if we need pd-send, check for the pd-send command
-# https://www.pagerduty.com/docs/guides/agent-install-guide/
-if [ "${SEND_PD}" = "YES" ]
- then
- pd_send="$(which pd-send 2>/dev/null || command -v pd-send 2>/dev/null)"
- if [ -z "${pd_send}" ]
- then
- error "Cannot find pd-send command in the system path. Disabling pagerduty.com notifications."
- SEND_PD="NO"
- fi
-fi
-
-# if we need curl, check for the curl command
-if [ \( \
- "${SEND_PUSHOVER}" = "YES" \
- -o "${SEND_SLACK}" = "YES" \
- -o "${SEND_ALERTA}" = "YES" \
- -o "${SEND_FLOCK}" = "YES" \
- -o "${SEND_DISCORD}" = "YES" \
- -o "${SEND_HIPCHAT}" = "YES" \
- -o "${SEND_TWILIO}" = "YES" \
- -o "${SEND_MESSAGEBIRD}" = "YES" \
- -o "${SEND_KAVENEGAR}" = "YES" \
- -o "${SEND_TELEGRAM}" = "YES" \
- -o "${SEND_PUSHBULLET}" = "YES" \
- -o "${SEND_KAFKA}" = "YES" \
- -o "${SEND_CUSTOM}" = "YES" \
- \) -a -z "${curl}" ]
- then
- curl="$(which curl 2>/dev/null || command -v curl 2>/dev/null)"
- if [ -z "${curl}" ]
- then
- error "Cannot find curl command in the system path. Disabling all curl based notifications."
- SEND_PUSHOVER="NO"
- SEND_PUSHBULLET="NO"
- SEND_TELEGRAM="NO"
- SEND_SLACK="NO"
- SEND_ALERTA="NO"
- SEND_FLOCK="NO"
- SEND_DISCORD="NO"
- SEND_TWILIO="NO"
- SEND_HIPCHAT="NO"
- SEND_MESSAGEBIRD="NO"
- SEND_KAVENEGAR="NO"
- SEND_KAFKA="NO"
- SEND_CUSTOM="NO"
- fi
-fi
-
-# if we need sendmail, check for the sendmail command
-if [ "${SEND_EMAIL}" = "YES" -a -z "${sendmail}" ]
- then
- sendmail="$(which sendmail 2>/dev/null || command -v sendmail 2>/dev/null)"
- if [ -z "${sendmail}" ]
- then
- debug "Cannot find sendmail command in the system path. Disabling email notifications."
- SEND_EMAIL="NO"
- fi
-fi
-
-# check that we have at least a method enabled
-if [ "${SEND_EMAIL}" != "YES" \
- -a "${SEND_PUSHOVER}" != "YES" \
- -a "${SEND_TELEGRAM}" != "YES" \
- -a "${SEND_SLACK}" != "YES" \
- -a "${SEND_ALERTA}" != "YES" \
- -a "${SEND_FLOCK}" != "YES" \
- -a "${SEND_DISCORD}" != "YES" \
- -a "${SEND_TWILIO}" != "YES" \
- -a "${SEND_HIPCHAT}" != "YES" \
- -a "${SEND_MESSAGEBIRD}" != "YES" \
- -a "${SEND_KAVENEGAR}" != "YES" \
- -a "${SEND_PUSHBULLET}" != "YES" \
- -a "${SEND_KAFKA}" != "YES" \
- -a "${SEND_PD}" != "YES" \
- -a "${SEND_CUSTOM}" != "YES" \
- -a "${SEND_IRC}" != "YES" \
- ]
- then
- fatal "All notification methods are disabled. Not sending notification for host '${host}', chart '${chart}' to '${roles}' for '${name}' = '${value}' for status '${status}'."
-fi
-
-# -----------------------------------------------------------------------------
-# get the date the alarm happened
-
-date="$(date --date=@${when} 2>/dev/null)"
-[ -z "${date}" ] && date="$(date 2>/dev/null)"
-
-# -----------------------------------------------------------------------------
-# function to URL encode a string
-
-urlencode() {
- local string="${1}" strlen encoded pos c o
-
- strlen=${#string}
- for (( pos=0 ; pos<strlen ; pos++ ))
- do
- c=${string:${pos}:1}
- case "${c}" in
- [-_.~a-zA-Z0-9])
- o="${c}"
- ;;
-
- *)
- printf -v o '%%%02x' "'${c}"
- ;;
- esac
- encoded+="${o}"
- done
-
- REPLY="${encoded}"
- echo "${REPLY}"
-}
-
-# -----------------------------------------------------------------------------
-# function to convert a duration in seconds, to a human readable duration
-# using DAYS, HOURS, MINUTES, SECONDS
-
-duration4human() {
- local s="${1}" d=0 h=0 m=0 ds="day" hs="hour" ms="minute" ss="second" ret
- d=$(( s / 86400 ))
- s=$(( s - (d * 86400) ))
- h=$(( s / 3600 ))
- s=$(( s - (h * 3600) ))
- m=$(( s / 60 ))
- s=$(( s - (m * 60) ))
-
- if [ ${d} -gt 0 ]
- then
- [ ${m} -ge 30 ] && h=$(( h + 1 ))
- [ ${d} -gt 1 ] && ds="days"
- [ ${h} -gt 1 ] && hs="hours"
- if [ ${h} -gt 0 ]
- then
- ret="${d} ${ds} and ${h} ${hs}"
- else
- ret="${d} ${ds}"
- fi
- elif [ ${h} -gt 0 ]
- then
- [ ${s} -ge 30 ] && m=$(( m + 1 ))
- [ ${h} -gt 1 ] && hs="hours"
- [ ${m} -gt 1 ] && ms="minutes"
- if [ ${m} -gt 0 ]
- then
- ret="${h} ${hs} and ${m} ${ms}"
- else
- ret="${h} ${hs}"
- fi
- elif [ ${m} -gt 0 ]
- then
- [ ${m} -gt 1 ] && ms="minutes"
- [ ${s} -gt 1 ] && ss="seconds"
- if [ ${s} -gt 0 ]
- then
- ret="${m} ${ms} and ${s} ${ss}"
- else
- ret="${m} ${ms}"
- fi
- else
- [ ${s} -gt 1 ] && ss="seconds"
- ret="${s} ${ss}"
- fi
-
- REPLY="${ret}"
- echo "${REPLY}"
-}
-
-# -----------------------------------------------------------------------------
-# email sender
-
-send_email() {
- local ret= opts=
- if [ "${SEND_EMAIL}" = "YES" ]
- then
-
- if [ ! -z "${EMAIL_SENDER}" ]
- then
- if [[ "${EMAIL_SENDER}" =~ \".*\"\ \<.*\> ]]
- then
-                       # the name includes double quotes
- opts=" -f $(echo "${EMAIL_SENDER}" | cut -d '<' -f 2 | cut -d '>' -f 1) -F $(echo "${EMAIL_SENDER}" | cut -d '<' -f 1)"
- elif [[ "${EMAIL_SENDER}" =~ \'.*\'\ \<.*\> ]]
- then
-                       # the name includes single quotes
- opts=" -f $(echo "${EMAIL_SENDER}" | cut -d '<' -f 2 | cut -d '>' -f 1) -F $(echo "${EMAIL_SENDER}" | cut -d '<' -f 1)"
- elif [[ "${EMAIL_SENDER}" =~ .*\ \<.*\> ]]
- then
- # the name does not have any quotes
- opts=" -f $(echo "${EMAIL_SENDER}" | cut -d '<' -f 2 | cut -d '>' -f 1) -F '$(echo "${EMAIL_SENDER}" | cut -d '<' -f 1)'"
- else
- # no name at all
- opts=" -f ${EMAIL_SENDER}"
- fi
- fi
-
- if [ "${debug}" = "1" ]
- then
- echo >&2 "--- BEGIN sendmail command ---"
- printf >&2 "%q " "${sendmail}" -t ${opts}
- echo >&2
- echo >&2 "--- END sendmail command ---"
- fi
-
- "${sendmail}" -t ${opts}
- ret=$?
-
- if [ ${ret} -eq 0 ]
- then
- info "sent email notification for: ${host} ${chart}.${name} is ${status} to '${to_email}'"
- return 0
- else
- error "failed to send email notification for: ${host} ${chart}.${name} is ${status} to '${to_email}' with error code ${ret}."
- return 1
- fi
- fi
-
- return 1
-}
-
-# -----------------------------------------------------------------------------
-# pushover sender
-
-send_pushover() {
- local apptoken="${1}" usertokens="${2}" when="${3}" url="${4}" status="${5}" title="${6}" message="${7}" httpcode sent=0 user priority
-
- if [ "${SEND_PUSHOVER}" = "YES" -a ! -z "${apptoken}" -a ! -z "${usertokens}" -a ! -z "${title}" -a ! -z "${message}" ]
- then
-
- # https://pushover.net/api
- priority=-2
- case "${status}" in
- CLEAR) priority=-1;; # low priority: no sound or vibration
- WARNING) priority=0;; # normal priority: respect quiet hours
- CRITICAL) priority=1;; # high priority: bypass quiet hours
- *) priority=-2;; # lowest priority: no notification at all
- esac
-
- for user in ${usertokens}
- do
- httpcode=$(docurl \
- --form-string "token=${apptoken}" \
- --form-string "user=${user}" \
- --form-string "html=1" \
- --form-string "title=${title}" \
- --form-string "message=${message}" \
- --form-string "timestamp=${when}" \
- --form-string "url=${url}" \
- --form-string "url_title=Open netdata dashboard to view the alarm" \
- --form-string "priority=${priority}" \
- https://api.pushover.net/1/messages.json)
-
- if [ "${httpcode}" = "200" ]
- then
- info "sent pushover notification for: ${host} ${chart}.${name} is ${status} to '${user}'"
- sent=$((sent + 1))
- else
- error "failed to send pushover notification for: ${host} ${chart}.${name} is ${status} to '${user}' with HTTP error code ${httpcode}."
- fi
- done
-
- [ ${sent} -gt 0 ] && return 0
- fi
-
- return 1
-}
-
-# -----------------------------------------------------------------------------
-# pushbullet sender
-
-send_pushbullet() {
- local userapikey="${1}" source_device="${2}" recipients="${3}" url="${4}" title="${5}" message="${6}" httpcode sent=0 user
- if [ "${SEND_PUSHBULLET}" = "YES" -a ! -z "${userapikey}" -a ! -z "${recipients}" -a ! -z "${message}" -a ! -z "${title}" ]
- then
- #https://docs.pushbullet.com/#create-push
- for user in ${recipients}
- do
- httpcode=$(docurl \
- --header 'Access-Token: '${userapikey}'' \
- --header 'Content-Type: application/json' \
- --data-binary @<(cat <<EOF
- {"title": "${title}",
- "type": "link",
- "email": "${user}",
- "body": "$( echo -n ${message})",
- "url": "${url}",
- "source_device_iden": "${source_device}"}
-EOF
- ) "https://api.pushbullet.com/v2/pushes" -X POST)
-
- if [ "${httpcode}" = "200" ]
- then
- info "sent pushbullet notification for: ${host} ${chart}.${name} is ${status} to '${user}'"
- sent=$((sent + 1))
- else
- error "failed to send pushbullet notification for: ${host} ${chart}.${name} is ${status} to '${user}' with HTTP error code ${httpcode}."
- fi
- done
-
- [ ${sent} -gt 0 ] && return 0
- fi
-
- return 1
-}
-
-# -----------------------------------------------------------------------------
-# kafka sender
-
-send_kafka() {
- local httpcode sent=0
- if [ "${SEND_KAFKA}" = "YES" ]
- then
- httpcode=$(docurl -X POST \
- --data "{host_ip:\"${KAFKA_SENDER_IP}\",when:${when},name:\"${name}\",chart:\"${chart}\",family:\"${family}\",status:\"${status}\",old_status:\"${old_status}\",value:${value},old_value:${old_value},duration:${duration},non_clear_duration:${non_clear_duration},units:\"${units}\",info:\"${info}\"}" \
- "${KAFKA_URL}")
-
- if [ "${httpcode}" = "204" ]
- then
- info "sent kafka data for: ${host} ${chart}.${name} is ${status} and ip '${KAFKA_SENDER_IP}'"
- sent=$((sent + 1))
- else
- error "failed to send kafka data for: ${host} ${chart}.${name} is ${status} and ip '${KAFKA_SENDER_IP}' with HTTP error code ${httpcode}."
- fi
-
- [ ${sent} -gt 0 ] && return 0
- fi
-
- return 1
-}
-
-# -----------------------------------------------------------------------------
-# pagerduty.com sender
-
-send_pd() {
- local recipients="${1}" sent=0
- unset t
- case ${status} in
- CLEAR) t='resolve';;
- WARNING) t='trigger';;
- CRITICAL) t='trigger';;
- esac
-
- if [ ${SEND_PD} = "YES" -a ! -z "${t}" ]
- then
- for PD_SERVICE_KEY in ${recipients}
- do
- d="${status} ${name} = ${value_string} - ${host}, ${family}"
- ${pd_send} -k ${PD_SERVICE_KEY} \
- -t ${t} \
- -d "${d}" \
- -i ${host}:${chart}:${name} \
- -f 'info'="${info}" \
- -f 'value_w_units'="${value_string}" \
- -f 'when'="${when}" \
- -f 'duration'="${duration}" \
- -f 'roles'="${roles}" \
- -f 'host'="${host}" \
- -f 'unique_id'="${unique_id}" \
- -f 'alarm_id'="${alarm_id}" \
- -f 'event_id'="${event_id}" \
- -f 'name'="${name}" \
- -f 'chart'="${chart}" \
- -f 'family'="${family}" \
- -f 'status'="${status}" \
- -f 'old_status'="${old_status}" \
- -f 'value'="${value}" \
- -f 'old_value'="${old_value}" \
- -f 'src'="${src}" \
- -f 'non_clear_duration'="${non_clear_duration}" \
- -f 'units'="${units}"
- retval=$?
- if [ ${retval} -eq 0 ]
- then
- info "sent pagerduty.com notification for host ${host} ${chart}.${name} using service key ${PD_SERVICE_KEY::-26}....: ${d}"
- sent=$((sent + 1))
- else
- error "failed to send pagerduty.com notification for ${host} ${chart}.${name} using service key ${PD_SERVICE_KEY::-26}.... (error code ${retval}): ${d}"
- fi
- done
-
- [ ${sent} -gt 0 ] && return 0
- fi
-
- return 1
-}
-
-# -----------------------------------------------------------------------------
-# twilio sender
-
-send_twilio() {
- local accountsid="${1}" accounttoken="${2}" twilionumber="${3}" recipients="${4}" title="${5}" message="${6}" httpcode sent=0 user
- if [ "${SEND_TWILIO}" = "YES" -a ! -z "${accountsid}" -a ! -z "${accounttoken}" -a ! -z "${twilionumber}" -a ! -z "${recipients}" -a ! -z "${message}" -a ! -z "${title}" ]
- then
- #https://www.twilio.com/packages/labs/code/bash/twilio-sms
- for user in ${recipients}
- do
- httpcode=$(docurl -X POST \
- --data-urlencode "From=${twilionumber}" \
- --data-urlencode "To=${user}" \
- --data-urlencode "Body=${title} ${message}" \
- -u "${accountsid}:${accounttoken}" \
- "https://api.twilio.com/2010-04-01/Accounts/${accountsid}/Messages.json")
-
- if [ "${httpcode}" = "201" ]
- then
- info "sent Twilio SMS for: ${host} ${chart}.${name} is ${status} to '${user}'"
- sent=$((sent + 1))
- else
- error "failed to send Twilio SMS for: ${host} ${chart}.${name} is ${status} to '${user}' with HTTP error code ${httpcode}."
- fi
- done
-
- [ ${sent} -gt 0 ] && return 0
- fi
-
- return 1
-}
-
-
-# -----------------------------------------------------------------------------
-# hipchat sender
-
-send_hipchat() {
- local authtoken="${1}" recipients="${2}" message="${3}" httpcode sent=0 room color sender msg_format notify
-
- # remove <small></small> from the message
- message="${message//<small>/}"
- message="${message//<\/small>/}"
-
- if [ "${SEND_HIPCHAT}" = "YES" -a ! -z "${HIPCHAT_SERVER}" -a ! -z "${authtoken}" -a ! -z "${recipients}" -a ! -z "${message}" ]
- then
- # A label to be shown in addition to the sender's name
- # Valid length range: 0 - 64.
- sender="netdata"
-
- # Valid values: html, text.
- # Defaults to 'html'.
- msg_format="html"
-
- # Background color for message. Valid values: yellow, green, red, purple, gray, random. Defaults to 'yellow'.
- case "${status}" in
- WARNING) color="yellow" ;;
- CRITICAL) color="red" ;;
- CLEAR) color="green" ;;
- *) color="gray" ;;
- esac
-
- # Whether this message should trigger a user notification (change the tab color, play a sound, notify mobile phones, etc).
- # Each recipient's notification preferences are taken into account.
- # Defaults to false.
- notify="true"
-
- for room in ${recipients}
- do
- httpcode=$(docurl -X POST \
- -H "Content-type: application/json" \
- -H "Authorization: Bearer ${authtoken}" \
- -d "{\"color\": \"${color}\", \"from\": \"${host}\", \"message_format\": \"${msg_format}\", \"message\": \"${message}\", \"notify\": \"${notify}\"}" \
- "https://${HIPCHAT_SERVER}/v2/room/${room}/notification")
-
- if [ "${httpcode}" = "204" ]
- then
- info "sent HipChat notification for: ${host} ${chart}.${name} is ${status} to '${room}'"
- sent=$((sent + 1))
- else
- error "failed to send HipChat notification for: ${host} ${chart}.${name} is ${status} to '${room}' with HTTP error code ${httpcode}."
- fi
- done
-
- [ ${sent} -gt 0 ] && return 0
- fi
-
- return 1
-}
-
-
-# -----------------------------------------------------------------------------
-# messagebird sender
-
-send_messagebird() {
- local accesskey="${1}" messagebirdnumber="${2}" recipients="${3}" title="${4}" message="${5}" httpcode sent=0 user
- if [ "${SEND_MESSAGEBIRD}" = "YES" -a ! -z "${accesskey}" -a ! -z "${messagebirdnumber}" -a ! -z "${recipients}" -a ! -z "${message}" -a ! -z "${title}" ]
- then
- #https://developers.messagebird.com/docs/messaging
- for user in ${recipients}
- do
- httpcode=$(docurl -X POST \
- --data-urlencode "originator=${messagebirdnumber}" \
- --data-urlencode "recipients=${user}" \
- --data-urlencode "body=${title} ${message}" \
- --data-urlencode "datacoding=auto" \
- -H "Authorization: AccessKey ${accesskey}" \
- "https://rest.messagebird.com/messages")
-
- if [ "${httpcode}" = "201" ]
- then
- info "sent Messagebird SMS for: ${host} ${chart}.${name} is ${status} to '${user}'"
- sent=$((sent + 1))
- else
- error "failed to send Messagebird SMS for: ${host} ${chart}.${name} is ${status} to '${user}' with HTTP error code ${httpcode}."
- fi
- done
-
- [ ${sent} -gt 0 ] && return 0
- fi
-
- return 1
-}
-
-# -----------------------------------------------------------------------------
-# kavenegar sender
-
-send_kavenegar() {
- local API_KEY="${1}" kavenegarsender="${2}" recipients="${3}" title="${4}" message="${5}" httpcode sent=0 user
- if [ "${SEND_KAVENEGAR}" = "YES" -a ! -z "${API_KEY}" -a ! -z "${kavenegarsender}" -a ! -z "${recipients}" -a ! -z "${message}" -a ! -z "${title}" ]
- then
- # http://api.kavenegar.com/v1/{API-KEY}/sms/send.json
- for user in ${recipients}
- do
- httpcode=$(docurl -X POST http://api.kavenegar.com/v1/${API_KEY}/sms/send.json \
- --data-urlencode "sender=${kavenegarsender}" \
- --data-urlencode "receptor=${user}" \
- --data-urlencode "message=${title} ${message}")
-
- if [ "${httpcode}" = "201" ]
- then
- info "sent Kavenegar SMS for: ${host} ${chart}.${name} is ${status} to '${user}'"
- sent=$((sent + 1))
- else
- error "failed to send Kavenegar SMS for: ${host} ${chart}.${name} is ${status} to '${user}' with HTTP error code ${httpcode}."
- fi
- done
-
- [ ${sent} -gt 0 ] && return 0
- fi
-
- return 1
-}
-
-# -----------------------------------------------------------------------------
-# telegram sender
-
-send_telegram() {
- local bottoken="${1}" chatids="${2}" message="${3}" httpcode sent=0 chatid emoji disableNotification=""
-
- if [ "${status}" = "CLEAR" ]; then disableNotification="--data-urlencode disable_notification=true"; fi
-
- case "${status}" in
- WARNING) emoji="⚠️" ;;
- CRITICAL) emoji="🔴" ;;
- CLEAR) emoji="✅" ;;
- *) emoji="⚪️" ;;
- esac
-
- if [ "${SEND_TELEGRAM}" = "YES" -a ! -z "${bottoken}" -a ! -z "${chatids}" -a ! -z "${message}" ];
- then
- for chatid in ${chatids}
- do
- # https://core.telegram.org/bots/api#sendmessage
- httpcode=$(docurl ${disableNotification} \
- --data-urlencode "parse_mode=HTML" \
- --data-urlencode "disable_web_page_preview=true" \
- --data-urlencode "text=${emoji} ${message}" \
- "https://api.telegram.org/bot${bottoken}/sendMessage?chat_id=${chatid}")
-
- if [ "${httpcode}" = "200" ]
- then
- info "sent telegram notification for: ${host} ${chart}.${name} is ${status} to '${chatid}'"
- sent=$((sent + 1))
- elif [ "${httpcode}" = "401" ]
- then
- error "failed to send telegram notification for: ${host} ${chart}.${name} is ${status} to '${chatid}': Wrong bot token."
- else
- error "failed to send telegram notification for: ${host} ${chart}.${name} is ${status} to '${chatid}' with HTTP error code ${httpcode}."
- fi
- done
-
- [ ${sent} -gt 0 ] && return 0
- fi
-
- return 1
-}
-
-# -----------------------------------------------------------------------------
-# slack sender
-
-send_slack() {
- local webhook="${1}" channels="${2}" httpcode sent=0 channel color payload
-
- [ "${SEND_SLACK}" != "YES" ] && return 1
-
- case "${status}" in
- WARNING) color="warning" ;;
- CRITICAL) color="danger" ;;
- CLEAR) color="good" ;;
- *) color="#777777" ;;
- esac
-
- for channel in ${channels}
- do
- payload="$(cat <<EOF
- {
- "channel": "#${channel}",
- "username": "netdata on ${host}",
- "icon_url": "${images_base_url}/images/seo-performance-128.png",
- "text": "${host} ${status_message}, \`${chart}\` (_${family}_), *${alarm}*",
- "attachments": [
- {
- "fallback": "${alarm} - ${chart} (${family}) - ${info}",
- "color": "${color}",
- "title": "${alarm}",
- "title_link": "${goto_url}",
- "text": "${info}",
- "fields": [
- {
- "title": "${chart}",
- "short": true
- },
- {
- "title": "${family}",
- "short": true
- }
- ],
- "thumb_url": "${image}",
- "footer": "by <${goto_url}|${this_host}>",
- "ts": ${when}
- }
- ]
- }
-EOF
- )"
-
- httpcode=$(docurl -X POST --data-urlencode "payload=${payload}" "${webhook}")
- if [ "${httpcode}" = "200" ]
- then
- info "sent slack notification for: ${host} ${chart}.${name} is ${status} to '${channel}'"
- sent=$((sent + 1))
- else
- error "failed to send slack notification for: ${host} ${chart}.${name} is ${status} to '${channel}', with HTTP error code ${httpcode}."
- fi
- done
-
- [ ${sent} -gt 0 ] && return 0
-
- return 1
-}
-
-# -----------------------------------------------------------------------------
-# alerta sender
-
-send_alerta() {
- local webhook="${1}" channels="${2}" httpcode sent=0 channel severity content
-
- [ "${SEND_ALERTA}" != "YES" ] && return 1
-
- case "${status}" in
- WARNING) severity="warning" ;;
- CRITICAL) severity="critical" ;;
- CLEAR) severity="cleared" ;;
- *) severity="unknown" ;;
- esac
-
- info=$( echo -n ${info})
-
-    # the "event" property must be unique and repeatable across states, so that alerta can automatically correlate alerts using the severity value
- for channel in ${channels}
- do
- content="{"
- content="$content \"environment\": \"${channel}\","
- content="$content \"service\": [\"${host}\"],"
- content="$content \"resource\": \"${host}\","
- content="$content \"event\": \"${name}.${chart} (${family})\","
- content="$content \"severity\": \"${severity}\","
- content="$content \"value\": \"${alarm}\","
- content="$content \"text\": \"${info}\""
- content="$content }"
-
-
- httpcode=$(docurl -X POST "${webhook}/alert" -H "Content-Type: application/json" -H "Authorization: Key $ALERTA_API_KEY" -d "$content" )
-
- if [[ "${httpcode}" = "200" || "${httpcode}" = "201" ]]
- then
- info "sent alerta notification for: ${host} ${chart}.${name} is ${status} to '${channel}'"
- sent=$((sent + 1))
- else
- error "failed to send alerta notification for: ${host} ${chart}.${name} is ${status} to '${channel}', with HTTP error code ${httpcode}."
- fi
- done
-
- [ ${sent} -gt 0 ] && return 0
-
- return 1
-}
-
-# -----------------------------------------------------------------------------
-# flock sender
-
-send_flock() {
- local webhook="${1}" channels="${2}" httpcode sent=0 channel color payload
-
- [ "${SEND_FLOCK}" != "YES" ] && return 1
-
- case "${status}" in
- WARNING) color="warning" ;;
- CRITICAL) color="danger" ;;
- CLEAR) color="good" ;;
- *) color="#777777" ;;
- esac
-
- for channel in ${channels}
- do
- httpcode=$(docurl -X POST "${webhook}" -H "Content-Type: application/json" -d "{
- \"sendAs\": {
- \"name\" : \"netdata on ${host}\",
- \"profileImage\" : \"${images_base_url}/images/seo-performance-128.png\"
- },
- \"text\": \"${host} *${status_message}*\",
- \"timestamp\": \"${when}\",
- \"attachments\": [
- {
- \"description\": \"${chart} (${family}) - ${info}\",
- \"color\": \"${color}\",
- \"title\": \"${alarm}\",
- \"url\": \"${goto_url}\",
- \"text\": \"${info}\",
- \"views\": {
- \"image\": {
- \"original\": { \"src\": \"${image}\", \"width\": 400, \"height\": 400 },
- \"thumbnail\": { \"src\": \"${image}\", \"width\": 50, \"height\": 50 },
- \"filename\": \"${image}\"
- }
- }
- }
- ]
- }" )
- if [ "${httpcode}" = "200" ]
- then
- info "sent flock notification for: ${host} ${chart}.${name} is ${status} to '${channel}'"
- sent=$((sent + 1))
- else
- error "failed to send flock notification for: ${host} ${chart}.${name} is ${status} to '${channel}', with HTTP error code ${httpcode}."
- fi
- done
-
- [ ${sent} -gt 0 ] && return 0
-
- return 1
-}
-
-# -----------------------------------------------------------------------------
-# discord sender
-
-send_discord() {
- local webhook="${1}/slack" channels="${2}" httpcode sent=0 channel color payload username
-
- [ "${SEND_DISCORD}" != "YES" ] && return 1
-
- case "${status}" in
- WARNING) color="warning" ;;
- CRITICAL) color="danger" ;;
- CLEAR) color="good" ;;
- *) color="#777777" ;;
- esac
-
- for channel in ${channels}
- do
- username="netdata on ${host}"
- [ ${#username} -gt 32 ] && username="${username:0:29}..."
-
- payload="$(cat <<EOF
- {
- "channel": "#${channel}",
- "username": "${username}",
- "text": "${host} ${status_message}, \`${chart}\` (_${family}_), *${alarm}*",
- "icon_url": "${images_base_url}/images/seo-performance-128.png",
- "attachments": [
- {
- "color": "${color}",
- "title": "${alarm}",
- "title_link": "${goto_url}",
- "text": "${info}",
- "fields": [
- {
- "title": "${chart}",
- "value": "${family}"
- }
- ],
- "thumb_url": "${image}",
- "footer_icon": "${images_base_url}/images/seo-performance-128.png",
- "footer": "${this_host}",
- "ts": ${when}
- }
- ]
- }
-EOF
- )"
-
- httpcode=$(docurl -X POST --data-urlencode "payload=${payload}" "${webhook}")
- if [ "${httpcode}" = "200" ]
- then
- info "sent discord notification for: ${host} ${chart}.${name} is ${status} to '${channel}'"
- sent=$((sent + 1))
- else
- error "failed to send discord notification for: ${host} ${chart}.${name} is ${status} to '${channel}', with HTTP error code ${httpcode}."
- fi
- done
-
- [ ${sent} -gt 0 ] && return 0
-
- return 1
-}
-
-# -----------------------------------------------------------------------------
-# irc sender
-
-send_irc() {
- local NICKNAME="${1}" REALNAME="${2}" CHANNELS="${3}" NETWORK="${4}" SERVERNAME="${5}" MESSAGE="${6}" sent=0 channel color send_alarm reply_codes error
-
- if [ "${SEND_IRC}" = "YES" -a ! -z "${NICKNAME}" -a ! -z "${REALNAME}" -a ! -z "${CHANNELS}" -a ! -z "${NETWORK}" -a ! -z "${SERVERNAME}" ]
- then
- case "${status}" in
- WARNING) color="warning" ;;
- CRITICAL) color="danger" ;;
- CLEAR) color="good" ;;
- *) color="#777777" ;;
- esac
-
- for CHANNEL in ${CHANNELS}
- do
- error=0
-            send_alarm=$(echo -e "USER ${NICKNAME} guest ${REALNAME} ${SERVERNAME}\nNICK ${NICKNAME}\nJOIN ${CHANNEL}\nPRIVMSG ${CHANNEL} :${MESSAGE}\nQUIT\n" | nc ${NETWORK} 6667)
- reply_codes=$(echo ${send_alarm} | cut -d ' ' -f 2 | grep -o '[0-9]*')
- for code in ${reply_codes}
- do
- [ "${code}" -ge 400 -a "${code}" -le 599 ] && error=1 && break
- done
-
- if [ "${error}" -eq 0 ]
- then
- info "sent irc notification for: ${host} ${chart}.${name} is ${status} to '${CHANNEL}'"
- sent=$((sent + 1))
- else
- error "failed to send irc notification for: ${host} ${chart}.${name} is ${status} to '${CHANNEL}', with error code ${code}."
- fi
- done
- fi
-
- [ ${sent} -gt 0 ] && return 0
-
- return 1
-}
-
-
-# -----------------------------------------------------------------------------
-# prepare the content of the notification
-
-# the URL the user will be sent to on click
-urlencode "${host}" >/dev/null; url_host="${REPLY}"
-urlencode "${chart}" >/dev/null; url_chart="${REPLY}"
-urlencode "${family}" >/dev/null; url_family="${REPLY}"
-urlencode "${name}" >/dev/null; url_name="${REPLY}"
-goto_url="${NETDATA_REGISTRY_URL}/goto-host-from-alarm.html?host=${url_host}&chart=${url_chart}&family=${url_family}&alarm=${url_name}&alarm_unique_id=${unique_id}&alarm_id=${alarm_id}&alarm_event_id=${event_id}"
-
-# the severity of the alarm
-severity="${status}"
-
-# for how long the alarm has been raised
-duration4human ${duration} >/dev/null; duration_txt="${REPLY}"
-duration4human ${non_clear_duration} >/dev/null; non_clear_duration_txt="${REPLY}"
-raised_for="(was ${old_status,,} for ${duration_txt})"
-
-# the key status message
-status_message="status unknown"
-
-# the color of the alarm
-color="grey"
-
-# the alarm value
-alarm="${name//_/ } = ${value_string}"
-
-# the image of the alarm
-image="${images_base_url}/images/seo-performance-128.png"
-
-# prepare the title based on status
-case "${status}" in
- CRITICAL)
- image="${images_base_url}/images/alert-128-red.png"
- status_message="is critical"
- color="#ca414b"
- ;;
-
- WARNING)
- image="${images_base_url}/images/alert-128-orange.png"
- status_message="needs attention"
- color="#ffc107"
- ;;
-
- CLEAR)
- image="${images_base_url}/images/check-mark-2-128-green.png"
- status_message="recovered"
- color="#77ca6d"
- ;;
-esac
-
-if [ "${status}" = "CLEAR" ]
-then
- severity="Recovered from ${old_status}"
- if [ ${non_clear_duration} -gt ${duration} ]
- then
- raised_for="(alarm was raised for ${non_clear_duration_txt})"
- fi
-
- # don't show the value when the status is CLEAR
- # for certain alarms, this value might not have any meaning
- alarm="${name//_/ } ${raised_for}"
-
-elif [ "${old_status}" = "WARNING" -a "${status}" = "CRITICAL" ]
-then
- severity="Escalated to ${status}"
- if [ ${non_clear_duration} -gt ${duration} ]
- then
- raised_for="(alarm is raised for ${non_clear_duration_txt})"
- fi
-
-elif [ "${old_status}" = "CRITICAL" -a "${status}" = "WARNING" ]
-then
- severity="Demoted to ${status}"
- if [ ${non_clear_duration} -gt ${duration} ]
- then
- raised_for="(alarm is raised for ${non_clear_duration_txt})"
- fi
-
-else
- raised_for=
-fi
-
-# prepare HTML versions of elements
-info_html=
-[ ! -z "${info}" ] && info_html=" <small><br/>${info}</small>"
-
-raised_for_html=
-[ ! -z "${raised_for}" ] && raised_for_html="<br/><small>${raised_for}</small>"
-
-# -----------------------------------------------------------------------------
-# send the slack notification
-
-# slack aggregates posts from the same username
-# so we use "${host} ${status}" as the bot username, to make them diff
-
-send_slack "${SLACK_WEBHOOK_URL}" "${to_slack}"
-SENT_SLACK=$?
-
-# -----------------------------------------------------------------------------
-# send the alerta notification
-
-# alerta aggregates posts from the same username
-# so we use "${host} ${status}" as the bot username, to make them diff
-
-send_alerta "${ALERTA_WEBHOOK_URL}" "${to_alerta}"
-SENT_ALERTA=$?
-
-# -----------------------------------------------------------------------------
-# send the flock notification
-
-# flock aggregates posts from the same username
-# so we use "${host} ${status}" as the bot username, to make them diff
-
-send_flock "${FLOCK_WEBHOOK_URL}" "${to_flock}"
-SENT_FLOCK=$?
-
-# -----------------------------------------------------------------------------
-# send the discord notification
-
-# discord aggregates posts from the same username
-# so we use "${host} ${status}" as the bot username, to make them diff
-
-send_discord "${DISCORD_WEBHOOK_URL}" "${to_discord}"
-SENT_DISCORD=$?
-
-# -----------------------------------------------------------------------------
-# send the pushover notification
-
-send_pushover "${PUSHOVER_APP_TOKEN}" "${to_pushover}" "${when}" "${goto_url}" "${status}" "${host} ${status_message} - ${name//_/ } - ${chart}" "
-<font color=\"${color}\"><b>${alarm}</b></font>${info_html}<br/>&nbsp;
-<small><b>${chart}</b><br/>Chart<br/>&nbsp;</small>
-<small><b>${family}</b><br/>Family<br/>&nbsp;</small>
-<small><b>${severity}</b><br/>Severity<br/>&nbsp;</small>
-<small><b>${date}${raised_for_html}</b><br/>Time<br/>&nbsp;</small>
-<a href=\"${goto_url}\">View Netdata</a><br/>&nbsp;
-<small><small>The source of this alarm is line ${src}</small></small>
-"
-
-SENT_PUSHOVER=$?
-
-# -----------------------------------------------------------------------------
-# send the pushbullet notification
-
-send_pushbullet "${PUSHBULLET_ACCESS_TOKEN}" "${PUSHBULLET_SOURCE_DEVICE}" "${to_pushbullet}" "${goto_url}" "${host} ${status_message} - ${name//_/ } - ${chart}" "${alarm}\n
-Severity: ${severity}\n
-Chart: ${chart}\n
-Family: ${family}\n
-$(date -d @${when})\n
-The source of this alarm is line ${src}"
-
-SENT_PUSHBULLET=$?
-
-# -----------------------------------------------------------------------------
-# send the twilio SMS
-
-send_twilio "${TWILIO_ACCOUNT_SID}" "${TWILIO_ACCOUNT_TOKEN}" "${TWILIO_NUMBER}" "${to_twilio}" "${host} ${status_message} - ${name//_/ } - ${chart}" "${alarm}
-Severity: ${severity}
-Chart: ${chart}
-Family: ${family}
-${info}"
-
-SENT_TWILIO=$?
-
-# -----------------------------------------------------------------------------
-# send the messagebird SMS
-
-send_messagebird "${MESSAGEBIRD_ACCESS_KEY}" "${MESSAGEBIRD_NUMBER}" "${to_messagebird}" "${host} ${status_message} - ${name//_/ } - ${chart}" "${alarm}
-Severity: ${severity}
-Chart: ${chart}
-Family: ${family}
-${info}"
-
-SENT_MESSAGEBIRD=$?
-
-
-# -----------------------------------------------------------------------------
-# send the kavenegar SMS
-
-send_kavenegar "${KAVENEGAR_API_KEY}" "${KAVENEGAR_SENDER}" "${to_kavenegar}" "${host} ${status_message} - ${name//_/ } - ${chart}" "${alarm}
-Severity: ${severity}
-Chart: ${chart}
-Family: ${family}
-${info}"
-
-SENT_KAVENEGAR=$?
-
-
-# -----------------------------------------------------------------------------
-# send the telegram.org message
-
-# https://core.telegram.org/bots/api#formatting-options
-send_telegram "${TELEGRAM_BOT_TOKEN}" "${to_telegram}" "${host} ${status_message} - <b>${name//_/ }</b>
-${chart} (${family})
-<a href=\"${goto_url}\">${alarm}</a>
-<i>${info}</i>"
-
-SENT_TELEGRAM=$?
-
-
-# -----------------------------------------------------------------------------
-# send the kafka message
-
-send_kafka
-SENT_KAFKA=$?
-
-
-# -----------------------------------------------------------------------------
-# send the pagerduty.com message
-
-send_pd "${to_pd}"
-SENT_PD=$?
-
-# -----------------------------------------------------------------------------
-# send the irc message
-
-send_irc "${IRC_NICKNAME}" "${IRC_REALNAME}" "${to_irc}" "${IRC_NETWORK}" "${host}" "${host} ${status_message} - ${name//_/ } - ${chart} ----- ${alarm}
-Severity: ${severity}
-Chart: ${chart}
-Family: ${family}
-${info}"
-
-SENT_IRC=$?
-
-# -----------------------------------------------------------------------------
-# send the custom message
-
-send_custom() {
- # is it enabled?
- [ "${SEND_CUSTOM}" != "YES" ] && return 1
-
-    # do we have any recipients?
- [ -z "${1}" ] && return 1
-
- # call the custom_sender function
- custom_sender "${@}"
-}
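-
-# illustrative sketch of a custom_sender(), normally defined in the notification
-# configuration; the variable name below is an assumption, adapt it to your setup:
-#
-# custom_sender() {
-#     # example only: post a one-line summary to a webhook of your choice
-#     docurl -X POST --data "${host} ${status_message}: ${alarm}" "${MY_CUSTOM_WEBHOOK_URL}"
-# }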
-
-send_custom "${to_custom}"
-SENT_CUSTOM=$?
-
-
-# -----------------------------------------------------------------------------
-# send hipchat message
-
-send_hipchat "${HIPCHAT_AUTH_TOKEN}" "${to_hipchat}" " \
-${host} ${status_message}<br/> \
-<b>${alarm}</b> ${info_html}<br/> \
-<b>${chart}</b> (family <b>${family}</b>)<br/> \
-<b>${date}${raised_for_html}</b><br/> \
-<a href=\\\"${goto_url}\\\">View netdata dashboard</a> \
-(source of alarm ${src}) \
-"
-
-SENT_HIPCHAT=$?
-
-
-# -----------------------------------------------------------------------------
-# send the email
-
-send_email <<EOF
-To: ${to_email}
-Subject: ${host} ${status_message} - ${name//_/ } - ${chart}
-MIME-Version: 1.0
-Content-Type: multipart/alternative; boundary="multipart-boundary"
-
-This is a MIME-encoded multipart message
-
---multipart-boundary
-Content-Type: text/plain; charset=${EMAIL_CHARSET}
-Content-Disposition: inline
-Content-Transfer-Encoding: 8bit
-
-${host} ${status_message}
-
-${alarm} ${info}
-${raised_for}
-
-Chart : ${chart}
-Family : ${family}
-Severity: ${severity}
-URL : ${goto_url}
-Source : ${src}
-Date : ${date}
-Notification generated on ${this_host}
-
---multipart-boundary
-Content-Type: text/html; charset=${EMAIL_CHARSET}
-Content-Disposition: inline
-Content-Transfer-Encoding: 8bit
-
-<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
-<html xmlns="http://www.w3.org/1999/xhtml" style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 14px; margin: 0; padding: 0;">
-<body style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 14px; width: 100% !important; min-height: 100%; line-height: 1.6; background: #f6f6f6; margin:0; padding: 0;">
-<table>
- <tbody>
- <tr>
- <td style="vertical-align: top;" valign="top"></td>
- <td width="700" style="vertical-align: top; display: block !important; max-width: 700px !important; clear: both !important; margin: 0 auto; padding: 0;" valign="top">
- <div style="max-width: 700px; display: block; margin: 0 auto; padding: 20px;">
- <table width="100%" cellpadding="0" cellspacing="0" style="background: #fff; border: 1px solid #e9e9e9;">
- <tbody>
- <tr>
- <td bgcolor="#eee" style="padding: 5px 20px 5px 20px; background-color: #eee;">
- <div style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 20px; color: #777; font-weight: bold;">netdata notification</div>
- </td>
- </tr>
- <tr>
- <td bgcolor="${color}" style="font-size: 16px; vertical-align: top; font-weight: 400; text-align: center; margin: 0; padding: 10px; color: #ffffff; background: ${color} !important; border: 1px solid ${color}; border-top-color: ${color};" align="center" valign="top">
- <h1 style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-weight: 400; margin: 0;">${host} ${status_message}</h1>
- </td>
- </tr>
- <tr>
- <td style="vertical-align: top;" valign="top">
- <div style="margin: 0; padding: 20px; max-width: 700px;">
- <table width="100%" cellpadding="0" cellspacing="0" style="max-width:700px">
- <tbody>
- <tr>
- <td style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 18px; vertical-align: top; margin: 0; padding:0 0 20px;" align="left" valign="top">
- <span>${chart}</span>
- <span style="display: block; color: #666666; font-size: 12px; font-weight: 300; line-height: 1; text-transform: uppercase;">Chart</span>
- </td>
- </tr>
- <tr style="margin: 0; padding: 0;">
- <td style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 18px; vertical-align: top; margin: 0; padding: 0 0 20px;" align="left" valign="top">
- <span><b>${alarm}</b>${info_html}</span>
- <span style="display: block; color: #666666; font-size: 12px; font-weight: 300; line-height: 1; text-transform: uppercase;">Alarm</span>
- </td>
- </tr>
- <tr>
- <td style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 18px; vertical-align: top; margin: 0; padding: 0 0 20px;" align="left" valign="top">
- <span>${family}</span>
- <span style="display: block; color: #666666; font-size: 12px; font-weight: 300; line-height: 1; text-transform: uppercase;">Family</span>
- </td>
- </tr>
- <tr style="margin: 0; padding: 0;">
- <td style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 18px; vertical-align: top; margin: 0; padding: 0 0 20px;" align="left" valign="top">
- <span>${severity}</span>
- <span style="display: block; color: #666666; font-size: 12px; font-weight: 300; line-height: 1; text-transform: uppercase;">Severity</span>
- </td>
- </tr>
- <tr style="margin: 0; padding: 0;">
- <td style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 18px; vertical-align: top; margin: 0; padding: 0 0 20px;" align="left" valign="top"><span>${date}</span>
- <span>${raised_for_html}</span> <span style="display: block; color: #666666; font-size: 12px; font-weight: 300; line-height: 1; text-transform: uppercase;">Time</span>
- </td>
- </tr>
- <tr style="margin: 0; padding: 0;">
- <td style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 18px; vertical-align: top; margin: 0; padding: 0 0 20px;">
- <a href="${goto_url}" style="font-size: 14px; color: #ffffff; text-decoration: none; line-height: 1.5; font-weight: bold; text-align: center; display: inline-block; text-transform: capitalize; background: #35568d; border-width: 1px; border-style: solid; border-color: #2b4c86; margin: 0; padding: 10px 15px;" target="_blank">View Netdata</a>
- </td>
- </tr>
- <tr style="text-align: center; margin: 0; padding: 0;">
- <td style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 11px; vertical-align: top; margin: 0; padding: 10px 0 0 0; color: #666666;" align="center" valign="bottom">The source of this alarm is line <code>${src}</code><br/>(alarms are configurable, edit this file to adapt the alarm to your needs)
- </td>
- </tr>
- <tr style="text-align: center; margin: 0; padding: 0;">
- <td style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 12px; vertical-align: top; margin:0; padding: 20px 0 0 0; color: #666666; border-top: 1px solid #f0f0f0;" align="center" valign="bottom">Sent by
-                                                        <a href="https://mynetdata.io/" target="_blank">netdata</a>, the real-time performance and health monitoring tool, running on <code>${this_host}</code>.
- </td>
- </tr>
- </tbody>
- </table>
- </div>
- </td>
- </tr>
- </tbody>
- </table>
- </div>
- </td>
- </tr>
- </tbody>
-</table>
-</body>
-</html>
---multipart-boundary--
-EOF
-
-SENT_EMAIL=$?
-
-# -----------------------------------------------------------------------------
-# let netdata know
-
-if [ ${SENT_EMAIL} -eq 0 \
- -o ${SENT_PUSHOVER} -eq 0 \
- -o ${SENT_TELEGRAM} -eq 0 \
- -o ${SENT_SLACK} -eq 0 \
- -o ${SENT_ALERTA} -eq 0 \
- -o ${SENT_FLOCK} -eq 0 \
- -o ${SENT_DISCORD} -eq 0 \
- -o ${SENT_TWILIO} -eq 0 \
- -o ${SENT_HIPCHAT} -eq 0 \
- -o ${SENT_MESSAGEBIRD} -eq 0 \
- -o ${SENT_KAVENEGAR} -eq 0 \
- -o ${SENT_PUSHBULLET} -eq 0 \
- -o ${SENT_KAFKA} -eq 0 \
- -o ${SENT_PD} -eq 0 \
- -o ${SENT_IRC} -eq 0 \
- -o ${SENT_CUSTOM} -eq 0 \
- ]
- then
- # we did send something
- exit 0
-fi
-
-# we did not send anything
-exit 1
diff --git a/plugins.d/alarm-test.sh b/plugins.d/alarm-test.sh
deleted file mode 100755
index 9df5361a..00000000
--- a/plugins.d/alarm-test.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/usr/bin/env bash
-
-# netdata
-# real-time performance and health monitoring, done right!
-# (C) 2017 Costa Tsaousis <costa@tsaousis.gr>
-# GPL v3+
-#
-# Script to test alarm notifications for netdata
-
-dir="$(dirname "${0}")"
-${dir}/alarm-notify.sh test "${1}"
-exit $?
diff --git a/plugins.d/cgroup-name.sh b/plugins.d/cgroup-name.sh
deleted file mode 100755
index 3c8ad720..00000000
--- a/plugins.d/cgroup-name.sh
+++ /dev/null
@@ -1,189 +0,0 @@
-#!/usr/bin/env bash
-
-# netdata
-# real-time performance and health monitoring, done right!
-# (C) 2016 Costa Tsaousis <costa@tsaousis.gr>
-# GPL v3+
-#
-# Script to find a better name for cgroups
-#
-
-export PATH="${PATH}:/sbin:/usr/sbin:/usr/local/sbin"
-export LC_ALL=C
-
-# -----------------------------------------------------------------------------
-
-PROGRAM_NAME="$(basename "${0}")"
-
-logdate() {
- date "+%Y-%m-%d %H:%M:%S"
-}
-
-log() {
- local status="${1}"
- shift
-
- echo >&2 "$(logdate): ${PROGRAM_NAME}: ${status}: ${*}"
-
-}
-
-warning() {
- log WARNING "${@}"
-}
-
-error() {
- log ERROR "${@}"
-}
-
-info() {
- log INFO "${@}"
-}
-
-fatal() {
- log FATAL "${@}"
- exit 1
-}
-
-debug=0
-debug() {
- [ $debug -eq 1 ] && log DEBUG "${@}"
-}
-
-# -----------------------------------------------------------------------------
-
-[ -z "${NETDATA_CONFIG_DIR}" ] && NETDATA_CONFIG_DIR="$(dirname "${0}")/../../../../etc/netdata"
-CONFIG="${NETDATA_CONFIG_DIR}/cgroups-names.conf"
-CGROUP="${1}"
-NAME=
-
-# -----------------------------------------------------------------------------
-
-if [ -z "${CGROUP}" ]
- then
- fatal "called without a cgroup name. Nothing to do."
-fi
-
-if [ -f "${CONFIG}" ]
- then
- NAME="$(grep "^${CGROUP} " "${CONFIG}" | sed "s/[[:space:]]\+/ /g" | cut -d ' ' -f 2)"
- if [ -z "${NAME}" ]
- then
- info "cannot find cgroup '${CGROUP}' in '${CONFIG}'."
- fi
-#else
-# info "configuration file '${CONFIG}' is not available."
-fi
-
-function docker_get_name_classic {
- local id="${1}"
- info "Running command: docker ps --filter=id=\"${id}\" --format=\"{{.Names}}\""
- NAME="$( docker ps --filter=id="${id}" --format="{{.Names}}" )"
- return 0
-}
-
-function docker_get_name_api {
- local id="${1}"
- if [ ! -S "/var/run/docker.sock" ]
- then
- warning "Can't find /var/run/docker.sock"
- return 1
- fi
- info "Running API command: /containers/${id}/json"
- JSON=$(echo -e "GET /containers/${id}/json HTTP/1.0\r\n" | nc -U /var/run/docker.sock | grep '^{.*')
- NAME=$(echo $JSON | jq -r .Name,.Config.Hostname | grep -v null | head -n1 | sed 's|^/||')
- return 0
-}
-
-function docker_get_name {
- local id="${1}"
- if hash docker 2>/dev/null
- then
- docker_get_name_classic "${id}"
- else
- docker_get_name_api "${id}" || docker_get_name_classic "${id}"
- fi
- if [ -z "${NAME}" ]
- then
- warning "cannot find the name of docker container '${id}'"
- NAME="${id:0:12}"
- else
- info "docker container '${id}' is named '${NAME}'"
- fi
-}
-
-if [ -z "${NAME}" ]
- then
- if [[ "${CGROUP}" =~ ^.*docker[-_/\.][a-fA-F0-9]+[-_\.]?.*$ ]]
- then
- # docker containers
-
- DOCKERID="$( echo "${CGROUP}" | sed "s|^.*docker[-_/]\([a-fA-F0-9]\+\)[-_\.]\?.*$|\1|" )"
- # echo "DOCKERID=${DOCKERID}"
-
- if [ ! -z "${DOCKERID}" -a \( ${#DOCKERID} -eq 64 -o ${#DOCKERID} -eq 12 \) ]
- then
- docker_get_name "${DOCKERID}"
- else
- error "a docker id cannot be extracted from docker cgroup '${CGROUP}'."
- fi
- elif [[ "${CGROUP}" =~ ^.*kubepods[_/].*[_/]pod[a-fA-F0-9-]+[_/][a-fA-F0-9]+$ ]]
- then
- # kubernetes
-
- DOCKERID="$( echo "${CGROUP}" | sed "s|^.*kubepods[_/].*[_/]pod[a-fA-F0-9-]\+[_/]\([a-fA-F0-9]\+\)$|\1|" )"
- # echo "DOCKERID=${DOCKERID}"
-
- if [ ! -z "${DOCKERID}" -a \( ${#DOCKERID} -eq 64 -o ${#DOCKERID} -eq 12 \) ]
- then
- docker_get_name "${DOCKERID}"
- else
- error "a docker id cannot be extracted from kubernetes cgroup '${CGROUP}'."
- fi
- elif [[ "${CGROUP}" =~ machine.slice[_/].*\.service ]]
- then
- # systemd-nspawn
-
- NAME="$(echo ${CGROUP} | sed 's/.*machine.slice[_\/]\(.*\)\.service/\1/g')"
-
- elif [[ "${CGROUP}" =~ machine.slice_machine.*-qemu ]]
- then
- # libvirtd / qemu virtual machines
-
- # NAME="$(echo ${CGROUP} | sed 's/machine.slice_machine.*-qemu//; s/\/x2d//; s/\/x2d/\-/g; s/\.scope//g')"
- NAME="qemu_$(echo ${CGROUP} | sed 's/machine.slice_machine.*-qemu//; s/\/x2d[[:digit:]]*//; s/\/x2d//g; s/\.scope//g')"
-
- elif [[ "${CGROUP}" =~ machine_.*\.libvirt-qemu ]]
- then
- # libvirtd / qemu virtual machines
- NAME="qemu_$(echo ${CGROUP} | sed 's/^machine_//; s/\.libvirt-qemu$//; s/-/_/;')"
-
- elif [[ "${CGROUP}" =~ qemu.slice_([0-9]+).scope && -d /etc/pve ]]
- then
- # Proxmox VMs
-
- FILENAME="/etc/pve/qemu-server/${BASH_REMATCH[1]}.conf"
- if [[ -f $FILENAME && -r $FILENAME ]]
- then
- NAME="qemu_$(grep -e '^name: ' "/etc/pve/qemu-server/${BASH_REMATCH[1]}.conf" | head -1 | sed -rn 's|\s*name\s*:\s*(.*)?$|\1|p')"
- else
-            error "Proxmox config file ${FILENAME} is missing or netdata does not have read access to it. Please ensure netdata is a member of the www-data group."
- fi
- elif [[ "${CGROUP}" =~ lxc_([0-9]+) && -d /etc/pve ]]
- then
- # Proxmox Containers (LXC)
-
- FILENAME="/etc/pve/lxc/${BASH_REMATCH[1]}.conf"
- if [[ -f ${FILENAME} && -r ${FILENAME} ]]
- then
- NAME=$(grep -e '^hostname: ' /etc/pve/lxc/${BASH_REMATCH[1]}.conf | head -1 | sed -rn 's|\s*hostname\s*:\s*(.*)?$|\1|p')
- else
-            error "Proxmox config file ${FILENAME} is missing or netdata does not have read access to it. Please ensure netdata is a member of the www-data group."
- fi
- fi
-
- [ -z "${NAME}" ] && NAME="${CGROUP}"
- [ ${#NAME} -gt 100 ] && NAME="${NAME:0:100}"
-fi
-
-info "cgroup '${CGROUP}' is called '${NAME}'"
-echo "${NAME}"
diff --git a/plugins.d/cgroup-network-helper.sh b/plugins.d/cgroup-network-helper.sh
deleted file mode 100755
index f0705998..00000000
--- a/plugins.d/cgroup-network-helper.sh
+++ /dev/null
@@ -1,251 +0,0 @@
-#!/usr/bin/env bash
-
-# cgroup-network-helper.sh
-# detect container and virtual machine interfaces
-#
-# (C) 2017 Costa Tsaousis
-# GPL v3+
-#
-# This script is called as root (by cgroup-network), with either a pid, or a cgroup path.
-# It tries to find all the network interfaces that belong to the same cgroup.
-#
-# It supports several methods for this detection:
-#
-# 1. cgroup-network (the parent binary that calls this script) detects veth network interfaces,
-# by examining iflink and ifindex IDs and switching namespaces
-# (it also detects the interface name as it is used by the container).
-#
-# 2. this script uses /proc/PID/fdinfo to find tun/tap network interfaces.
-#
-# 3. this script calls virsh to find libvirt network interfaces.
-#
-
-# -----------------------------------------------------------------------------
-
-# the system path is cleared by cgroup-network
-[ -f /etc/profile ] && source /etc/profile
-
-export LC_ALL=C
-
-PROGRAM_NAME="$(basename "${0}")"
-
-logdate() {
- date "+%Y-%m-%d %H:%M:%S"
-}
-
-log() {
- local status="${1}"
- shift
-
- echo >&2 "$(logdate): ${PROGRAM_NAME}: ${status}: ${*}"
-
-}
-
-warning() {
- log WARNING "${@}"
-}
-
-error() {
- log ERROR "${@}"
-}
-
-info() {
- log INFO "${@}"
-}
-
-fatal() {
- log FATAL "${@}"
- exit 1
-}
-
-debug=0
-debug() {
- [ "${debug}" = "1" ] && log DEBUG "${@}"
-}
-
-# -----------------------------------------------------------------------------
-# check for BASH v4+ (required for associative arrays)
-
-[ $(( ${BASH_VERSINFO[0]} )) -lt 4 ] && \
- fatal "BASH version 4 or later is required (this is ${BASH_VERSION})."
-
-# -----------------------------------------------------------------------------
-# parse the arguments
-
-pid=
-cgroup=
-while [ ! -z "${1}" ]
-do
- case "${1}" in
- --cgroup) cgroup="${2}"; shift 1;;
- --pid|-p) pid="${2}"; shift 1;;
- --debug|debug) debug=1;;
- *) fatal "Cannot understand argument '${1}'";;
- esac
-
- shift
-done
-
-if [ -z "${pid}" -a -z "${cgroup}" ]
-then
- fatal "Either --pid or --cgroup is required"
-fi
-
-# -----------------------------------------------------------------------------
-
-set_source() {
- [ ${debug} -eq 1 ] && echo "SRC ${*}"
-}
-
-
-# -----------------------------------------------------------------------------
-# veth interfaces via cgroup
-
-# cgroup-network can detect veth interfaces by itself (written in C).
-# If you are looking for a shell version of what it does, see:
-# https://github.com/firehol/netdata/issues/474#issuecomment-317866709
-
-
-# -----------------------------------------------------------------------------
-# tun/tap interfaces via /proc/PID/fdinfo
-
-# find any tun/tap devices linked to a pid
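-# (illustrative: for a tun/tap fd, ${NETDATA_HOST_PREFIX}/proc/<pid>/fdinfo/<fd> contains a
-#  line like "iff:<TAB>vnet0", so the "cut -f 2" below keeps just the device name "vnet0")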
-proc_pid_fdinfo_iff() {
- local p="${1}" # the pid
-
- debug "Searching for tun/tap interfaces for pid ${p}..."
- set_source "fdinfo"
- grep ^iff:.* "${NETDATA_HOST_PREFIX}/proc/${p}/fdinfo"/* 2>/dev/null | cut -f 2
-}
-
-find_tun_tap_interfaces_for_cgroup() {
- local c="${1}" # the cgroup path
-
- # for each pid of the cgroup
- # find any tun/tap devices linked to the pid
- if [ -f "${c}/emulator/cgroup.procs" ]
- then
- local p
- for p in $(< "${c}/emulator/cgroup.procs" )
- do
- proc_pid_fdinfo_iff ${p}
- done
- fi
-}
-
-
-# -----------------------------------------------------------------------------
-# virsh domain network interfaces
-
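-# illustrative example: a cgroup path ending in
-#   .../machine-qemu\x2d1\x2dwin10.scope    or    .../machine/win10.libvirt-qemu
-# resolves to the libvirt domain name "win10"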
-virsh_cgroup_to_domain_name() {
- local c="${1}" # the cgroup path
-
- debug "extracting a possible virsh domain from cgroup ${c}..."
-
-    # extract the domain name from the cgroup path
- sed -n -e "s|.*/machine-qemu\\\\x2d[0-9]\+\\\\x2d\(.*\)\.scope$|\1|p" \
- -e "s|.*/machine/\(.*\)\.libvirt-qemu$|\1|p" \
- <<EOF
-${c}
-EOF
-}
-
-virsh_find_all_interfaces_for_cgroup() {
- local c="${1}" # the cgroup path
-
- # the virsh command
- local virsh="$(which virsh 2>/dev/null || command -v virsh 2>/dev/null)"
-
- if [ ! -z "${virsh}" ]
- then
- local d="$(virsh_cgroup_to_domain_name "${c}")"
-
- if [ ! -z "${d}" ]
- then
- debug "running: virsh domiflist ${d}; to find the network interfaces"
-
-            # match 'network' and 'bridge' interfaces from the virsh output
-
- set_source "virsh"
- "${virsh}" -r domiflist ${d} |\
- sed -n \
- -e "s|^\([^[:space:]]\+\)[[:space:]]\+network[[:space:]]\+\([^[:space:]]\+\)[[:space:]]\+[^[:space:]]\+[[:space:]]\+[^[:space:]]\+$|\1 \1_\2|p" \
- -e "s|^\([^[:space:]]\+\)[[:space:]]\+bridge[[:space:]]\+\([^[:space:]]\+\)[[:space:]]\+[^[:space:]]\+[[:space:]]\+[^[:space:]]\+$|\1 \1_\2|p"
- else
- debug "no virsh domain extracted from cgroup ${c}"
- fi
- else
- debug "virsh command is not available"
- fi
-}
-
-# -----------------------------------------------------------------------------
-
-find_all_interfaces_of_pid_or_cgroup() {
- local p="${1}" c="${2}" # the pid and the cgroup path
-
- if [ ! -z "${pid}" ]
- then
- # we have been called with a pid
-
- proc_pid_fdinfo_iff ${p}
-
- elif [ ! -z "${c}" ]
- then
- # we have been called with a cgroup
-
- info "searching for network interfaces of cgroup '${c}'"
-
- find_tun_tap_interfaces_for_cgroup "${c}"
- virsh_find_all_interfaces_for_cgroup "${c}"
-
- else
-
- error "Either a pid or a cgroup path is needed"
- return 1
-
- fi
-
- return 0
-}
-
-# -----------------------------------------------------------------------------
-
-# an associative array to store the interfaces
-# the index is the interface name as seen by the host
-# the value is the interface name as seen by the guest / container
-declare -A devs=()
-
-# store all interfaces found in the associative array
-# this will also give the unique devices, as seen by the host
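-# e.g. (illustrative): devs[vnet0]="eth0" means host interface vnet0 is seen as eth0 inside the guest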
-last_src=
-while read host_device guest_device
-do
- [ -z "${host_device}" ] && continue
-
- [ "${host_device}" = "SRC" ] && last_src="${guest_device}" && continue
-
- # the default guest_device is the host_device
- [ -z "${guest_device}" ] && guest_device="${host_device}"
-
- # when we run in debug, show the source
- debug "Found host device '${host_device}', guest device '${guest_device}', detected via '${last_src}'"
-
- [ -z "${devs[${host_device}]}" -o "${devs[${host_device}]}" = "${host_device}" ] && \
- devs[${host_device}]="${guest_device}"
-
-done < <( find_all_interfaces_of_pid_or_cgroup "${pid}" "${cgroup}" )
-
-# print the interfaces found, in the format netdata expects them
-found=0
-for x in "${!devs[@]}"
-do
- found=$((found + 1))
- echo "${x} ${devs[${x}]}"
-done
-
-debug "found ${found} network interfaces for pid '${pid}', cgroup '${cgroup}', run as ${USER}, ${UID}"
-
-# let netdata know if we found any
-[ ${found} -eq 0 ] && exit 1
-exit 0
diff --git a/plugins.d/charts.d.dryrun-helper.sh b/plugins.d/charts.d.dryrun-helper.sh
deleted file mode 100755
index 8142f988..00000000
--- a/plugins.d/charts.d.dryrun-helper.sh
+++ /dev/null
@@ -1,73 +0,0 @@
-#!/usr/bin/env bash
-
-# stop the script on any error
-set -e
-
-me="$0"
-name="$1"
-chart="$2"
-conf="$3"
-
-can_diff=1
-
-tmp1="`mktemp`"
-tmp2="`mktemp`"
-
-myset() {
- set | grep -v "^_=" | grep -v "^PIPESTATUS=" | grep -v "^BASH_LINENO="
-}
-
-# save two snapshots of the 'set' output
-myset >"$tmp1"
-myset >"$tmp2"
-
-# make sure they don't differ
-diff "$tmp1" "$tmp2" >/dev/null 2>&1
-if [ $? -ne 0 ]
-then
- # they differ, we cannot do the check
- echo >&2 "$me: cannot check with diff."
- can_diff=0
-fi
-
-# do it again, now including the script
-myset >"$tmp1"
-
-# include the plugin and its config
-if [ -f "$conf" ]
-then
- . "$conf"
- if [ $? -ne 0 ]
- then
- echo >&2 "$me: cannot load config file $conf"
- rm "$tmp1" "$tmp2"
- exit 1
- fi
-fi
-
-. "$chart"
-if [ $? -ne 0 ]
-then
- echo >&2 "$me: cannot load chart file $chart"
- rm "$tmp1" "$tmp2"
- exit 1
-fi
-
-# remove all variables starting with the plugin name
-myset | grep -v "^$name" >"$tmp2"
-
-if [ $can_diff -eq 1 ]
-then
- # check if they are different
- # make sure they don't differ
- diff "$tmp1" "$tmp2" >&2
- if [ $? -ne 0 ]
- then
- # they differ
- rm "$tmp1" "$tmp2"
- exit 1
- fi
-fi
-
-rm "$tmp1" "$tmp2"
-exit 0
diff --git a/plugins.d/charts.d.plugin b/plugins.d/charts.d.plugin
deleted file mode 100755
index 9bd03fd4..00000000
--- a/plugins.d/charts.d.plugin
+++ /dev/null
@@ -1,713 +0,0 @@
-#!/usr/bin/env bash
-
-# netdata
-# real-time performance and health monitoring, done right!
-# (C) 2017 Costa Tsaousis <costa@tsaousis.gr>
-# GPL v3+
-#
-# charts.d.plugin allows easy development of BASH plugins
-#
-# if you need to run parallel charts.d processes, link this file to a different name
-# in the same directory, with a .plugin suffix, and netdata will start both of them;
-# each will have its own config file and modules configuration directory.
-#
-
-export PATH="${PATH}:/sbin:/usr/sbin:/usr/local/bin:/usr/local/sbin"
-
-PROGRAM_FILE="$0"
-PROGRAM_NAME="$(basename $0)"
-PROGRAM_NAME="${PROGRAM_NAME/.plugin}"
-MODULE_NAME="main"
-
-# -----------------------------------------------------------------------------
-# create temp dir
-
-debug=0
-TMP_DIR=
-chartsd_cleanup() {
- trap '' EXIT QUIT HUP INT TERM
-
- if [ ! -z "$TMP_DIR" -a -d "$TMP_DIR" ]
- then
- [ $debug -eq 1 ] && echo >&2 "$PROGRAM_NAME: cleaning up temporary directory $TMP_DIR ..."
- rm -rf "$TMP_DIR"
- fi
- exit 0
-}
-trap chartsd_cleanup EXIT QUIT HUP INT TERM
-
-if [ $UID = "0" ]
-then
- TMP_DIR="$( mktemp -d /var/run/netdata-${PROGRAM_NAME}-XXXXXXXXXX )"
-else
- TMP_DIR="$( mktemp -d /tmp/.netdata-${PROGRAM_NAME}-XXXXXXXXXX )"
-fi
-
-logdate() {
- date "+%Y-%m-%d %H:%M:%S"
-}
-
-log() {
- local status="${1}"
- shift
-
- echo >&2 "$(logdate): ${PROGRAM_NAME}: ${status}: ${MODULE_NAME}: ${*}"
-
-}
-
-warning() {
- log WARNING "${@}"
-}
-
-error() {
- log ERROR "${@}"
-}
-
-info() {
- log INFO "${@}"
-}
-
-fatal() {
- log FATAL "${@}"
- echo "DISABLE"
- exit 1
-}
-
-debug() {
- [ $debug -eq 1 ] && log DEBUG "${@}"
-}
-
-# -----------------------------------------------------------------------------
-# check a few commands
-
-require_cmd() {
- local x=$(which "${1}" 2>/dev/null || command -v "${1}" 2>/dev/null)
- if [ -z "${x}" -o ! -x "${x}" ]
- then
- warning "command '${1}' is not found in ${PATH}."
- eval "${1^^}_CMD=\"\""
- return 1
- fi
-
- eval "${1^^}_CMD=\"${x}\""
- return 0
-}
-
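-# e.g. (illustrative): "require_cmd curl" sets CURL_CMD to the full path of curl on this
-# system (such as /usr/bin/curl), or clears it and returns 1 if curl cannot be found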
-require_cmd date || exit 1
-require_cmd sed || exit 1
-require_cmd basename || exit 1
-require_cmd dirname || exit 1
-require_cmd cat || exit 1
-require_cmd grep || exit 1
-require_cmd egrep || exit 1
-require_cmd mktemp || exit 1
-require_cmd awk || exit 1
-require_cmd timeout || exit 1
-require_cmd curl || exit 1
-
-# -----------------------------------------------------------------------------
-
-[ $(( ${BASH_VERSINFO[0]} )) -lt 4 ] && fatal "BASH version 4 or later is required, but found version: ${BASH_VERSION}. Please upgrade."
-
-info "started from '$PROGRAM_FILE' with options: $*"
-
-# -----------------------------------------------------------------------------
-# internal defaults
-# netdata exposes a few environment variables for us
-
-[ -z "${NETDATA_PLUGINS_DIR}" ] && NETDATA_PLUGINS_DIR="$(dirname "${0}")"
-[ -z "${NETDATA_CONFIG_DIR}" ] && NETDATA_CONFIG_DIR="$(dirname "${0}")/../../../../etc/netdata"
-
-pluginsd="${NETDATA_PLUGINS_DIR}"
-confd="${NETDATA_CONFIG_DIR}"
-chartsd="$pluginsd/../charts.d"
-
-myconfig="$confd/$PROGRAM_NAME.conf"
-
-minimum_update_frequency="${NETDATA_UPDATE_EVERY-1}"
-update_every=${minimum_update_frequency} # this will be overwritten by the command line
-
-# workaround for non-BASH shells
-charts_create="_create"
-charts_update="_update"
-charts_check="_check"
-charts_undescore="_"
-
-# when making iterations, charts.d can loop more frequently,
-# to prevent plugins from missing iterations.
-# this is a percentage relative to update_every, used to align
-# its iterations.
-# The minimum is 10%, the maximum 100%.
-# So, if update_every is 1 second and time_divisor is 50,
-# charts.d will iterate every 500ms.
-# Charts will be called to collect data only if the time that
-# has passed since they last collected data is equal to or
-# above their update_every.
-time_divisor=50
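-# i.e. (illustrative): the sleep between iterations is roughly
-# update_every * time_divisor / 100 seconds, so update_every=2 with
-# time_divisor=50 makes charts.d wake up about every 1 second.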
-
-# number of seconds to run without restart
-# after this time, charts.d.plugin will exit
-# netdata will restart it
-restart_timeout=$((3600 * 4))
-
-# check if the charts.d plugins are using global variables
-# (they should not).
-# The dry-run checker does not currently support BASH v4 arrays,
-# so it is disabled.
-dryrunner=0
-
-# check for timeout command
-check_for_timeout=1
-
-# the default enable/disable value for all charts
-enable_all_charts="yes"
-
-# -----------------------------------------------------------------------------
-# parse parameters
-
-check=0
-chart_only=
-while [ ! -z "$1" ]
-do
- if [ "$1" = "check" ]
- then
- check=1
- shift
- continue
- fi
-
- if [ "$1" = "debug" -o "$1" = "all" ]
- then
- debug=1
- shift
- continue
- fi
-
- if [ -f "$chartsd/$1.chart.sh" ]
- then
- debug=1
- chart_only="$( echo $1.chart.sh | sed "s/\.chart\.sh$//g" )"
- shift
- continue
- fi
-
- if [ -f "$chartsd/$1" ]
- then
- debug=1
- chart_only="$( echo $1 | sed "s/\.chart\.sh$//g" )"
- shift
- continue
- fi
-
- # number check
- n="$1"
- x=$(( n ))
- if [ "$x" = "$n" ]
- then
- shift
- update_every=$x
- [ $update_every -lt $minimum_update_frequency ] && update_every=$minimum_update_frequency
- continue
- fi
-
- fatal "Cannot understand parameter $1. Aborting."
-done
-
-
-# -----------------------------------------------------------------------------
-# loop control
-
-# default sleep function
-LOOPSLEEPMS_HIGHRES=0
-now_ms=
-current_time_ms_default() {
- now_ms="$(date +'%s')000"
-}
-current_time_ms="current_time_ms_default"
-current_time_ms_accuracy=1
-mysleep="sleep"
-
-# if found and included, this file overwrites loopsleepms()
-# and current_time_ms() with a high resolution timer function
-# for precise looping.
-. "$pluginsd/loopsleepms.sh.inc"
-
-# -----------------------------------------------------------------------------
-# load my configuration
-
-if [ -f "$myconfig" ]
- then
- . "$myconfig"
- [ $? -ne 0 ] && fatal "cannot load $myconfig"
-
- time_divisor=$((time_divisor))
- [ $time_divisor -lt 10 ] && time_divisor=10
- [ $time_divisor -gt 100 ] && time_divisor=100
-else
- info "configuration file '$myconfig' not found. Using defaults."
-fi
-
-# we check for the timeout command, after we load our
-# configuration, so that the user may overwrite the
-# timeout command we use, providing a function that
-# can emulate the timeout command we need:
-# > timeout SECONDS command ...
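-#
-# illustrative sketch of such an override (this exact body is an assumption;
-# any function honouring "timeout SECONDS command ..." will do):
-#
-# timeout() {
-#     local t="${1}"; shift
-#     "${@}" & local cmd_pid=$!
-#     ( sleep "${t}"; kill "${cmd_pid}" 2>/dev/null ) &
-#     wait "${cmd_pid}"
-# }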
-if [ $check_for_timeout -eq 1 ]
- then
- require_cmd timeout || exit 1
-fi
-
-# -----------------------------------------------------------------------------
-# internal checks
-
-# netdata passes the requested update frequency as the first argument
-update_every=$(( update_every + 1 - 1)) # makes sure it is a number
-test $update_every -eq 0 && update_every=1 # if it is zero, make it 1
-
-# check the charts.d directory
-[ ! -d "$chartsd" ] && fatal "cannot find charts directory '$chartsd'"
-
-# -----------------------------------------------------------------------------
-# library functions
-
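-# fixid() turns any string into a safe, lowercase identifier
-# e.g. (illustrative): fixid "eth0: RX Bytes!"  ->  "eth0_rx_bytes"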
-fixid() {
- echo "$*" |\
- tr -c "[A-Z][a-z][0-9]" "_" |\
- sed -e "s|^_\+||g" -e "s|_\+$||g" -e "s|_\+|_|g" |\
- tr "[A-Z]" "[a-z]"
-}
-
-run() {
- local ret pid="${BASHPID}" t
-
- if [ "z${1}" = "z-t" -a "${2}" != "0" ]
- then
- t="${2}"
- shift 2
- timeout ${t} "${@}" 2>"${TMP_DIR}/run.${pid}"
- ret=$?
- else
- "${@}" 2>"${TMP_DIR}/run.${pid}"
- ret=$?
- fi
-
- if [ ${ret} -ne 0 ]
- then
- {
- printf "$(logdate): ${PROGRAM_NAME}: ${status}: ${MODULE_NAME}: command '"
- printf "%q " "${@}"
- printf "' failed:\n --- BEGIN TRACE ---\n"
- cat "${TMP_DIR}/run.${pid}"
- printf " --- END TRACE ---\n"
- } >&2
- fi
- rm "${TMP_DIR}/run.${pid}"
-
- return ${ret}
-}
-
-# convert any floating point number
-# to an integer, given a multiplier
-# the result is stored in ${FLOAT2INT_RESULT}
-# so that no fork is necessary
-# the multiplier must be a power of 10
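-# e.g. (illustrative): float2int "1.25" 100  ->  FLOAT2INT_RESULT=125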
-float2int() {
- local f m="$2" a b l v=($1)
- f=${v[0]}
-
- # the length of the multiplier - 1
- l=$(( ${#m} - 1 ))
-
- # check if the number is in scientific notation
- if [[ ${f} =~ ^[[:space:]]*(-)?[0-9.]+(e|E)(\+|-)[0-9]+ ]]
- then
- # convert it to decimal
- # unfortunately, this fork cannot be avoided
- # if you know of a way to avoid it, please let me know
- f=$(printf "%0.${l}f" ${f})
- fi
-
- # split the floating point number
- # in integer (a) and decimal (b)
- a=${f/.*/}
- b=${f/*./}
-
- # if the integer part is missing
- # set it to zero
- [ -z "${a}" ] && a="0"
-
- # strip leading zeros from the integer part
-    # base 10 conversion
- a=$((10#$a))
-
- # check the length of the decimal part
- # against the length of the multiplier
- if [ ${#b} -gt ${l} ]
- then
- # too many digits - take the most significant
- b=${b:0:${l}}
-
- elif [ ${#b} -lt ${l} ]
- then
- # too few digits - pad with zero on the right
- local z="00000000000000000000000" r=$((l - ${#b}))
- b="${b}${z:0:${r}}"
- fi
-
- # strip leading zeros from the decimal part
-    # base 10 conversion
- b=$((10#$b))
-
- # store the result
- FLOAT2INT_RESULT=$(( (a * m) + b ))
-}
-
-
-# -----------------------------------------------------------------------------
-# charts check functions
-
-all_charts() {
- cd "$chartsd"
- [ $? -ne 0 ] && error "cannot cd to $chartsd" && return 1
-
- ls *.chart.sh | sed "s/\.chart\.sh$//g"
-}
-
-declare -A charts_enable_keyword=(
- ['apache']="force"
- ['cpu_apps']="force"
- ['cpufreq']="force"
- ['example']="force"
- ['exim']="force"
- ['hddtemp']="force"
- ['load_average']="force"
- ['mem_apps']="force"
- ['mysql']="force"
- ['nginx']="force"
- ['phpfpm']="force"
- ['postfix']="force"
- ['sensors']="force"
- ['squid']="force"
- ['tomcat']="force"
- )
-
-all_enabled_charts() {
- local charts= enabled= required=
-
- # find all enabled charts
-
- for chart in $( all_charts )
- do
- MODULE_NAME="${chart}"
-
- eval "enabled=\$$chart"
- if [ -z "${enabled}" ]
- then
- enabled="${enable_all_charts}"
- fi
-
- required="${charts_enable_keyword[${chart}]}"
- [ -z "${required}" ] && required="yes"
-
- if [ ! "${enabled}" = "${required}" ]
- then
- info "is disabled. Add a line with $chart=$required in $myconfig to enable it (or remove the line that disables it)."
- else
- debug "is enabled for auto-detection."
- local charts="$charts $chart"
- fi
- done
- MODULE_NAME="main"
-
- local charts2=
- for chart in $charts
- do
- MODULE_NAME="${chart}"
-
- # check the enabled charts
- local check="$( cat "$chartsd/$chart.chart.sh" | sed "s/^ \+//g" | grep "^$chart$charts_check()" )"
- if [ -z "$check" ]
- then
- error "module '$chart' does not seem to have a $chart$charts_check() function. Disabling it."
- continue
- fi
-
- local create="$( cat "$chartsd/$chart.chart.sh" | sed "s/^ \+//g" | grep "^$chart$charts_create()" )"
- if [ -z "$create" ]
- then
- error "module '$chart' does not seem to have a $chart$charts_create() function. Disabling it."
- continue
- fi
-
- local update="$( cat "$chartsd/$chart.chart.sh" | sed "s/^ \+//g" | grep "^$chart$charts_update()" )"
- if [ -z "$update" ]
- then
- error "module '$chart' does not seem to have a $chart$charts_update() function. Disabling it."
- continue
- fi
-
- # check its config
- #if [ -f "$confd/$chart.conf" ]
- #then
- # if [ ! -z "$( cat "$confd/$chart.conf" | sed "s/^ \+//g" | grep -v "^$" | grep -v "^#" | grep -v "^$chart$charts_undescore" )" ]
- # then
- # error "module's $chart config $confd/$chart.conf should only have lines starting with $chart$charts_undescore . Disabling it."
- # continue
- # fi
- #fi
-
- #if [ $dryrunner -eq 1 ]
- # then
- # "$pluginsd/charts.d.dryrun-helper.sh" "$chart" "$chartsd/$chart.chart.sh" "$confd/$chart.conf" >/dev/null
- # if [ $? -ne 0 ]
- # then
- # error "module's $chart did not pass the dry run check. This means it uses global variables not starting with $chart. Disabling it."
- # continue
- # fi
- #fi
-
- local charts2="$charts2 $chart"
- done
- MODULE_NAME="main"
-
- echo $charts2
- debug "enabled charts: $charts2"
-}
-
-# -----------------------------------------------------------------------------
-# load the charts
-
-suffix_retries="_retries"
-suffix_update_every="_update_every"
-active_charts=
-for chart in $( all_enabled_charts )
-do
- MODULE_NAME="${chart}"
-
- debug "loading module: '$chartsd/$chart.chart.sh'"
-
- . "$chartsd/$chart.chart.sh"
-
- if [ -f "$confd/$PROGRAM_NAME/$chart.conf" ]
- then
- debug "loading module configuration: '$confd/$PROGRAM_NAME/$chart.conf'"
- . "$confd/$PROGRAM_NAME/$chart.conf"
- elif [ -f "$confd/$chart.conf" ]
- then
- debug "loading module configuration: '$confd/$chart.conf'"
- . "$confd/$chart.conf"
- else
- warning "configuration file '$confd/$PROGRAM_NAME/$chart.conf' not found. Using defaults."
- fi
-
- eval "dt=\$$chart$suffix_update_every"
- dt=$(( dt + 1 - 1 )) # make sure it is a number
- if [ $dt -lt $update_every ]
- then
- eval "$chart$suffix_update_every=$update_every"
- fi
-
- $chart$charts_check
- if [ $? -eq 0 ]
- then
- debug "module '$chart' activated"
- active_charts="$active_charts $chart"
- else
- error "module's '$chart' check() function reports failure."
- fi
-done
-MODULE_NAME="main"
-debug "activated modules: $active_charts"
-
-
-# -----------------------------------------------------------------------------
-# check overwrites
-
-# enable work time reporting
-debug_time=
-test $debug -eq 1 && debug_time=tellwork
-
-# if we only need a specific chart, remove all the others
-if [ ! -z "${chart_only}" ]
-then
- debug "requested to run only for: '${chart_only}'"
- check_charts=
- for chart in $active_charts
- do
- if [ "$chart" = "$chart_only" ]
- then
- check_charts="$chart"
- break
- fi
- done
- active_charts="$check_charts"
-fi
-debug "activated charts: $active_charts"
-
-# stop if we just need a pre-check
-if [ $check -eq 1 ]
-then
- info "CHECK RESULT"
- info "Will run the charts: $active_charts"
- exit 0
-fi
-
-# -----------------------------------------------------------------------------
-
-cd "${TMP_DIR}" || exit 1
-
-# -----------------------------------------------------------------------------
-# create charts
-
-run_charts=
-for chart in $active_charts
-do
- MODULE_NAME="${chart}"
-
- debug "calling '$chart$charts_create()'..."
- $chart$charts_create
- if [ $? -eq 0 ]
- then
- run_charts="$run_charts $chart"
- debug "'$chart' initialized."
- else
- error "module's '$chart' function '$chart$charts_create()' reports failure."
- fi
-done
-MODULE_NAME="main"
-debug "run_charts='$run_charts'"
-
-
-# -----------------------------------------------------------------------------
-# update dimensions
-
-[ -z "$run_charts" ] && fatal "No charts to collect data from."
-
-declare -A charts_last_update=() charts_update_every=() charts_retries=() charts_next_update=() charts_run_counter=() charts_serial_failures=()
-global_update() {
- local exit_at \
- c=0 dt ret last_ms exec_start_ms exec_end_ms \
- chart now_charts=() next_charts=($run_charts) \
- next_ms x seconds millis
-
- # return the current time in ms in $now_ms
- ${current_time_ms}
-
- exit_at=$(( now_ms + (restart_timeout * 1000) ))
-
- for chart in $run_charts
- do
- eval "charts_update_every[$chart]=\$$chart$suffix_update_every"
- test -z "${charts_update_every[$chart]}" && charts_update_every[$chart]=$update_every
-
- eval "charts_retries[$chart]=\$$chart$suffix_retries"
- test -z "${charts_retries[$chart]}" && charts_retries[$chart]=10
-
- charts_last_update[$chart]=$((now_ms - (now_ms % (charts_update_every[$chart] * 1000) ) ))
- charts_next_update[$chart]=$(( charts_last_update[$chart] + (charts_update_every[$chart] * 1000) ))
- charts_run_counter[$chart]=0
- charts_serial_failures[$chart]=0
-
- echo "CHART netdata.plugin_chartsd_$chart '' 'Execution time for $chart plugin' 'milliseconds / run' charts.d netdata.plugin_charts area 145000 ${charts_update_every[$chart]}"
- echo "DIMENSION run_time 'run time' absolute 1 1"
- done
-
- # the main loop
- while [ "${#next_charts[@]}" -gt 0 ]
- do
- c=$((c + 1))
- now_charts=("${next_charts[@]}")
- next_charts=()
-
- # return the current time in ms in $now_ms
- ${current_time_ms}
-
- for chart in "${now_charts[@]}"
- do
- MODULE_NAME="${chart}"
-
- if [ ${now_ms} -ge ${charts_next_update[$chart]} ]
- then
- last_ms=${charts_last_update[$chart]}
- dt=$(( (now_ms - last_ms) ))
-
- charts_last_update[$chart]=${now_ms}
-
- while [ ${charts_next_update[$chart]} -lt ${now_ms} ]
- do
- charts_next_update[$chart]=$(( charts_next_update[$chart] + (charts_update_every[$chart] * 1000) ))
- done
-
- # the first call should not give a duration
- # so that netdata calibrates to current time
- dt=$(( dt * 1000 ))
- charts_run_counter[$chart]=$(( charts_run_counter[$chart] + 1 ))
- if [ ${charts_run_counter[$chart]} -eq 1 ]
- then
- dt=
- fi
-
- exec_start_ms=$now_ms
- $chart$charts_update $dt
- ret=$?
-
- # return the current time in ms in $now_ms
- ${current_time_ms}; exec_end_ms=$now_ms
-
- echo "BEGIN netdata.plugin_chartsd_$chart $dt"
- echo "SET run_time = $(( exec_end_ms - exec_start_ms ))"
- echo "END"
-
- if [ $ret -eq 0 ]
- then
- charts_serial_failures[$chart]=0
- next_charts+=($chart)
- else
- charts_serial_failures[$chart]=$(( charts_serial_failures[$chart] + 1 ))
-
- if [ ${charts_serial_failures[$chart]} -gt ${charts_retries[$chart]} ]
- then
- error "module's '$chart' update() function reported failure ${charts_serial_failures[$chart]} times. Disabling it."
- else
- error "module's '$chart' update() function reports failure. Will keep trying for a while."
- next_charts+=($chart)
- fi
- fi
- else
- next_charts+=($chart)
- fi
- done
- MODULE_NAME="main"
-
- # sleep until the earliest next chart update is due
- next_ms=$((now_ms + (update_every * 1000 * 100) ))
- for x in "${charts_next_update[@]}"; do [ ${x} -lt ${next_ms} ] && next_ms=${x}; done
- next_ms=$((next_ms - now_ms))
-
- if [ ${LOOPSLEEPMS_HIGHRES} -eq 1 -a ${next_ms} -gt 0 ]
- then
- next_ms=$(( next_ms + current_time_ms_accuracy ))
- seconds=$(( next_ms / 1000 ))
- millis=$(( next_ms % 1000 ))
- if [ ${millis} -lt 10 ]
- then
- millis="00${millis}"
- elif [ ${millis} -lt 100 ]
- then
- millis="0${millis}"
- fi
-
- debug "sleeping for ${seconds}.${millis} seconds."
- ${mysleep} ${seconds}.${millis}
- else
- debug "sleeping for ${update_every} seconds."
- ${mysleep} $update_every
- fi
-
- test ${now_ms} -ge ${exit_at} && exit 0
- done
-
- fatal "nothing left to do, exiting..."
-}
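The scheduling above snaps every chart to multiples of its own update interval. A self-contained bash sketch of the same arithmetic, with made-up numbers:

    update_every=2                    # seconds (illustrative)
    now_ms=1700000001234              # pretend "current time in ms" (illustrative)
    last=$(( now_ms - (now_ms % (update_every * 1000)) ))
    next=$(( last + (update_every * 1000) ))
    echo "last=${last} next=${next}"  # "next" lands exactly on a 2-second boundary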
-
-global_update
diff --git a/plugins.d/fping.plugin b/plugins.d/fping.plugin
deleted file mode 100755
index f38a8dde..00000000
--- a/plugins.d/fping.plugin
+++ /dev/null
@@ -1,188 +0,0 @@
-#!/usr/bin/env bash
-
-# netdata
-# real-time performance and health monitoring, done right!
-# (C) 2017 Costa Tsaousis <costa@tsaousis.gr>
-# GPL v3+
-#
-# This plugin requires a recent version of fping.
-# You can compile it from source by running this script with the option: install
-
-export PATH="${PATH}:/sbin:/usr/sbin:/usr/local/sbin"
-export LC_ALL=C
-
-if [ "${1}" = "install" ]
- then
- [ "${UID}" != 0 ] && echo >&2 "Please run me as root. This will install a single binary file: /usr/local/bin/fping." && exit 1
-
- run() {
- printf >&2 " > "
- printf >&2 "%q " "${@}"
- printf >&2 "\n"
- "${@}" || exit 1
- }
-
- download() {
- local curl="$(which curl 2>/dev/null || command -v curl 2>/dev/null)"
- [ ! -z "${curl}" ] && run curl -s -L "${1}" && return 0
-
- local wget="$(which wget 2>/dev/null || command -v wget 2>/dev/null)"
- [ ! -z "${wget}" ] && run wget -q -O - "${1}" && return 0
-
- echo >&2 "Cannot find 'curl' or 'wget' in this system." && exit 1
- }
-
- [ ! -d /usr/src ] && run mkdir -p /usr/src
- [ ! -d /usr/local/bin ] && run mkdir -p /usr/local/bin
-
- run cd /usr/src
-
- if [ -d fping-4.0 ]
- then
- run rm -rf fping-4.0 || exit 1
- fi
-
- download 'https://github.com/schweikert/fping/releases/download/v4.0/fping-4.0.tar.gz' | run tar -zxvpf -
- [ $? -ne 0 ] && exit 1
- run cd fping-4.0 || exit 1
-
- run ./configure --prefix=/usr/local
- run make clean
- run make
- if [ -f /usr/local/bin/fping ]
- then
- run mv -f /usr/local/bin/fping /usr/local/bin/fping.old
- fi
- run mv src/fping /usr/local/bin/fping
- run chown root:root /usr/local/bin/fping
- run chmod 4755 /usr/local/bin/fping
- echo >&2
- echo >&2 "All done, you have a compatible fping now at /usr/local/bin/fping."
- echo >&2
-
- fping="$(which fping 2>/dev/null || command -v fping 2>/dev/null)"
- if [ "${fping}" != "/usr/local/bin/fping" ]
- then
- echo >&2 "You have another fping installed at: ${fping}."
- echo >&2 "Please set:"
- echo >&2
- echo >&2 " fping=\"/usr/local/bin/fping\""
- echo >&2
- echo >&2 "at /etc/netdata/fping.conf"
- echo >&2
- fi
- exit 0
-fi
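A hedged usage example of the install mode above; the plugin path is an assumption, so adjust it to wherever netdata installed its plugins.d directory:

    # downloads fping 4.0, builds it, and installs it as /usr/local/bin/fping
    sudo /usr/libexec/netdata/plugins.d/fping.plugin install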
-
-# -----------------------------------------------------------------------------
-
-PROGRAM_NAME="$(basename "${0}")"
-
-logdate() {
- date "+%Y-%m-%d %H:%M:%S"
-}
-
-log() {
- local status="${1}"
- shift
-
- echo >&2 "$(logdate): ${PROGRAM_NAME}: ${status}: ${*}"
-
-}
-
-warning() {
- log WARNING "${@}"
-}
-
-error() {
- log ERROR "${@}"
-}
-
-info() {
- log INFO "${@}"
-}
-
-fatal() {
- log FATAL "${@}"
- echo "DISABLE"
- exit 1
-}
-
-debug=0
-debug() {
- [ $debug -eq 1 ] && log DEBUG "${@}"
-}
-
-# -----------------------------------------------------------------------------
-
-# store in ${plugin} the name we run under
-# this allows us to copy/link fping.plugin under a different name
-# to have multiple fping plugins running with different settings
-plugin="${PROGRAM_NAME/.plugin/}"
-
-
-# -----------------------------------------------------------------------------
-
-# the frequency to send info to netdata
-# passed by netdata as the first parameter
-update_every="${1-1}"
-
-# the netdata configuration directory
-# passed by netdata as an environment variable
-[ -z "${NETDATA_CONFIG_DIR}" ] && NETDATA_CONFIG_DIR="$(dirname "${0}")/../../../../etc/netdata"
-
-# -----------------------------------------------------------------------------
-# configuration options
-# can be overwritten at /etc/netdata/fping.conf
-
-# the fping binary to use
-# we need one that can output netdata friendly info (supporting: -N)
-# if you have multiple versions, put here the full filename of the right one
-fping="$( which fping 2>/dev/null || command -v fping 2>/dev/null )"
-
-# a space separated list of hosts to fping
-# we suggest putting host names here and mapping them to IPs in /etc/hosts
-hosts=""
-
-# the time in milliseconds (1 sec = 1000 ms)
-# between successive pings to each host - by default 5 pings per host per iteration
-ping_every="$((update_every * 1000 / 5))"
-
-# fping options
-fping_opts="-R -b 56 -i 1 -r 0 -t 5000"
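An illustrative /etc/netdata/fping.conf overriding the defaults above (the host names and values are placeholders):

    fping="/usr/local/bin/fping"
    hosts="gateway dns1 www.example.com"
    update_every=5
    ping_every=200
    fping_opts="-R -b 56 -i 1 -r 0 -t 5000"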
-
-# -----------------------------------------------------------------------------
-# load the configuration file
-
-if [ ! -f "${NETDATA_CONFIG_DIR}/${plugin}.conf" ]
-then
- fatal "configuration file '${NETDATA_CONFIG_DIR}/${plugin}.conf' not found - nothing to do."
-fi
-
-source "${NETDATA_CONFIG_DIR}/${plugin}.conf"
-
-if [ -z "${hosts}" ]
-then
- fatal "no hosts configured in '${NETDATA_CONFIG_DIR}/${plugin}.conf' - nothing to do."
-fi
-
-if [ -z "${fping}" -o ! -x "${fping}" ]
-then
- fatal "command '${fping}' is not found or is not executable - cannot proceed."
-fi
-
-if [ ${ping_every} -lt 20 ]
- then
- warning "ping every was set to ${ping_every} but 20 is the minimum for non-root users. Setting it to 20 ms."
- ping_every=20
-fi
-
-# the fping options we will use
-options=( -N -l -Q ${update_every} -p ${ping_every} ${fping_opts} ${hosts} )
-
-# execute fping
-info "starting fping: ${fping} ${options[*]}"
-exec "${fping}" "${options[@]}"
-
-# if we cannot execute fping, stop
-fatal "command '${fping} ${options[@]}' failed to be executed."
diff --git a/plugins.d/loopsleepms.sh.inc b/plugins.d/loopsleepms.sh.inc
deleted file mode 100644
index ef3db192..00000000
--- a/plugins.d/loopsleepms.sh.inc
+++ /dev/null
@@ -1,189 +0,0 @@
-# no need for shebang - this file is included from other scripts
-
-LOOPSLEEP_DATE="$(which date 2>/dev/null || command -v date 2>/dev/null)"
-if [ -z "$LOOPSLEEP_DATE" ]
- then
- echo >&2 "$0: ERROR: Cannot find the command 'date' in the system path."
- exit 1
-fi
-
-# -----------------------------------------------------------------------------
-# use the date command as a high resolution timer
-
-now_ms=
-LOOPSLEEPMS_HIGHRES=1
-test "$($LOOPSLEEP_DATE +%N)" = "%N" && LOOPSLEEPMS_HIGHRES=0
-test -z "$($LOOPSLEEP_DATE +%N)" && LOOPSLEEPMS_HIGHRES=0
-current_time_ms_from_date() {
- if [ $LOOPSLEEPMS_HIGHRES -eq 0 ]
- then
- now_ms="$($LOOPSLEEP_DATE +'%s')000"
- else
- now_ms="$(( $( $LOOPSLEEP_DATE +'%s * 1000 + %-N / 1000000' ) ))"
- fi
-}
-
-# -----------------------------------------------------------------------------
-# use /proc/uptime as a high resolution timer
-
-current_time_ms_from_date
-current_time_ms_from_uptime_started="${now_ms}"
-current_time_ms_from_uptime_last="${now_ms}"
-current_time_ms_from_uptime_first=0
-current_time_ms_from_uptime() {
- local up rest arr=() n
-
- read up rest </proc/uptime
- if [ $? -ne 0 ]
- then
- echo >&2 "$0: Cannot read /proc/uptime - falling back to current_time_ms_from_date()."
- current_time_ms="current_time_ms_from_date"
- current_time_ms_from_date
- current_time_ms_accuracy=1
- return
- fi
-
- arr=(${up//./ })
-
- if [ ${#arr[1]} -lt 1 ]
- then
- n="${arr[0]}000"
- elif [ ${#arr[1]} -lt 2 ]
- then
- n="${arr[0]}${arr[1]}00"
- elif [ ${#arr[1]} -lt 3 ]
- then
- n="${arr[0]}${arr[1]}0"
- else
- n="${arr[0]}${arr[1]}"
- fi
-
- now_ms=$((current_time_ms_from_uptime_started - current_time_ms_from_uptime_first + n))
-
- if [ "${now_ms}" -lt "${current_time_ms_from_uptime_last}" ]
- then
- echo >&2 "$0: Cannot use current_time_ms_from_uptime() - new time ${now_ms} is older than the last ${current_time_ms_from_uptime_last} - falling back to current_time_ms_from_date()."
- current_time_ms="current_time_ms_from_date"
- current_time_ms_from_date
- current_time_ms_accuracy=1
- fi
-
- current_time_ms_from_uptime_last="${now_ms}"
-}
-current_time_ms_from_uptime
-current_time_ms_from_uptime_first="$((now_ms - current_time_ms_from_uptime_started))"
-current_time_ms_from_uptime_last="${current_time_ms_from_uptime_first}"
-current_time_ms="current_time_ms_from_uptime"
-current_time_ms_accuracy=10
-if [ "${current_time_ms_from_uptime_first}" -eq 0 ]
- then
- echo >&2 "$0: Invalid setup for current_time_ms_from_uptime() - falling back to current_time_ms_from_date()."
- current_time_ms="current_time_ms_from_date"
- current_time_ms_accuracy=1
-fi
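Scripts that include this file call whichever implementation was selected above through the ${current_time_ms} indirection; a minimal usage sketch (the include path is assumed):

    . ./loopsleepms.sh.inc
    ${current_time_ms}     # sets ${now_ms}
    echo "now: ${now_ms} ms (accuracy ~${current_time_ms_accuracy} ms)"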
-
-# -----------------------------------------------------------------------------
-# use read with timeout for sleep
-
-mysleep="mysleep_read"
-
-mysleep_fifo="${NETDATA_CACHE_DIR-/tmp}/.netdata_bash_sleep_timer_fifo"
-[ ! -e "${mysleep_fifo}" ] && mkfifo "${mysleep_fifo}"
-[ ! -e "${mysleep_fifo}" ] && mysleep="sleep"
-
-mysleep_read() {
- read -t "${1}" <>"${mysleep_fifo}"
- ret=$?
- if [ $ret -le 128 ]
- then
- echo >&2 "$0: Cannot use read for sleeping (return code ${ret})."
- mysleep="sleep"
- ${mysleep} "${1}"
- fi
-}
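The same trick in isolation, for clarity: opening the fifo read-write keeps read from blocking on a missing writer, and read -t (fractional timeouts need bash 4 or later) sleeps without forking a sleep process. The fifo path below is arbitrary:

    fifo="/tmp/.sleep_fifo.$$"
    [ -p "${fifo}" ] || mkfifo "${fifo}"
    read -t 0.25 <>"${fifo}"     # returns after ~250 ms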
-
-
-# -----------------------------------------------------------------------------
-# this function is used to sleep a fraction of a second
-# it calculates the time elapsed between successive calls
-# and aligns the sleep time so that you get exactly the
-# loop interval you need.
-
-LOOPSLEEPMS_LASTRUN=0
-LOOPSLEEPMS_NEXTRUN=0
-LOOPSLEEPMS_LASTSLEEP=0
-LOOPSLEEPMS_LASTWORK=0
-
-loopsleepms() {
- local tellwork=0 t="${1}" div s m now mstosleep
-
- if [ "${t}" = "tellwork" ]
- then
- tellwork=1
- shift
- t="${1}"
- fi
-
- # $t = the time in seconds to wait
-
- # if high resolution is not supported
- # just sleep the time requested, in seconds
- if [ ${LOOPSLEEPMS_HIGHRES} -eq 0 ]
- then
- sleep ${t}
- return
- fi
-
- # get the current time, in ms in ${now_ms}
- ${current_time_ms}
-
- # calculate ms since last run
- [ ${LOOPSLEEPMS_LASTRUN} -gt 0 ] && \
- LOOPSLEEPMS_LASTWORK=$((now_ms - LOOPSLEEPMS_LASTRUN - LOOPSLEEPMS_LASTSLEEP + current_time_ms_accuracy))
- # echo "# last loop's work took $LOOPSLEEPMS_LASTWORK ms"
-
- # remember this run
- LOOPSLEEPMS_LASTRUN=${now_ms}
-
- # calculate the next run
- LOOPSLEEPMS_NEXTRUN=$(( ( now_ms - ( now_ms % ( t * 1000 ) ) ) + ( t * 1000 ) ))
-
- # calculate ms to sleep
- mstosleep=$(( LOOPSLEEPMS_NEXTRUN - now_ms + current_time_ms_accuracy ))
- # echo "# mstosleep is $mstosleep ms"
-
- # even if we are running late, sleep at least 200 ms
- test ${mstosleep} -lt 200 && mstosleep=200
-
- s=$(( mstosleep / 1000 ))
- m=$(( mstosleep - (s * 1000) ))
- [ "${m}" -lt 100 ] && m="0${m}"
- [ "${m}" -lt 10 ] && m="0${m}"
-
- test $tellwork -eq 1 && echo >&2 " >>> PERFORMANCE >>> WORK TOOK ${LOOPSLEEPMS_LASTWORK} ms ( $((LOOPSLEEPMS_LASTWORK * 100 / 1000)).$((LOOPSLEEPMS_LASTWORK % 10))% cpu ) >>> SLEEPING ${mstosleep} ms"
-
- # echo "# sleeping ${s}.${m}"
- # echo
- ${mysleep} ${s}.${m}
-
- # keep the values we need
- # for our next run
- LOOPSLEEPMS_LASTSLEEP=$mstosleep
-}
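A hedged usage sketch of loopsleepms() in a collector loop; collect_something is a placeholder and the include path is assumed. The commented-out test loop below exercises the same idea.

    . ./loopsleepms.sh.inc
    while true
    do
        collect_something            # hypothetical work
        loopsleepms tellwork 5       # align the loop to 5-second boundaries
    done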
-
-# test it
-#while [ 1 ]
-#do
-# r=$(( (RANDOM * 2000 / 32767) ))
-# s=$((r / 1000))
-# m=$((r - (s * 1000)))
-# [ "${m}" -lt 100 ] && m="0${m}"
-# [ "${m}" -lt 10 ] && m="0${m}"
-# echo "${r} = ${s}.${m}"
-#
-# # the work
-# ${mysleep} ${s}.${m}
-#
-# # the alignment loop
-# loopsleepms tellwork 1
-#done
diff --git a/plugins.d/node.d.plugin b/plugins.d/node.d.plugin
deleted file mode 100755
index b1620391..00000000
--- a/plugins.d/node.d.plugin
+++ /dev/null
@@ -1,294 +0,0 @@
-#!/usr/bin/env bash
-':' //; exec "$(command -v nodejs || command -v node || command -v js || echo "ERROR node.js IS NOT AVAILABLE IN THIS SYSTEM")" "$0" "$@"
-
-// shebang hack from:
-// http://unix.stackexchange.com/questions/65235/universal-node-js-shebang
-
-// Initially this is run as a shell script.
-// Then the second line finds nodejs, node or js in the system path
-// and executes it with the shell parameters.
-
-// netdata
-// real-time performance and health monitoring, done right!
-// (C) 2017 Costa Tsaousis <costa@tsaousis.gr>
-// GPL v3+
-
-// --------------------------------------------------------------------------------------------------------------------
-
-'use strict';
-
-// --------------------------------------------------------------------------------------------------------------------
-// get NETDATA environment variables
-
-var NETDATA_PLUGINS_DIR = process.env.NETDATA_PLUGINS_DIR || __dirname;
-var NETDATA_CONFIG_DIR = process.env.NETDATA_CONFIG_DIR || __dirname + '/../../../../etc/netdata';
-var NETDATA_UPDATE_EVERY = process.env.NETDATA_UPDATE_EVERY || 1;
-var NODE_D_DIR = NETDATA_PLUGINS_DIR + '/../node.d';
-
-// make sure the modules are found
-process.mainModule.paths.unshift(NODE_D_DIR + '/node_modules');
-process.mainModule.paths.unshift(NODE_D_DIR);
-
-
-// --------------------------------------------------------------------------------------------------------------------
-// load required modules
-
-var fs = require('fs');
-var url = require('url');
-var util = require('util');
-var http = require('http');
-var path = require('path');
-var extend = require('extend');
-var netdata = require('netdata');
-
-
-// --------------------------------------------------------------------------------------------------------------------
-// configuration
-
-function pluginConfig(filename) {
- var f = path.basename(filename);
-
- // node.d.plugin configuration
- var m = f.match('.plugin' + '$');
- if(m !== null)
- return netdata.options.paths.config + '/' + f.substring(0, m.index) + '.conf';
-
- // node.d modules configuration
- m = f.match('.node.js' + '$');
- if(m !== null)
- return netdata.options.paths.config + '/node.d/' + f.substring(0, m.index) + '.conf';
-
- return netdata.options.paths.config + '/node.d/' + f + '.conf';
-}
-
-// internal defaults
-extend(true, netdata.options, {
- filename: path.basename(__filename),
-
- update_every: NETDATA_UPDATE_EVERY,
-
- paths: {
- plugins: NETDATA_PLUGINS_DIR,
- config: NETDATA_CONFIG_DIR,
- modules: [],
- },
-
- modules_enable_autodetect: true,
- modules_enable_all: true,
- modules: {},
-});
-netdata.options.config_filename = pluginConfig(__filename);
-
-// load configuration file
-try {
- netdata.options_loaded = JSON.parse(fs.readFileSync(netdata.options.config_filename, 'utf8'));
- extend(true, netdata.options, netdata.options_loaded);
-
- if(!netdata.options.paths.plugins)
- netdata.options.paths.plugins = NETDATA_PLUGINS_DIR;
-
- if(!netdata.options.paths.config)
- netdata.options.paths.config = NETDATA_CONFIG_DIR;
-
- // console.error('merged netdata object:');
- // console.error(util.inspect(netdata, {depth: 10}));
-}
-catch(e) {
- netdata.error('Cannot read configuration file ' + netdata.options.config_filename + ': ' + e.message + ', using internal defaults.');
- netdata.options_loaded = undefined;
- dumpError(e);
-}
-
-
-// apply module paths to node.js process
-function applyModulePaths() {
- var len = netdata.options.paths.modules.length;
- while(len--)
- process.mainModule.paths.unshift(netdata.options.paths.modules[len]);
-}
-applyModulePaths();
-
-
-// --------------------------------------------------------------------------------------------------------------------
-// tracing
-
-function dumpError(err) {
- if (typeof err === 'object') {
- if (err.stack) {
- netdata.debug(err.stack);
- }
- }
-}
-
-// --------------------------------------------------------------------------------------------------------------------
-// get command line arguments
-{
- var found_myself = false;
- var found_number = false;
- var found_modules = false;
- process.argv.forEach(function (val, index, array) {
- netdata.debug('PARAM: ' + val);
-
- if(!found_myself) {
- if(val === __filename)
- found_myself = true;
- }
- else {
- switch(val) {
- case 'debug':
- netdata.options.DEBUG = true;
- netdata.debug('DEBUG enabled');
- break;
-
- default:
- if(found_number === true) {
- if(found_modules === false) {
- for(var i in netdata.options.modules)
- netdata.options.modules[i].enabled = false;
- }
-
- if(typeof netdata.options.modules[val] === 'undefined')
- netdata.options.modules[val] = {};
-
- netdata.options.modules[val].enabled = true;
- netdata.options.modules_enable_all = false;
- netdata.debug('enabled module ' + val);
- }
- else {
- try {
- var x = parseInt(val);
- if(x > 0) {
- netdata.options.update_every = x;
- if(netdata.options.update_every < NETDATA_UPDATE_EVERY) {
- netdata.options.update_every = NETDATA_UPDATE_EVERY;
- netdata.debug('Update frequency ' + x + 's is too low');
- }
-
- found_number = true;
- netdata.debug('Update frequency set to ' + netdata.options.update_every + ' seconds');
- }
- else netdata.error('Ignoring parameter: ' + val);
- }
- catch(e) {
- netdata.error('Cannot get value of parameter: ' + val);
- dumpError(e);
- }
- }
- break;
- }
- }
- });
-}
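Based on the argument handling above, a hedged example invocation; the path is an assumption and 'mymodule' is a placeholder for a .node.js module name:

    # 5-second updates, debug logging, run only the 'mymodule' module
    /usr/libexec/netdata/plugins.d/node.d.plugin 5 debug mymodule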
-
-if(netdata.options.update_every < 1) {
- netdata.debug('Adjusting update frequency to 1 second');
- netdata.options.update_every = 1;
-}
-
-// --------------------------------------------------------------------------------------------------------------------
-// find modules
-
-function findModules() {
- var found = 0;
-
- var files = fs.readdirSync(NODE_D_DIR);
- var len = files.length;
- while(len--) {
- var m = files[len].match('.node.js' + '$');
- if(m !== null) {
- var n = files[len].substring(0, m.index);
-
- if(typeof(netdata.options.modules[n]) === 'undefined')
- netdata.options.modules[n] = { name: n, enabled: netdata.options.modules_enable_all };
-
- if(netdata.options.modules[n].enabled === true) {
- netdata.options.modules[n].name = n;
- netdata.options.modules[n].filename = NODE_D_DIR + '/' + files[len];
- netdata.options.modules[n].loaded = false;
-
- if(typeof(netdata.options.modules[n].config_filename) !== 'string')
- netdata.options.modules[n].config_filename = pluginConfig(files[len]);
-
- // load the module
- try {
- netdata.debug('loading module ' + netdata.options.modules[n].filename);
- netdata.options.modules[n].module = require(netdata.options.modules[n].filename);
- netdata.options.modules[n].module.name = n;
- netdata.debug('loaded module ' + netdata.options.modules[n].name + ' from ' + netdata.options.modules[n].filename);
- }
- catch(e) {
- netdata.options.modules[n].enabled = false;
- netdata.error('Cannot load module: ' + netdata.options.modules[n].filename + ' exception: ' + e);
- dumpError(e);
- continue;
- }
-
- // load its configuration
- var c = {
- enable_autodetect: netdata.options.modules_enable_autodetect,
- update_every: netdata.options.update_every
- };
- try {
- netdata.debug('loading module\'s ' + netdata.options.modules[n].name + ' config ' + netdata.options.modules[n].config_filename);
- var c2 = JSON.parse(fs.readFileSync(netdata.options.modules[n].config_filename, 'utf8'));
- extend(true, c, c2);
- netdata.debug('loaded module\'s ' + netdata.options.modules[n].name + ' config ' + netdata.options.modules[n].config_filename);
- }
- catch(e) {
- netdata.error('Cannot load module\'s ' + netdata.options.modules[n].name + ' config from ' + netdata.options.modules[n].config_filename + ' exception: ' + e + ', using internal defaults.');
- dumpError(e);
- }
-
- // call module auto-detection / configuration
- try {
- netdata.modules_configuring++;
- netdata.debug('Configuring module ' + netdata.options.modules[n].name);
- var serv = netdata.configure(netdata.options.modules[n].module, c, function() {
- netdata.debug('Configured module ' + netdata.options.modules[n].name);
- netdata.modules_configuring--;
- });
-
- netdata.debug('Configuring module ' + netdata.options.modules[n].name + ' reports ' + serv + ' eligible services.');
- }
- catch(e) {
- netdata.modules_configuring--;
- netdata.options.modules[n].enabled = false;
- netdata.error('Failed module auto-detection: ' + netdata.options.modules[n].name + ' exception: ' + e + ', disabling module.');
- dumpError(e);
- continue;
- }
-
- netdata.options.modules[n].loaded = true;
- found++;
- }
- }
- }
-
- // netdata.debug(netdata.options.modules);
- return found;
-}
-
-if(findModules() === 0) {
- netdata.error('Cannot load any .node.js module from: ' + NODE_D_DIR);
- netdata.disableNodePlugin();
- process.exit(1);
-}
-
-
-// --------------------------------------------------------------------------------------------------------------------
-// start
-
-function start_when_configuring_ends() {
- if(netdata.modules_configuring > 0) {
- netdata.debug('Waiting modules configuration, still running ' + netdata.modules_configuring);
- setTimeout(start_when_configuring_ends, 500);
- return;
- }
-
- netdata.modules_configuring = 0;
- netdata.start();
-}
-start_when_configuring_ends();
-
-//netdata.debug('netdata object:')
-//netdata.debug(netdata);
diff --git a/plugins.d/python.d.plugin b/plugins.d/python.d.plugin
deleted file mode 100755
index c9b26016..00000000
--- a/plugins.d/python.d.plugin
+++ /dev/null
@@ -1,382 +0,0 @@
-#!/usr/bin/env bash
-'''':; exec "$(command -v python || command -v python3 || command -v python2 ||
-echo "ERROR python IS NOT AVAILABLE IN THIS SYSTEM")" "$0" "$@" # '''
-
-# -*- coding: utf-8 -*-
-# Description:
-# Author: Pawel Krupa (paulfantom)
-# Author: Ilya Mashchenko (l2isbad)
-
-import os
-import sys
-import threading
-
-from re import sub
-from sys import version_info, argv
-from time import sleep
-
-try:
- from time import monotonic as time
-except ImportError:
- from time import time
-
-PY_VERSION = version_info[:2]
-PLUGIN_CONFIG_DIR = os.getenv('NETDATA_CONFIG_DIR', os.path.dirname(__file__) + '/../../../../etc/netdata') + '/'
-CHARTS_PY_DIR = os.path.abspath(os.getenv('NETDATA_PLUGINS_DIR', os.path.dirname(__file__)) + '/../python.d') + '/'
-CHARTS_PY_CONFIG_DIR = PLUGIN_CONFIG_DIR + 'python.d/'
-PYTHON_MODULES_DIR = CHARTS_PY_DIR + 'python_modules'
-
-sys.path.append(PYTHON_MODULES_DIR)
-
-from bases.loaders import ModuleAndConfigLoader
-from bases.loggers import PythonDLogger
-from bases.collection import setdefault_values, run_and_exit
-
-try:
- from collections import OrderedDict
-except ImportError:
- from third_party.ordereddict import OrderedDict
-
-BASE_CONFIG = {'update_every': os.getenv('NETDATA_UPDATE_EVERY', 1),
- 'retries': 60,
- 'priority': 60000,
- 'autodetection_retry': 0,
- 'chart_cleanup': 10,
- 'name': str()}
-
-
-MODULE_EXTENSION = '.chart.py'
-OBSOLETE_MODULES = ['apache_cache', 'gunicorn_log', 'nginx_log']
-
-
-def module_ok(m):
- return m.endswith(MODULE_EXTENSION) and m[:-len(MODULE_EXTENSION)] not in OBSOLETE_MODULES
-
-
-ALL_MODULES = [m for m in sorted(os.listdir(CHARTS_PY_DIR)) if module_ok(m)]
-
-
-def parse_cmd():
- debug = 'debug' in argv[1:]
- trace = 'trace' in argv[1:]
- override_update_every = next((arg for arg in argv[1:] if arg.isdigit() and int(arg) > 1), False)
- modules = [''.join([m, MODULE_EXTENSION]) for m in argv[1:] if ''.join([m, MODULE_EXTENSION]) in ALL_MODULES]
- return debug, trace, override_update_every, modules or ALL_MODULES
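Based on parse_cmd() above, a hedged example invocation from a shell; the module names are placeholders and must exist as .chart.py files under the python.d directory:

    # debug + trace logging, 5-second updates, run only two modules
    ./python.d.plugin debug trace 5 nginx mysql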
-
-
-def multi_job_check(config):
- return next((True for key in config if isinstance(config[key], dict)), False)
-
-
-class Job(object):
- def __init__(self, initialized_job, job_id):
- """
- :param initialized_job: instance of <Class Service>
- :param job_id: <str>
- """
- self.job = initialized_job
- self.id = job_id # key in Modules.jobs()
- self.module_name = self.job.__module__ # used in Plugin.delete_job()
- self.recheck_every = self.job.configuration.pop('autodetection_retry')
- self.checked = False # used in Plugin.check_job()
- self.created = False # used in Plugin.create_job_charts()
- if OVERRIDE_UPDATE_EVERY:
- self.job.update_every = int(OVERRIDE_UPDATE_EVERY)
-
- def __getattr__(self, item):
- return getattr(self.job, item)
-
- def __repr__(self):
- return self.job.__repr__()
-
- def is_dead(self):
- return bool(self.ident) and not self.is_alive()
-
- def not_launched(self):
- return not bool(self.ident)
-
- def is_autodetect(self):
- return self.recheck_every
-
-
-class Module(object):
- def __init__(self, service, config):
- """
- :param service: <Module>
- :param config: <dict>
- """
- self.service = service
- self.name = service.__name__
- self.config = self.jobs_configurations_builder(config)
- self.jobs = OrderedDict()
- self.counter = 1
-
- self.initialize_jobs()
-
- def __repr__(self):
- return "<Class Module '{name}'>".format(name=self.name)
-
- def __iter__(self):
- return iter(OrderedDict(self.jobs).values())
-
- def __getitem__(self, item):
- return self.jobs[item]
-
- def __delitem__(self, key):
- del self.jobs[key]
-
- def __len__(self):
- return len(self.jobs)
-
- def __bool__(self):
- return bool(self.jobs)
-
- def __nonzero__(self):
- return self.__bool__()
-
- def jobs_configurations_builder(self, config):
- """
- :param config: <dict>
- :return:
- """
- counter = 0
- job_base_config = dict()
-
- for attr in BASE_CONFIG:
- job_base_config[attr] = config.pop(attr, getattr(self.service, attr, BASE_CONFIG[attr]))
-
- if not config:
- config = {str(): dict()}
- elif not multi_job_check(config):
- config = {str(): config}
-
- for job_name in config:
- if not isinstance(config[job_name], dict):
- continue
-
- job_config = setdefault_values(config[job_name], base_dict=job_base_config)
- job_name = sub(r'\s+', '_', job_name)
- config[job_name]['name'] = sub(r'\s+', '_', config[job_name]['name'])
- counter += 1
- job_id = 'job' + str(counter).zfill(3)
-
- yield job_id, job_name, job_config
-
- def initialize_jobs(self):
- """
- :return:
- """
- for job_id, job_name, job_config in self.config:
- job_config['job_name'] = job_name
- job_config['override_name'] = job_config.pop('name')
-
- try:
- initialized_job = self.service.Service(configuration=job_config)
- except Exception as error:
- Logger.error("job initialization: '{module_name} {job_name}' "
- "=> ['FAILED'] ({error})".format(module_name=self.name,
- job_name=job_name,
- error=error))
- continue
- else:
- Logger.debug("job initialization: '{module_name} {job_name}' "
- "=> ['OK']".format(module_name=self.name,
- job_name=job_name or self.name))
- self.jobs[job_id] = Job(initialized_job=initialized_job,
- job_id=job_id)
- del self.config
- del self.service
-
-
-class Plugin(object):
- def __init__(self):
- self.loader = ModuleAndConfigLoader()
- self.modules = OrderedDict()
- self.sleep_time = 1
- self.runs_counter = 0
- self.config, error = self.loader.load_config_from_file(PLUGIN_CONFIG_DIR + 'python.d.conf')
- if error:
- Logger.error('"python.d.conf" configuration file not found. Using defaults.')
-
- if not self.config.get('enabled', True):
- run_and_exit(Logger.info)('DISABLED in configuration file.')
-
- self.load_and_initialize_modules()
- if not self.modules:
- run_and_exit(Logger.info)('No modules to run. Exit...')
-
- def __iter__(self):
- return iter(OrderedDict(self.modules).values())
-
- @property
- def jobs(self):
- return (job for mod in self for job in mod)
-
- @property
- def dead_jobs(self):
- return (job for job in self.jobs if job.is_dead())
-
- @property
- def autodetect_jobs(self):
- return [job for job in self.jobs if job.not_launched()]
-
- def enabled_modules(self):
- for mod in MODULES_TO_RUN:
- mod_name = mod[:-len(MODULE_EXTENSION)]
- mod_path = CHARTS_PY_DIR + mod
- conf_path = ''.join([CHARTS_PY_CONFIG_DIR, mod_name, '.conf'])
-
- if DEBUG:
- yield mod, mod_name, mod_path, conf_path
- else:
- if all([self.config.get('default_run', True),
- self.config.get(mod_name, True)]):
- yield mod, mod_name, mod_path, conf_path
-
- elif all([not self.config.get('default_run'),
- self.config.get(mod_name)]):
- yield mod, mod_name, mod_path, conf_path
-
- def load_and_initialize_modules(self):
- for mod, mod_name, mod_path, conf_path in self.enabled_modules():
-
- # Load module from file ------------------------------------------------------------
- loaded_module, error = self.loader.load_module_from_file(mod_name, mod_path)
- log = Logger.error if error else Logger.debug
- log("module load source: '{module_name}' => [{status}]".format(status='FAILED' if error else 'OK',
- module_name=mod_name))
- if error:
- Logger.error("load source error : {0}".format(error))
- continue
-
- # Load module config from file ------------------------------------------------------
- loaded_config, error = self.loader.load_config_from_file(conf_path)
- log = Logger.error if error else Logger.debug
- log("module load config: '{module_name}' => [{status}]".format(status='FAILED' if error else 'OK',
- module_name=mod_name))
- if error:
- Logger.error('load config error : {0}'.format(error))
-
- # Service instance initialization ---------------------------------------------------
- initialized_module = Module(service=loaded_module, config=loaded_config)
- Logger.debug("module status: '{module_name}' => [{status}] "
- "(jobs: {jobs_number})".format(status='OK' if initialized_module else 'FAILED',
- module_name=initialized_module.name,
- jobs_number=len(initialized_module)))
-
- if initialized_module:
- self.modules[initialized_module.name] = initialized_module
-
- @staticmethod
- def check_job(job):
- """
- :param job: <Job>
- :return:
- """
- try:
- check_ok = bool(job.check())
- except Exception as error:
- job.error('check() unhandled exception: {error}'.format(error=error))
- return None
- else:
- return check_ok
-
- @staticmethod
- def create_job_charts(job):
- """
- :param job: <Job>
- :return:
- """
- try:
- create_ok = job.create()
- except Exception as error:
- job.error('create() unhandled exception: {error}'.format(error=error))
- return False
- else:
- return create_ok
-
- def delete_job(self, job):
- """
- :param job: <Job>
- :return:
- """
- del self.modules[job.module_name][job.id]
-
- def run_check(self):
- checked = list()
- for job in self.jobs:
- if job.name in checked:
- job.info('check() => [DROPPED] (already served by another job)')
- self.delete_job(job)
- continue
- ok = self.check_job(job)
- if ok:
- job.info('check() => [OK]')
- checked.append(job.name)
- job.checked = True
- continue
- if not job.is_autodetect() or ok is None:
- job.info('check() => [FAILED]')
- self.delete_job(job)
- else:
- job.info('check() => [RECHECK] (autodetection_retry: {0})'.format(job.recheck_every))
-
- def run_create(self):
- for job in self.jobs:
- if not job.checked:
- # skip autodetection_retry jobs
- continue
- ok = self.create_job_charts(job)
- if ok:
- job.debug('create() => [OK] (charts: {0})'.format(len(job.charts)))
- job.created = True
- continue
- job.error('create() => [FAILED] (charts: {0})'.format(len(job.charts)))
- self.delete_job(job)
-
- def start(self):
- self.run_check()
- self.run_create()
- for job in self.jobs:
- if job.created:
- job.start()
-
- while True:
- if threading.active_count() <= 1 and not self.autodetect_jobs:
- run_and_exit(Logger.info)('FINISHED')
-
- sleep(self.sleep_time)
- self.cleanup()
- self.autodetect_retry()
-
- def cleanup(self):
- for job in self.dead_jobs:
- self.delete_job(job)
- for mod in self:
- if not mod:
- del self.modules[mod.name]
-
- def autodetect_retry(self):
- self.runs_counter += self.sleep_time
- for job in self.autodetect_jobs:
- if self.runs_counter % job.recheck_every == 0:
- checked = self.check_job(job)
- if checked:
- created = self.create_job_charts(job)
- if not created:
- self.delete_job(job)
- continue
- job.start()
-
-
-if __name__ == '__main__':
- DEBUG, TRACE, OVERRIDE_UPDATE_EVERY, MODULES_TO_RUN = parse_cmd()
- Logger = PythonDLogger()
- if DEBUG:
- Logger.logger.severity = 'DEBUG'
- if TRACE:
- Logger.log_traceback = True
- Logger.info('Using python {version}'.format(version=PY_VERSION[0]))
-
- plugin = Plugin()
- plugin.start()
diff --git a/plugins.d/tc-qos-helper.sh b/plugins.d/tc-qos-helper.sh
deleted file mode 100755
index 9153f22e..00000000
--- a/plugins.d/tc-qos-helper.sh
+++ /dev/null
@@ -1,303 +0,0 @@
-#!/usr/bin/env bash
-
-# netdata
-# real-time performance and health monitoring, done right!
-# (C) 2017 Costa Tsaousis <costa@tsaousis.gr>
-# GPL v3+
-#
-# This script is a helper that allows netdata to collect tc data.
-# tc output parsing is implemented in C, inside netdata.
-# This script allows setting names for the dimensions.
-
-export PATH="${PATH}:/sbin:/usr/sbin:/usr/local/sbin"
-export LC_ALL=C
-
-
-# -----------------------------------------------------------------------------
-# find /var/run/fireqos
-
-# the default
-fireqos_run_dir="/var/run/fireqos"
-
-function realdir {
- local r="$1"
- local t=$(readlink "$r")
-
- while [ "$t" ]
- do
- r=$(cd $(dirname "$r") && cd $(dirname "$t") && pwd -P)/$(basename "$t")
- t=$(readlink "$r")
- done
-
- dirname "$r"
-}
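For illustration, realdir() follows a chain of symlinks and prints the directory the target really lives in; the paths below are hypothetical:

    # if /usr/local/bin/fireqos is a symlink to ../sbin/firehol/fireqos,
    # this prints /usr/local/sbin/firehol
    realdir /usr/local/bin/fireqos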
-
-if [ ! -d "${fireqos_run_dir}" ]
- then
-
- # the fireqos executable - we will use it to find its config
- fireqos="$(which fireqos 2>/dev/null || command -v fireqos 2>/dev/null)"
-
- if [ ! -z "${fireqos}" ]
- then
-
- fireqos_exec_dir="$(realdir ${fireqos})"
-
- if [ ! -z "${fireqos_exec_dir}" -a "${fireqos_exec_dir}" != "." -a -f "${fireqos_exec_dir}/install.config" ]
- then
-
- LOCALSTATEDIR=
- source "${fireqos_exec_dir}/install.config"
-
- if [ -d "${LOCALSTATEDIR}/run/fireqos" ]
- then
- fireqos_run_dir="${LOCALSTATEDIR}/run/fireqos"
- fi
- fi
- fi
-fi
-
-# -----------------------------------------------------------------------------
-# logging functions
-
-PROGRAM_FILE="$0"
-PROGRAM_NAME="$(basename $0)"
-PROGRAM_NAME="${PROGRAM_NAME/.plugin}"
-
-logdate() {
- date "+%Y-%m-%d %H:%M:%S"
-}
-
-log() {
- local status="${1}"
- shift
-
- echo >&2 "$(logdate): ${PROGRAM_NAME}: ${status}: ${*}"
-
-}
-
-warning() {
- log WARNING "${@}"
-}
-
-error() {
- log ERROR "${@}"
-}
-
-info() {
- log INFO "${@}"
-}
-
-fatal() {
- log FATAL "${@}"
- exit 1
-}
-
-debug=0
-debug() {
- [ $debug -eq 1 ] && log DEBUG "${@}"
-}
-
-
-# -----------------------------------------------------------------------------
-
-[ -z "${NETDATA_PLUGINS_DIR}" ] && NETDATA_PLUGINS_DIR="$(dirname "${0}")"
-[ -z "${NETDATA_CONFIG_DIR}" ] && NETDATA_CONFIG_DIR="$(dirname "${0}")/../../../../etc/netdata"
-
-plugins_dir="${NETDATA_PLUGINS_DIR}"
-config_dir="${NETDATA_CONFIG_DIR}"
-tc="$(which tc 2>/dev/null || command -v tc 2>/dev/null)"
-
-
-# -----------------------------------------------------------------------------
-# user configuration
-
-# time in seconds to refresh QoS class/qdisc names
-qos_get_class_names_every=120
-
-# time in seconds to exit - netdata will restart the script
-qos_exit_every=3600
-
-# what to use? classes or qdiscs?
-tc_show="qdisc" # can also be "class"
-
-
-# -----------------------------------------------------------------------------
-# check if we have a valid number for interval
-
-t=${1}
-update_every=$((t))
-[ $((update_every)) -lt 1 ] && update_every=${NETDATA_UPDATE_EVERY}
-[ $((update_every)) -lt 1 ] && update_every=1
-
-
-# -----------------------------------------------------------------------------
-# allow the user to override our defaults
-
-if [ -f "${config_dir}/tc-qos-helper.conf" ]
- then
- source "${config_dir}/tc-qos-helper.conf"
-fi
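An illustrative ${config_dir}/tc-qos-helper.conf overriding the defaults above (values are placeholders):

    tc_show="class"                  # monitor classes instead of qdiscs
    qos_get_class_names_every=60     # refresh class/qdisc names every minute
    qos_exit_every=7200              # restart the helper every two hours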
-
-case "${tc_show}" in
- qdisc|class)
- ;;
-
- *)
- error "tc_show variable can be either 'qdisc' or 'class' but is set to '${tc_show}'. Assuming it is 'qdisc'."
- tc_show="qdisc"
- ;;
-esac
-
-
-# -----------------------------------------------------------------------------
-# default sleep function
-
-LOOPSLEEPMS_LASTWORK=0
-loopsleepms() {
- sleep $1
-}
-
-# if found and included, this file overwrites loopsleepms()
-# with a high resolution timer function for precise looping.
-. "${plugins_dir}/loopsleepms.sh.inc"
-
-
-# -----------------------------------------------------------------------------
-# final checks we can run
-
-if [ -z "${tc}" -o ! -x "${tc}" ]
- then
- fatal "cannot find command 'tc' in this system."
-fi
-
-tc_devices=
-fix_names=
-
-# -----------------------------------------------------------------------------
-
-setclassname() {
- if [ "${tc_show}" = "qdisc" ]
- then
- echo "SETCLASSNAME $4 $2"
- else
- echo "SETCLASSNAME $3 $2"
- fi
-}
-
-show_tc_cls() {
- [ "${tc_show}" = "qdisc" ] && return 1
-
- local x="${1}"
-
- if [ -f /etc/iproute2/tc_cls ]
- then
- local classid name rest
- while read classid name rest
- do
- [ -z "${classid}" -o -z "${name}" -o "${classid}" = "#" -o "${name}" = "#" -o "${classid:0:1}" = "#" -o "${name:0:1}" = "#" ] && continue
- setclassname "" "${name}" "${classid}"
- done </etc/iproute2/tc_cls
- return 0
- fi
- return 1
-}
-
-show_fireqos_names() {
- local x="${1}" name n interface_dev interface_classes interface_classes_monitor
-
- if [ -f "${fireqos_run_dir}/ifaces/${x}" ]
- then
- name="$(<"${fireqos_run_dir}/ifaces/${x}")"
- echo "SETDEVICENAME ${name}"
-
- interface_dev=
- interface_classes=
- interface_classes_monitor=
- source "${fireqos_run_dir}/${name}.conf"
- for n in ${interface_classes_monitor}
- do
- setclassname ${n//|/ }
- done
- [ ! -z "${interface_dev}" ] && echo "SETDEVICEGROUP ${interface_dev}"
-
- return 0
- fi
-
- return 1
-}
-
-show_tc() {
- local x="${1}"
-
- echo "BEGIN ${x}"
-
- # netdata can parse the output of tc
- ${tc} -s ${tc_show} show dev ${x}
-
- # check FireQOS names for classes
- if [ ! -z "${fix_names}" ]
- then
- show_fireqos_names "${x}" || show_tc_cls "${x}"
- fi
-
- echo "END ${x}"
-}
-
-find_tc_devices() {
- local count=0 devs= dev rest l
-
- # find all the devices in the system
- # without forking
- while IFS=":| " read dev rest
- do
- count=$((count + 1))
- [ ${count} -le 2 ] && continue
- devs="${devs} ${dev}"
- done </proc/net/dev
-
- # from all the devices find the ones
- # that have QoS defined
- # unfortunately, one fork per device cannot be avoided
- tc_devices=
- for dev in ${devs}
- do
- l="$(${tc} class show dev ${dev} 2>/dev/null)"
- [ ! -z "${l}" ] && tc_devices="${tc_devices} ${dev}"
- done
-}
-
-# update devices and class names
-# once every 2 minutes
-names_every=$((qos_get_class_names_every / update_every))
-
-# exit this script every hour
-# it will be restarted automatically
-exit_after=$((qos_exit_every / update_every))
-
-c=0
-gc=0
-while [ 1 ]
-do
- fix_names=
- c=$((c + 1))
- gc=$((gc + 1))
-
- if [ ${c} -le 1 -o ${c} -ge ${names_every} ]
- then
- c=1
- fix_names="YES"
- find_tc_devices
- fi
-
- for d in ${tc_devices}
- do
- show_tc ${d}
- done
-
- echo "WORKTIME ${LOOPSLEEPMS_LASTWORK}"
-
- loopsleepms ${update_every}
-
- [ ${gc} -gt ${exit_after} ] && exit 0
-done