| author | Daniel Baumann <daniel.baumann@progress-linux.org> | 2018-11-07 12:22:44 +0000 |
|---|---|---|
| committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2018-11-07 12:22:44 +0000 |
| commit | 1e6c93250172946eeb38e94a92a1fd12c9d3011e (patch) | |
| tree | 8ca5e16dfc7ad6b3bf2738ca0a48408a950f8f7e /conf.d | |
| parent | Update watch file (diff) | |
| download | netdata-1e6c93250172946eeb38e94a92a1fd12c9d3011e.tar.xz netdata-1e6c93250172946eeb38e94a92a1fd12c9d3011e.zip | |
Merging upstream version 1.11.0+dfsg.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'conf.d')
134 files changed, 0 insertions, 10419 deletions
diff --git a/conf.d/Makefile.am b/conf.d/Makefile.am deleted file mode 100644 index d79bb5ab8..000000000 --- a/conf.d/Makefile.am +++ /dev/null @@ -1,160 +0,0 @@ -# -# Copyright (C) 2015 Alon Bar-Lev <alon.barlev@gmail.com> -# -MAINTAINERCLEANFILES= $(srcdir)/Makefile.in - -dist_config_DATA = \ - apps_groups.conf \ - charts.d.conf \ - fping.conf \ - node.d.conf \ - python.d.conf \ - health_alarm_notify.conf \ - health_email_recipients.conf \ - stream.conf \ - $(NULL) - -nodeconfigdir=$(configdir)/node.d -dist_nodeconfig_DATA = \ - node.d/README.md \ - node.d/fronius.conf.md \ - node.d/named.conf.md \ - node.d/sma_webbox.conf.md \ - node.d/snmp.conf.md \ - node.d/stiebeleltron.conf.md \ - $(NULL) - -pythonconfigdir=$(configdir)/python.d -dist_pythonconfig_DATA = \ - python.d/apache.conf \ - python.d/beanstalk.conf \ - python.d/bind_rndc.conf \ - python.d/ceph.conf \ - python.d/chrony.conf \ - python.d/couchdb.conf \ - python.d/cpufreq.conf \ - python.d/dns_query_time.conf \ - python.d/dnsdist.conf \ - python.d/dovecot.conf \ - python.d/elasticsearch.conf \ - python.d/example.conf \ - python.d/exim.conf \ - python.d/fail2ban.conf \ - python.d/freeradius.conf \ - python.d/go_expvar.conf \ - python.d/haproxy.conf \ - python.d/hddtemp.conf \ - python.d/httpcheck.conf \ - python.d/icecast.conf \ - python.d/ipfs.conf \ - python.d/isc_dhcpd.conf \ - python.d/mdstat.conf \ - python.d/memcached.conf \ - python.d/mongodb.conf \ - python.d/mysql.conf \ - python.d/nginx.conf \ - python.d/nginx_plus.conf \ - python.d/nsd.conf \ - python.d/ntpd.conf \ - python.d/ovpn_status_log.conf \ - python.d/phpfpm.conf \ - python.d/portcheck.conf \ - python.d/postfix.conf \ - python.d/postgres.conf \ - python.d/powerdns.conf \ - python.d/rabbitmq.conf \ - python.d/redis.conf \ - python.d/retroshare.conf \ - python.d/samba.conf \ - python.d/sensors.conf \ - python.d/springboot.conf \ - python.d/squid.conf \ - python.d/smartd_log.conf \ - python.d/tomcat.conf \ - python.d/traefik.conf \ - python.d/varnish.conf \ - python.d/web_log.conf \ - $(NULL) - -healthconfigdir=$(configdir)/health.d - -dist_healthconfig_DATA = \ - health.d/apache.conf \ - health.d/backend.conf \ - health.d/beanstalkd.conf \ - health.d/bind_rndc.conf \ - health.d/btrfs.conf \ - health.d/ceph.conf \ - health.d/cpu.conf \ - health.d/couchdb.conf \ - health.d/disks.conf \ - health.d/elasticsearch.conf \ - health.d/entropy.conf \ - health.d/fping.conf \ - health.d/fronius.conf \ - health.d/haproxy.conf \ - health.d/httpcheck.conf \ - health.d/ipc.conf \ - health.d/ipfs.conf \ - health.d/ipmi.conf \ - health.d/isc_dhcpd.conf \ - health.d/lighttpd.conf \ - health.d/mdstat.conf \ - health.d/memcached.conf \ - health.d/memory.conf \ - health.d/mongodb.conf \ - health.d/mysql.conf \ - health.d/named.conf \ - health.d/net.conf \ - health.d/netfilter.conf \ - health.d/nginx.conf \ - health.d/nginx_plus.conf \ - health.d/portcheck.conf \ - health.d/postgres.conf \ - health.d/qos.conf \ - health.d/ram.conf \ - health.d/redis.conf \ - health.d/retroshare.conf \ - health.d/softnet.conf \ - health.d/squid.conf \ - health.d/stiebeleltron.conf \ - health.d/swap.conf \ - health.d/tcp_conn.conf \ - health.d/tcp_listen.conf \ - health.d/tcp_mem.conf \ - health.d/tcp_orphans.conf \ - health.d/tcp_resets.conf \ - health.d/udp_errors.conf \ - health.d/varnish.conf \ - health.d/web_log.conf \ - health.d/zfs.conf \ - $(NULL) - -chartsconfigdir=$(configdir)/charts.d -dist_chartsconfig_DATA = \ - charts.d/apache.conf \ - charts.d/apcupsd.conf \ - charts.d/cpufreq.conf \ - 
charts.d/exim.conf \ - charts.d/libreswan.conf \ - charts.d/load_average.conf \ - charts.d/mysql.conf \ - charts.d/nut.conf \ - charts.d/phpfpm.conf \ - charts.d/sensors.conf \ - charts.d/tomcat.conf \ - charts.d/ap.conf \ - charts.d/cpu_apps.conf \ - charts.d/example.conf \ - charts.d/hddtemp.conf \ - charts.d/mem_apps.conf \ - charts.d/nginx.conf \ - charts.d/opensips.conf \ - charts.d/postfix.conf \ - charts.d/squid.conf \ - $(NULL) - -statsdconfigdir=$(configdir)/statsd.d -dist_statsdconfig_DATA = \ - statsd.d/example.conf \ - $(NULL) diff --git a/conf.d/Makefile.in b/conf.d/Makefile.in deleted file mode 100644 index 48ce51191..000000000 --- a/conf.d/Makefile.in +++ /dev/null @@ -1,788 +0,0 @@ -# Makefile.in generated by automake 1.14.1 from Makefile.am. -# @configure_input@ - -# Copyright (C) 1994-2013 Free Software Foundation, Inc. - -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - -@SET_MAKE@ - -VPATH = @srcdir@ -am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' -am__make_running_with_option = \ - case $${target_option-} in \ - ?) ;; \ - *) echo "am__make_running_with_option: internal error: invalid" \ - "target option '$${target_option-}' specified" >&2; \ - exit 1;; \ - esac; \ - has_opt=no; \ - sane_makeflags=$$MAKEFLAGS; \ - if $(am__is_gnu_make); then \ - sane_makeflags=$$MFLAGS; \ - else \ - case $$MAKEFLAGS in \ - *\\[\ \ ]*) \ - bs=\\; \ - sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ - | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ - esac; \ - fi; \ - skip_next=no; \ - strip_trailopt () \ - { \ - flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ - }; \ - for flg in $$sane_makeflags; do \ - test $$skip_next = yes && { skip_next=no; continue; }; \ - case $$flg in \ - *=*|--*) continue;; \ - -*I) strip_trailopt 'I'; skip_next=yes;; \ - -*I?*) strip_trailopt 'I';; \ - -*O) strip_trailopt 'O'; skip_next=yes;; \ - -*O?*) strip_trailopt 'O';; \ - -*l) strip_trailopt 'l'; skip_next=yes;; \ - -*l?*) strip_trailopt 'l';; \ - -[dEDm]) skip_next=yes;; \ - -[JT]) skip_next=yes;; \ - esac; \ - case $$flg in \ - *$$target_option*) has_opt=yes; break;; \ - esac; \ - done; \ - test $$has_opt = yes -am__make_dryrun = (target_option=n; $(am__make_running_with_option)) -am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) -pkgdatadir = $(datadir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkglibexecdir = $(libexecdir)/@PACKAGE@ -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -subdir = conf.d -DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ - $(dist_chartsconfig_DATA) $(dist_config_DATA) \ - $(dist_healthconfig_DATA) $(dist_nodeconfig_DATA) \ - $(dist_pythonconfig_DATA) $(dist_statsdconfig_DATA) -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = 
$(top_srcdir)/m4/ax_c___atomic.m4 \ - $(top_srcdir)/m4/ax_c__generic.m4 $(top_srcdir)/m4/ax_c_lto.m4 \ - $(top_srcdir)/m4/ax_c_mallinfo.m4 \ - $(top_srcdir)/m4/ax_c_mallopt.m4 \ - $(top_srcdir)/m4/ax_check_compile_flag.m4 \ - $(top_srcdir)/m4/ax_gcc_func_attribute.m4 \ - $(top_srcdir)/m4/ax_pthread.m4 $(top_srcdir)/m4/jemalloc.m4 \ - $(top_srcdir)/m4/tcmalloc.m4 $(top_srcdir)/configure.ac -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(install_sh) -d -CONFIG_HEADER = $(top_builddir)/config.h -CONFIG_CLEAN_FILES = -CONFIG_CLEAN_VPATH_FILES = -AM_V_P = $(am__v_P_@AM_V@) -am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) -am__v_P_0 = false -am__v_P_1 = : -AM_V_GEN = $(am__v_GEN_@AM_V@) -am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) -am__v_GEN_0 = @echo " GEN " $@; -am__v_GEN_1 = -AM_V_at = $(am__v_at_@AM_V@) -am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) -am__v_at_0 = @ -am__v_at_1 = -SOURCES = -DIST_SOURCES = -am__can_run_installinfo = \ - case $$AM_UPDATE_INFO_DIR in \ - n|no|NO) false;; \ - *) (install-info --version) >/dev/null 2>&1;; \ - esac -am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; -am__vpath_adj = case $$p in \ - $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ - *) f=$$p;; \ - esac; -am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; -am__install_max = 40 -am__nobase_strip_setup = \ - srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` -am__nobase_strip = \ - for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" -am__nobase_list = $(am__nobase_strip_setup); \ - for p in $$list; do echo "$$p $$p"; done | \ - sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ - $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ - if (++n[$$2] == $(am__install_max)) \ - { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ - END { for (dir in files) print dir, files[dir] }' -am__base_list = \ - sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ - sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' -am__uninstall_files_from_dir = { \ - test -z "$$files" \ - || { test ! -d "$$dir" && test ! -f "$$dir" && test ! 
-r "$$dir"; } \ - || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ - $(am__cd) "$$dir" && rm -f $$files; }; \ - } -am__installdirs = "$(DESTDIR)$(chartsconfigdir)" \ - "$(DESTDIR)$(configdir)" "$(DESTDIR)$(healthconfigdir)" \ - "$(DESTDIR)$(nodeconfigdir)" "$(DESTDIR)$(pythonconfigdir)" \ - "$(DESTDIR)$(statsdconfigdir)" -DATA = $(dist_chartsconfig_DATA) $(dist_config_DATA) \ - $(dist_healthconfig_DATA) $(dist_nodeconfig_DATA) \ - $(dist_pythonconfig_DATA) $(dist_statsdconfig_DATA) -am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) -ACLOCAL = @ACLOCAL@ -AMTAR = @AMTAR@ -AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ -AUTOCONF = @AUTOCONF@ -AUTOHEADER = @AUTOHEADER@ -AUTOMAKE = @AUTOMAKE@ -AWK = @AWK@ -CC = @CC@ -CCDEPMODE = @CCDEPMODE@ -CFLAGS = @CFLAGS@ -CPP = @CPP@ -CPPFLAGS = @CPPFLAGS@ -CYGPATH_W = @CYGPATH_W@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -GREP = @GREP@ -INSTALL = @INSTALL@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ -IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ -LDFLAGS = @LDFLAGS@ -LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ -LIBCAP_LIBS = @LIBCAP_LIBS@ -LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ -LIBMNL_LIBS = @LIBMNL_LIBS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LTLIBOBJS = @LTLIBOBJS@ -MAINT = @MAINT@ -MAKEINFO = @MAKEINFO@ -MATH_CFLAGS = @MATH_CFLAGS@ -MATH_LIBS = @MATH_LIBS@ -MKDIR_P = @MKDIR_P@ -NFACCT_CFLAGS = @NFACCT_CFLAGS@ -NFACCT_LIBS = @NFACCT_LIBS@ -OBJEXT = @OBJEXT@ -OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ -OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ -OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ -OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ -OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ -OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ -OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ -OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ -OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ -OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ -OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ -OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ -PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_URL = @PACKAGE_URL@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PKG_CONFIG = @PKG_CONFIG@ -PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ -PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ -PTHREAD_CC = @PTHREAD_CC@ -PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ -PTHREAD_LIBS = @PTHREAD_LIBS@ -SET_MAKE = @SET_MAKE@ -SHELL = @SHELL@ -SSE_CANDIDATE = @SSE_CANDIDATE@ -STRIP = @STRIP@ -UUID_CFLAGS = @UUID_CFLAGS@ -UUID_LIBS = @UUID_LIBS@ -VERSION = @VERSION@ -ZLIB_CFLAGS = @ZLIB_CFLAGS@ -ZLIB_LIBS = @ZLIB_LIBS@ -abs_builddir = @abs_builddir@ -abs_srcdir = @abs_srcdir@ -abs_top_builddir = @abs_top_builddir@ -abs_top_srcdir = @abs_top_srcdir@ -ac_ct_CC = @ac_ct_CC@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -ax_pthread_config = @ax_pthread_config@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_vendor = 
@build_vendor@ -builddir = @builddir@ -cachedir = @cachedir@ -chartsdir = @chartsdir@ -configdir = @configdir@ -datadir = @datadir@ -datarootdir = @datarootdir@ -docdir = @docdir@ -dvidir = @dvidir@ -exec_prefix = @exec_prefix@ -has_jemalloc = @has_jemalloc@ -has_tcmalloc = @has_tcmalloc@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ -htmldir = @htmldir@ -includedir = @includedir@ -infodir = @infodir@ -install_sh = @install_sh@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localedir = @localedir@ -localstatedir = @localstatedir@ -logdir = @logdir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -nodedir = @nodedir@ -oldincludedir = @oldincludedir@ -pdfdir = @pdfdir@ -pluginsdir = @pluginsdir@ -prefix = @prefix@ -program_transform_name = @program_transform_name@ -psdir = @psdir@ -pythondir = @pythondir@ -registrydir = @registrydir@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -srcdir = @srcdir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ -top_build_prefix = @top_build_prefix@ -top_builddir = @top_builddir@ -top_srcdir = @top_srcdir@ -varlibdir = @varlibdir@ -webdir = @webdir@ - -# -# Copyright (C) 2015 Alon Bar-Lev <alon.barlev@gmail.com> -# -MAINTAINERCLEANFILES = $(srcdir)/Makefile.in -dist_config_DATA = \ - apps_groups.conf \ - charts.d.conf \ - fping.conf \ - node.d.conf \ - python.d.conf \ - health_alarm_notify.conf \ - health_email_recipients.conf \ - stream.conf \ - $(NULL) - -nodeconfigdir = $(configdir)/node.d -dist_nodeconfig_DATA = \ - node.d/README.md \ - node.d/fronius.conf.md \ - node.d/named.conf.md \ - node.d/sma_webbox.conf.md \ - node.d/snmp.conf.md \ - node.d/stiebeleltron.conf.md \ - $(NULL) - -pythonconfigdir = $(configdir)/python.d -dist_pythonconfig_DATA = \ - python.d/apache.conf \ - python.d/beanstalk.conf \ - python.d/bind_rndc.conf \ - python.d/ceph.conf \ - python.d/chrony.conf \ - python.d/couchdb.conf \ - python.d/cpufreq.conf \ - python.d/dns_query_time.conf \ - python.d/dnsdist.conf \ - python.d/dovecot.conf \ - python.d/elasticsearch.conf \ - python.d/example.conf \ - python.d/exim.conf \ - python.d/fail2ban.conf \ - python.d/freeradius.conf \ - python.d/go_expvar.conf \ - python.d/haproxy.conf \ - python.d/hddtemp.conf \ - python.d/httpcheck.conf \ - python.d/icecast.conf \ - python.d/ipfs.conf \ - python.d/isc_dhcpd.conf \ - python.d/mdstat.conf \ - python.d/memcached.conf \ - python.d/mongodb.conf \ - python.d/mysql.conf \ - python.d/nginx.conf \ - python.d/nginx_plus.conf \ - python.d/nsd.conf \ - python.d/ntpd.conf \ - python.d/ovpn_status_log.conf \ - python.d/phpfpm.conf \ - python.d/portcheck.conf \ - python.d/postfix.conf \ - python.d/postgres.conf \ - python.d/powerdns.conf \ - python.d/rabbitmq.conf \ - python.d/redis.conf \ - python.d/retroshare.conf \ - python.d/samba.conf \ - python.d/sensors.conf \ - python.d/springboot.conf \ - python.d/squid.conf \ - python.d/smartd_log.conf \ - python.d/tomcat.conf \ - python.d/traefik.conf \ - python.d/varnish.conf \ - python.d/web_log.conf \ - $(NULL) - -healthconfigdir = $(configdir)/health.d -dist_healthconfig_DATA = \ - health.d/apache.conf \ - health.d/backend.conf \ - health.d/beanstalkd.conf \ - health.d/bind_rndc.conf \ - health.d/btrfs.conf \ - health.d/ceph.conf \ - health.d/cpu.conf \ - health.d/couchdb.conf \ - health.d/disks.conf \ - health.d/elasticsearch.conf \ - health.d/entropy.conf \ - health.d/fping.conf \ - health.d/fronius.conf \ - health.d/haproxy.conf \ - health.d/httpcheck.conf \ - 
health.d/ipc.conf \ - health.d/ipfs.conf \ - health.d/ipmi.conf \ - health.d/isc_dhcpd.conf \ - health.d/lighttpd.conf \ - health.d/mdstat.conf \ - health.d/memcached.conf \ - health.d/memory.conf \ - health.d/mongodb.conf \ - health.d/mysql.conf \ - health.d/named.conf \ - health.d/net.conf \ - health.d/netfilter.conf \ - health.d/nginx.conf \ - health.d/nginx_plus.conf \ - health.d/portcheck.conf \ - health.d/postgres.conf \ - health.d/qos.conf \ - health.d/ram.conf \ - health.d/redis.conf \ - health.d/retroshare.conf \ - health.d/softnet.conf \ - health.d/squid.conf \ - health.d/stiebeleltron.conf \ - health.d/swap.conf \ - health.d/tcp_conn.conf \ - health.d/tcp_listen.conf \ - health.d/tcp_mem.conf \ - health.d/tcp_orphans.conf \ - health.d/tcp_resets.conf \ - health.d/udp_errors.conf \ - health.d/varnish.conf \ - health.d/web_log.conf \ - health.d/zfs.conf \ - $(NULL) - -chartsconfigdir = $(configdir)/charts.d -dist_chartsconfig_DATA = \ - charts.d/apache.conf \ - charts.d/apcupsd.conf \ - charts.d/cpufreq.conf \ - charts.d/exim.conf \ - charts.d/libreswan.conf \ - charts.d/load_average.conf \ - charts.d/mysql.conf \ - charts.d/nut.conf \ - charts.d/phpfpm.conf \ - charts.d/sensors.conf \ - charts.d/tomcat.conf \ - charts.d/ap.conf \ - charts.d/cpu_apps.conf \ - charts.d/example.conf \ - charts.d/hddtemp.conf \ - charts.d/mem_apps.conf \ - charts.d/nginx.conf \ - charts.d/opensips.conf \ - charts.d/postfix.conf \ - charts.d/squid.conf \ - $(NULL) - -statsdconfigdir = $(configdir)/statsd.d -dist_statsdconfig_DATA = \ - statsd.d/example.conf \ - $(NULL) - -all: all-am - -.SUFFIXES: -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ - && { if test -f $@; then exit 0; else break; fi; }; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu conf.d/Makefile'; \ - $(am__cd) $(top_srcdir) && \ - $(AUTOMAKE) --gnu conf.d/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' 
in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(am__aclocal_m4_deps): -install-dist_chartsconfigDATA: $(dist_chartsconfig_DATA) - @$(NORMAL_INSTALL) - @list='$(dist_chartsconfig_DATA)'; test -n "$(chartsconfigdir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(chartsconfigdir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(chartsconfigdir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - echo "$$d$$p"; \ - done | $(am__base_list) | \ - while read files; do \ - echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(chartsconfigdir)'"; \ - $(INSTALL_DATA) $$files "$(DESTDIR)$(chartsconfigdir)" || exit $$?; \ - done - -uninstall-dist_chartsconfigDATA: - @$(NORMAL_UNINSTALL) - @list='$(dist_chartsconfig_DATA)'; test -n "$(chartsconfigdir)" || list=; \ - files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ - dir='$(DESTDIR)$(chartsconfigdir)'; $(am__uninstall_files_from_dir) -install-dist_configDATA: $(dist_config_DATA) - @$(NORMAL_INSTALL) - @list='$(dist_config_DATA)'; test -n "$(configdir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(configdir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(configdir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - echo "$$d$$p"; \ - done | $(am__base_list) | \ - while read files; do \ - echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(configdir)'"; \ - $(INSTALL_DATA) $$files "$(DESTDIR)$(configdir)" || exit $$?; \ - done - -uninstall-dist_configDATA: - @$(NORMAL_UNINSTALL) - @list='$(dist_config_DATA)'; test -n "$(configdir)" || list=; \ - files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ - dir='$(DESTDIR)$(configdir)'; $(am__uninstall_files_from_dir) -install-dist_healthconfigDATA: $(dist_healthconfig_DATA) - @$(NORMAL_INSTALL) - @list='$(dist_healthconfig_DATA)'; test -n "$(healthconfigdir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(healthconfigdir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(healthconfigdir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - echo "$$d$$p"; \ - done | $(am__base_list) | \ - while read files; do \ - echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(healthconfigdir)'"; \ - $(INSTALL_DATA) $$files "$(DESTDIR)$(healthconfigdir)" || exit $$?; \ - done - -uninstall-dist_healthconfigDATA: - @$(NORMAL_UNINSTALL) - @list='$(dist_healthconfig_DATA)'; test -n "$(healthconfigdir)" || list=; \ - files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ - dir='$(DESTDIR)$(healthconfigdir)'; $(am__uninstall_files_from_dir) -install-dist_nodeconfigDATA: $(dist_nodeconfig_DATA) - @$(NORMAL_INSTALL) - @list='$(dist_nodeconfig_DATA)'; test -n "$(nodeconfigdir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(nodeconfigdir)'"; \ - $(MKDIR_P) 
"$(DESTDIR)$(nodeconfigdir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - echo "$$d$$p"; \ - done | $(am__base_list) | \ - while read files; do \ - echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(nodeconfigdir)'"; \ - $(INSTALL_DATA) $$files "$(DESTDIR)$(nodeconfigdir)" || exit $$?; \ - done - -uninstall-dist_nodeconfigDATA: - @$(NORMAL_UNINSTALL) - @list='$(dist_nodeconfig_DATA)'; test -n "$(nodeconfigdir)" || list=; \ - files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ - dir='$(DESTDIR)$(nodeconfigdir)'; $(am__uninstall_files_from_dir) -install-dist_pythonconfigDATA: $(dist_pythonconfig_DATA) - @$(NORMAL_INSTALL) - @list='$(dist_pythonconfig_DATA)'; test -n "$(pythonconfigdir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(pythonconfigdir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(pythonconfigdir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - echo "$$d$$p"; \ - done | $(am__base_list) | \ - while read files; do \ - echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pythonconfigdir)'"; \ - $(INSTALL_DATA) $$files "$(DESTDIR)$(pythonconfigdir)" || exit $$?; \ - done - -uninstall-dist_pythonconfigDATA: - @$(NORMAL_UNINSTALL) - @list='$(dist_pythonconfig_DATA)'; test -n "$(pythonconfigdir)" || list=; \ - files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ - dir='$(DESTDIR)$(pythonconfigdir)'; $(am__uninstall_files_from_dir) -install-dist_statsdconfigDATA: $(dist_statsdconfig_DATA) - @$(NORMAL_INSTALL) - @list='$(dist_statsdconfig_DATA)'; test -n "$(statsdconfigdir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(statsdconfigdir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(statsdconfigdir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - echo "$$d$$p"; \ - done | $(am__base_list) | \ - while read files; do \ - echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(statsdconfigdir)'"; \ - $(INSTALL_DATA) $$files "$(DESTDIR)$(statsdconfigdir)" || exit $$?; \ - done - -uninstall-dist_statsdconfigDATA: - @$(NORMAL_UNINSTALL) - @list='$(dist_statsdconfig_DATA)'; test -n "$(statsdconfigdir)" || list=; \ - files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ - dir='$(DESTDIR)$(statsdconfigdir)'; $(am__uninstall_files_from_dir) -tags TAGS: - -ctags CTAGS: - -cscope cscopelist: - - -distdir: $(DISTFILES) - @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - list='$(DISTFILES)'; \ - dist_files=`for file in $$list; do echo $$file; done | \ - sed -e "s|^$$srcdirstrip/||;t" \ - -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ - case $$dist_files in \ - */*) $(MKDIR_P) `echo "$$dist_files" | \ - sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ - sort -u` ;; \ - esac; \ - for file in $$dist_files; do \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - if test -d $$d/$$file; then \ - dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test -d "$(distdir)/$$file"; then \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ - find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ - else \ - test -f "$(distdir)/$$file" \ - || cp -p $$d/$$file "$(distdir)/$$file" \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-am -all-am: Makefile $(DATA) -installdirs: - for dir in "$(DESTDIR)$(chartsconfigdir)" "$(DESTDIR)$(configdir)" "$(DESTDIR)$(healthconfigdir)" "$(DESTDIR)$(nodeconfigdir)" "$(DESTDIR)$(pythonconfigdir)" "$(DESTDIR)$(statsdconfigdir)"; do \ - test -z "$$dir" || $(MKDIR_P) "$$dir"; \ - done -install: install-am -install-exec: install-exec-am -install-data: install-data-am -uninstall: uninstall-am - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-am -install-strip: - if test -z '$(STRIP)'; then \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - install; \ - else \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ - fi -mostlyclean-generic: - -clean-generic: - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." - -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) -clean: clean-am - -clean-am: clean-generic mostlyclean-am - -distclean: distclean-am - -rm -f Makefile -distclean-am: clean-am distclean-generic - -dvi: dvi-am - -dvi-am: - -html: html-am - -html-am: - -info: info-am - -info-am: - -install-data-am: install-dist_chartsconfigDATA install-dist_configDATA \ - install-dist_healthconfigDATA install-dist_nodeconfigDATA \ - install-dist_pythonconfigDATA install-dist_statsdconfigDATA - -install-dvi: install-dvi-am - -install-dvi-am: - -install-exec-am: - -install-html: install-html-am - -install-html-am: - -install-info: install-info-am - -install-info-am: - -install-man: - -install-pdf: install-pdf-am - -install-pdf-am: - -install-ps: install-ps-am - -install-ps-am: - -installcheck-am: - -maintainer-clean: maintainer-clean-am - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-am - -mostlyclean-am: mostlyclean-generic - -pdf: pdf-am - -pdf-am: - -ps: ps-am - -ps-am: - -uninstall-am: uninstall-dist_chartsconfigDATA \ - uninstall-dist_configDATA uninstall-dist_healthconfigDATA \ - uninstall-dist_nodeconfigDATA uninstall-dist_pythonconfigDATA \ - uninstall-dist_statsdconfigDATA - -.MAKE: install-am install-strip - -.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ - ctags-am distclean distclean-generic distdir dvi dvi-am html \ - html-am info info-am install install-am install-data \ - install-data-am install-dist_chartsconfigDATA \ - install-dist_configDATA install-dist_healthconfigDATA \ - install-dist_nodeconfigDATA install-dist_pythonconfigDATA \ - install-dist_statsdconfigDATA install-dvi install-dvi-am \ - install-exec install-exec-am install-html install-html-am \ - install-info install-info-am install-man install-pdf \ - install-pdf-am install-ps install-ps-am install-strip \ - installcheck installcheck-am installdirs maintainer-clean \ - maintainer-clean-generic 
mostlyclean mostlyclean-generic pdf \ - pdf-am ps ps-am tags-am uninstall uninstall-am \ - uninstall-dist_chartsconfigDATA uninstall-dist_configDATA \ - uninstall-dist_healthconfigDATA uninstall-dist_nodeconfigDATA \ - uninstall-dist_pythonconfigDATA \ - uninstall-dist_statsdconfigDATA - - -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. -.NOEXPORT: diff --git a/conf.d/apps_groups.conf b/conf.d/apps_groups.conf deleted file mode 100644 index 4356e4910..000000000 --- a/conf.d/apps_groups.conf +++ /dev/null @@ -1,282 +0,0 @@ -# -# apps.plugin process grouping -# -# The apps.plugin displays charts with information about the processes running. -# This config allows grouping processes together, so that several processes -# will be reported as one. -# -# Only groups in this file are reported. All other processes will be reported -# as 'other'. -# -# For each process given, its whole process tree will be grouped, not just -# the process matched. The plugin will include both parents and childs. -# -# The format is: -# -# group: process1 process2 process3 ... -# -# Each group can be given multiple times, to add more processes to it. -# -# The process names are the ones returned by: -# -# - ps -e or /proc/PID/stat -# - in case of substring mode (see below): /proc/PID/cmdline -# -# To add process names with spaces, enclose them in quotes (single or double) -# example: 'Plex Media Serv' "my other process". -# -# Wildcard support: -# You can add an asterisk (*) at the beginning and/or the end of a process: -# -# *name suffix mode: will search for processes ending with 'name' -# (/proc/PID/stat) -# -# name* prefix mode: will search for processes beginning with 'name' -# (/proc/PID/stat) -# -# *name* substring mode: will search for 'name' in the whole command line -# (/proc/PID/cmdline) -# -# If you enter even just one *name* (substring), apps.plugin will process -# /proc/PID/cmdline for all processes, just once (when they are first seen). -# -# To add processes with single quotes, enclose them in double quotes -# example: "process with this ' single quote" -# -# To add processes with double quotes, enclose them in single quotes: -# example: 'process with this " double quote' -# -# If a group or process name starts with a -, the dimension will be hidden -# (cpu chart only). -# -# If a process starts with a +, debugging will be enabled for it -# (debugging produces a lot of output - do not enable it in production systems) -# -# You can add any number of groups you like. Only the ones found running will -# affect the charts generated. However, producing charts with hundreds of -# dimensions may slow down your web browser. -# -# The order of the entries in this list is important: the first that matches -# a process is used, so put important ones at the top. Processes not matched -# by any row, will inherit it from their parents or children. -# -# The order also controls the order of the dimensions on the generated charts -# (although applications started after apps.plugin is started, will be appended -# to the existing list of dimensions the netdata daemon maintains). 
- -# ----------------------------------------------------------------------------- -# NETDATA processes accounting - -# netdata main process -netdata: netdata - -# netdata known plugins -# plugins not defined here will be accumulated in netdata, above -apps.plugin: apps.plugin -freeipmi.plugin: freeipmi.plugin -charts.d.plugin: *charts.d.plugin* -node.d.plugin: *node.d.plugin* -python.d.plugin: *python.d.plugin* -tc-qos-helper: *tc-qos-helper.sh* -fping: fping - -# ----------------------------------------------------------------------------- -# authentication/authorization related servers - -auth: radius* openldap* ldap* -fail2ban: fail2ban* - -# ----------------------------------------------------------------------------- -# web/ftp servers - -httpd: apache* httpd nginx* lighttpd -proxy: squid* c-icap squidGuard varnish* -php: php* -ftpd: proftpd in.tftpd vsftpd -uwsgi: uwsgi -unicorn: *unicorn* -puma: *puma* - -# ----------------------------------------------------------------------------- -# database servers - -sql: mysqld* mariad* postgres* postmaster* oracle_* ora_* -nosql: mongod redis* memcached *couchdb* -timedb: prometheus *carbon-cache.py* *carbon-aggregator.py* *graphite/manage.py* *net.opentsdb.tools.TSDMain* - -# ----------------------------------------------------------------------------- -# email servers - -email: dovecot imapd pop3d amavis* master zmstat* zmmailboxdmgr qmgr oqmgr saslauthd opendkim clamd freshclam unbound tlsmgr postfwd2 postscreen postfix smtp* lmtp* - -# ----------------------------------------------------------------------------- -# network, routing, VPN - -ppp: ppp* -vpn: openvpn pptp* cjdroute gvpe tincd -wifi: hostapd wpa_supplicant -routing: ospfd* ospf6d* bgpd isisd ripd ripngd pimd ldpd zebra vtysh bird* - -# ----------------------------------------------------------------------------- -# high availability and balancers - -camo: *camo* -balancer: ipvs_* haproxy -ha: corosync hs_logd ha_logd stonithd pacemakerd lrmd crmd - -# ----------------------------------------------------------------------------- -# telephony - -pbx: asterisk safe_asterisk *vicidial* -sip: opensips* stund - -# ----------------------------------------------------------------------------- -# chat - -chat: irssi *vines* *prosody* murmurd - -# ----------------------------------------------------------------------------- -# monitoring - -logs: ulogd* syslog* rsyslog* logrotate systemd-journald rotatelogs -nms: snmpd vnstatd smokeping zabbix* monit munin* mon openhpid watchdog tailon nrpe -splunk: splunkd -azure: mdsd *waagent* *omiserver* *omiagent* hv_kvp_daemon hv_vss_daemon *auoms* *omsagent* - -# ----------------------------------------------------------------------------- -# storage, file systems and file servers - -ceph: ceph-mds ceph-mgr ceph-mon ceph-osd radosgw* rbd-* -samba: smbd nmbd winbindd -nfs: rpcbind rpc.* nfs* -zfs: spl_* z_* txg_* zil_* arc_* l2arc* -btrfs: btrfs* -iscsi: iscsid iscsi_eh - -# ----------------------------------------------------------------------------- -# containers & virtual machines - -containers: lxc* docker* -VMs: vbox* VBox* qemu* - -# ----------------------------------------------------------------------------- -# ssh servers and clients - -ssh: ssh* scp - -# ----------------------------------------------------------------------------- -# print servers and clients - -print: cups* lpd lpq - -# ----------------------------------------------------------------------------- -# time servers and clients - -time: ntp* systemd-timesyncd - -# 
----------------------------------------------------------------------------- -# dhcp servers and clients - -dhcp: *dhcp* - -# ----------------------------------------------------------------------------- -# name servers and clients - -named: named rncd dig -dnsdist: dnsdist - -# ----------------------------------------------------------------------------- -# installation / compilation / debugging - -build: cc1 cc1plus as gcc* cppcheck ld make cmake automake autoconf autoreconf -build: git gdb valgrind* - -# ----------------------------------------------------------------------------- -# antivirus - -antivirus: clam* *clam - -# ----------------------------------------------------------------------------- -# torrent clients - -torrents: *deluge* transmission* *SickBeard* *CouchPotato* *rtorrent* - -# ----------------------------------------------------------------------------- -# backup servers and clients - -backup: rsync bacula* - -# ----------------------------------------------------------------------------- -# cron - -cron: cron* atd anacron systemd-cron* - -# ----------------------------------------------------------------------------- -# UPS - -ups: upsmon upsd */nut/* - -# ----------------------------------------------------------------------------- -# media players, servers, clients - -media: mplayer vlc xine mediatomb omxplayer* kodi* xbmc* mediacenter eventlircd -media: mpd minidlnad mt-daapd avahi* Plex* - -# ----------------------------------------------------------------------------- -# java applications - -hdfsdatanode: *org.apache.hadoop.hdfs.server.datanode.DataNode* -hdfsnamenode: *org.apache.hadoop.hdfs.server.namenode.NameNode* -hdfsjournalnode: *org.apache.hadoop.hdfs.qjournal.server.JournalNode* -hdfszkfc: *org.apache.hadoop.hdfs.tools.DFSZKFailoverController* - -yarnnode: *org.apache.hadoop.yarn.server.nodemanager.NodeManager* -yarnmgr: *org.apache.hadoop.yarn.server.resourcemanager.ResourceManager* -yarnproxy: *org.apache.hadoop.yarn.server.webproxy.WebAppProxyServer* - -sparkworker: *org.apache.spark.deploy.worker.Worker* -sparkmaster: *org.apache.spark.deploy.master.Master* - -hbaseregion: *org.apache.hadoop.hbase.regionserver.HRegionServer* -hbaserest: *org.apache.hadoop.hbase.rest.RESTServer* -hbasethrift: *org.apache.hadoop.hbase.thrift.ThriftServer* -hbasemaster: *org.apache.hadoop.hbase.master.HMaster* - -zookeeper: *org.apache.zookeeper.server.quorum.QuorumPeerMain* - -hive2: *org.apache.hive.service.server.HiveServer2* -hivemetastore: *org.apache.hadoop.hive.metastore.HiveMetaStore* - -solr: *solr.install.dir* - -airflow: *airflow* - -# ----------------------------------------------------------------------------- -# X - -X: X Xorg xinit lightdm xdm pulseaudio gkrellm xfwm4 xfdesktop xfce* Thunar -X: xfsettingsd xfconfd gnome-* gdm gconf* dconf* xfconf* *gvfs gvfs* kdm slim -X: evolution-* firefox chromium opera vivaldi-bin epiphany WebKit* - -# ----------------------------------------------------------------------------- -# Kernel / System - -ksmd: ksmd - -system: systemd* udisks* udevd* *udevd connmand ipv6_addrconf dbus-* rtkit* -system: inetd xinetd mdadm polkitd acpid uuidd packagekitd upowerd colord -system: accounts-daemon rngd haveged - -kernel: kthreadd kauditd lockd khelper kdevtmpfs khungtaskd rpciod -kernel: fsnotify_mark kthrotld deferwq scsi_* - -# ----------------------------------------------------------------------------- -# other application servers - -kafka: *kafka.Kafka* - -rabbitmq: *rabbitmq* - -sidekiq: *sidekiq* -java: java -ipfs: ipfs 
diff --git a/conf.d/charts.d.conf b/conf.d/charts.d.conf deleted file mode 100644 index acb2a6fae..000000000 --- a/conf.d/charts.d.conf +++ /dev/null @@ -1,63 +0,0 @@ -# This is the configuration for charts.d.plugin - -# Each of its collectors can read configuration eiher from this file -# or a NAME.conf file (where NAME is the collector name). -# The collector specific file has higher precedence. - -# This file is a shell script too. - -# ----------------------------------------------------------------------------- - -# number of seconds to run without restart -# after this time, charts.d.plugin will exit -# netdata will restart it, but a small gap -# will appear in the charts.d.plugin charts. -#restart_timeout=$[3600 * 4] - -# when making iterations, charts.d can loop more frequently -# to prevent plugins missing iterations. -# this is a percentage relative to update_every to align its -# iterations. -# The minimum is 10%, the maximum 100%. -# So, if update_every is 1 second and time_divisor is 50, -# charts.d will iterate every 500ms. -# Charts will be called to collect data only if the time -# passed since the last time the collected data is equal or -# above their update_every. -#time_divisor=50 - -# ----------------------------------------------------------------------------- - -# the default enable/disable for all charts.d collectors -# the default is "yes" -# enable_all_charts="yes" - -# BY DEFAULT ENABLED MODULES -# ap=yes -# nut=yes -# opensips=yes - -# ----------------------------------------------------------------------------- -# THESE NEED TO BE SET TO "force" TO BE ENABLED - -# Nothing useful. -# Just an example charts.d plugin you can use as a template. -# example=force - -# OLD MODULES THAT ARE NOW SERVED BY python.d.plugin -# apache=force -# cpufreq=force -# exim=force -# hddtemp=force -# mysql=force -# nginx=force -# phpfpm=force -# postfix=force -# sensors=force -# squid=force -# tomcat=force - -# OLD MODULES THAT ARE NOW SERVED BY NETDATA DAEMON -# cpu_apps=force -# mem_apps=force -# load_average=force diff --git a/conf.d/charts.d/ap.conf b/conf.d/charts.d/ap.conf deleted file mode 100644 index 38fc157ce..000000000 --- a/conf.d/charts.d/ap.conf +++ /dev/null @@ -1,23 +0,0 @@ -# no need for shebang - this file is loaded from charts.d.plugin - -# netdata -# real-time performance and health monitoring, done right! -# (C) 2018 Costa Tsaousis <costa@tsaousis.gr> -# GPL v3+ - -# nothing fancy to configure. -# this module will run -# iw dev - to find wireless devices in AP mode -# iw ${dev} station dump - to get connected clients -# based on the above, it generates several charts - -# the data collection frequency -# if unset, will inherit the netdata update frequency -#ap_update_every= - -# the charts priority on the dashboard -#ap_priority=6900 - -# the number of retries to do in case of failure -# before disabling the module -#ap_retries=10 diff --git a/conf.d/charts.d/apache.conf b/conf.d/charts.d/apache.conf deleted file mode 100644 index 50914cf32..000000000 --- a/conf.d/charts.d/apache.conf +++ /dev/null @@ -1,30 +0,0 @@ -# no need for shebang - this file is loaded from charts.d.plugin - -# netdata -# real-time performance and health monitoring, done right! 
-# (C) 2018 Costa Tsaousis <costa@tsaousis.gr> -# GPL v3+ - -# THIS PLUGIN IS DEPRECATED -# USE THE PYTHON.D ONE - -# the URL to download apache status info -#apache_url="http://127.0.0.1:80/server-status?auto" -#apache_curl_opts= - -# convert apache floating point values -# to integer using this multiplier -# this only affects precision - the values -# will be in the proper units -#apache_decimal_detail=1000000 - -# the data collection frequency -# if unset, will inherit the netdata update frequency -#apache_update_every= - -# the charts priority on the dashboard -#apache_priority=60000 - -# the number of retries to do in case of failure -# before disabling the module -#apache_retries=10 diff --git a/conf.d/charts.d/apcupsd.conf b/conf.d/charts.d/apcupsd.conf deleted file mode 100644 index 679c0d61b..000000000 --- a/conf.d/charts.d/apcupsd.conf +++ /dev/null @@ -1,25 +0,0 @@ -# no need for shebang - this file is loaded from charts.d.plugin - -# netdata -# real-time performance and health monitoring, done right! -# (C) 2018 Costa Tsaousis <costa@tsaousis.gr> -# GPL v3+ - -# add all your APC UPSes in this array - uncomment it too -#declare -A apcupsd_sources=( -# ["local"]="127.0.0.1:3551" -#) - -# how long to wait for apcupsd to respond -#apcupsd_timeout=3 - -# the data collection frequency -# if unset, will inherit the netdata update frequency -#apcupsd_update_every=10 - -# the charts priority on the dashboard -#apcupsd_priority=90000 - -# the number of retries to do in case of failure -# before disabling the module -#apcupsd_retries=10 diff --git a/conf.d/charts.d/cpu_apps.conf b/conf.d/charts.d/cpu_apps.conf deleted file mode 100644 index 850cd0c6f..000000000 --- a/conf.d/charts.d/cpu_apps.conf +++ /dev/null @@ -1,19 +0,0 @@ -# no need for shebang - this file is loaded from charts.d.plugin - -# netdata -# real-time performance and health monitoring, done right! -# (C) 2018 Costa Tsaousis <costa@tsaousis.gr> -# GPL v3+ - -# THIS PLUGIN IS DEPRECATED -# app.plugin can do better - -#cpu_apps_apps= - -# the data collection frequency -# if unset, will inherit the netdata update frequency -#cpu_apps_update_every=2 - -# the number of retries to do in case of failure -# before disabling the module -#cpu_apps_retries=10 diff --git a/conf.d/charts.d/cpufreq.conf b/conf.d/charts.d/cpufreq.conf deleted file mode 100644 index 7130555af..000000000 --- a/conf.d/charts.d/cpufreq.conf +++ /dev/null @@ -1,24 +0,0 @@ -# no need for shebang - this file is loaded from charts.d.plugin - -# netdata -# real-time performance and health monitoring, done right! -# (C) 2018 Costa Tsaousis <costa@tsaousis.gr> -# GPL v3+ - -# THIS PLUGIN IS DEPRECATED -# USE THE PYTHON.D ONE - -#cpufreq_sys_dir="/sys/devices" -#cpufreq_sys_depth=10 -#cpufreq_source_update=1 - -# the data collection frequency -# if unset, will inherit the netdata update frequency -#cpufreq_update_every= - -# the charts priority on the dashboard -#cpufreq_priority=10000 - -# the number of retries to do in case of failure -# before disabling the module -#cpufreq_retries=10 diff --git a/conf.d/charts.d/example.conf b/conf.d/charts.d/example.conf deleted file mode 100644 index 6232ca584..000000000 --- a/conf.d/charts.d/example.conf +++ /dev/null @@ -1,21 +0,0 @@ -# no need for shebang - this file is loaded from charts.d.plugin - -# netdata -# real-time performance and health monitoring, done right! 
-# (C) 2018 Costa Tsaousis <costa@tsaousis.gr> -# GPL v3+ - -# to enable this chart, you have to set this to 12345 -# (just a demonstration for something that needs to be checked) -#example_magic_number=12345 - -# the data collection frequency -# if unset, will inherit the netdata update frequency -#example_update_every= - -# the charts priority on the dashboard -#example_priority=150000 - -# the number of retries to do in case of failure -# before disabling the module -#example_retries=10 diff --git a/conf.d/charts.d/exim.conf b/conf.d/charts.d/exim.conf deleted file mode 100644 index f96ac4dbb..000000000 --- a/conf.d/charts.d/exim.conf +++ /dev/null @@ -1,24 +0,0 @@ -# no need for shebang - this file is loaded from charts.d.plugin - -# netdata -# real-time performance and health monitoring, done right! -# (C) 2018 Costa Tsaousis <costa@tsaousis.gr> -# GPL v3+ - -# THIS PLUGIN IS DEPRECATED -# USE THE PYTHON.D ONE - -# the exim command to run -# if empty, it will use the one found in the system path -#exim_command= - -# the data collection frequency -# if unset, will inherit the netdata update frequency -#exim_update_every=5 - -# the charts priority on the dashboard -#exim_priority=60000 - -# the number of retries to do in case of failure -# before disabling the module -#exim_retries=10 diff --git a/conf.d/charts.d/hddtemp.conf b/conf.d/charts.d/hddtemp.conf deleted file mode 100644 index b6037b40e..000000000 --- a/conf.d/charts.d/hddtemp.conf +++ /dev/null @@ -1,23 +0,0 @@ -# no need for shebang - this file is loaded from charts.d.plugin - -# netdata -# real-time performance and health monitoring, done right! -# (C) 2018 Costa Tsaousis <costa@tsaousis.gr> -# GPL v3+ - -# THIS PLUGIN IS DEPRECATED -# USE THE PYTHON.D ONE - -#hddtemp_host="localhost" -#hddtemp_port="7634" - -# the data collection frequency -# if unset, will inherit the netdata update frequency -#hddtemp_update_every=3 - -# the charts priority on the dashboard -#hddtemp_priority=90000 - -# the number of retries to do in case of failure -# before disabling the module -#hddtemp_retries=10 diff --git a/conf.d/charts.d/libreswan.conf b/conf.d/charts.d/libreswan.conf deleted file mode 100644 index 9b3ee77b7..000000000 --- a/conf.d/charts.d/libreswan.conf +++ /dev/null @@ -1,29 +0,0 @@ -# no need for shebang - this file is loaded from charts.d.plugin - -# netdata -# real-time performance and health monitoring, done right! -# (C) 2018 Costa Tsaousis <costa@tsaousis.gr> -# GPL v3+ -# - -# the data collection frequency -# if unset, will inherit the netdata update frequency -#libreswan_update_every=1 - -# the charts priority on the dashboard -#libreswan_priority=90000 - -# the number of retries to do in case of failure -# before disabling the module -#libreswan_retries=10 - -# set to 1, to run ipsec with sudo (the default) -# set to 0, to run ipsec without sudo -#libreswan_sudo=1 - -# TO ALLOW NETDATA RUN ipsec AS ROOT -# CREATE THE FILE: /etc/sudoers.d/netdata -# WITH THESE 2 LINES (uncommented of course): -# -# netdata ALL = (root) NOPASSWD: /sbin/ipsec whack --status -# netdata ALL = (root) NOPASSWD: /sbin/ipsec whack --trafficstatus diff --git a/conf.d/charts.d/load_average.conf b/conf.d/charts.d/load_average.conf deleted file mode 100644 index 68979275f..000000000 --- a/conf.d/charts.d/load_average.conf +++ /dev/null @@ -1,22 +0,0 @@ -# no need for shebang - this file is loaded from charts.d.plugin - -# netdata -# real-time performance and health monitoring, done right! 
-# (C) 2018 Costa Tsaousis <costa@tsaousis.gr> -# GPL v3+ - -# THIS PLUGIN IS DEPRECATED -# netdata can collect this metric already - -#load_average_enabled=0 - -# the data collection frequency -# if unset, will inherit the netdata update frequency -#load_average_update_every=5 - -# the charts priority on the dashboard -#load_average_priority=100 - -# the number of retries to do in case of failure -# before disabling the module -#load_average_retries=10 diff --git a/conf.d/charts.d/mem_apps.conf b/conf.d/charts.d/mem_apps.conf deleted file mode 100644 index 75d24dc3e..000000000 --- a/conf.d/charts.d/mem_apps.conf +++ /dev/null @@ -1,19 +0,0 @@ -# no need for shebang - this file is loaded from charts.d.plugin - -# netdata -# real-time performance and health monitoring, done right! -# (C) 2018 Costa Tsaousis <costa@tsaousis.gr> -# GPL v3+ - -# THIS PLUGIN IS DEPRECATED -# app.plugin can do better - -#mem_apps_apps= - -# the data collection frequency -# if unset, will inherit the netdata update frequency -#mem_apps_update_every=2 - -# the number of retries to do in case of failure -# before disabling the module -#mem_apps_retries=10 diff --git a/conf.d/charts.d/mysql.conf b/conf.d/charts.d/mysql.conf deleted file mode 100644 index 683e4af35..000000000 --- a/conf.d/charts.d/mysql.conf +++ /dev/null @@ -1,23 +0,0 @@ -# no need for shebang - this file is loaded from charts.d.plugin - -# netdata -# real-time performance and health monitoring, done right! -# (C) 2018 Costa Tsaousis <costa@tsaousis.gr> -# GPL v3+ - -# THIS PLUGIN IS DEPRECATED -# USE THE PYTHON.D ONE - -#mysql_cmds[name]="" -#mysql_opts[name]="" - -# the data collection frequency -# if unset, will inherit the netdata update frequency -#mysql_update_every=2 - -# the charts priority on the dashboard -#mysql_priority=60000 - -# the number of retries to do in case of failure -# before disabling the module -#mysql_retries=10 diff --git a/conf.d/charts.d/nginx.conf b/conf.d/charts.d/nginx.conf deleted file mode 100644 index c46100a58..000000000 --- a/conf.d/charts.d/nginx.conf +++ /dev/null @@ -1,23 +0,0 @@ -# no need for shebang - this file is loaded from charts.d.plugin - -# netdata -# real-time performance and health monitoring, done right! -# (C) 2018 Costa Tsaousis <costa@tsaousis.gr> -# GPL v3+ - -# THIS PLUGIN IS DEPRECATED -# USE THE PYTHON.D ONE - -#nginx_url="http://127.0.0.1:80/stub_status" -#nginx_curl_opts="" - -# the data collection frequency -# if unset, will inherit the netdata update frequency -#nginx_update_every= - -# the charts priority on the dashboard -#nginx_priority=60000 - -# the number of retries to do in case of failure -# before disabling the module -#nginx_retries=10 diff --git a/conf.d/charts.d/nut.conf b/conf.d/charts.d/nut.conf deleted file mode 100644 index d477ddd34..000000000 --- a/conf.d/charts.d/nut.conf +++ /dev/null @@ -1,28 +0,0 @@ -# no need for shebang - this file is loaded from charts.d.plugin - -# netdata -# real-time performance and health monitoring, done right! 
-# (C) 2018 Costa Tsaousis <costa@tsaousis.gr> -# GPL v3+ - -# a space separated list of UPS names -# if empty, the list returned by 'upsc -l' will be used -#nut_ups= - -# how much time in seconds, to wait for nut to respond -#nut_timeout=2 - -# set this to 1, to enable another chart showing the number -# of UPS clients connected to upsd -#nut_clients_chart=1 - -# the data collection frequency -# if unset, will inherit the netdata update frequency -#nut_update_every=2 - -# the charts priority on the dashboard -#nut_priority=90000 - -# the number of retries to do in case of failure -# before disabling the module -#nut_retries=10 diff --git a/conf.d/charts.d/opensips.conf b/conf.d/charts.d/opensips.conf deleted file mode 100644 index e25111dce..000000000 --- a/conf.d/charts.d/opensips.conf +++ /dev/null @@ -1,21 +0,0 @@ -# no need for shebang - this file is loaded from charts.d.plugin - -# netdata -# real-time performance and health monitoring, done right! -# (C) 2018 Costa Tsaousis <costa@tsaousis.gr> -# GPL v3+ - -#opensips_opts="fifo get_statistics all" -#opensips_cmd= -#opensips_timeout=2 - -# the data collection frequency -# if unset, will inherit the netdata update frequency -#opensips_update_every=5 - -# the charts priority on the dashboard -#opensips_priority=80000 - -# the number of retries to do in case of failure -# before disabling the module -#opensips_retries=10 diff --git a/conf.d/charts.d/phpfpm.conf b/conf.d/charts.d/phpfpm.conf deleted file mode 100644 index e4dd0231b..000000000 --- a/conf.d/charts.d/phpfpm.conf +++ /dev/null @@ -1,27 +0,0 @@ -# no need for shebang - this file is loaded from charts.d.plugin - -# netdata -# real-time performance and health monitoring, done right! -# (C) 2018 Costa Tsaousis <costa@tsaousis.gr> -# GPL v3+ - -# THIS PLUGIN IS DEPRECATED -# USE THE PYTHON.D ONE - -# first, you need open php-fpm status in php-fpm.conf -# second, you need add status location in nginx.conf -# you can see, https://easyengine.io/tutorials/php/fpm-status-page/ -#phpfpm_urls[name]="" -#phpfpm_curl_opts[name]="" - -# the data collection frequency -# if unset, will inherit the netdata update frequency -#phpfpm_update_every= - -# the charts priority on the dashboard -#phpfpm_priority=60000 - -# the number of retries to do in case of failure -# before disabling the module -#phpfpm_retries=10 - diff --git a/conf.d/charts.d/postfix.conf b/conf.d/charts.d/postfix.conf deleted file mode 100644 index b77817bd6..000000000 --- a/conf.d/charts.d/postfix.conf +++ /dev/null @@ -1,25 +0,0 @@ -# no need for shebang - this file is loaded from charts.d.plugin - -# netdata -# real-time performance and health monitoring, done right! -# (C) 2018 Costa Tsaousis <costa@tsaousis.gr> -# GPL v3+ - -# THIS PLUGIN IS DEPRECATED -# USE THE PYTHON.D ONE - -# the postqueue command -# if empty, it will use the one found in the system path -#postfix_postqueue= - -# the data collection frequency -# if unset, will inherit the netdata update frequency -#postfix_update_every=15 - -# the charts priority on the dashboard -#postfix_priority=60000 - -# the number of retries to do in case of failure -# before disabling the module -#postfix_retries=10 - diff --git a/conf.d/charts.d/sensors.conf b/conf.d/charts.d/sensors.conf deleted file mode 100644 index bcb28807d..000000000 --- a/conf.d/charts.d/sensors.conf +++ /dev/null @@ -1,32 +0,0 @@ -# no need for shebang - this file is loaded from charts.d.plugin - -# netdata -# real-time performance and health monitoring, done right! 
-# (C) 2018 Costa Tsaousis <costa@tsaousis.gr> -# GPL v3+ - -# THIS PLUGIN IS DEPRECATED -# USE THE PYTHON.D ONE - -# the directory the kernel keeps sensor data -#sensors_sys_dir="/sys/devices" - -# how deep in the tree to check for sensor data -#sensors_sys_depth=10 - -# if set to 1, the script will overwrite internal -# script functions with code generated ones -# leave to 1, is faster -#sensors_source_update=1 - -# the data collection frequency -# if unset, will inherit the netdata update frequency -#sensors_update_every= - -# the charts priority on the dashboard -#sensors_priority=90000 - -# the number of retries to do in case of failure -# before disabling the module -#sensors_retries=10 - diff --git a/conf.d/charts.d/squid.conf b/conf.d/charts.d/squid.conf deleted file mode 100644 index 19e928f25..000000000 --- a/conf.d/charts.d/squid.conf +++ /dev/null @@ -1,26 +0,0 @@ -# no need for shebang - this file is loaded from charts.d.plugin - -# netdata -# real-time performance and health monitoring, done right! -# (C) 2018 Costa Tsaousis <costa@tsaousis.gr> -# GPL v3+ - -# THIS PLUGIN IS DEPRECATED -# USE THE PYTHON.D ONE - -#squid_host= -#squid_port= -#squid_url= -#squid_timeout=2 - -# the data collection frequency -# if unset, will inherit the netdata update frequency -#squid_update_every=2 - -# the charts priority on the dashboard -#squid_priority=60000 - -# the number of retries to do in case of failure -# before disabling the module -#squid_retries=10 - diff --git a/conf.d/charts.d/tomcat.conf b/conf.d/charts.d/tomcat.conf deleted file mode 100644 index e9f3eefa9..000000000 --- a/conf.d/charts.d/tomcat.conf +++ /dev/null @@ -1,38 +0,0 @@ -# no need for shebang - this file is loaded from charts.d.plugin - -# netdata -# real-time performance and health monitoring, done right! -# (C) 2018 Costa Tsaousis <costa@tsaousis.gr> -# GPL v3+ - -# THIS PLUGIN IS DEPRECATED -# USE THE PYTHON.D ONE - -# the URL to download tomcat status info -# usually http://localhost:8080/manager/status?XML=true -#tomcat_url="" -#tomcat_curl_opts="" - -# set tomcat username/password here -#tomcat_user="" -#tomcat_password="" - -# the data collection frequency -# if unset, will inherit the netdata update frequency -#tomcat_update_every=1 - -# the charts priority on the dashboard -#tomcat_priority=60000 - -# the number of retries to do in case of failure -# before disabling the module -#tomcat_retries=10 - -# convert tomcat floating point values -# to integer using this multiplier -# this only affects precision - the values -# will be in the proper units -#tomcat_decimal_detail=1000000 - -# used by volume chart to convert bytes to KB -#tomcat_decimal_KB_detail=1000 diff --git a/conf.d/fping.conf b/conf.d/fping.conf deleted file mode 100644 index 63a7f7acd..000000000 --- a/conf.d/fping.conf +++ /dev/null @@ -1,44 +0,0 @@ -# no need for shebang - this file is sourced from fping.plugin - -# fping.plugin requires a recent version of fping. -# -# You can get it on your system, by running: -# -# /usr/libexec/netdata/plugins.d/fping.plugin install - -# ----------------------------------------------------------------------------- -# configuration options - -# The fping binary to use. We need one that can output netdata friendly info -# (supporting: -N). 
If you have multiple versions, put here the full filename -# of the right one - -#fping="/usr/local/bin/fping" - - -# a space separated list of hosts to fping -# we suggest to put names here and the IPs of these names in /etc/hosts - -hosts="" - - -# The update frequency of the chart - the default is inherited from netdata - -#update_every=2 - - -# The time in milliseconds (1 sec = 1000 ms) to ping the hosts -# by default 5 pings per host per iteration -# fping will not allow this to be below 20ms - -#ping_every="200" - - -# other fping options - defaults: -# -R = send packets with random data -# -b 56 = the number of bytes per packet -# -i 1 = 1 ms when sending packets to others hosts (switching hosts) -# -r 0 = never retry packets -# -t 5000 = per packet timeout at 5000 ms - -#fping_opts="-R -b 56 -i 1 -r 0 -t 5000" diff --git a/conf.d/health.d/apache.conf b/conf.d/health.d/apache.conf deleted file mode 100644 index 0c98b8778..000000000 --- a/conf.d/health.d/apache.conf +++ /dev/null @@ -1,14 +0,0 @@ - -# make sure apache is running - -template: apache_last_collected_secs - on: apache.requests - calc: $now - $last_collected_t - units: seconds ago - every: 10s - warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every)) - crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every)) - delay: down 5m multiplier 1.5 max 1h - info: number of seconds since the last successful data collection - to: webmaster - diff --git a/conf.d/health.d/backend.conf b/conf.d/health.d/backend.conf deleted file mode 100644 index 7af100d8f..000000000 --- a/conf.d/health.d/backend.conf +++ /dev/null @@ -1,45 +0,0 @@ - -# make sure we are sending data to backend - - alarm: backend_last_buffering - on: netdata.backend_metrics - calc: $now - $last_collected_t - units: seconds ago - every: 10s - warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every)) - crit: $this > (($status == $CRITICAL) ? 
($update_every) : (60 * $update_every)) - delay: down 5m multiplier 1.5 max 1h - info: number of seconds since the last successful buffering of backend data - to: dba - - alarm: backend_metrics_sent - on: netdata.backend_metrics - units: % - calc: abs($sent) * 100 / abs($buffered) - every: 10s - warn: $this != 100 - delay: down 5m multiplier 1.5 max 1h - info: percentage of metrics sent to the backend server - to: dba - - alarm: backend_metrics_lost - on: netdata.backend_metrics - units: metrics - calc: abs($lost) - every: 10s - crit: ($this != 0) || ($status == $CRITICAL && abs($sent) == 0) - delay: down 5m multiplier 1.5 max 1h - info: number of metrics lost due to repeating failures to contact the backend server - to: dba - -# this chart has been removed from netdata -# alarm: backend_slow -# on: netdata.backend_latency -# units: % -# calc: $latency * 100 / ($update_every * 1000) -# every: 10s -# warn: $this > 50 -# crit: $this > 100 -# delay: down 5m multiplier 1.5 max 1h -# info: the percentage of time between iterations needed by the backend time to process the data sent by netdata -# to: dba diff --git a/conf.d/health.d/beanstalkd.conf b/conf.d/health.d/beanstalkd.conf deleted file mode 100644 index 30dc27328..000000000 --- a/conf.d/health.d/beanstalkd.conf +++ /dev/null @@ -1,36 +0,0 @@ -# get the number of buried jobs in all queues - -template: server_buried_jobs - on: beanstalk.current_jobs - calc: $buried - units: jobs - every: 10s - warn: $this > 0 - crit: $this > 10 - delay: up 0 down 5m multiplier 1.2 max 1h - info: the number of buried jobs aggregated across all tubes - to: sysadmin - -# get the number of buried jobs per queue - -#template: tube_buried_jobs -# on: beanstalk.jobs -# calc: $buried -# units: jobs -# every: 10s -# warn: $this > 0 -# crit: $this > 10 -# delay: up 0 down 5m multiplier 1.2 max 1h -# info: the number of jobs buried per tube -# to: sysadmin - -# get the current number of tubes - -#template: number_of_tubes -# on: beanstalk.current_tubes -# calc: $tubes -# every: 10s -# warn: $this < 5 -# delay: up 0 down 5m multiplier 1.2 max 1h -# info: the current number of tubes on the server -# to: sysadmin diff --git a/conf.d/health.d/bind_rndc.conf b/conf.d/health.d/bind_rndc.conf deleted file mode 100644 index 4145e77cd..000000000 --- a/conf.d/health.d/bind_rndc.conf +++ /dev/null @@ -1,9 +0,0 @@ - template: bind_rndc_stats_file_size - on: bind_rndc.stats_size - units: megabytes - every: 60 - calc: $stats_size - warn: $this > 512 - crit: $this > 1024 - info: Bind stats file is very large! Consider to create logrotate conf file for it! - to: sysadmin diff --git a/conf.d/health.d/btrfs.conf b/conf.d/health.d/btrfs.conf deleted file mode 100644 index b27aa544f..000000000 --- a/conf.d/health.d/btrfs.conf +++ /dev/null @@ -1,57 +0,0 @@ - -template: btrfs_allocated - on: btrfs.disk - os: * - hosts: * -families: * - calc: 100 - ($unallocated * 100 / ($unallocated + $data_used + $data_free + $meta_used + $meta_free + $sys_used + $sys_free)) - units: % - every: 10s - warn: $this > (($status >= $WARNING) ? (90) : (95)) - crit: $this > (($status == $CRITICAL) ? (95) : (98)) - delay: up 1m down 15m multiplier 1.5 max 1h - info: the percentage of allocated BTRFS physical disk space - to: sysadmin - -template: btrfs_data - on: btrfs.data - os: * - hosts: * -families: * - calc: $used * 100 / ($used + $free) - units: % - every: 10s - warn: $this > (($status >= $WARNING) ? (90) : (95)) && $btrfs_allocated > 98 - crit: $this > (($status == $CRITICAL) ? 
(95) : (98)) && $btrfs_allocated > 98 - delay: up 1m down 15m multiplier 1.5 max 1h - info: the percentage of used BTRFS data space - to: sysadmin - -template: btrfs_metadata - on: btrfs.metadata - os: * - hosts: * -families: * - calc: ($used + $reserved) * 100 / ($used + $free + $reserved) - units: % - every: 10s - warn: $this > (($status >= $WARNING) ? (90) : (95)) && $btrfs_allocated > 98 - crit: $this > (($status == $CRITICAL) ? (95) : (98)) && $btrfs_allocated > 98 - delay: up 1m down 15m multiplier 1.5 max 1h - info: the percentage of used BTRFS metadata space - to: sysadmin - -template: btrfs_system - on: btrfs.system - os: * - hosts: * -families: * - calc: $used * 100 / ($used + $free) - units: % - every: 10s - warn: $this > (($status >= $WARNING) ? (90) : (95)) && $btrfs_allocated > 98 - crit: $this > (($status == $CRITICAL) ? (95) : (98)) && $btrfs_allocated > 98 - delay: up 1m down 15m multiplier 1.5 max 1h - info: the percentage of used BTRFS system space - to: sysadmin - diff --git a/conf.d/health.d/ceph.conf b/conf.d/health.d/ceph.conf deleted file mode 100644 index de16f7b6f..000000000 --- a/conf.d/health.d/ceph.conf +++ /dev/null @@ -1,13 +0,0 @@ -# low ceph disk available - -template: cluster_space_usage - on: ceph.general_usage - calc: $avail * 100 / ($avail + $used) - units: % - every: 10s - warn: $this < 10 - crit: $this < 1 - delay: down 5m multiplier 1.2 max 1h - info: ceph disk usage is almost full - to: sysadmin - diff --git a/conf.d/health.d/couchdb.conf b/conf.d/health.d/couchdb.conf deleted file mode 100644 index 4a2895280..000000000 --- a/conf.d/health.d/couchdb.conf +++ /dev/null @@ -1,13 +0,0 @@ - -# make sure couchdb is running - -template: couchdb_last_collected_secs - on: couchdb.request_methods - calc: $now - $last_collected_t - units: seconds ago - every: 10s - warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every)) - crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every)) - delay: down 5m multiplier 1.5 max 1h - info: number of seconds since the last successful data collection - to: dba diff --git a/conf.d/health.d/cpu.conf b/conf.d/health.d/cpu.conf deleted file mode 100644 index fa8189856..000000000 --- a/conf.d/health.d/cpu.conf +++ /dev/null @@ -1,55 +0,0 @@ - -# you can disable an alarm notification by setting the 'to' line to: silent - -template: 10min_cpu_usage - on: system.cpu - os: linux - hosts: * - lookup: average -10m unaligned of user,system,softirq,irq,guest - units: % - every: 1m - warn: $this > (($status >= $WARNING) ? (75) : (85)) - crit: $this > (($status == $CRITICAL) ? (85) : (95)) - delay: down 15m multiplier 1.5 max 1h - info: average cpu utilization for the last 10 minutes (excluding iowait, nice and steal) - to: sysadmin - -template: 10min_cpu_iowait - on: system.cpu - os: linux - hosts: * - lookup: average -10m unaligned of iowait - units: % - every: 1m - warn: $this > (($status >= $WARNING) ? (20) : (40)) - crit: $this > (($status == $CRITICAL) ? (40) : (50)) - delay: down 15m multiplier 1.5 max 1h - info: average CPU wait I/O for the last 10 minutes - to: sysadmin - -template: 20min_steal_cpu - on: system.cpu - os: linux - hosts: * - lookup: average -20m unaligned of steal - units: % - every: 5m - warn: $this > (($status >= $WARNING) ? (5) : (10)) - crit: $this > (($status == $CRITICAL) ? 
(20) : (30)) - delay: down 1h multiplier 1.5 max 2h - info: average CPU steal time for the last 20 minutes - to: sysadmin - -## FreeBSD -template: 10min_cpu_usage - on: system.cpu - os: freebsd - hosts: * - lookup: average -10m unaligned of user,system,interrupt - units: % - every: 1m - warn: $this > (($status >= $WARNING) ? (75) : (85)) - crit: $this > (($status == $CRITICAL) ? (85) : (95)) - delay: down 15m multiplier 1.5 max 1h - info: average cpu utilization for the last 10 minutes (excluding nice) - to: sysadmin diff --git a/conf.d/health.d/disks.conf b/conf.d/health.d/disks.conf deleted file mode 100644 index 26f85848a..000000000 --- a/conf.d/health.d/disks.conf +++ /dev/null @@ -1,167 +0,0 @@ - -# you can disable an alarm notification by setting the 'to' line to: silent - - -# ----------------------------------------------------------------------------- -# low disk space - -# checking the latest collected values -# raise an alarm if the disk is low on -# available disk space - -template: disk_space_usage - on: disk.space - os: linux freebsd - hosts: * -families: * - calc: $used * 100 / ($avail + $used) - units: % - every: 1m - warn: $this > (($status >= $WARNING ) ? (80) : (90)) - crit: $this > (($status == $CRITICAL) ? (90) : (98)) - delay: up 1m down 15m multiplier 1.5 max 1h - info: current disk space usage - to: sysadmin - -template: disk_inode_usage - on: disk.inodes - os: linux freebsd - hosts: * -families: * - calc: $used * 100 / ($avail + $used) - units: % - every: 1m - warn: $this > (($status >= $WARNING) ? (80) : (90)) - crit: $this > (($status == $CRITICAL) ? (90) : (98)) - delay: up 1m down 15m multiplier 1.5 max 1h - info: current disk inode usage - to: sysadmin - - -# ----------------------------------------------------------------------------- -# disk fill rate - -# calculate the rate the disk fills -# use as base, the available space change -# during the last hour - -# this is just a calculation - it has no alarm -# we will use it in the next template to find -# the hours remaining - -template: disk_fill_rate - on: disk.space - os: linux freebsd - hosts: * -families: * - lookup: min -10m at -50m unaligned of avail - calc: ($this - $avail) / (($now - $after) / 3600) - every: 1m - units: GB/hour - info: average rate the disk fills up (positive), or frees up (negative) space, for the last hour - - -# calculate the hours remaining -# if the disk continues to fill -# in this rate - -template: out_of_disk_space_time - on: disk.space - os: linux freebsd - hosts: * -families: * - calc: ($disk_fill_rate > 0) ? ($avail / $disk_fill_rate) : (inf) - units: hours - every: 10s - warn: $this > 0 and $this < (($status >= $WARNING) ? (48) : (8)) - crit: $this > 0 and $this < (($status == $CRITICAL) ? 
(24) : (2)) - delay: down 15m multiplier 1.2 max 1h - info: estimated time the disk will run out of space, if the system continues to add data with the rate of the last hour - to: sysadmin - - -# ----------------------------------------------------------------------------- -# disk inode fill rate - -# calculate the rate the disk inodes are allocated -# use as base, the available inodes change -# during the last hour - -# this is just a calculation - it has no alarm -# we will use it in the next template to find -# the hours remaining - -template: disk_inode_rate - on: disk.inodes - os: linux freebsd - hosts: * -families: * - lookup: min -10m at -50m unaligned of avail - calc: ($this - $avail) / (($now - $after) / 3600) - every: 1m - units: inodes/hour - info: average rate at which disk inodes are allocated (positive), or freed (negative), for the last hour - -# calculate the hours remaining -# if the disk inodes are allocated -# in this rate - -template: out_of_disk_inodes_time - on: disk.inodes - os: linux freebsd - hosts: * -families: * - calc: ($disk_inode_rate > 0) ? ($avail / $disk_inode_rate) : (inf) - units: hours - every: 10s - warn: $this > 0 and $this < (($status >= $WARNING) ? (48) : (8)) - crit: $this > 0 and $this < (($status == $CRITICAL) ? (24) : (2)) - delay: down 15m multiplier 1.2 max 1h - info: estimated time the disk will run out of inodes, if the system continues to allocate inodes with the rate of the last hour - to: sysadmin - - -# ----------------------------------------------------------------------------- -# disk congestion - -# raise an alarm if the disk is congested -# by calculating the average disk utilization -# for the last 10 minutes - -template: 10min_disk_utilization - on: disk.util - os: linux freebsd - hosts: * -families: * - lookup: average -10m unaligned - units: % - every: 1m - green: 90 - red: 98 - warn: $this > $green * (($status >= $WARNING) ? (0.7) : (1)) - crit: $this > $red * (($status == $CRITICAL) ? (0.7) : (1)) - delay: down 15m multiplier 1.2 max 1h - info: the percentage of time the disk was busy, during the last 10 minutes - to: sysadmin - - -# raise an alarm if the disk backlog -# is above 1000ms (1s) per second -# for 10 minutes -# (i.e. the disk cannot catch up) - -template: 10min_disk_backlog - on: disk.backlog - os: linux - hosts: * -families: * - lookup: average -10m unaligned - units: ms - every: 1m - green: 2000 - red: 5000 - warn: $this > $green * (($status >= $WARNING) ? (0.7) : (1)) - crit: $this > $red * (($status == $CRITICAL) ? (0.7) : (1)) - delay: down 15m multiplier 1.2 max 1h - info: average of the kernel estimated disk backlog, for the last 10 minutes - to: sysadmin diff --git a/conf.d/health.d/elasticsearch.conf b/conf.d/health.d/elasticsearch.conf deleted file mode 100644 index dffd40965..000000000 --- a/conf.d/health.d/elasticsearch.conf +++ /dev/null @@ -1,9 +0,0 @@ - alarm: elasticsearch_last_collected - on: elasticsearch_local.cluster_health_status - calc: $now - $last_collected_t - units: seconds ago - every: 10s - warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every)) - crit: $this > (($status == $CRITICAL) ? 
($update_every) : (60 * $update_every)) - info: number of seconds since the last successful data collection - to: sysadmin diff --git a/conf.d/health.d/entropy.conf b/conf.d/health.d/entropy.conf deleted file mode 100644 index 66d44ec13..000000000 --- a/conf.d/health.d/entropy.conf +++ /dev/null @@ -1,16 +0,0 @@ - -# check if entropy is too low -# the alarm is checked every 1 minute -# and examines the last hour of data - - alarm: lowest_entropy - on: system.entropy - os: linux - hosts: * - lookup: min -10m unaligned - units: entries - every: 5m - warn: $this < (($status >= $WARNING) ? (200) : (100)) - delay: down 1h multiplier 1.5 max 2h - info: minimum entries in the random numbers pool in the last 10 minutes - to: silent diff --git a/conf.d/health.d/fping.conf b/conf.d/health.d/fping.conf deleted file mode 100644 index 43658fef6..000000000 --- a/conf.d/health.d/fping.conf +++ /dev/null @@ -1,53 +0,0 @@ - -template: fping_last_collected_secs -families: * - on: fping.latency - calc: $now - $last_collected_t - units: seconds ago - every: 10s - warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every)) - crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every)) - delay: down 5m multiplier 1.5 max 1h - info: number of seconds since the last successful data collection - to: sysadmin - -template: host_reachable -families: * - on: fping.latency - calc: $average != nan - units: up/down - every: 10s - crit: $this == 0 - info: states if the remote host is reachable - delay: down 30m multiplier 1.5 max 2h - to: sysadmin - -template: host_latency -families: * - on: fping.latency - lookup: average -10s unaligned of average - units: ms - every: 10s - green: 500 - red: 1000 - warn: $this > $green OR $max > $red - crit: $this > $red - info: average round trip delay during the last 10 seconds - delay: down 30m multiplier 1.5 max 2h - to: sysadmin - -template: packet_loss -families: * - on: fping.quality - lookup: average -10m unaligned of returned - calc: 100 - $this - green: 1 - red: 10 - units: % - every: 10s - warn: $this > $green - crit: $this > $red - info: packet loss percentage - delay: down 30m multiplier 1.5 max 2h - to: sysadmin - diff --git a/conf.d/health.d/fronius.conf b/conf.d/health.d/fronius.conf deleted file mode 100644 index cdf6c8fcb..000000000 --- a/conf.d/health.d/fronius.conf +++ /dev/null @@ -1,11 +0,0 @@ -template: fronius_last_collected_secs -families: * - on: fronius.power - calc: $now - $last_collected_t - every: 10s - units: seconds ago - warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every)) - crit: $this > (($status == $CRITICAL) ? 
($update_every) : (60 * $update_every)) - delay: down 5m multiplier 1.5 max 1h - info: number of seconds since the last successful data collection - to: sitemgr diff --git a/conf.d/health.d/haproxy.conf b/conf.d/health.d/haproxy.conf deleted file mode 100644 index e49c70d48..000000000 --- a/conf.d/health.d/haproxy.conf +++ /dev/null @@ -1,27 +0,0 @@ -template: haproxy_backend_server_status - on: haproxy_hs.down - units: failed servers - every: 10s - lookup: average -10s - crit: $this > 0 - info: number of failed haproxy backend servers - to: sysadmin - -template: haproxy_backend_status - on: haproxy_hb.down - units: failed backend - every: 10s - lookup: average -10s - crit: $this > 0 - info: number of failed haproxy backends - to: sysadmin - -template: haproxy_last_collected - on: haproxy_hb.down - calc: $now - $last_collected_t - units: seconds ago - every: 10s - warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every)) - crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every)) - info: number of seconds since the last successful data collection - to: sysadmin diff --git a/conf.d/health.d/httpcheck.conf b/conf.d/health.d/httpcheck.conf deleted file mode 100644 index 0ddf35eab..000000000 --- a/conf.d/health.d/httpcheck.conf +++ /dev/null @@ -1,99 +0,0 @@ -template: httpcheck_last_collected_secs -families: * - on: httpcheck.status - calc: $now - $last_collected_t - every: 10s - units: seconds ago - warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every)) - crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every)) - delay: down 5m multiplier 1.5 max 1h - info: number of seconds since the last successful data collection - to: sysadmin - -# This is a fast-reacting no-notification alarm ideal for custom dashboards or badges -template: web_service_up -families: * - on: httpcheck.status - lookup: average -1m unaligned percentage of success - calc: ($this < 75) ? (0) : ($this) - every: 5s - units: up/down - info: at least 75% verified responses during last 60 seconds, ideal for badges - to: silent - -template: web_service_bad_content -families: * - on: httpcheck.status - lookup: average -5m unaligned percentage of bad_content - every: 10s - units: % - warn: $this >= 10 AND $this < 40 - crit: $this >= 40 - delay: down 5m multiplier 1.5 max 1h - info: average of unexpected http response content during the last 5 minutes - options: no-clear-notification - to: webmaster - -template: web_service_bad_status -families: * - on: httpcheck.status - lookup: average -5m unaligned percentage of bad_status - every: 10s - units: % - warn: $this >= 10 AND $this < 40 - crit: $this >= 40 - delay: down 5m multiplier 1.5 max 1h - info: average of unexpected http status during the last 5 minutes - options: no-clear-notification - to: webmaster - -template: web_service_timeouts -families: * - on: httpcheck.status - lookup: average -5m unaligned percentage of timeout - every: 10s - units: % - info: average of timeouts during the last 5 minutes - -template: no_web_service_connections -families: * - on: httpcheck.status - lookup: average -5m unaligned percentage of no_connection - every: 10s - units: % - info: average of failed requests during the last 5 minutes - -# combined timeout & no connection alarm -template: web_service_unreachable -families: * - on: httpcheck.status - calc: ($no_web_service_connections >= $web_service_timeouts) ? 
($no_web_service_connections) : ($web_service_timeouts) - units: % - every: 10s - warn: ($no_web_service_connections >= 10 OR $web_service_timeouts >= 10) AND ($no_web_service_connections < 40 OR $web_service_timeouts < 40) - crit: $no_web_service_connections >= 40 OR $web_service_timeouts >= 40 - delay: down 5m multiplier 1.5 max 1h - info: average of failed requests either due to timeouts or no connection during the last 5 minutes - options: no-clear-notification - to: webmaster - -template: 1h_web_service_response_time -families: * - on: httpcheck.responsetime - lookup: average -1h unaligned of time - every: 30s - units: ms - info: average response time over the last hour - -template: web_service_slow -families: * - on: httpcheck.responsetime - lookup: average -3m unaligned of time - units: ms - every: 10s - warn: ($this > ($1h_web_service_response_time * 2) ) - crit: ($this > ($1h_web_service_response_time * 3) ) - info: average response time over the last 3 minutes, compared to the average over the last hour - delay: down 5m multiplier 1.5 max 1h - options: no-clear-notification - to: webmaster diff --git a/conf.d/health.d/ipc.conf b/conf.d/health.d/ipc.conf deleted file mode 100644 index 03cf264d8..000000000 --- a/conf.d/health.d/ipc.conf +++ /dev/null @@ -1,28 +0,0 @@ - -# you can disable an alarm notification by setting the 'to' line to: silent - - alarm: semaphores_used - on: system.ipc_semaphores - os: linux - hosts: * - calc: $semaphores * 100 / $ipc.semaphores.max - units: % - every: 10s - warn: $this > (($status >= $WARNING) ? (70) : (80)) - crit: $this > (($status == $CRITICAL) ? (70) : (90)) - delay: down 5m multiplier 1.5 max 1h - info: the percentage of IPC semaphores used - to: sysadmin - - alarm: semaphore_arrays_used - on: system.ipc_semaphore_arrays - os: linux - hosts: * - calc: $arrays * 100 / $ipc.semaphores.arrays.max - units: % - every: 10s - warn: $this > (($status >= $WARNING) ? (70) : (80)) - crit: $this > (($status == $CRITICAL) ? (70) : (90)) - delay: down 5m multiplier 1.5 max 1h - info: the percentage of IPC semaphore arrays used - to: sysadmin diff --git a/conf.d/health.d/ipfs.conf b/conf.d/health.d/ipfs.conf deleted file mode 100644 index 3f77572d6..000000000 --- a/conf.d/health.d/ipfs.conf +++ /dev/null @@ -1,11 +0,0 @@ - -template: ipfs_datastore_usage - on: ipfs.repo_size - calc: $size * 100 / $avail - units: % - every: 10s - warn: $this > (($status >= $WARNING) ? (80) : (90)) - crit: $this > (($status == $CRITICAL) ? 
(90) : (98)) - delay: down 15m multiplier 1.5 max 1h - info: ipfs Datastore close to running out of space - to: sysadmin diff --git a/conf.d/health.d/ipmi.conf b/conf.d/health.d/ipmi.conf deleted file mode 100644 index c25581964..000000000 --- a/conf.d/health.d/ipmi.conf +++ /dev/null @@ -1,20 +0,0 @@ - alarm: ipmi_sensors_states - on: ipmi.sensors_states - calc: $warning + $critical - units: sensors - every: 10s - warn: $this > 0 - crit: $critical > 0 - delay: up 5m down 15m multiplier 1.5 max 1h - info: the number IPMI sensors in non-nominal state - to: sysadmin - - alarm: ipmi_events - on: ipmi.events - calc: $events - units: events - every: 10s - warn: $this > 0 - delay: up 5m down 15m multiplier 1.5 max 1h - info: the number of events in the IPMI System Event Log (SEL) - to: sysadmin diff --git a/conf.d/health.d/isc_dhcpd.conf b/conf.d/health.d/isc_dhcpd.conf deleted file mode 100644 index 8054656ff..000000000 --- a/conf.d/health.d/isc_dhcpd.conf +++ /dev/null @@ -1,10 +0,0 @@ - template: isc_dhcpd_leases_size - on: isc_dhcpd.leases_total - units: KB - every: 60 - calc: $leases_size - warn: $this > 3072 - crit: $this > 6144 - delay: up 2m down 5m - info: dhcpd.leases file too big! Module can slow down your server. - to: sysadmin diff --git a/conf.d/health.d/lighttpd.conf b/conf.d/health.d/lighttpd.conf deleted file mode 100644 index 915907a4a..000000000 --- a/conf.d/health.d/lighttpd.conf +++ /dev/null @@ -1,14 +0,0 @@ - -# make sure lighttpd is running - -template: lighttpd_last_collected_secs - on: lighttpd.requests - calc: $now - $last_collected_t - units: seconds ago - every: 10s - warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every)) - crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every)) - delay: down 5m multiplier 1.5 max 1h - info: number of seconds since the last successful data collection - to: webmaster - diff --git a/conf.d/health.d/mdstat.conf b/conf.d/health.d/mdstat.conf deleted file mode 100644 index c9e7d20db..000000000 --- a/conf.d/health.d/mdstat.conf +++ /dev/null @@ -1,18 +0,0 @@ -template: mdstat_disks - on: md.disks - units: failed devices - every: 10s - calc: $total - $inuse - crit: $this > 0 - info: Array is degraded! - to: sysadmin - -template: mdstat_last_collected - on: md.disks - calc: $now - $last_collected_t - units: seconds ago - every: 10s - warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every)) - crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every)) - info: number of seconds since the last successful data collection - to: sysadmin diff --git a/conf.d/health.d/memcached.conf b/conf.d/health.d/memcached.conf deleted file mode 100644 index d248ef57a..000000000 --- a/conf.d/health.d/memcached.conf +++ /dev/null @@ -1,52 +0,0 @@ - -# make sure memcached is running - -template: memcached_last_collected_secs - on: memcached.cache - calc: $now - $last_collected_t - units: seconds ago - every: 10s - warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every)) - crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every)) - delay: down 5m multiplier 1.5 max 1h - info: number of seconds since the last successful data collection - to: dba - - -# detect if memcached cache is full - -template: memcached_cache_memory_usage - on: memcached.cache - calc: $used * 100 / ($used + $available) - units: % - every: 10s - warn: $this > (($status >= $WARNING) ? (70) : (80)) - crit: $this > (($status == $CRITICAL) ? 
(80) : (90)) - delay: up 0 down 15m multiplier 1.5 max 1h - info: current cache memory usage - to: dba - - -# find the rate memcached cache is filling - -template: cache_fill_rate - on: memcached.cache - lookup: min -10m at -50m unaligned of available - calc: ($this - $available) / (($now - $after) / 3600) - units: KB/hour - every: 1m - info: average rate the cache fills up (positive), or frees up (negative) space, for the last hour - - -# find the hours remaining until memcached cache is full - -template: out_of_cache_space_time - on: memcached.cache - calc: ($cache_fill_rate > 0) ? ($available / $cache_fill_rate) : (inf) - units: hours - every: 10s - warn: $this > 0 and $this < (($status >= $WARNING) ? (48) : (8)) - crit: $this > 0 and $this < (($status == $CRITICAL) ? (24) : (2)) - delay: down 15m multiplier 1.5 max 1h - info: estimated time the cache will run out of space, if the system continues to add data with the rate of the last hour - to: dba diff --git a/conf.d/health.d/memory.conf b/conf.d/health.d/memory.conf deleted file mode 100644 index 4a0e6e522..000000000 --- a/conf.d/health.d/memory.conf +++ /dev/null @@ -1,38 +0,0 @@ - -# you can disable an alarm notification by setting the 'to' line to: silent - - alarm: 1hour_ecc_memory_correctable - on: mem.ecc_ce - os: linux - hosts: * - lookup: sum -10m unaligned - units: errors - every: 1m - warn: $this > 0 - delay: down 1h multiplier 1.5 max 1h - info: number of ECC correctable errors during the last hour - to: sysadmin - - alarm: 1hour_ecc_memory_uncorrectable - on: mem.ecc_ue - os: linux - hosts: * - lookup: sum -10m unaligned - units: errors - every: 1m - crit: $this > 0 - delay: down 1h multiplier 1.5 max 1h - info: number of ECC uncorrectable errors during the last hour - to: sysadmin - - alarm: 1hour_memory_hw_corrupted - on: mem.hwcorrupt - os: linux - hosts: * - calc: $HardwareCorrupted - units: MB - every: 10s - warn: $this > 0 - delay: down 1h multiplier 1.5 max 1h - info: amount of memory corrupted due to a hardware failure - to: sysadmin diff --git a/conf.d/health.d/mongodb.conf b/conf.d/health.d/mongodb.conf deleted file mode 100644 index a80cb3112..000000000 --- a/conf.d/health.d/mongodb.conf +++ /dev/null @@ -1,13 +0,0 @@ - -# make sure mongodb is running - -template: mongodb_last_collected_secs - on: mongodb.read_operations - calc: $now - $last_collected_t - units: seconds ago - every: 10s - warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every)) - crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every)) - delay: down 5m multiplier 1.5 max 1h - info: number of seconds since the last successful data collection - to: dba diff --git a/conf.d/health.d/mysql.conf b/conf.d/health.d/mysql.conf deleted file mode 100644 index 1eeb993f0..000000000 --- a/conf.d/health.d/mysql.conf +++ /dev/null @@ -1,85 +0,0 @@ - -# make sure mysql is running - -template: mysql_last_collected_secs - on: mysql.queries - calc: $now - $last_collected_t - units: seconds ago - every: 10s - warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every)) - crit: $this > (($status == $CRITICAL) ? 
($update_every) : (60 * $update_every)) - delay: down 5m multiplier 1.5 max 1h - info: number of seconds since the last successful data collection - to: dba - - -# ----------------------------------------------------------------------------- -# slow queries - -template: mysql_10s_slow_queries - on: mysql.queries - lookup: sum -10s of slow_queries - units: slow queries - every: 10s - warn: $this > (($status >= $WARNING) ? (5) : (10)) - crit: $this > (($status == $CRITICAL) ? (10) : (20)) - delay: down 5m multiplier 1.5 max 1h - info: number of mysql slow queries over the last 10 seconds - to: dba - - -# ----------------------------------------------------------------------------- -# lock waits - -template: mysql_10s_table_locks_immediate - on: mysql.table_locks - lookup: sum -10s absolute of immediate - units: immediate locks - every: 10s - info: number of table immediate locks over the last 10 seconds - to: dba - -template: mysql_10s_table_locks_waited - on: mysql.table_locks - lookup: sum -10s absolute of waited - units: waited locks - every: 10s - info: number of table waited locks over the last 10 seconds - to: dba - -template: mysql_10s_waited_locks_ratio - on: mysql.table_locks - calc: ( ($mysql_10s_table_locks_waited + $mysql_10s_table_locks_immediate) > 0 ) ? (($mysql_10s_table_locks_waited * 100) / ($mysql_10s_table_locks_waited + $mysql_10s_table_locks_immediate)) : 0 - units: % - every: 10s - warn: $this > (($status >= $WARNING) ? (10) : (25)) - crit: $this > (($status == $CRITICAL) ? (25) : (50)) - delay: down 30m multiplier 1.5 max 1h - info: the ratio of mysql waited table locks, for the last 10 seconds - to: dba - - -# ----------------------------------------------------------------------------- -# replication - -template: mysql_replication - on: mysql.slave_status - calc: ($sql_running == -1 OR $io_running == -1)?0:1 - units: ok/failed - every: 10s - crit: $this == 0 - delay: down 5m multiplier 1.5 max 1h - info: checks if mysql replication has stopped - to: dba - -template: mysql_replication_lag - on: mysql.slave_behind - calc: $seconds - units: seconds - every: 10s - warn: $this > (($status >= $WARNING) ? (5) : (10)) - crit: $this > (($status == $CRITICAL) ? (10) : (30)) - delay: down 15m multiplier 1.5 max 1h - info: the number of seconds mysql replication is behind this master - to: dba - diff --git a/conf.d/health.d/named.conf b/conf.d/health.d/named.conf deleted file mode 100644 index 4fc65c8ee..000000000 --- a/conf.d/health.d/named.conf +++ /dev/null @@ -1,14 +0,0 @@ - -# make sure named is running - -template: named_last_collected_secs - on: named.global_queries - calc: $now - $last_collected_t - units: seconds ago - every: 10s - warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every)) - crit: $this > (($status == $CRITICAL) ? 
($update_every) : (60 * $update_every)) - delay: down 5m multiplier 1.5 max 1h - info: number of seconds since the last successful data collection - to: domainadmin - diff --git a/conf.d/health.d/net.conf b/conf.d/health.d/net.conf deleted file mode 100644 index 22a88927d..000000000 --- a/conf.d/health.d/net.conf +++ /dev/null @@ -1,122 +0,0 @@ - -# you can disable an alarm notification by setting the 'to' line to: silent - -# ----------------------------------------------------------------------------- -# dropped packets - -# check if an interface is dropping packets -# the alarm is checked every 1 minute -# and examines the last 10 minutes of data - -template: inbound_packets_dropped - on: net.drops - os: linux - hosts: * -families: * - lookup: sum -10m unaligned absolute of inbound - units: packets - every: 1m - warn: $this >= 5 - delay: down 1h multiplier 1.5 max 2h - info: interface inbound dropped packets in the last 10 minutes - to: sysadmin - -template: outbound_packets_dropped - on: net.drops - os: linux - hosts: * -families: * - lookup: sum -10m unaligned absolute of outbound - units: packets - every: 1m - warn: $this >= 5 - delay: down 1h multiplier 1.5 max 2h - info: interface outbound dropped packets in the last 10 minutes - to: sysadmin - -template: inbound_packets_dropped_ratio - on: net.packets - os: linux - hosts: * -families: * - lookup: sum -10m unaligned absolute of received - calc: (($inbound_packets_dropped != nan AND $this > 0) ? ($inbound_packets_dropped * 100 / $this) : (0)) - units: % - every: 1m - warn: $this >= 0.1 - crit: $this >= 2 - delay: down 1h multiplier 1.5 max 2h - info: the ratio of inbound dropped packets vs the total number of received packets of the network interface, during the last 10 minutes - to: sysadmin - -template: outbound_packets_dropped_ratio - on: net.packets - os: linux - hosts: * -families: * - lookup: sum -10m unaligned absolute of sent - calc: (($outbound_packets_dropped != nan AND $this > 0) ? ($outbound_packets_dropped * 100 / $this) : (0)) - units: % - every: 1m - warn: $this >= 0.1 - crit: $this >= 2 - delay: down 1h multiplier 1.5 max 2h - info: the ratio of outbound dropped packets vs the total number of sent packets of the network interface, during the last 10 minutes - to: sysadmin - - -# ----------------------------------------------------------------------------- -# FIFO errors - -# check if an interface is having FIFO -# buffer errors -# the alarm is checked every 1 minute -# and examines the last 10 minutes of data - -template: 10min_fifo_errors - on: net.fifo - os: linux - hosts: * -families: * - lookup: sum -10m unaligned absolute - units: errors - every: 1m - warn: $this > 0 - delay: down 1h multiplier 1.5 max 2h - info: interface fifo errors in the last 10 minutes - to: sysadmin - - -# ----------------------------------------------------------------------------- -# check for packet storms - -# 1. calculate the rate packets are received in 1m: 1m_received_packets_rate -# 2. do the same for the last 10s -# 3. 
raise an alarm if the later is 10x or 20x the first -# we assume the minimum packet storm should at least have -# 10000 packets/s, average of the last 10 seconds - -template: 1m_received_packets_rate - on: net.packets - os: linux freebsd - hosts: * -families: * - lookup: average -1m of received - units: packets - every: 10s - info: the average number of packets received during the last minute - -template: 10s_received_packets_storm - on: net.packets - os: linux freebsd - hosts: * -families: * - lookup: average -10s of received - calc: $this * 100 / (($1m_received_packets_rate < 1000)?(1000):($1m_received_packets_rate)) - every: 10s - units: % - warn: $this > (($status >= $WARNING)?(200):(5000)) - crit: $this > (($status >= $WARNING)?(5000):(6000)) -options: no-clear-notification - info: the % of the rate of received packets in the last 10 seconds, compared to the rate of the last minute (clear notification for this alarm will not be sent) - to: sysadmin diff --git a/conf.d/health.d/netfilter.conf b/conf.d/health.d/netfilter.conf deleted file mode 100644 index fa1732b33..000000000 --- a/conf.d/health.d/netfilter.conf +++ /dev/null @@ -1,29 +0,0 @@ - -# you can disable an alarm notification by setting the 'to' line to: silent - - alarm: netfilter_last_collected_secs - on: netfilter.conntrack_sockets - os: linux - hosts: * - calc: $now - $last_collected_t - units: seconds ago - every: 10s - warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every)) - crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every)) - delay: down 5m multiplier 1.5 max 1h - info: number of seconds since the last successful data collection - to: sysadmin - - alarm: netfilter_conntrack_full - on: netfilter.conntrack_sockets - os: linux - hosts: * - lookup: max -10s unaligned of connections - calc: $this * 100 / $netfilter.conntrack.max - units: % - every: 10s - warn: $this > (($status >= $WARNING) ? (70) : (80)) - crit: $this > (($status == $CRITICAL) ? (80) : (90)) - delay: down 5m multiplier 1.5 max 1h - info: the number of connections tracked by the netfilter connection tracker, as a percentage of the connection tracker table size - to: sysadmin diff --git a/conf.d/health.d/nginx.conf b/conf.d/health.d/nginx.conf deleted file mode 100644 index a686c3d99..000000000 --- a/conf.d/health.d/nginx.conf +++ /dev/null @@ -1,14 +0,0 @@ - -# make sure nginx is running - -template: nginx_last_collected_secs - on: nginx.requests - calc: $now - $last_collected_t - units: seconds ago - every: 10s - warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every)) - crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every)) - delay: down 5m multiplier 1.5 max 1h - info: number of seconds since the last successful data collection - to: webmaster - diff --git a/conf.d/health.d/nginx_plus.conf b/conf.d/health.d/nginx_plus.conf deleted file mode 100644 index 5a171a76d..000000000 --- a/conf.d/health.d/nginx_plus.conf +++ /dev/null @@ -1,14 +0,0 @@ - -# make sure nginx_plus is running - -template: nginx_plus_last_collected_secs - on: nginx_plus.requests_total - calc: $now - $last_collected_t - units: seconds ago - every: 10s - warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every)) - crit: $this > (($status == $CRITICAL) ? 
($update_every) : (60 * $update_every)) - delay: down 5m multiplier 1.5 max 1h - info: number of seconds since the last successful data collection - to: webmaster - diff --git a/conf.d/health.d/portcheck.conf b/conf.d/health.d/portcheck.conf deleted file mode 100644 index f42b63d30..000000000 --- a/conf.d/health.d/portcheck.conf +++ /dev/null @@ -1,48 +0,0 @@ -template: portcheck_last_collected_secs -families: * - on: portcheck.status - calc: $now - $last_collected_t - every: 10s - units: seconds ago - warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every)) - crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every)) - delay: down 5m multiplier 1.5 max 1h - info: number of seconds since the last successful data collection - to: sysadmin - -# This is a fast-reacting no-notification alarm ideal for custom dashboards or badges -template: service_reachable -families: * - on: portcheck.status - lookup: average -1m unaligned percentage of success - calc: ($this < 75) ? (0) : ($this) - every: 5s - units: up/down - info: at least 75% successful connections during last 60 seconds, ideal for badges - to: silent - -template: connection_timeouts -families: * - on: portcheck.status - lookup: average -5m unaligned percentage of timeout - every: 10s - units: % - warn: $this >= 10 AND $this < 40 - crit: $this >= 40 - delay: down 5m multiplier 1.5 max 1h - info: average of timeouts during the last 5 minutes - options: no-clear-notification - to: sysadmin - -template: connection_fails -families: * - on: portcheck.status - lookup: average -5m unaligned percentage of no_connection - every: 10s - units: % - warn: $this >= 10 AND $this < 40 - crit: $this >= 40 - delay: down 5m multiplier 1.5 max 1h - info: average of failed connections during the last 5 minutes - options: no-clear-notification - to: sysadmin diff --git a/conf.d/health.d/postgres.conf b/conf.d/health.d/postgres.conf deleted file mode 100644 index 4e0583b85..000000000 --- a/conf.d/health.d/postgres.conf +++ /dev/null @@ -1,13 +0,0 @@ - -# make sure postgres is running - -template: postgres_last_collected_secs - on: postgres.db_stat_transactions - calc: $now - $last_collected_t - units: seconds ago - every: 10s - warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every)) - crit: $this > (($status == $CRITICAL) ? 
($update_every) : (60 * $update_every)) - delay: down 5m multiplier 1.5 max 1h - info: number of seconds since the last successful data collection - to: dba diff --git a/conf.d/health.d/qos.conf b/conf.d/health.d/qos.conf deleted file mode 100644 index 7290d15ff..000000000 --- a/conf.d/health.d/qos.conf +++ /dev/null @@ -1,18 +0,0 @@ - -# you can disable an alarm notification by setting the 'to' line to: silent - -# check if a QoS class is dropping packets -# the alarm is checked every 10 seconds -# and examines the last minute of data - -#template: 10min_qos_packet_drops -# on: tc.qos_dropped -# os: linux -# hosts: * -# lookup: sum -10m unaligned absolute -# every: 30s -# warn: $this > 0 -# delay: up 0 down 30m multiplier 1.5 max 1h -# units: packets -# info: dropped packets in the last 30 minutes -# to: sysadmin diff --git a/conf.d/health.d/ram.conf b/conf.d/health.d/ram.conf deleted file mode 100644 index b6dc5f945..000000000 --- a/conf.d/health.d/ram.conf +++ /dev/null @@ -1,64 +0,0 @@ - -# you can disable an alarm notification by setting the 'to' line to: silent - - alarm: used_ram_to_ignore - on: system.ram - os: linux - hosts: * - calc: ($zfs.arc_size.arcsz = nan)?(0):($zfs.arc_size.arcsz) - every: 10s - info: the amount of memory that is reported as used, but it is actually capable for resizing itself based on the system needs (eg. ZFS ARC) - - alarm: ram_in_use - on: system.ram - os: linux - hosts: * -# calc: $used * 100 / ($used + $cached + $free) - calc: ($used - $used_ram_to_ignore) * 100 / ($used - $used_ram_to_ignore + $cached + $free) - units: % - every: 10s - warn: $this > (($status >= $WARNING) ? (80) : (90)) - crit: $this > (($status == $CRITICAL) ? (90) : (98)) - delay: down 15m multiplier 1.5 max 1h - info: system RAM used - to: sysadmin - - alarm: ram_available - on: mem.available - os: linux - hosts: * - calc: ($avail + $used_ram_to_ignore) * 100 / ($system.ram.used + $system.ram.cached + $system.ram.free + $system.ram.buffers) - units: % - every: 10s - warn: $this < (($status >= $WARNING) ? ( 5) : (10)) - crit: $this < (($status == $CRITICAL) ? (10) : ( 5)) - delay: down 15m multiplier 1.5 max 1h - info: estimated amount of RAM available for userspace processes, without causing swapping - to: sysadmin - -## FreeBSD -alarm: ram_in_use - on: system.ram - os: freebsd -hosts: * - calc: (($active + $wired) - $used_ram_to_ignore) * 100 / (($active + $wired) - $used_ram_to_ignore + $cached + $free) -units: % -every: 10s - warn: $this > (($status >= $WARNING) ? (80) : (90)) - crit: $this > (($status == $CRITICAL) ? (90) : (98)) -delay: down 15m multiplier 1.5 max 1h - info: system RAM usage - to: sysadmin - - alarm: ram_available - on: system.ram - os: freebsd - hosts: * - calc: ($free + $inactive + $used_ram_to_ignore) * 100 / ($free + $active + $inactive + $wired + $cache + $buffers) - units: % - every: 10s - warn: $this < (($status >= $WARNING) ? ( 5) : (10)) - crit: $this < (($status == $CRITICAL) ? (10) : ( 5)) - delay: down 15m multiplier 1.5 max 1h - info: estimated amount of RAM available for userspace processes, without causing swapping - to: sysadmin diff --git a/conf.d/health.d/redis.conf b/conf.d/health.d/redis.conf deleted file mode 100644 index c08a884a6..000000000 --- a/conf.d/health.d/redis.conf +++ /dev/null @@ -1,34 +0,0 @@ - -# make sure redis is running - -template: redis_last_collected_secs - on: redis.operations - calc: $now - $last_collected_t - units: seconds ago - every: 10s - warn: $this > (($status >= $WARNING) ? 
($update_every) : ( 5 * $update_every)) - crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every)) - delay: down 5m multiplier 1.5 max 1h - info: number of seconds since the last successful data collection - to: dba - -template: redis_bgsave_broken -families: * - on: redis.bgsave_health - every: 10s - crit: $rdb_last_bgsave_status != 0 - units: ok/failed - info: states if redis bgsave is working - delay: down 5m multiplier 1.5 max 1h - to: dba - -template: redis_bgsave_slow -families: * - on: redis.bgsave_now - every: 10s - warn: $rdb_bgsave_in_progress > 600 - crit: $rdb_bgsave_in_progress > 1200 - units: seconds - info: the time redis needs to save its database - delay: down 5m multiplier 1.5 max 1h - to: dba diff --git a/conf.d/health.d/retroshare.conf b/conf.d/health.d/retroshare.conf deleted file mode 100644 index 2344b60ec..000000000 --- a/conf.d/health.d/retroshare.conf +++ /dev/null @@ -1,25 +0,0 @@ -# make sure RetroShare is running - -template: retroshare_last_collected_secs - on: retroshare.peers - calc: $now - $last_collected_t - units: seconds ago - every: 10s - warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every)) - crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every)) - delay: down 5m multiplier 1.5 max 1h - info: number of seconds since the last successful data collection - to: sysadmin - -# make sure the DHT is fine when active - -template: retroshare_dht_working - on: retroshare.dht - calc: $dht_size_all - units: peers - every: 1m - warn: $this < (($status >= $WARNING) ? (120) : (100)) - crit: $this < (($status == $CRITICAL) ? (10) : (1)) - delay: up 0 down 15m multiplier 1.5 max 1h - info: Checks if the DHT has enough peers to operate - to: sysadmin diff --git a/conf.d/health.d/softnet.conf b/conf.d/health.d/softnet.conf deleted file mode 100644 index 77c804bfd..000000000 --- a/conf.d/health.d/softnet.conf +++ /dev/null @@ -1,40 +0,0 @@ - -# you can disable an alarm notification by setting the 'to' line to: silent - -# check for common /proc/net/softnet_stat errors - - alarm: 10min_netdev_backlog_exceeded - on: system.softnet_stat - os: linux - hosts: * - lookup: sum -10m unaligned absolute of dropped - units: packets - every: 1m - warn: $this > 0 - delay: down 1h multiplier 1.5 max 2h - info: number of packets dropped in the last 10min, because sysctl net.core.netdev_max_backlog was exceeded (this can be a cause for dropped packets) - to: sysadmin - - alarm: 10min_netdev_budget_ran_outs - on: system.softnet_stat - os: linux - hosts: * - lookup: sum -10m unaligned absolute of squeezed - units: events - every: 1m - warn: $this > (($status >= $WARNING) ? 
(0) : (10)) - delay: down 1h multiplier 1.5 max 2h - info: number of times, during the last 10min, ksoftirq ran out of sysctl net.core.netdev_budget or net.core.netdev_budget_usecs, with work remaining (this can be a cause for dropped packets) - to: silent - - alarm: 10min_netisr_backlog_exceeded - on: system.softnet_stat - os: freebsd - hosts: * - lookup: sum -10m unaligned absolute of qdrops - units: packets - every: 1m - warn: $this > 0 - delay: down 1h multiplier 1.5 max 2h - info: number of drops in the last 10min, because sysctl net.route.netisr_maxqlen was exceeded (this can be a cause for dropped packets) - to: sysadmin diff --git a/conf.d/health.d/squid.conf b/conf.d/health.d/squid.conf deleted file mode 100644 index 06cc9678f..000000000 --- a/conf.d/health.d/squid.conf +++ /dev/null @@ -1,14 +0,0 @@ - -# make sure squid is running - -template: squid_last_collected_secs - on: squid.clients_requests - calc: $now - $last_collected_t - units: seconds ago - every: 10s - warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every)) - crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every)) - delay: down 5m multiplier 1.5 max 1h - info: number of seconds since the last successful data collection - to: proxyadmin - diff --git a/conf.d/health.d/stiebeleltron.conf b/conf.d/health.d/stiebeleltron.conf deleted file mode 100644 index e0361eb20..000000000 --- a/conf.d/health.d/stiebeleltron.conf +++ /dev/null @@ -1,11 +0,0 @@ -template: stiebeleltron_last_collected_secs -families: * - on: stiebeleltron.heating.hc1 - calc: $now - $last_collected_t - every: 10s - units: seconds ago - warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every)) - crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every)) - delay: down 5m multiplier 1.5 max 1h - info: number of seconds since the last successful data collection - to: sitemgr diff --git a/conf.d/health.d/swap.conf b/conf.d/health.d/swap.conf deleted file mode 100644 index f920b0807..000000000 --- a/conf.d/health.d/swap.conf +++ /dev/null @@ -1,43 +0,0 @@ - -# you can disable an alarm notification by setting the 'to' line to: silent - - alarm: 30min_ram_swapped_out - on: system.swapio - os: linux freebsd - hosts: * - lookup: sum -30m unaligned absolute of out - # we have to convert KB to MB by dividing $this (i.e. the result of the lookup) with 1024 - calc: $this / 1024 * 100 / ( $system.ram.used + $system.ram.cached + $system.ram.free ) - units: % of RAM - every: 1m - warn: $this > (($status >= $WARNING) ? (10) : (20)) - crit: $this > (($status == $CRITICAL) ? (20) : (30)) - delay: up 0 down 15m multiplier 1.5 max 1h - info: the amount of memory swapped in the last 30 minutes, as a percentage of the system RAM - to: sysadmin - - alarm: ram_in_swap - on: system.swap - os: linux - hosts: * - calc: $used * 100 / ( $system.ram.used + $system.ram.cached + $system.ram.free ) - units: % of RAM - every: 10s - warn: $this > (($status >= $WARNING) ? (15) : (20)) - crit: $this > (($status == $CRITICAL) ? (40) : (50)) - delay: up 30s down 15m multiplier 1.5 max 1h - info: the swap memory used, as a percentage of the system RAM - to: sysadmin - - alarm: used_swap - on: system.swap - os: linux freebsd - hosts: * - calc: $used * 100 / ( $used + $free ) - units: % - every: 10s - warn: $this > (($status >= $WARNING) ? (80) : (90)) - crit: $this > (($status == $CRITICAL) ? 
(90) : (98)) - delay: up 30s down 15m multiplier 1.5 max 1h - info: the percentage of swap memory used - to: sysadmin diff --git a/conf.d/health.d/tcp_conn.conf b/conf.d/health.d/tcp_conn.conf deleted file mode 100644 index 7aa9a9800..000000000 --- a/conf.d/health.d/tcp_conn.conf +++ /dev/null @@ -1,19 +0,0 @@ - -# -# ${tcp_max_connections} may be nan or -1 if the system -# supports dynamic threshold for TCP connections. -# In this case, the alarm will always be zero. -# - - alarm: tcp_connections - on: ipv4.tcpsock - os: linux - hosts: * - calc: (${tcp_max_connections} > 0) ? ( ${connections} * 100 / ${tcp_max_connections} ) : 0 - units: % - every: 10s - warn: $this > (($status >= $WARNING ) ? ( 60 ) : ( 80 )) - crit: $this > (($status >= $CRITICAL) ? ( 80 ) : ( 90 )) - delay: up 0 down 5m multiplier 1.5 max 1h - info: the percentage of IPv4 TCP connections over the max allowed - to: sysadmin diff --git a/conf.d/health.d/tcp_listen.conf b/conf.d/health.d/tcp_listen.conf deleted file mode 100644 index 957964ae4..000000000 --- a/conf.d/health.d/tcp_listen.conf +++ /dev/null @@ -1,27 +0,0 @@ -# ----------------------------------------------------------------------------- -# tcp listen sockets issues - - alarm: 1m_ipv4_tcp_listen_overflows - on: ipv4.tcplistenissues - os: linux freebsd - hosts: * - lookup: sum -60s unaligned absolute of ListenOverflows - units: overflows - every: 10s - crit: $this > 0 - delay: up 0 down 5m multiplier 1.5 max 1h - info: the number of TCP listen socket overflows during the last minute - to: sysadmin - - alarm: 1m_ipv4_tcp_listen_drops - on: ipv4.tcplistenissues - os: linux - hosts: * - lookup: sum -60s unaligned absolute of ListenDrops - units: drops - every: 10s - crit: $this > 0 - delay: up 0 down 5m multiplier 1.5 max 1h - info: the number of TCP listen socket drops during the last minute - to: sysadmin - diff --git a/conf.d/health.d/tcp_mem.conf b/conf.d/health.d/tcp_mem.conf deleted file mode 100644 index 6927d5765..000000000 --- a/conf.d/health.d/tcp_mem.conf +++ /dev/null @@ -1,20 +0,0 @@ -# -# check -# http://blog.tsunanet.net/2011/03/out-of-socket-memory.html -# -# We give a warning when TCP is under memory pressure -# and a critical when TCP is 90% of its upper memory limit -# - - alarm: tcp_memory - on: ipv4.sockstat_tcp_mem - os: linux - hosts: * - calc: ${mem} * 100 / ${tcp_mem_high} - units: % - every: 10s - warn: ${mem} > (($status >= $WARNING ) ? ( ${tcp_mem_pressure} * 0.8 ) : ( ${tcp_mem_pressure} )) - crit: ${mem} > (($status >= $CRITICAL ) ? ( ${tcp_mem_pressure} ) : ( ${tcp_mem_high} * 0.9 )) - delay: up 0 down 5m multiplier 1.5 max 1h - info: the amount of TCP memory as a percentage of its max memory limit - to: sysadmin diff --git a/conf.d/health.d/tcp_orphans.conf b/conf.d/health.d/tcp_orphans.conf deleted file mode 100644 index 280d6590f..000000000 --- a/conf.d/health.d/tcp_orphans.conf +++ /dev/null @@ -1,21 +0,0 @@ - -# -# check -# http://blog.tsunanet.net/2011/03/out-of-socket-memory.html -# -# The kernel may penalize orphans by 2x or even 4x -# so we alarm warning at 25% and critical at 50% -# - - alarm: tcp_orphans - on: ipv4.sockstat_tcp_sockets - os: linux - hosts: * - calc: ${orphan} * 100 / ${tcp_max_orphans} - units: % - every: 10s - warn: $this > (($status >= $WARNING ) ? ( 20 ) : ( 25 )) - crit: $this > (($status >= $CRITICAL) ? 
( 25 ) : ( 50 )) - delay: up 0 down 5m multiplier 1.5 max 1h - info: the percentage of orphan IPv4 TCP sockets over the max allowed (this may lead to too-many-orphans errors) - to: sysadmin diff --git a/conf.d/health.d/tcp_resets.conf b/conf.d/health.d/tcp_resets.conf deleted file mode 100644 index 91dad3c6a..000000000 --- a/conf.d/health.d/tcp_resets.conf +++ /dev/null @@ -1,67 +0,0 @@ - -# you can disable an alarm notification by setting the 'to' line to: silent - -# ----------------------------------------------------------------------------- - - alarm: ipv4_tcphandshake_last_collected_secs - on: ipv4.tcphandshake - os: linux freebsd - hosts: * - calc: $now - $last_collected_t - units: seconds ago - every: 10s - warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every)) - crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every)) - delay: up 0 down 5m multiplier 1.5 max 1h - info: number of seconds since the last successful data collection - to: sysadmin - -# ----------------------------------------------------------------------------- -# tcp resets this host sends - - alarm: 1m_ipv4_tcp_resets_sent - on: ipv4.tcphandshake - os: linux - hosts: * - lookup: average -1m at -10s unaligned absolute of OutRsts - units: tcp resets/s - every: 10s - info: average TCP RESETS this host is sending, over the last minute - - alarm: 10s_ipv4_tcp_resets_sent - on: ipv4.tcphandshake - os: linux - hosts: * - lookup: average -10s unaligned absolute of OutRsts - units: tcp resets/s - every: 10s - warn: $this > ((($1m_ipv4_tcp_resets_sent < 5)?(5):($1m_ipv4_tcp_resets_sent)) * (($status >= $WARNING) ? (1) : (20))) - delay: up 0 down 60m multiplier 1.2 max 2h - options: no-clear-notification - info: average TCP RESETS this host is sending, over the last 10 seconds (this can be an indication that a port scan is made, or that a service running on this host has crashed; clear notification for this alarm will not be sent) - to: sysadmin - -# ----------------------------------------------------------------------------- -# tcp resets this host receives - - alarm: 1m_ipv4_tcp_resets_received - on: ipv4.tcphandshake - os: linux freebsd - hosts: * - lookup: average -1m at -10s unaligned absolute of AttemptFails - units: tcp resets/s - every: 10s - info: average TCP RESETS this host is sending, over the last minute - - alarm: 10s_ipv4_tcp_resets_received - on: ipv4.tcphandshake - os: linux freebsd - hosts: * - lookup: average -10s unaligned absolute of AttemptFails - units: tcp resets/s - every: 10s - warn: $this > ((($1m_ipv4_tcp_resets_received < 5)?(5):($1m_ipv4_tcp_resets_received)) * (($status >= $WARNING) ? (1) : (10))) - delay: up 0 down 60m multiplier 1.2 max 2h - options: no-clear-notification - info: average TCP RESETS this host is receiving, over the last 10 seconds (this can be an indication that a service this host needs, has crashed; clear notification for this alarm will not be sent) - to: sysadmin diff --git a/conf.d/health.d/udp_errors.conf b/conf.d/health.d/udp_errors.conf deleted file mode 100644 index 382b39658..000000000 --- a/conf.d/health.d/udp_errors.conf +++ /dev/null @@ -1,49 +0,0 @@ - -# you can disable an alarm notification by setting the 'to' line to: silent - -# ----------------------------------------------------------------------------- - - alarm: ipv4_udperrors_last_collected_secs - on: ipv4.udperrors - os: linux freebsd - hosts: * - calc: $now - $last_collected_t - units: seconds ago - every: 10s - warn: $this > (($status >= $WARNING) ? 
($update_every) : ( 5 * $update_every)) - crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every)) - delay: up 0 down 5m multiplier 1.5 max 1h - info: number of seconds since the last successful data collection - to: sysadmin - -# ----------------------------------------------------------------------------- -# UDP receive buffer errors - - alarm: 1m_ipv4_udp_receive_buffer_errors - on: ipv4.udperrors - os: linux freebsd - hosts: * - lookup: sum -1m unaligned absolute of RcvbufErrors - units: errors - every: 10s - warn: $this > 0 - crit: $this > 100 - info: number of UDP receive buffer errors during the last minute - delay: up 0 down 60m multiplier 1.2 max 2h - to: sysadmin - -# ----------------------------------------------------------------------------- -# UDP send buffer errors - - alarm: 1m_ipv4_udp_send_buffer_errors - on: ipv4.udperrors - os: linux - hosts: * - lookup: sum -1m unaligned absolute of SndbufErrors - units: errors - every: 10s - warn: $this > 0 - crit: $this > 100 - info: number of UDP send buffer errors during the last minute - delay: up 0 down 60m multiplier 1.2 max 2h - to: sysadmin diff --git a/conf.d/health.d/varnish.conf b/conf.d/health.d/varnish.conf deleted file mode 100644 index cca7446b4..000000000 --- a/conf.d/health.d/varnish.conf +++ /dev/null @@ -1,9 +0,0 @@ - alarm: varnish_last_collected - on: varnish.uptime - calc: $now - $last_collected_t - units: seconds ago - every: 10s - warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every)) - crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every)) - info: number of seconds since the last successful data collection - to: sysadmin diff --git a/conf.d/health.d/web_log.conf b/conf.d/health.d/web_log.conf deleted file mode 100644 index d8be88b47..000000000 --- a/conf.d/health.d/web_log.conf +++ /dev/null @@ -1,163 +0,0 @@ - -# make sure we can collect web log data - -template: last_collected_secs - on: web_log.response_codes -families: * - calc: $now - $last_collected_t - units: seconds ago - every: 10s - warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every)) - crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every)) - delay: down 5m multiplier 1.5 max 1h - info: number of seconds since the last successful data collection - to: webmaster - - -# ----------------------------------------------------------------------------- -# high level response code alarms - -# the following alarms trigger only when there are enough data. -# we assume there are enough data when: -# -# $1m_requests > 120 -# -# i.e. when there are at least 120 requests during the last minute - -template: 1m_requests - on: web_log.response_statuses -families: * - lookup: sum -1m unaligned - calc: ($this == 0)?(1):($this) - units: requests - every: 10s - info: the sum of all HTTP requests over the last minute - -template: 1m_successful - on: web_log.response_statuses -families: * - lookup: sum -1m unaligned of successful_requests - calc: $this * 100 / $1m_requests - units: % - every: 10s - warn: ($1m_requests > 120) ? ($this < (($status >= $WARNING ) ? ( 95 ) : ( 85 )) ) : ( 0 ) - crit: ($1m_requests > 120) ? ($this < (($status == $CRITICAL) ? 
( 85 ) : ( 75 )) ) : ( 0 ) - delay: up 2m down 15m multiplier 1.5 max 1h - info: the ratio of successful HTTP responses (1xx, 2xx, 304) over the last minute - to: webmaster - -template: 1m_redirects - on: web_log.response_statuses -families: * - lookup: sum -1m unaligned of redirects - calc: $this * 100 / $1m_requests - units: % - every: 10s - warn: ($1m_requests > 120) ? ($this > (($status >= $WARNING ) ? ( 1 ) : ( 20 )) ) : ( 0 ) - crit: ($1m_requests > 120) ? ($this > (($status == $CRITICAL) ? ( 20 ) : ( 30 )) ) : ( 0 ) - delay: up 2m down 15m multiplier 1.5 max 1h - info: the ratio of HTTP redirects (3xx except 304) over the last minute - to: webmaster - -template: 1m_bad_requests - on: web_log.response_statuses -families: * - lookup: sum -1m unaligned of bad_requests - calc: $this * 100 / $1m_requests - units: % - every: 10s - warn: ($1m_requests > 120) ? ($this > (($status >= $WARNING) ? ( 10 ) : ( 30 )) ) : ( 0 ) - crit: ($1m_requests > 120) ? ($this > (($status == $CRITICAL) ? ( 30 ) : ( 50 )) ) : ( 0 ) - delay: up 2m down 15m multiplier 1.5 max 1h - info: the ratio of HTTP bad requests (4xx) over the last minute - to: webmaster - -template: 1m_internal_errors - on: web_log.response_statuses -families: * - lookup: sum -1m unaligned of server_errors - calc: $this * 100 / $1m_requests - units: % - every: 10s - warn: ($1m_requests > 120) ? ($this > (($status >= $WARNING) ? ( 1 ) : ( 2 )) ) : ( 0 ) - crit: ($1m_requests > 120) ? ($this > (($status == $CRITICAL) ? ( 2 ) : ( 5 )) ) : ( 0 ) - delay: up 2m down 15m multiplier 1.5 max 1h - info: the ratio of HTTP internal server errors (5xx), over the last minute - to: webmaster - - -# ----------------------------------------------------------------------------- -# web slow - -# the following alarms trigger only when there are enough data. -# we assume there are enough data when: -# -# $1m_requests > 120 -# -# i.e. when there are at least 120 requests during the last minute - -template: 10m_response_time - on: web_log.response_time -families: * - lookup: average -10m unaligned of avg - units: ms - every: 30s - info: the average time to respond to HTTP requests, over the last 10 minutes - -template: web_slow - on: web_log.response_time -families: * - lookup: average -1m unaligned of avg - units: ms - every: 10s - green: 500 - red: 1000 - warn: ($1m_requests > 120) ? ($this > $green && $this > ($10m_response_time * 2) ) : ( 0 ) - crit: ($1m_requests > 120) ? ($this > $red && $this > ($10m_response_time * 4) ) : ( 0 ) - delay: down 15m multiplier 1.5 max 1h - info: the average time to respond to HTTP requests, over the last 1 minute - options: no-clear-notification - to: webmaster - -# ----------------------------------------------------------------------------- -# web too many or too few requests - -# the following alarms trigger only when there are enough data. -# we assume there are enough data when: -# -# $5m_successful_old > 120 -# -# i.e. 
when there were at least 120 requests during the 5 minutes starting -# at -10m and ending at -5m - -template: 5m_successful_old - on: web_log.response_statuses -families: * - lookup: average -5m at -5m unaligned of successful_requests - units: requests/s - every: 30s - info: average rate of successful HTTP requests over the last 5 minutes - -template: 5m_successful - on: web_log.response_statuses -families: * - lookup: average -5m unaligned of successful_requests - units: requests/s - every: 30s - info: average successful HTTP requests over the last 5 minutes - -template: 5m_requests_ratio - on: web_log.response_codes -families: * - calc: ($5m_successful_old > 0)?($5m_successful * 100 / $5m_successful_old):(100) - units: % - every: 30s - warn: ($5m_successful_old > 120) ? ($this > 200 OR $this < 50) : (0) - crit: ($5m_successful_old > 120) ? ($this > 400 OR $this < 25) : (0) - delay: down 15m multiplier 1.5 max 1h -options: no-clear-notification - info: the percentage of successful web requests over the last 5 minutes, \ - compared with the previous 5 minutes \ - (clear notification for this alarm will not be sent) - to: webmaster - diff --git a/conf.d/health.d/zfs.conf b/conf.d/health.d/zfs.conf deleted file mode 100644 index af73824e6..000000000 --- a/conf.d/health.d/zfs.conf +++ /dev/null @@ -1,10 +0,0 @@ - - alarm: zfs_memory_throttle - on: zfs.memory_ops - lookup: sum -10m unaligned absolute of throttled - units: events - every: 1m - warn: $this > 0 - delay: down 1h multiplier 1.5 max 2h - info: the number of times ZFS had to limit the ARC growth in the last 10 minutes - to: sysadmin diff --git a/conf.d/health_alarm_notify.conf b/conf.d/health_alarm_notify.conf deleted file mode 100755 index 0a95931ec..000000000 --- a/conf.d/health_alarm_notify.conf +++ /dev/null @@ -1,708 +0,0 @@ -# Configuration for alarm notifications -# -# This configuration is used by: alarm-notify.sh -# changes take effect immediately (the next alarm will use them). -# -# alarm-notify.sh can send: -# - e-mails (using the sendmail command), -# - push notifications to your mobile phone (pushover.net), -# - messages to your slack team (slack.com), -# - messages to your alerta server (alerta.io), -# - messages to your flock team (flock.com), -# - messages to your discord guild (discordapp.com), -# - messages to your telegram chat / group chat (telegram.org) -# - sms messages to your cell phone or any sms enabled device (twilio.com) -# - sms messages to your cell phone or any sms enabled device (messagebird.com) -# - notifications to users on pagerduty.com -# - messages to your irc channel on your selected network -# -# The 'to' line given at netdata alarms defines a *role*, so that many -# people can be notified for each role. -# -# This file is a BASH script itself. -# -# -#------------------------------------------------------------------------------ -# proxy configuration -# -# If you need to send curl based notifications (pushover, pushbullet, slack, alerta, -# flock, discord, telegram) via a proxy, set these to your proxy address: -#export http_proxy="http://10.0.0.1:3128/" -#export https_proxy="http://10.0.0.1:3128/" - - -#------------------------------------------------------------------------------ -# notifications images -# -# Images in notifications need to be downloaded from an Internet facing site. -# To allow notification providers fetch the icons/images, by default we set -# the URL of the global public netdata registry. 
-# If you have an Internet facing netdata (or you have copied the images/ folder -# of netdata to your web server), set its URL here, to fetch the notification -# images from it. -#images_base_url="http://my.public.netdata.server:19999" - - -#------------------------------------------------------------------------------ -# external commands - -# The full path to the sendmail command. -# If empty, the system $PATH will be searched for it. -# If not found, email notifications will be disabled (silently). -sendmail="" - -# The full path of the curl command. -# If empty, the system $PATH will be searched for it. -# If not found, most notifications will be silently disabled. -curl="" - -# The full path of the nc command. -# If empty, the system $PATH will be searched for it. -# If not found, irc notifications will be silently disabled. -nc="" - -#------------------------------------------------------------------------------ -# extra options for external commands -# -# In some cases, you may need to change what options get passed to an -# external command. Such cases are covered here. - -# Extra options to pass to curl. In most cases, you shouldn't need to add anything -# to this. If you're having issues with HTTPS connections, you might try adding -# '--insecure' here, but be warned that it will make it much easier for -# third-parties to block notification delivery, and may allow disclosure -# of potentially sensitive information. -#curl_options="--insecure" - -#------------------------------------------------------------------------------ -# NOTE ABOUT RECIPIENTS -# -# When you define recipients (all types): -# -# - emails addresses -# - pushover user tokens -# - telegram chat ids -# - slack channels -# - alerta environment -# - flock rooms -# - discord channels -# - hipchat rooms -# - sms phone numbers -# - pagerduty.com (pd) services -# - irc channels -# -# You can append |critical to limit the notifications to be sent. -# -# In these examples, the first recipient receives all the alarms -# while the second one receives only the critical ones: -# -# email : "user1@example.com user2@example.com|critical" -# pushover : "2987343...9437837 8756278...2362736|critical" -# telegram : "111827421 112746832|critical" -# slack : "alarms disasters|critical" -# alerta : "alarms disasters|critical" -# flock : "alarms disasters|critical" -# discord : "alarms disasters|critical" -# twilio : "+15555555555 +17777777777|critical" -# messagebird: "+15555555555 +17777777777|critical" -# kavenegar : "09155555555 09177777777|critical" -# pd : "<pd_service_key_1> <pd_service_key_2>|critical" -# irc : "<irc_channel_1> <irc_channel_2>|critical" -# -# If a recipient is set to empty string, the default recipient of the given -# notification method (email, pushover, telegram, slack, alerta, etc) will be used. -# To disable a notification, use the recipient called: disabled -# This works for all notification methods (including the default recipients). - - -#------------------------------------------------------------------------------ -# email global notification options - -# multiple recipients can be given like this: -# "admin1@example.com admin2@example.com ..." 
- -# the email address sending email notifications -# the default is the system user netdata runs as (usually: netdata) -# The following formats are supported: -# EMAIL_SENDER="user@domain" -# EMAIL_SENDER="User Name <user@domain>" -# EMAIL_SENDER="'User Name' <user@domain>" -# EMAIL_SENDER="\"User Name\" <user@domain>" -EMAIL_SENDER="" - -# enable/disable sending emails -SEND_EMAIL="YES" - -# if a role recipient is not configured, an email will be send to: -DEFAULT_RECIPIENT_EMAIL="root" -# to receive only critical alarms, set it to "root|critical" - -# Optionally specify the encoding to list in the Content-Type header. -# This doesn't change what encoding the e-mail is sent with, just what -# the headers say it was encoded as. -# This shouldn't need to be changed as it will almost always be -# autodetected from the environment. -#EMAIL_CHARSET="UTF-8" - - -#------------------------------------------------------------------------------ -# pushover (pushover.net) global notification options - -# multiple recipients can be given like this: -# "USERTOKEN1 USERTOKEN2 ..." - -# enable/disable sending pushover notifications -SEND_PUSHOVER="YES" - -# Login to pushover.net to get your pushover app token. -# You need only one for all your netdata servers (or you can have one for -# each of your netdata - your call). -# Without an app token, netdata cannot send pushover notifications. -PUSHOVER_APP_TOKEN="" - -# if a role's recipients are not configured, a notification will be send to -# this pushover user token (empty = do not send a notification for unconfigured -# roles): -DEFAULT_RECIPIENT_PUSHOVER="" - - -#------------------------------------------------------------------------------ -# pushbullet (pushbullet.com) push notification options - -# multiple recipients can be given like this: -# "user1@email.com user2@mail.com" - -# enable/disable sending pushbullet notifications -SEND_PUSHBULLET="YES" - -# Signup and Login to pushbullet.com -# To get your Access Token, go to https://www.pushbullet.com/#settings/account -# Create a new access token and paste it below. -# Then just set the recipients' emails. -# Please note that the if the email in the DEFAULT_RECIPIENT_PUSHBULLET does -# not have a pushbullet account, the pushbullet service will send an email -# to that address instead. - -# Without an access token, netdata cannot send pushbullet notifications. -PUSHBULLET_ACCESS_TOKEN="" -DEFAULT_RECIPIENT_PUSHBULLET="" - -# Device iden of the sending device. Optional. -PUSHBULLET_SOURCE_DEVICE="" - - -#------------------------------------------------------------------------------ -# Twilio (twilio.com) SMS options - -# multiple recipients can be given like this: -# "+15555555555 +17777777777" - -# enable/disable sending twilio SMS -SEND_TWILIO="YES" - -# Signup for free trial and select a SMS capable Twilio Number -# To get your Account SID and Token, go to https://www.twilio.com/console -# Place your sid, token and number below. -# Then just set the recipients' phone numbers. -# The trial account is only allowed to use the number specified when set up. - -# Without an account sid and token, netdata cannot send Twilio text messages. 
-TWILIO_ACCOUNT_SID="" -TWILIO_ACCOUNT_TOKEN="" -TWILIO_NUMBER="" -DEFAULT_RECIPIENT_TWILIO="" - - -#------------------------------------------------------------------------------ -# Messagebird (messagebird.com) SMS options - -# multiple recipients can be given like this: -# "+15555555555 +17777777777" - -# enable/disable sending messagebird SMS -SEND_MESSAGEBIRD="YES" - -# to get an access key, create a free account at https://www.messagebird.com -# verify and activate the account (no CC info needed) -# login to your account and enter your phonenumber to get some free credits -# to get the API key, click on 'API' in the sidebar, then 'API Access (REST)' -# click 'Add access key' and fill in data (you want a live key to send SMS) - -# Without an access key, netdata cannot send Messagebird text messages. -MESSAGEBIRD_ACCESS_KEY="" -MESSAGEBIRD_NUMBER="" -DEFAULT_RECIPIENT_MESSAGEBIRD="" - - -#------------------------------------------------------------------------------ -# Kavenegar (Kavenegar.com) SMS options - -# multiple recipients can be given like this: -# "09155555555 09177777777" - -# enable/disable sending kavenegar SMS -SEND_KAVENEGAR="YES" - -# to get an access key, after selecting and purchasing your desired service -# at http://kavenegar.com/pricing.html -# login to your account, go to your dashboard and my account are -# https://panel.kavenegar.com/Client/setting/account from API Key -# copy your api key. You can generate new API Key too. -# You can find and select kevenegar sender number from this place. - -# Without an API key, netdata cannot send KAVENEGAR text messages. -KAVENEGAR_API_KEY="" -KAVENEGAR_SENDER="" -DEFAULT_RECIPIENT_KAVENEGAR="" - - -#------------------------------------------------------------------------------ -# telegram (telegram.org) global notification options - -# To get your chat ID send the command /my_id to telegram bot @get_id. -# Users also need to open a query with the bot (see below). - -# note: multiple recipients can be given like this: -# "CHAT_ID_1 CHAT_ID_2 ..." - -# enable/disable sending telegram messages -SEND_TELEGRAM="YES" - -# Contact the bot @BotFather to create a new bot and receive a bot token. -# Without it, netdata cannot send telegram messages. -TELEGRAM_BOT_TOKEN="" - -# If a role's recipients are not configured, a message will be send to -# this chat id (empty = do not send a notification for unconfigured roles): -DEFAULT_RECIPIENT_TELEGRAM="" - - -#------------------------------------------------------------------------------ -# slack (slack.com) global notification options - -# multiple recipients can be given like this: -# "CHANNEL1 CHANNEL2 ..." - -# enable/disable sending slack notifications -SEND_SLACK="YES" - -# Login to slack.com and create an incoming webhook. You need only one for all -# your netdata servers (or you can have one for each of your netdata). -# Without it, netdata cannot send slack notifications. -# Get yours from: https://api.slack.com/incoming-webhooks -SLACK_WEBHOOK_URL="" - -# if a role's recipients are not configured, a notification will be send to -# this slack channel (empty = do not send a notification for unconfigured -# roles): -DEFAULT_RECIPIENT_SLACK="" - - -#------------------------------------------------------------------------------ -# alerta (alerta.io) global notification options - -# multiple recipients (Environments) can be given like this: -# "Production Development ..." 
- -# enable/disable sending alerta notifications -SEND_ALERTA="YES" - -# here set your alerta server API url -# this is the API url you defined when installed Alerta server, -# it is the same for all users. Do not include last slash. -# ALERTA_WEBHOOK_URL="https://<server>/alerta/api" -ALERTA_WEBHOOK_URL="" - -# Login with an administrative user to you Alerta server and create an API KEY -# with write permissions. -ALERTA_API_KEY="" - -# you can define environments in /etc/alertad.conf option ALLOWED_ENVIRONMENTS -# standard environments are Production and Development -# if a role's recipients are not configured, a notification will be send to -# this Environment (empty = do not send a notification for unconfigured roles): -DEFAULT_RECIPIENT_ALERTA="" - - -#------------------------------------------------------------------------------ -# flock (flock.com) global notification options - -# enable/disable sending flock notifications -SEND_FLOCK="YES" - -# Login to flock.com and create an incoming webhook. You need only one for all -# your netdata servers (or you can have one for each of your netdata). -# Without it, netdata cannot send flock notifications. -FLOCK_WEBHOOK_URL="" - -# if a role recipient is not configured, no notification will be sent -DEFAULT_RECIPIENT_FLOCK="" - - -#------------------------------------------------------------------------------ -# discord (discordapp.com) global notification options - -# multiple recipients can be given like this: -# "CHANNEL1 CHANNEL2 ..." - -# enable/disable sending discord notifications -SEND_DISCORD="YES" - -# Create a webhook by following the official documentation - -# https://support.discordapp.com/hc/en-us/articles/228383668-Intro-to-Webhooks -DISCORD_WEBHOOK_URL="" - -# if a role's recipients are not configured, a notification will be send to -# this discord channel (empty = do not send a notification for unconfigured -# roles): -DEFAULT_RECIPIENT_DISCORD="" - - -#------------------------------------------------------------------------------ -# hipchat global notification options - -# multiple recipients can be given like this: -# "ROOM1 ROOM2 ..." - -# enable/disable sending hipchat notifications -SEND_HIPCHAT="YES" - -# define hipchat server -HIPCHAT_SERVER="api.hipchat.com" - -# api.hipchat.com authorization token -# Without this, netdata cannot send hipchat notifications. -HIPCHAT_AUTH_TOKEN="" - -# if a role's recipients are not configured, a notification will be send to -# this hipchat room (empty = do not send a notification for unconfigured -# roles): -DEFAULT_RECIPIENT_HIPCHAT="" - - -#------------------------------------------------------------------------------ -# kafka notification options - -# enable/disable sending kafka notifications -SEND_KAFKA="YES" - -# The URL to POST kafka alarm data to. It should be the full URL. -KAFKA_URL="" - -# The IP to be used in the kafka message as the sender. -KAFKA_SENDER_IP="" - - -#------------------------------------------------------------------------------ -# pagerduty.com notification options -# -# pagerduty.com notifications require the pagerduty agent to be installed and -# a "Generic API" pagerduty service. -# https://www.pagerduty.com/docs/guides/agent-install-guide/ - -# multiple recipients can be given like this: -# "<pd_service_key_1> <pd_service_key_2> ..." - -# enable/disable sending pagerduty notifications -SEND_PD="YES" - -# if a role's recipients are not configured, a notification will be sent to -# the "General API" pagerduty.com service that uses this service key. 
-# (empty = do not send a notification for unconfigured roles): -DEFAULT_RECIPIENT_PD="" - - -#------------------------------------------------------------------------------ -# irc notification options -# -# irc notifications require only the nc utility to be installed. - -# multiple recipients can be given like this: -# "<irc_channel_1> <irc_channel_2> ..." - -# enable/disable sending irc notifications -SEND_IRC="YES" - -# if a role's recipients are not configured, a notification will not be sent. -# (empty = do not send a notification for unconfigured roles): -DEFAULT_RECIPIENT_IRC="" - -# The irc network to which the recipients belong. It must be the full network. -# e.g. "irc.freenode.net" -IRC_NETWORK="" - -# The irc nickname which is required to send the notification. It must not be -# an already registered name as the connection's MODE is defined as a 'guest'. -IRC_NICKNAME="" - -# The irc realname which is required in order to make the connection and is an -# extra identifier. -IRC_REALNAME="" - - -#------------------------------------------------------------------------------ -# custom notifications -# - -# enable/disable sending custom notifications -SEND_CUSTOM="YES" - -# if a role's recipients are not configured, use the following. -# (empty = do not send a notification for unconfigured roles) -DEFAULT_RECIPIENT_CUSTOM="" - -# The custom_sender() is a custom function to do whatever you need to do -custom_sender() { - # variables you can use: - # ${host} the host generated this event - # ${url_host} same as ${host} but URL encoded - # ${unique_id} the unique id of this event - # ${alarm_id} the unique id of the alarm that generated this event - # ${event_id} the incremental id of the event, for this alarm id - # ${when} the timestamp this event occurred - # ${name} the name of the alarm, as given in netdata health.d entries - # ${url_name} same as ${name} but URL encoded - # ${chart} the name of the chart (type.id) - # ${url_chart} same as ${chart} but URL encoded - # ${family} the family of the chart - # ${url_family} same as ${family} but URL encoded - # ${status} the current status : REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL - # ${old_status} the previous status: REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL - # ${value} the current value of the alarm - # ${old_value} the previous value of the alarm - # ${src} the line number and file the alarm has been configured - # ${duration} the duration in seconds of the previous alarm state - # ${duration_txt} same as ${duration} for humans - # ${non_clear_duration} the total duration in seconds this is/was non-clear - # ${non_clear_duration_txt} same as ${non_clear_duration} for humans - # ${units} the units of the value - # ${info} a short description of the alarm - # ${value_string} friendly value (with units) - # ${old_value_string} friendly old value (with units) - # ${image} the URL of an image to represent the status of the alarm - # ${color} a color in #AABBCC format for the alarm - # ${goto_url} the URL the user can click to see the netdata dashboard - - # these are more human friendly: - # ${alarm} like "name = value units" - # ${status_message} like "needs attention", "recovered", "is critical" - # ${severity} like "Escalated to CRITICAL", "Recovered from WARNING" - # ${raised_for} like "(alarm was raised for 10 minutes)" - - # example human readable SMS - local msg="${host} ${status_message}: ${alarm} ${raised_for}" - - # limit it to 160 characters and encode it for use in a URL - urlencode 
"${msg:0:160}" >/dev/null; msg="${REPLY}" - - # a space separated list of the recipients to send alarms to - to="${1}" - - info "not sending custom notification to ${to}, for ${status} of '${host}.${chart}.${name}' - custom_sender() is not configured." -} - - -############################################################################### -# RECIPIENTS PER ROLE - -# ----------------------------------------------------------------------------- -# generic system alarms -# CPU, disks, network interfaces, entropy, etc - -role_recipients_email[sysadmin]="${DEFAULT_RECIPIENT_EMAIL}" - -role_recipients_pushover[sysadmin]="${DEFAULT_RECIPIENT_PUSHOVER}" - -role_recipients_pushbullet[sysadmin]="${DEFAULT_RECIPIENT_PUSHBULLET}" - -role_recipients_telegram[sysadmin]="${DEFAULT_RECIPIENT_TELEGRAM}" - -role_recipients_slack[sysadmin]="${DEFAULT_RECIPIENT_SLACK}" - -role_recipients_alerta[sysadmin]="${DEFAULT_RECIPIENT_ALERTA}" - -role_recipients_flock[sysadmin]="${DEFAULT_RECIPIENT_FLOCK}" - -role_recipients_discord[sysadmin]="${DEFAULT_RECIPIENT_DISCORD}" - -role_recipients_hipchat[sysadmin]="${DEFAULT_RECIPIENT_HIPCHAT}" - -role_recipients_twilio[sysadmin]="${DEFAULT_RECIPIENT_TWILIO}" - -role_recipients_messagebird[sysadmin]="${DEFAULT_RECIPIENT_MESSAGEBIRD}" - -role_recipients_kavenegar[sysadmin]="${DEFAULT_RECIPIENT_KAVENEGAR}" - -role_recipients_pd[sysadmin]="${DEFAULT_RECIPIENT_PD}" - -role_recipients_irc[sysadmin]="${DEFAULT_RECIPIENT_IRC}" - -role_recipients_custom[sysadmin]="${DEFAULT_RECIPIENT_CUSTOM}" - -# ----------------------------------------------------------------------------- -# DNS related alarms - -role_recipients_email[domainadmin]="${DEFAULT_RECIPIENT_EMAIL}" - -role_recipients_pushover[domainadmin]="${DEFAULT_RECIPIENT_PUSHOVER}" - -role_recipients_pushbullet[domainadmin]="${DEFAULT_RECIPIENT_PUSHBULLET}" - -role_recipients_telegram[domainadmin]="${DEFAULT_RECIPIENT_TELEGRAM}" - -role_recipients_slack[domainadmin]="${DEFAULT_RECIPIENT_SLACK}" - -role_recipients_alerta[domainadmin]="${DEFAULT_RECIPIENT_ALERTA}" - -role_recipients_flock[domainadmin]="${DEFAULT_RECIPIENT_FLOCK}" - -role_recipients_discord[domainadmin]="${DEFAULT_RECIPIENT_DISCORD}" - -role_recipients_hipchat[domainadmin]="${DEFAULT_RECIPIENT_HIPCHAT}" - -role_recipients_twilio[domainadmin]="${DEFAULT_RECIPIENT_TWILIO}" - -role_recipients_messagebird[domainadmin]="${DEFAULT_RECIPIENT_MESSAGEBIRD}" - -role_recipients_kavenegar[domainadmin]="${DEFAULT_RECIPIENT_KAVENEGAR}" - -role_recipients_pd[domainadmin]="${DEFAULT_RECIPIENT_PD}" - -role_recipients_irc[domainadmin]="${DEFAULT_RECIPIENT_IRC}" - -role_recipients_custom[domainadmin]="${DEFAULT_RECIPIENT_CUSTOM}" - -# ----------------------------------------------------------------------------- -# database servers alarms -# mysql, redis, memcached, postgres, etc - -role_recipients_email[dba]="${DEFAULT_RECIPIENT_EMAIL}" - -role_recipients_pushover[dba]="${DEFAULT_RECIPIENT_PUSHOVER}" - -role_recipients_pushbullet[dba]="${DEFAULT_RECIPIENT_PUSHBULLET}" - -role_recipients_telegram[dba]="${DEFAULT_RECIPIENT_TELEGRAM}" - -role_recipients_slack[dba]="${DEFAULT_RECIPIENT_SLACK}" - -role_recipients_alerta[dba]="${DEFAULT_RECIPIENT_ALERTA}" - -role_recipients_flock[dba]="${DEFAULT_RECIPIENT_FLOCK}" - -role_recipients_discord[dba]="${DEFAULT_RECIPIENT_DISCORD}" - -role_recipients_hipchat[dba]="${DEFAULT_RECIPIENT_HIPCHAT}" - -role_recipients_twilio[dba]="${DEFAULT_RECIPIENT_TWILIO}" - -role_recipients_messagebird[dba]="${DEFAULT_RECIPIENT_MESSAGEBIRD}" - 
-role_recipients_kavenegar[dba]="${DEFAULT_RECIPIENT_KAVENEGAR}" - -role_recipients_pd[dba]="${DEFAULT_RECIPIENT_PD}" - -role_recipients_irc[dba]="${DEFAULT_RECIPIENT_IRC}" - -role_recipients_custom[dba]="${DEFAULT_RECIPIENT_CUSTOM}" - -# ----------------------------------------------------------------------------- -# web servers alarms -# apache, nginx, lighttpd, etc - -role_recipients_email[webmaster]="${DEFAULT_RECIPIENT_EMAIL}" - -role_recipients_pushover[webmaster]="${DEFAULT_RECIPIENT_PUSHOVER}" - -role_recipients_pushbullet[webmaster]="${DEFAULT_RECIPIENT_PUSHBULLET}" - -role_recipients_telegram[webmaster]="${DEFAULT_RECIPIENT_TELEGRAM}" - -role_recipients_slack[webmaster]="${DEFAULT_RECIPIENT_SLACK}" - -role_recipients_alerta[webmaster]="${DEFAULT_RECIPIENT_ALERTA}" - -role_recipients_flock[webmaster]="${DEFAULT_RECIPIENT_FLOCK}" - -role_recipients_discord[webmaster]="${DEFAULT_RECIPIENT_DISCORD}" - -role_recipients_hipchat[webmaster]="${DEFAULT_RECIPIENT_HIPCHAT}" - -role_recipients_twilio[webmaster]="${DEFAULT_RECIPIENT_TWILIO}" - -role_recipients_messagebird[webmaster]="${DEFAULT_RECIPIENT_MESSAGEBIRD}" - -role_recipients_kavenegar[webmaster]="${DEFAULT_RECIPIENT_KAVENEGAR}" - -role_recipients_pd[webmaster]="${DEFAULT_RECIPIENT_PD}" - -role_recipients_irc[webmaster]="${DEFAULT_RECIPIENT_IRC}" - -role_recipients_custom[webmaster]="${DEFAULT_RECIPIENT_CUSTOM}" - -# ----------------------------------------------------------------------------- -# proxy servers alarms -# squid, etc - -role_recipients_email[proxyadmin]="${DEFAULT_RECIPIENT_EMAIL}" - -role_recipients_pushover[proxyadmin]="${DEFAULT_RECIPIENT_PUSHOVER}" - -role_recipients_pushbullet[proxyadmin]="${DEFAULT_RECIPIENT_PUSHBULLET}" - -role_recipients_telegram[proxyadmin]="${DEFAULT_RECIPIENT_TELEGRAM}" - -role_recipients_slack[proxyadmin]="${DEFAULT_RECIPIENT_SLACK}" - -role_recipients_alerta[proxyadmin]="${DEFAULT_RECIPIENT_ALERTA}" - -role_recipients_flock[proxyadmin]="${DEFAULT_RECIPIENT_FLOCK}" - -role_recipients_discord[proxyadmin]="${DEFAULT_RECIPIENT_DISCORD}" - -role_recipients_hipchat[proxyadmin]="${DEFAULT_RECIPIENT_HIPCHAT}" - -role_recipients_twilio[proxyadmin]="${DEFAULT_RECIPIENT_TWILIO}" - -role_recipients_messagebird[proxyadmin]="${DEFAULT_RECIPIENT_MESSAGEBIRD}" - -role_recipients_kavenegar[proxyadmin]="${DEFAULT_RECIPIENT_KAVENEGAR}" - -role_recipients_pd[proxyadmin]="${DEFAULT_RECIPIENT_PD}" - -role_recipients_irc[proxyadmin]="${DEFAULT_RECIPIENT_IRC}" - -role_recipients_custom[proxyadmin]="${DEFAULT_RECIPIENT_CUSTOM}" - -# ----------------------------------------------------------------------------- -# peripheral devices -# UPS, photovoltaics, etc - -role_recipients_email[sitemgr]="${DEFAULT_RECIPIENT_EMAIL}" - -role_recipients_pushover[sitemgr]="${DEFAULT_RECIPIENT_PUSHOVER}" - -role_recipients_pushbullet[sitemgr]="${DEFAULT_RECIPIENT_PUSHBULLET}" - -role_recipients_telegram[sitemgr]="${DEFAULT_RECIPIENT_TELEGRAM}" - -role_recipients_slack[sitemgr]="${DEFAULT_RECIPIENT_SLACK}" - -role_recipients_alerta[sitemgr]="${DEFAULT_RECIPIENT_ALERTA}" - -role_recipients_flock[sitemgr]="${DEFAULT_RECIPIENT_FLOCK}" - -role_recipients_discord[sitemgr]="${DEFAULT_RECIPIENT_DISCORD}" - -role_recipients_hipchat[sitemgr]="${DEFAULT_RECIPIENT_HIPCHAT}" - -role_recipients_twilio[sitemgr]="${DEFAULT_RECIPIENT_TWILIO}" - -role_recipients_messagebird[sitemgr]="${DEFAULT_RECIPIENT_MESSAGEBIRD}" - -role_recipients_kavenegar[sitemgr]="${DEFAULT_RECIPIENT_KAVENEGAR}" - -role_recipients_pd[sitemgr]="${DEFAULT_RECIPIENT_PD}" - 
-role_recipients_custom[sitemgr]="${DEFAULT_RECIPIENT_CUSTOM}" - diff --git a/conf.d/health_email_recipients.conf b/conf.d/health_email_recipients.conf deleted file mode 100644 index f56c6c64a..000000000 --- a/conf.d/health_email_recipients.conf +++ /dev/null @@ -1,2 +0,0 @@ -# OBSOLETE FILE -# REPLACED WITH health_alarm_notify.conf diff --git a/conf.d/node.d.conf b/conf.d/node.d.conf deleted file mode 100644 index 95aec99ce..000000000 --- a/conf.d/node.d.conf +++ /dev/null @@ -1,39 +0,0 @@ -{
- "___help_1": "Default options for node.d.plugin - this is a JSON file.",
- "___help_2": "Use http://jsonlint.com/ to verify it is valid JSON.",
- "___help_3": "------------------------------------------------------------",
-
- "___help_update_every": "Minimum data collection frequency for all node.d/*.node.js modules. Set it to 0 to inherit it from netdata.",
- "update_every": 0,
-
- "___help_modules_enable_autodetect": "Enable/disable auto-detection for node.d/*.node.js modules that support it.",
- "modules_enable_autodetect": true,
-
- "___help_modules_enable_all": "Enable all node.d/*.node.js modules by default.",
- "modules_enable_all": true,
-
- "___help_modules": "Enable/disable the following modules. Give only XXX for node.d/XXX.node.js",
- "modules": {
- "named": {
- "enabled": true
- },
- "sma_webbox": {
- "enabled": true
- },
- "snmp": {
- "enabled": true
- }
- },
-
- "___help_paths": "Paths that control the operation of node.d.plugin",
- "paths": {
- "___help_plugins": "The full path to the modules javascript node.d/ directory",
- "plugins": null,
-
- "___help_config": "The full path to the modules configs node.d/ directory",
- "config": null,
-
- "___help_modules": "Array of paths to add to node.js when searching for node_modules",
- "modules": []
- }
-}
diff --git a/conf.d/node.d/README.md b/conf.d/node.d/README.md deleted file mode 100644 index 45e3d02a6..000000000 --- a/conf.d/node.d/README.md +++ /dev/null @@ -1,7 +0,0 @@ -`node.d.plugin` modules accept configuration in JSON format. - -Unfortunately, JSON files do not accept comments. So, the best way to describe them is to have markdown text files with instructions. - -JSON has a very strict formatting. If you get errors from netdata at `/var/log/netdata/error.log` that a certain configuration file cannot be loaded, we suggest to verify it at [http://jsonlint.com/](http://jsonlint.com/). - -The files in this directory, provide usable examples for configuring each `node.d.plugin` module. diff --git a/conf.d/node.d/fronius.conf.md b/conf.d/node.d/fronius.conf.md deleted file mode 100644 index 622086b27..000000000 --- a/conf.d/node.d/fronius.conf.md +++ /dev/null @@ -1,67 +0,0 @@ -[Fronius Symo 8.2](https://www.fronius.com/en/photovoltaics/products/all-products/inverters/fronius-symo/fronius-symo-8-2-3-m) - -The plugin has been tested with a single inverter, namely Fronius Symo 8.2-3-M: - -- Datalogger version: 240.162630 -- Software version: 3.7.4-6 -- Hardware version: 2.4D - -Other products and versions may work, but without any guarantees. - -Example netdata configuration for node.d/fronius.conf. Copy this section to fronius.conf and change name/ip. -The module supports any number of servers. Sometimes there is a lag when collecting every 3 seconds, so 5 should be okay too. You can modify this per server. -```json -{ - "enable_autodetect": false, - "update_every": 5, - "servers": [ - { - "name": "solar", - "hostname": "symo.ip.or.dns", - "update_every": 5, - "api_path": "/solar_api/v1/GetPowerFlowRealtimeData.fcgi" - } - ] -} -``` - -The output of /solar_api/v1/GetPowerFlowRealtimeData.fcgi looks like this: -```json -{ - "Head" : { - "RequestArguments" : {}, - "Status" : { - "Code" : 0, - "Reason" : "", - "UserMessage" : "" - }, - "Timestamp" : "2017-07-05T12:35:12+02:00" - }, - "Body" : { - "Data" : { - "Site" : { - "Mode" : "meter", - "P_Grid" : -6834.549847, - "P_Load" : -1271.450153, - "P_Akku" : null, - "P_PV" : 8106, - "rel_SelfConsumption" : 15.685297, - "rel_Autonomy" : 100, - "E_Day" : 35020, - "E_Year" : 5826076, - "E_Total" : 14788870, - "Meter_Location" : "grid" - }, - "Inverters" : { - "1" : { - "DT" : 123, - "P" : 8106, - "E_Day" : 35020, - "E_Year" : 5826076, - "E_Total" : 14788870 - } - } - } - } -} -``` diff --git a/conf.d/node.d/named.conf.md b/conf.d/node.d/named.conf.md deleted file mode 100644 index fa843dd58..000000000 --- a/conf.d/node.d/named.conf.md +++ /dev/null @@ -1,344 +0,0 @@ -# ISC Bind Statistics
-
-Using this netdata collector, you can monitor one or more ISC Bind servers.
-
-The source code for this plugin is [here](https://github.com/firehol/netdata/blob/master/node.d/named.node.js).
-
-## Example netdata charts
-
-Depending on the number of views your bind has, you may get a large number of charts.
-Here is how it looks with just one view:
-
-![image](https://cloud.githubusercontent.com/assets/2662304/12765473/879b8e04-ca07-11e5-817d-b0651996c42b.png)
-![image](https://cloud.githubusercontent.com/assets/2662304/12766538/12b272fa-ca0d-11e5-81e1-6a9f8ff488ff.png)
-
-## How it works
-
-The plugin will execute (from within node.js) the equivalent of:
-
-```sh
-curl "http://localhost:8888/json/v1/server"
-```
-
-Here is a sample of the output this command produces.
-
-```js
-{
- "json-stats-version":"1.0",
- "boot-time":"2016-01-31T08:20:48Z",
- "config-time":"2016-01-31T09:28:03Z",
- "current-time":"2016-02-02T22:22:20Z",
- "opcodes":{
- "QUERY":247816,
- "IQUERY":0,
- "STATUS":0,
- "RESERVED3":0,
- "NOTIFY":0,
- "UPDATE":3813,
- "RESERVED6":0,
- "RESERVED7":0,
- "RESERVED8":0,
- "RESERVED9":0,
- "RESERVED10":0,
- "RESERVED11":0,
- "RESERVED12":0,
- "RESERVED13":0,
- "RESERVED14":0,
- "RESERVED15":0
- },
- "qtypes":{
- "A":89519,
- "NS":863,
- "CNAME":1,
- "SOA":1,
- "PTR":116779,
- "MX":276,
- "TXT":198,
- "AAAA":39324,
- "SRV":850,
- "ANY":5
- },
- "nsstats":{
- "Requestv4":251630,
- "ReqEdns0":1255,
- "ReqTSIG":3813,
- "ReqTCP":57,
- "AuthQryRej":1455,
- "RecQryRej":122,
- "Response":245918,
- "TruncatedResp":44,
- "RespEDNS0":1255,
- "RespTSIG":3813,
- "QrySuccess":205159,
- "QryAuthAns":119495,
- "QryNoauthAns":120770,
- "QryNxrrset":32711,
- "QrySERVFAIL":262,
- "QryNXDOMAIN":2395,
- "QryRecursion":40885,
- "QryDuplicate":5712,
- "QryFailure":1577,
- "UpdateDone":2514,
- "UpdateFail":1299,
- "UpdateBadPrereq":1276,
- "QryUDP":246194,
- "QryTCP":45,
- "OtherOpt":101
- },
- "views":{
- "local":{
- "resolver":{
- "stats":{
- "Queryv4":74577,
- "Responsev4":67032,
- "NXDOMAIN":601,
- "SERVFAIL":5,
- "FORMERR":7,
- "EDNS0Fail":7,
- "Truncated":3071,
- "Lame":4,
- "Retry":11826,
- "QueryTimeout":1838,
- "GlueFetchv4":6864,
- "GlueFetchv4Fail":30,
- "QryRTT10":112,
- "QryRTT100":42900,
- "QryRTT500":23275,
- "QryRTT800":534,
- "QryRTT1600":97,
- "QryRTT1600+":20,
- "BucketSize":31,
- "REFUSED":13
- },
- "qtypes":{
- "A":64931,
- "NS":870,
- "CNAME":185,
- "PTR":5,
- "MX":49,
- "TXT":149,
- "AAAA":7972,
- "SRV":416
- },
- "cache":{
- "A":40356,
- "NS":8032,
- "CNAME":14477,
- "PTR":2,
- "MX":21,
- "TXT":32,
- "AAAA":3301,
- "SRV":94,
- "DS":237,
- "RRSIG":2301,
- "NSEC":126,
- "!A":52,
- "!NS":4,
- "!TXT":1,
- "!AAAA":3797,
- "!SRV":9,
- "NXDOMAIN":590
- },
- "cachestats":{
- "CacheHits":1085188,
- "CacheMisses":109,
- "QueryHits":464755,
- "QueryMisses":55624,
- "DeleteLRU":0,
- "DeleteTTL":42615,
- "CacheNodes":5188,
- "CacheBuckets":2079,
- "TreeMemTotal":2326026,
- "TreeMemInUse":1508075,
- "HeapMemMax":132096,
- "HeapMemTotal":393216,
- "HeapMemInUse":132096
- },
- "adb":{
- "nentries":1021,
- "entriescnt":3157,
- "nnames":1021,
- "namescnt":3022
- }
- }
- },
- "public":{
- "resolver":{
- "stats":{
- "BucketSize":31
- },
- "qtypes":{
- },
- "cache":{
- },
- "cachestats":{
- "CacheHits":0,
- "CacheMisses":0,
- "QueryHits":0,
- "QueryMisses":0,
- "DeleteLRU":0,
- "DeleteTTL":0,
- "CacheNodes":0,
- "CacheBuckets":64,
- "TreeMemTotal":287392,
- "TreeMemInUse":29608,
- "HeapMemMax":1024,
- "HeapMemTotal":262144,
- "HeapMemInUse":1024
- },
- "adb":{
- "nentries":1021,
- "nnames":1021
- }
- }
- },
- "_bind":{
- "resolver":{
- "stats":{
- "BucketSize":31
- },
- "qtypes":{
- },
- "cache":{
- },
- "cachestats":{
- "CacheHits":0,
- "CacheMisses":0,
- "QueryHits":0,
- "QueryMisses":0,
- "DeleteLRU":0,
- "DeleteTTL":0,
- "CacheNodes":0,
- "CacheBuckets":64,
- "TreeMemTotal":287392,
- "TreeMemInUse":29608,
- "HeapMemMax":1024,
- "HeapMemTotal":262144,
- "HeapMemInUse":1024
- },
- "adb":{
- "nentries":1021,
- "nnames":1021
- }
- }
- }
- }
-}
-```
-
-
-From this output it collects:
-
-- Global Received Requests by IP version (IPv4, IPv6)
-- Global Successful Queries
-- Current Recursive Clients
-- Global Queries by IP Protocol (TCP, UDP)
-- Global Queries Analysis
-- Global Received Updates
-- Global Query Failures
-- Global Query Failures Analysis
-- Other Global Server Statistics
-- Global Incoming Requests by OpCode
-- Global Incoming Requests by Query Type
-- Global Socket Statistics (will only work if the url is `http://127.0.0.1:8888/json/v1`, i.e. without `/server`, but keep in mind this produces a very long output and probably will account for 0.5% CPU overhead alone, per bind server added)
-- Per View Statistics (the following set will be added for each bind view):
- - View, Resolver Active Queries
- - View, Resolver Statistics
- - View, Resolver Round Trip Timings
- - View, Requests by Query Type
-
-## Configuration
-
-The collector (optionally) reads a configuration file named `/etc/netdata/node.d/named.conf`, with the following contents:
-
-```js
-{
- "enable_autodetect": true,
- "update_every": 5,
- "servers": [
- {
- "name": "bind1",
- "url": "http://127.0.0.1:8888/json/v1/server",
- "update_every": 1
- },
- {
- "name": "bind2",
- "url": "http://10.1.2.3:8888/json/v1/server",
- "update_every": 2
- }
- ]
-}
-```
-
-You can add any number of bind servers.
-
-If the configuration file is missing, or the key `enable_autodetect` is `true`, the collector will also attempt to fetch `http://localhost:8888/json/v1/server`, which, if successful, will be added too.
-
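-If you also want the Global Socket Statistics chart mentioned above, point the `url` of a server at the base statistics endpoint, without the trailing `/server`. A minimal sketch (the name and address are placeholders; keep in mind the note above about the extra output and CPU overhead this incurs):
-
-```js
-{
-  "enable_autodetect": false,
-  "update_every": 5,
-  "servers": [
-    {
-      "name": "bind1",
-      "url": "http://127.0.0.1:8888/json/v1",
-      "update_every": 5
-    }
-  ]
-}
-```
-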
-### XML instead of JSON, from bind
-
-The collector can also accept bind URLs that return XML output. This might be required if you cannot have bind 9.10+ with JSON, but you have a version of bind that supports XML statistics v3. Check [this](https://www.isc.org/blogs/bind-9-10-statistics-troubleshooting-and-zone-configuration/) for supported versions.
-
-In such cases, use a URL like this:
-
-```sh
-curl "http://localhost:8888/xml/v3/server"
-```
-
-Only `xml` and `v3` have been tested.
-
-Keep in mind, though, that XML parsing is done using javascript code, which requires a triple conversion:
-
-1. from XML to JSON using a javascript XML parser (**CPU intensive**),
-2. which is then transformed to emulate the JSON output of bind (**CPU intensive** - and yes, the JSON converted from XML differs from the native JSON - even bind produces different names for various attributes),
-3. which is then processed to generate the data for the charts (this will happen even if bind is producing JSON).
-
-In general, expect XML parsing to be 2 to 3 times more CPU intensive than JSON.
-
-**So, if you can use the JSON output of bind, prefer it over XML**. Also keep in mind that bind itself will use more CPU when generating XML instead of JSON.
-
-The XML interface of bind is not autodetected.
-You will have to provide the config file `/etc/netdata/node.d/named.conf`, like this:
-
-```js
-{
- "enable_autodetect": false,
- "update_every": 1,
- "servers": [
- {
- "name": "local",
- "url": "http://localhost:8888/xml/v3/server",
- "update_every": 1
- }
- ]
-}
-```
-
-Of course, you can monitor more than one bind server. Each one can be configured with either JSON or XML output.
-
-## Auto-detection
-
-Auto-detection is controlled by `enable_autodetect` in the config file. It is enabled by default, so if the collector can connect to `http://localhost:8888/json/v1/server` and receive bind statistics, that server will be added automatically.
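-
-If you only want the servers you list explicitly, a minimal sketch that turns auto-detection off (the name and URL are placeholders):
-
-```js
-{
-  "enable_autodetect": false,
-  "update_every": 5,
-  "servers": [
-    {
-      "name": "bind1",
-      "url": "http://127.0.0.1:8888/json/v1/server",
-      "update_every": 1
-    }
-  ]
-}
-```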
-
-## Bind (named) configuration
-
-To use this plugin, you have to have bind v9.10+ properly compiled to provide statistics in `JSON` format.
-
-For more information on how to get your bind installation ready, please refer to the [bind statistics channel developer comments](http://jpmens.net/2013/03/18/json-in-bind-9-s-statistics-server/) and to [bind documentation](https://ftp.isc.org/isc/bind/9.10.3/doc/arm/Bv9ARM.ch06.html#statistics) or [bind Knowledge Base article AA-01123](https://kb.isc.org/article/AA-01123/0).
-
-Normally, you will need something like this in your `named.conf`:
-
-```
-statistics-channels {
- inet 127.0.0.1 port 8888 allow { 127.0.0.1; };
- inet ::1 port 8888 allow { ::1; };
-};
-```
-
-(use the IPv4 or IPv6 line depending on your setup; you can also use both)
-
-Verify it works by running the following command (the collector is written in node.js and will query your bind server directly, but if this command works, the collector should be able to work too):
-
-```sh
-curl "http://localhost:8888/json/v1/server"
-```
-
diff --git a/conf.d/node.d/sma_webbox.conf.md b/conf.d/node.d/sma_webbox.conf.md deleted file mode 100644 index 19fdc9dd3..000000000 --- a/conf.d/node.d/sma_webbox.conf.md +++ /dev/null @@ -1,25 +0,0 @@ -
-[SMA Sunny Webbox](http://www.solar-is-future.com/sma-technology-for-our-future/products/sunny-webbox/index.html)
-
-Example netdata configuration for node.d/sma_webbox.conf
-
-The module supports any number of servers, like this:
-
-```json
-{
- "enable_autodetect": false,
- "update_every": 5,
- "servers": [
- {
- "name": "plant1",
- "hostname": "10.0.1.1",
- "update_every": 10
- },
- {
- "name": "plant2",
- "hostname": "10.0.2.1",
- "update_every": 15
- }
- ]
-}
-```
diff --git a/conf.d/node.d/snmp.conf.md b/conf.d/node.d/snmp.conf.md deleted file mode 100644 index 6b496f7a8..000000000 --- a/conf.d/node.d/snmp.conf.md +++ /dev/null @@ -1,359 +0,0 @@ -# SNMP Data Collector - -Using this collector, netdata can collect data from any SNMP device. - -This collector supports: - -- any number of SNMP devices -- each SNMP device can be used to collect data for any number of charts -- each chart may have any number of dimensions -- each SNMP device may have a different update frequency -- each SNMP device will accept one or more batches to report values (you can set `max_request_size` per SNMP server, to control the size of batches). - -The source code of the plugin is [here](https://github.com/firehol/netdata/blob/master/node.d/snmp.node.js). - -## Configuration - -You will need to create the file `/etc/netdata/node.d/snmp.conf` with data like the following. - -In this example: - - - the SNMP device is `10.11.12.8`. - - the SNMP community is `public`. - - we will update the values every 10 seconds (`update_every: 10` under the server `10.11.12.8`). - - we define 2 charts `snmp_switch.bandwidth_port1` and `snmp_switch.bandwidth_port2`, each having 2 dimensions: `in` and `out`. - -```js -{ - "enable_autodetect": false, - "update_every": 5, - "max_request_size": 100, - "servers": [ - { - "hostname": "10.11.12.8", - "community": "public", - "update_every": 10, - "max_request_size": 50, - "options": { "timeout": 10000 }, - "charts": { - "snmp_switch.bandwidth_port1": { - "title": "Switch Bandwidth for port 1", - "units": "kilobits/s", - "type": "area", - "priority": 1, - "family": "ports", - "dimensions": { - "in": { - "oid": "1.3.6.1.2.1.2.2.1.10.1", - "algorithm": "incremental", - "multiplier": 8, - "divisor": 1024, - "offset": 0 - }, - "out": { - "oid": "1.3.6.1.2.1.2.2.1.16.1", - "algorithm": "incremental", - "multiplier": -8, - "divisor": 1024, - "offset": 0 - } - } - }, - "snmp_switch.bandwidth_port2": { - "title": "Switch Bandwidth for port 2", - "units": "kilobits/s", - "type": "area", - "priority": 1, - "family": "ports", - "dimensions": { - "in": { - "oid": "1.3.6.1.2.1.2.2.1.10.2", - "algorithm": "incremental", - "multiplier": 8, - "divisor": 1024, - "offset": 0 - }, - "out": { - "oid": "1.3.6.1.2.1.2.2.1.16.2", - "algorithm": "incremental", - "multiplier": -8, - "divisor": 1024, - "offset": 0 - } - } - } - } - } - ] -} -``` - -`update_every` is the update frequency for each server, in seconds. - -`max_request_size` limits the maximum number of OIDs that will be requested in a single call. The default is 50. Lower this number of you get `TooBig` errors in netdata error.log. - -`family` sets the name of the submenu of the dashboard each chart will appear under. - -If you need to define many charts using incremental OIDs, you can use something like this: - -This is like the previous, but the option `multiply_range` given, will multiply the current chart from `1` to `24` inclusive, producing 24 charts in total for the 24 ports of the switch `10.11.12.8`. - -Each of the 24 new charts will have its id (1-24) appended at: - -1. its chart unique id, i.e. `snmp_switch.bandwidth_port1` to `snmp_switch.bandwidth_port24` -2. its `title`, i.e. `Switch Bandwidth for port 1` to `Switch Bandwidth for port 24` -3. its `oid` (for all dimensions), i.e. dimension `in` will be `1.3.6.1.2.1.2.2.1.10.1` to `1.3.6.1.2.1.2.2.1.10.24` -3. 
its priority (which will be incremented for each chart so that the charts will appear on the dashboard in this order) - -```js -{ - "enable_autodetect": false, - "update_every": 10, - "servers": [ - { - "hostname": "10.11.12.8", - "community": "public", - "update_every": 10, - "options": { "timeout": 20000 }, - "charts": { - "snmp_switch.bandwidth_port": { - "title": "Switch Bandwidth for port ", - "units": "kilobits/s", - "type": "area", - "priority": 1, - "family": "ports", - "multiply_range": [ 1, 24 ], - "dimensions": { - "in": { - "oid": "1.3.6.1.2.1.2.2.1.10.", - "algorithm": "incremental", - "multiplier": 8, - "divisor": 1024, - "offset": 0 - }, - "out": { - "oid": "1.3.6.1.2.1.2.2.1.16.", - "algorithm": "incremental", - "multiplier": -8, - "divisor": 1024, - "offset": 0 - } - } - } - } - } - ] -} -``` - -The `options` given for each server, are: - - - `timeout`, the time to wait for the SNMP device to respond. The default is 5000 ms. - - `version`, the SNMP version to use. `0` is Version 1, `1` is Version 2c. The default is Version 1 (`0`). - - `transport`, the default is `udp4`. - - `port`, the port of the SNMP device to connect to. The default is `161`. - - `retries`, the number of attempts to make to fetch the data. The default is `1`. - -## Retreiving names from snmp - -You can append a value retrieved from SNMP to the title, by adding `titleoid` to the chart. - -You can set a dimension name to a value retrieved from SNMP, by adding `oidname` to the dimension. - -Both of the above will participate in `multiply_range`. - - -## Testing the configuration - -To test it, you can run: - -```sh -/usr/libexec/netdata/plugins.d/node.d.plugin 1 snmp -``` - -The above will run it on your console and you will be able to see what netdata sees, but also errors. You can get a very detailed output by appending `debug` to the command line. - -If it works, restart netdata to activate the snmp collector and refresh the dashboard (if your SNMP device responds with a delay, you may need to refresh the dashboard in a few seconds). - -## Data collection speed - -Keep in mind that many SNMP switches are routers are very slow. They may not be able to report values per second. If you run `node.d.plugin` in `debug` mode, it will report the time it took for the SNMP device to respond. My switch, for example, needs 7-8 seconds to respond for the traffic on 24 ports (48 OIDs, in/out). - -Also, if you use many SNMP clients on the same SNMP device at the same time, values may be skipped. This is a problem of the SNMP device, not this collector. - -## Finding OIDs - -Use `snmpwalk`, like this: - -```sh -snmpwalk -t 20 -v 1 -O fn -c public 10.11.12.8 -``` - -- `-t 20` is the timeout in seconds -- `-v 1` is the SNMP version -- `-O fn` will display full OIDs in numeric format (you may want to run it also without this option to see human readable output of OIDs) -- `-c public` is the SNMP community -- `10.11.12.8` is the SNMP device - -Keep in mind that `snmpwalk` outputs the OIDs with a dot in front them. You should remove this dot when adding OIDs to the configuration file of this collector. - -## Example: Linksys SRW2024P - -This is what I use for my Linksys SRW2024P. It creates: - -1. A chart for power consumption (it is a PoE switch) -2. Two charts for packets received (total packets received and packets received with errors) -3. One chart for packets output -4. 24 charts, one for each port of the switch. It also appends the port names, as defined at the switch, to the chart titles. 
- -This switch also reports various other metrics, like snmp, packets per port, etc. Unfortunately it does not report CPU utilization or backplane utilization. - -This switch has a very slow SNMP processors. To respond, it needs about 8 seconds, so I have set the refresh frequency (`update_every`) to 15 seconds. - -```js -{ - "enable_autodetect": false, - "update_every": 5, - "servers": [ - { - "hostname": "10.11.12.8", - "community": "public", - "update_every": 15, - "options": { "timeout": 20000, "version": 1 }, - "charts": { - "snmp_switch.power": { - "title": "Switch Power Supply", - "units": "watts", - "type": "line", - "priority": 10, - "family": "power", - "dimensions": { - "supply": { - "oid": ".1.3.6.1.2.1.105.1.3.1.1.2.1", - "algorithm": "absolute", - "multiplier": 1, - "divisor": 1, - "offset": 0 - }, - "used": { - "oid": ".1.3.6.1.2.1.105.1.3.1.1.4.1", - "algorithm": "absolute", - "multiplier": 1, - "divisor": 1, - "offset": 0 - } - } - } - , "snmp_switch.input": { - "title": "Switch Packets Input", - "units": "packets/s", - "type": "area", - "priority": 20, - "family": "IP", - "dimensions": { - "receives": { - "oid": ".1.3.6.1.2.1.4.3.0", - "algorithm": "incremental", - "multiplier": 1, - "divisor": 1, - "offset": 0 - } - , "discards": { - "oid": ".1.3.6.1.2.1.4.8.0", - "algorithm": "incremental", - "multiplier": 1, - "divisor": 1, - "offset": 0 - } - } - } - , "snmp_switch.input_errors": { - "title": "Switch Received Packets with Errors", - "units": "packets/s", - "type": "line", - "priority": 30, - "family": "IP", - "dimensions": { - "bad_header": { - "oid": ".1.3.6.1.2.1.4.4.0", - "algorithm": "incremental", - "multiplier": 1, - "divisor": 1, - "offset": 0 - } - , "bad_address": { - "oid": ".1.3.6.1.2.1.4.5.0", - "algorithm": "incremental", - "multiplier": 1, - "divisor": 1, - "offset": 0 - } - , "unknown_protocol": { - "oid": ".1.3.6.1.2.1.4.7.0", - "algorithm": "incremental", - "multiplier": 1, - "divisor": 1, - "offset": 0 - } - } - } - , "snmp_switch.output": { - "title": "Switch Output Packets", - "units": "packets/s", - "type": "line", - "priority": 40, - "family": "IP", - "dimensions": { - "requests": { - "oid": ".1.3.6.1.2.1.4.10.0", - "algorithm": "incremental", - "multiplier": 1, - "divisor": 1, - "offset": 0 - } - , "discards": { - "oid": ".1.3.6.1.2.1.4.11.0", - "algorithm": "incremental", - "multiplier": -1, - "divisor": 1, - "offset": 0 - } - , "no_route": { - "oid": ".1.3.6.1.2.1.4.12.0", - "algorithm": "incremental", - "multiplier": -1, - "divisor": 1, - "offset": 0 - } - } - } - , "snmp_switch.bandwidth_port": { - "title": "Switch Bandwidth for port ", - "titleoid": ".1.3.6.1.2.1.31.1.1.1.18.", - "units": "kilobits/s", - "type": "area", - "priority": 100, - "family": "ports", - "multiply_range": [ 1, 24 ], - "dimensions": { - "in": { - "oid": ".1.3.6.1.2.1.2.2.1.10.", - "algorithm": "incremental", - "multiplier": 8, - "divisor": 1024, - "offset": 0 - } - , "out": { - "oid": ".1.3.6.1.2.1.2.2.1.16.", - "algorithm": "incremental", - "multiplier": -8, - "divisor": 1024, - "offset": 0 - } - } - } - } - } - ] -} -``` diff --git a/conf.d/node.d/stiebeleltron.conf.md b/conf.d/node.d/stiebeleltron.conf.md deleted file mode 100644 index 6ae5aa1c7..000000000 --- a/conf.d/node.d/stiebeleltron.conf.md +++ /dev/null @@ -1,453 +0,0 @@ -[Stiebel Eltron Heat pump system with ISG](https://www.stiebel-eltron.com/en/home/products-solutions/renewables/controller_energymanagement/internet_servicegateway/isg_web.html) - -Original author: BrainDoctor (github) - -The module 
supports any metrics that are parseable with RegEx. There is no API that gives direct access to the values (AFAIK), so the "workaround" is to parse the HTML output of the ISG. - -### Testing -This plugin has been tested within the following environment: - * ISG version: 8.5.6 - * MFG version: 12 - * Controller version: 9 - * July (summer time, not much activity) - * Interface language: English - * login- and password-less ISG web access (without HTTPS it's useless anyway) - * Heatpump model: WPL 25 I-2 - * Hot water boiler model: 820 WT 1 - -So, if the language is set to english, copy the following configuration into `/etc/netdata/node.d/stiebeleltron.conf` and change the `url`s. - -In my case, the ISG is relatively slow with responding (at least 1s, but also up to 4s). Collecting metrics every 10s is more than enough for me. - -### How to update the config - -* The dimensions support variable digits, the default is `1`. Most of the values printed by ISG are using 1 digit, some use 2. -* The dimensions also support the `multiplier` and `divisor` attributes, however the divisor gets overridden by `digits`, if specified. Default is `1`. -* The test string for the regex is always the whole HTML output from the url. For each parameter you need to have a regular expression that extracts the value from the HTML source in the first capture group. - Recommended: [regexr.com](regexr.com) for testing and matching, [freeformatter.com](https://www.freeformatter.com/json-escape.html) for escaping the newly created regex for the JSON config. - -The charts are being generated using the configuration below. So if your installation is in another language or has other metrics, just adapt the structure or regexes. -### Configuration template -```json -{ - "enable_autodetect": false, - "update_every": 10, - "pages": [ - { - "name": "System", - "id": "system", - "url": "http://machine.ip.or.dns/?s=1,0", - "update_every": 10, - "categories": [ - { - "id": "eletricreheating", - "name": "electric reheating", - "charts": [ - { - "title": "Dual Mode Reheating Temperature", - "id": "reheatingtemp", - "unit": "Celsius", - "type": "line", - "prio": 1, - "dimensions": [ - { - "name": "Heating", - "id": "dualmodeheatingtemp", - "regex": "DUAL MODE TEMP HEATING<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>" - }, - { - "name": "Hot Water", - "id" : "dualmodehotwatertemp", - "regex": "DUAL MODE TEMP DHW<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>" - } - ] - } - ] - }, - { - "id": "roomtemp", - "name": "room temperature", - "charts": [ - { - "title": "Heat Circuit 1", - "id": "hc1", - "unit": "Celsius", - "type": "line", - "prio": 1, - "dimensions": [ - { - "name": "Actual", - "id": "actual", - "regex": "<tr class=\"even\">\\s*<td.*>ACTUAL TEMPERATURE HC 1<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>\\s*<\\\/tr>" - }, - { - "name": "Set", - "id" : "set", - "regex": "<tr class=\"odd\">\\s*<td.*>SET TEMPERATURE HC 1<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>\\s*<\\\/tr>" - } - ] - }, - { - "title": "Heat Circuit 2", - "id": "hc2", - "unit": "Celsius", - "type": "line", - "prio": 2, - "dimensions": [ - { - "name": "Actual", - "id": "actual", - "regex": "<tr class=\"even\">\\s*<td.*>ACTUAL TEMPERATURE HC 2<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>\\s*<\\\/tr>" - }, - { - "name": "Set", - "id" : "set", - "regex": "<tr class=\"odd\">\\s*<td.*>SET TEMPERATURE HC 2<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>\\s*<\\\/tr>" - } - ] - } - ] - }, - { - "id": "heating", - "name": "heating", - "charts": [ - { - "title": "Heat 
Circuit 1", - "id": "hc1", - "unit": "Celsius", - "type": "line", - "prio": 1, - "dimensions": [ - { - "name": "Actual", - "id": "actual", - "regex": "<tr class=\"odd\">\\s*<td.*>ACTUAL TEMPERATURE HC 1<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>\\s*<\\\/tr>" - }, - { - "name": "Set", - "id" : "set", - "regex": "<tr class=\"even\">\\s*<td.*>SET TEMPERATURE HC 1<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>\\s*<\\\/tr>" - } - ] - }, - { - "title": "Heat Circuit 2", - "id": "hc2", - "unit": "Celsius", - "type": "line", - "prio": 2, - "dimensions": [ - { - "name": "Actual", - "id": "actual", - "regex": "<tr class=\"odd\">\\s*<td.*>ACTUAL TEMPERATURE HC 2<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>\\s*<\\\/tr>" - }, - { - "name": "Set", - "id" : "set", - "regex": "<tr class=\"even\">\\s*<td.*>SET TEMPERATURE HC 2<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>\\s*<\\\/tr>" - } - ] - }, - { - "title": "Flow Temperature", - "id": "flowtemp", - "unit": "Celsius", - "type": "line", - "prio": 3, - "dimensions": [ - { - "name": "Heating", - "id": "heating", - "regex": "ACTUAL FLOW TEMPERATURE WP<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>" - }, - { - "name": "Reheating", - "id" : "reheating", - "regex": "ACTUAL FLOW TEMPERATURE NHZ<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>" - } - ] - }, - { - "title": "Buffer Temperature", - "id": "buffertemp", - "unit": "Celsius", - "type": "line", - "prio": 4, - "dimensions": [ - { - "name": "Actual", - "id": "actual", - "regex": "ACTUAL BUFFER TEMPERATURE<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>" - }, - { - "name": "Set", - "id" : "set", - "regex": "SET BUFFER TEMPERATURE<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>" - } - ] - }, - { - "title": "Fixed Temperature", - "id": "fixedtemp", - "unit": "Celsius", - "type": "line", - "prio": 5, - "dimensions": [ - { - "name": "Set", - "id" : "setfixed", - "regex": "SET FIXED TEMPERATURE<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>" - } - ] - }, - { - "title": "Pre-flow Temperature", - "id": "preflowtemp", - "unit": "Celsius", - "type": "line", - "prio": 6, - "dimensions": [ - { - "name": "Actual", - "id": "actualreturn", - "regex": "ACTUAL RETURN TEMPERATURE<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>" - } - ] - } - ] - }, - { - "id": "hotwater", - "name": "hot water", - "charts": [ - { - "title": "Hot Water Temperature", - "id": "hotwatertemp", - "unit": "Celsius", - "type": "line", - "prio": 1, - "dimensions": [ - { - "name": "Actual", - "id": "actual", - "regex": "ACTUAL TEMPERATURE<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>" - }, - { - "name": "Set", - "id" : "set", - "regex": "SET TEMPERATURE<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>" - } - ] - } - ] - }, - { - "id": "general", - "name": "general", - "charts": [ - { - "title": "Outside Temperature", - "id": "outside", - "unit": "Celsius", - "type": "line", - "prio": 1, - "dimensions": [ - { - "name": "Outside temperature", - "id": "outsidetemp", - "regex": "OUTSIDE TEMPERATURE<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>\\s*<\\\/tr>" - } - ] - }, - { - "title": "Condenser Temperature", - "id": "condenser", - "unit": "Celsius", - "type": "line", - "prio": 2, - "dimensions": [ - { - "name": "Condenser", - "id": "condenser", - "regex": "CONDENSER TEMP\\.<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>" - } - ] - }, - { - "title": "Heating Circuit Pressure", - "id": "heatingcircuit", - "unit": "bar", - "type": "line", - "prio": 3, - "dimensions": [ - { - "name": "Heating Circuit", - "id": "heatingcircuit", - "digits": 2, - "regex": "PRESSURE HTG 
CIRC<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]*).*<\\\/td>" - } - ] - }, - { - "title": "Flow Rate", - "id": "flowrate", - "unit": "liters/min", - "type": "line", - "prio": 4, - "dimensions": [ - { - "name": "Flow Rate", - "id": "flowrate", - "digits": 2, - "regex": "FLOW RATE<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>" - } - ] - }, - { - "title": "Output", - "id": "output", - "unit": "%", - "type": "line", - "prio": 5, - "dimensions": [ - { - "name": "Heat Pump", - "id": "outputheatpump", - "regex": "OUTPUT HP<\\\/td>\\s*<td.*>(-?[0-9]+,?[0-9]*).*<\\\/td>" - }, - { - "name": "Water Pump", - "id": "intpumprate", - "regex": "INT PUMP RATE<\\\/td>\\s*<td.*>(-?[0-9]+,?[0-9]*).*<\\\/td>" - } - ] - } - ] - } - ] - }, - { - "name": "Heat Pump", - "id": "heatpump", - "url": "http://machine.ip.or.dns/?s=1,1", - "update_every": 10, - "categories": [ - { - "id": "runtime", - "name": "runtime", - "charts": [ - { - "title": "Compressor", - "id": "compressor", - "unit": "h", - "type": "line", - "prio": 1, - "dimensions": [ - { - "name": "Heating", - "id": "heating", - "regex": "RNT COMP 1 HEA<\\\/td>\\s*<td.*>(-?[0-9]+,?[0-9]*)" - }, - { - "name": "Hot Water", - "id" : "hotwater", - "regex": "RNT COMP 1 DHW<\\\/td>\\s*<td.*>(-?[0-9]+,?[0-9]*)" - } - ] - }, - { - "title": "Reheating", - "id": "reheating", - "unit": "h", - "type": "line", - "prio": 2, - "dimensions": [ - { - "name": "Reheating 1", - "id": "rh1", - "regex": "BH 1<\\\/td>\\s*<td.*>(-?[0-9]+,?[0-9]*)" - }, - { - "name": "Reheating 2", - "id" : "rh2", - "regex": "BH 2<\\\/td>\\s*<td.*>(-?[0-9]+,?[0-9]*)" - } - ] - } - ] - }, - { - "id": "processdata", - "name": "process data", - "charts": [ - { - "title": "Remaining Compressor Rest Time", - "id": "remaincomp", - "unit": "s", - "type": "line", - "prio": 1, - "dimensions": [ - { - "name": "Timer", - "id": "timer", - "regex": "COMP DLAY CNTR<\\\/td>\\s*<td.*>(-?[0-9]+,?[0-9]*)" - } - ] - } - ] - }, - { - "id": "energy", - "name": "energy", - "charts": [ - { - "title": "Compressor Today", - "id": "compressorday", - "unit": "kWh", - "type": "line", - "prio": 1, - "dimensions": [ - { - "name": "Heating", - "id": "heating", - "digits": 3, - "regex": "COMPRESSOR HEATING DAY<\\\/td>\\s*<td.*>(-?[0-9]+,?[0-9]*)" - }, - { - "name": "Hot Water", - "id": "hotwater", - "digits": 3, - "regex": "COMPRESSOR DHW DAY<\\\/td>\\s*<td.*>(-?[0-9]+,?[0-9]*)" - } - ] - }, - { - "title": "Compressor Total", - "id": "compressortotal", - "unit": "MWh", - "type": "line", - "prio": 2, - "dimensions": [ - { - "name": "Heating", - "id": "heating", - "digits": 3, - "regex": "COMPRESSOR HEATING TOTAL<\\\/td>\\s*<td.*>(-?[0-9]+,?[0-9]*)" - }, - { - "name": "Hot Water", - "id": "hotwater", - "digits": 3, - "regex": "COMPRESSOR DHW TOTAL<\\\/td>\\s*<td.*>(-?[0-9]+,?[0-9]*)" - } - ] - } - ] - } - ] - } - ] -} -``` diff --git a/conf.d/python.d.conf b/conf.d/python.d.conf deleted file mode 100644 index bb57738bb..000000000 --- a/conf.d/python.d.conf +++ /dev/null @@ -1,79 +0,0 @@ -# netdata python.d.plugin configuration -# -# This file is in YaML format. -# Generally the format is: -# -# name: value -# - -# Enable / disable the whole python.d.plugin (all its modules) -enabled: yes - -# ---------------------------------------------------------------------- -# Enable / Disable python.d.plugin modules -#default_run: yes -# -# If "default_run" = "yes" the default for all modules is enabled (yes). -# Setting any of these to "no" will disable it. -# -# If "default_run" = "no" the default for all modules is disabled (no). 
-# Setting any of these to "yes" will enable it. - -# apache_cache has been replaced by web_log -apache_cache: no -# apache: yes -# beanstalk: yes -# bind_rndc: yes -# ceph: yes -chrony: no -# couchdb: yes -# cpufreq: yes -# cpuidle: yes -# dns_query_time: yes -# dnsdist: yes -# dovecot: yes -# elasticsearch: yes - -# this is just an example -example: no - -# exim: yes -# fail2ban: yes -# freeradius: yes - -# gunicorn_log has been replaced by web_log -gunicorn_log: no -go_expvar: no -# haproxy: yes -# hddtemp: yes -# icecast: yes -# ipfs: yes -# isc_dhcpd: yes -# mdstat: yes -# memcached: yes -# mongodb: yes -# mysql: yes -# nginx: yes -# nginx_plus: yes -# nsd: yes -# ntpd: yes - -# nginx_log has been replaced by web_log -nginx_log: no -# ntpd: yes -# ovpn_status_log: yes -# phpfpm: yes -# postfix: yes -# postgres: yes -# powerdns: yes -# rabbitmq: yes -# redis: yes -# retroshare: yes -# sensors: yes -# samba: yes -# smartd_log: yes -# squid: yes -# springboot: yes -# tomcat: yes -# varnish: yes -# web_log: yes diff --git a/conf.d/python.d/apache.conf b/conf.d/python.d/apache.conf deleted file mode 100644 index 3bbc3f786..000000000 --- a/conf.d/python.d/apache.conf +++ /dev/null @@ -1,87 +0,0 @@ -# netdata python.d.plugin configuration for apache -# -# This file is in YaML format. Generally the format is: -# -# name: value -# -# There are 2 sections: -# - global variables -# - one or more JOBS -# -# JOBS allow you to collect values from multiple sources. -# Each source will have its own set of charts. -# -# JOB parameters have to be indented (using spaces only, example below). - -# ---------------------------------------------------------------------- -# Global Variables -# These variables set the defaults for all JOBs, however each JOB -# may define its own, overriding the defaults. - -# update_every sets the default data collection frequency. -# If unset, the python.d.plugin default is used. -# update_every: 1 - -# priority controls the order of charts at the netdata dashboard. -# Lower numbers move the charts towards the top of the page. -# If unset, the default for python.d.plugin is used. -# priority: 60000 - -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 - -# autodetection_retry sets the job re-check interval in seconds. -# The job is not deleted if check fails. -# Attempts to start the job are made once every autodetection_retry. -# This feature is disabled by default. -# autodetection_retry: 0 - -# ---------------------------------------------------------------------- -# JOBS (data collection sources) -# -# The default JOBS share the same *name*. JOBS with the same name -# are mutually exclusive. Only one of them will be allowed running at -# any time. This allows autodetection to try several alternatives and -# pick the one that works. -# -# Any number of jobs is supported. -# -# All python.d.plugin JOBS (for all its modules) support a set of -# predefined parameters. 
These are: -# -# job_name: -# name: myname # the JOB's name as it will appear at the -# # dashboard (by default is the job_name) -# # JOBs sharing a name are mutually exclusive -# update_every: 1 # the JOB's data collection frequency -# priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts -# autodetection_retry: 0 # the JOB's re-check interval in seconds -# -# Additionally to the above, apache also supports the following: -# -# url: 'URL' # the URL to fetch apache's mod_status stats -# -# if the URL is password protected, the following are supported: -# -# user: 'username' -# pass: 'password' - -# ---------------------------------------------------------------------- -# AUTO-DETECTION JOBS -# only one of them will run (they have the same name) - -localhost: - name : 'local' - url : 'http://localhost/server-status?auto' - -localipv4: - name : 'local' - url : 'http://127.0.0.1/server-status?auto' - -localipv6: - name : 'local' - url : 'http://::1/server-status?auto' diff --git a/conf.d/python.d/beanstalk.conf b/conf.d/python.d/beanstalk.conf deleted file mode 100644 index 940801877..000000000 --- a/conf.d/python.d/beanstalk.conf +++ /dev/null @@ -1,80 +0,0 @@ -# netdata python.d.plugin configuration for beanstalk -# -# This file is in YaML format. Generally the format is: -# -# name: value -# -# There are 2 sections: -# - global variables -# - one or more JOBS -# -# JOBS allow you to collect values from multiple sources. -# Each source will have its own set of charts. -# -# JOB parameters have to be indented (using spaces only, example below). - -# ---------------------------------------------------------------------- -# Global Variables -# These variables set the defaults for all JOBs, however each JOB -# may define its own, overriding the defaults. - -# update_every sets the default data collection frequency. -# If unset, the python.d.plugin default is used. -# update_every: 1 - -# priority controls the order of charts at the netdata dashboard. -# Lower numbers move the charts towards the top of the page. -# If unset, the default for python.d.plugin is used. -# priority: 60000 - -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 - -# autodetection_retry sets the job re-check interval in seconds. -# The job is not deleted if check fails. -# Attempts to start the job are made once every autodetection_retry. -# This feature is disabled by default. -# autodetection_retry: 0 - -# chart_cleanup sets the default chart cleanup interval in iterations. -# A chart is marked as obsolete if it has not been updated -# 'chart_cleanup' iterations in a row. -# When a plugin sends the obsolete flag, the charts are not deleted -# from netdata immediately. -# They will be hidden immediately (not offered to dashboard viewer, -# streamed upstream and archived to backends) and deleted one hour -# later (configurable from netdata.conf). -# chart_cleanup: 10 - -# ---------------------------------------------------------------------- -# JOBS (data collection sources) -# -# The default JOBS share the same *name*. JOBS with the same name -# are mutually exclusive. Only one of them will be allowed running at -# any time. This allows autodetection to try several alternatives and -# pick the one that works. -# -# Any number of jobs is supported. 
-# -# All python.d.plugin JOBS (for all its modules) support a set of -# predefined parameters. These are: -# -# job_name: -# name: myname # the JOB's name as it will appear at the -# # dashboard (by default is the job_name) -# # JOBs sharing a name are mutually exclusive -# update_every: 1 # the JOB's data collection frequency -# priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts -# autodetection_retry: 0 # the JOB's re-check interval in seconds -# chart_cleanup: 10 # the JOB's chart cleanup interval in iterations -# -# Additionally to the above, apache also supports the following: -# -# host: 'host' # Server ip address or hostname. Default: 127.0.0.1 -# port: port # Beanstalkd port. Default: -# -# ---------------------------------------------------------------------- diff --git a/conf.d/python.d/bind_rndc.conf b/conf.d/python.d/bind_rndc.conf deleted file mode 100644 index 71958ff98..000000000 --- a/conf.d/python.d/bind_rndc.conf +++ /dev/null @@ -1,112 +0,0 @@ -# netdata python.d.plugin configuration for bind_rndc -# -# This file is in YaML format. Generally the format is: -# -# name: value -# -# There are 2 sections: -# - global variables -# - one or more JOBS -# -# JOBS allow you to collect values from multiple sources. -# Each source will have its own set of charts. -# -# JOB parameters have to be indented (using spaces only, example below). - -# ---------------------------------------------------------------------- -# Global Variables -# These variables set the defaults for all JOBs, however each JOB -# may define its own, overriding the defaults. - -# update_every sets the default data collection frequency. -# If unset, the python.d.plugin default is used. -# update_every: 1 - -# priority controls the order of charts at the netdata dashboard. -# Lower numbers move the charts towards the top of the page. -# If unset, the default for python.d.plugin is used. -# priority: 60000 - -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 - -# autodetection_retry sets the job re-check interval in seconds. -# The job is not deleted if check fails. -# Attempts to start the job are made once every autodetection_retry. -# This feature is disabled by default. -# autodetection_retry: 0 - -# ---------------------------------------------------------------------- -# JOBS (data collection sources) -# -# The default JOBS share the same *name*. JOBS with the same name -# are mutually exclusive. Only one of them will be allowed running at -# any time. This allows autodetection to try several alternatives and -# pick the one that works. -# -# Any number of jobs is supported. -# -# All python.d.plugin JOBS (for all its modules) support a set of -# predefined parameters. 
These are: -# -# job_name: -# name: myname # the JOB's name as it will appear at the -# # dashboard (by default is the job_name) -# # JOBs sharing a name are mutually exclusive -# update_every: 1 # the JOB's data collection frequency -# priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts -# autodetection_retry: 0 # the JOB's re-check interval in seconds -# -# Additionally to the above, bind_rndc also supports the following: -# -# named_stats_path: 'path to named.stats' # Default: '/var/log/bind/named.stats' -#------------------------------------------------------------------------------------------------------------------ -# IMPORTANT Information -# -# BIND APPEND logs at EVERY RUN. Its NOT RECOMMENDED to set update_every below 30 sec. -# STRONGLY RECOMMENDED to create a bind-rndc conf file for logrotate -# -# To set up your BIND to dump stats do the following: -# -# 1. add to 'named.conf.options' options {}: -# statistics-file "/var/log/bind/named.stats"; -# -# 2. Create bind/ directory in /var/log -# cd /var/log/ && mkdir bind -# -# 3. Change owner of directory to 'bind' user -# chown bind bind/ -# -# 4. RELOAD (NOT restart) BIND -# systemctl reload bind9.service -# -# 5. Run as a root 'rndc stats' to dump (BIND will create named.stats in new directory) -# -# -# To ALLOW NETDATA TO RUN 'rndc stats' change '/etc/bind/rndc.key' group to netdata -# chown :netdata rndc.key -# -# The last BUT NOT least is to create bind-rndc.conf in logrotate.d/ -# The working one -# /var/log/bind/named.stats { -# -# daily -# rotate 4 -# compress -# delaycompress -# create 0644 bind bind -# missingok -# postrotate -# rndc reload > /dev/null -# endscript -# } -# -# To test your logrotate conf file run as root: -# -# logrotate /etc/logrotate.d/bind-rndc -d (debug dry-run mode) -# -# ---------------------------------------------------------------------- diff --git a/conf.d/python.d/ceph.conf b/conf.d/python.d/ceph.conf deleted file mode 100644 index 78ac1e251..000000000 --- a/conf.d/python.d/ceph.conf +++ /dev/null @@ -1,75 +0,0 @@ -# netdata python.d.plugin configuration for ceph stats -# -# This file is in YaML format. Generally the format is: -# -# name: value -# -# There are 2 sections: -# - global variables -# - one or more JOBS -# -# JOBS allow you to collect values from multiple sources. -# Each source will have its own set of charts. -# -# JOB parameters have to be indented (using spaces only, example below). - -# ---------------------------------------------------------------------- -# Global Variables -# These variables set the defaults for all JOBs, however each JOB -# may define its own, overriding the defaults. - -# update_every sets the default data collection frequency. -# If unset, the python.d.plugin default is used. -# update_every: 10 - -# priority controls the order of charts at the netdata dashboard. -# Lower numbers move the charts towards the top of the page. -# If unset, the default for python.d.plugin is used. -# priority: 60000 - -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 - -# autodetection_retry sets the job re-check interval in seconds. -# The job is not deleted if check fails. -# Attempts to start the job are made once every autodetection_retry. -# This feature is disabled by default. 
-# autodetection_retry: 0 - -# ---------------------------------------------------------------------- -# JOBS (data collection sources) -# -# The default JOBS share the same *name*. JOBS with the same name -# are mutually exclusive. Only one of them will be allowed running at -# any time. This allows autodetection to try several alternatives and -# pick the one that works. -# -# Any number of jobs is supported. -# -# All python.d.plugin JOBS (for all its modules) support a set of -# predefined parameters. These are: -# -# job_name: -# name: myname # the JOB's name as it will appear at the -# # dashboard (by default is the job_name) -# # JOBs sharing a name are mutually exclusive -# update_every: 10 # the JOB's data collection frequency -# priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts -# autodetection_retry: 0 # the JOB's re-check interval in seconds -# -# Additionally to the above, ceph plugin also supports the following: -# -# config_file: 'config_file' # Ceph config file. -# keyring_file: 'keyring_file' # Ceph keyring file. netdata user must be added into ceph group -# # and keyring file must be read group permission. -# ---------------------------------------------------------------------- -# AUTO-DETECTION JOBS -# only one of them will run (they have the same name) -# -config_file: '/etc/ceph/ceph.conf' -keyring_file: '/etc/ceph/ceph.client.admin.keyring' - diff --git a/conf.d/python.d/chrony.conf b/conf.d/python.d/chrony.conf deleted file mode 100644 index 9ac906b5f..000000000 --- a/conf.d/python.d/chrony.conf +++ /dev/null @@ -1,79 +0,0 @@ -# netdata python.d.plugin configuration for chrony -# -# This file is in YaML format. Generally the format is: -# -# name: value -# -# There are 2 sections: -# - global variables -# - one or more JOBS -# -# JOBS allow you to collect values from multiple sources. -# Each source will have its own set of charts. -# -# JOB parameters have to be indented (using spaces only, example below). - -# ---------------------------------------------------------------------- -# Global Variables -# These variables set the defaults for all JOBs, however each JOB -# may define its own, overriding the defaults. - -# update_every sets the default data collection frequency. -# If unset, the python.d.plugin default is used. -update_every: 5 - -# priority controls the order of charts at the netdata dashboard. -# Lower numbers move the charts towards the top of the page. -# If unset, the default for python.d.plugin is used. -# priority: 60000 - -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 - -# autodetection_retry sets the job re-check interval in seconds. -# The job is not deleted if check fails. -# Attempts to start the job are made once every autodetection_retry. -# This feature is disabled by default. -# autodetection_retry: 0 - -# ---------------------------------------------------------------------- -# JOBS (data collection sources) -# -# The default JOBS share the same *name*. JOBS with the same name -# are mutually exclusive. Only one of them will be allowed running at -# any time. This allows autodetection to try several alternatives and -# pick the one that works. -# -# Any number of jobs is supported. 
-# -# All python.d.plugin JOBS (for all its modules) support a set of -# predefined parameters. These are: -# -# job_name: -# name: myname # the JOB's name as it will appear at the -# # dashboard (by default is the job_name) -# # JOBs sharing a name are mutually exclusive -# update_every: 1 # the JOB's data collection frequency -# priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts -# autodetection_retry: 0 # the JOB's re-check interval in seconds -# -# Additionally to the above, chrony also supports the following: -# -# command: 'chrony tracking' # the command to run -# - -# ---------------------------------------------------------------------- -# REQUIRED chrony CONFIGURATION -# -# netdata will query chrony as user netdata. -# verify that user netdata is allowed to call 'chronyc tracking' -# Check cmdallow in chrony.conf -# ---------------------------------------------------------------------- -# AUTO-DETECTION JOBS - -local: - command: 'chronyc -n tracking' diff --git a/conf.d/python.d/couchdb.conf b/conf.d/python.d/couchdb.conf deleted file mode 100644 index 5f6e75cff..000000000 --- a/conf.d/python.d/couchdb.conf +++ /dev/null @@ -1,91 +0,0 @@ -# netdata python.d.plugin configuration for couchdb -# -# This file is in YaML format. Generally the format is: -# -# name: value -# -# There are 2 sections: -# - global variables -# - one or more JOBS -# -# JOBS allow you to collect values from multiple sources. -# Each source will have its own set of charts. -# -# JOB parameters have to be indented (using spaces only, example below). - -# ---------------------------------------------------------------------- -# Global Variables -# These variables set the defaults for all JOBs, however each JOB -# may define its own, overriding the defaults. - -# update_every sets the default data collection frequency. -# If unset, the python.d.plugin default is used. -# By default, CouchDB only updates its stats every 10 seconds. -update_every: 10 - -# priority controls the order of charts at the netdata dashboard. -# Lower numbers move the charts towards the top of the page. -# If unset, the default for python.d.plugin is used. -# priority: 60000 - -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 - -# autodetection_retry sets the job re-check interval in seconds. -# The job is not deleted if check fails. -# Attempts to start the job are made once every autodetection_retry. -# This feature is disabled by default. -# autodetection_retry: 0 - -# ---------------------------------------------------------------------- -# JOBS (data collection sources) -# -# The default JOBS share the same *name*. JOBS with the same name -# are mutually exclusive. Only one of them will be allowed running at -# any time. This allows autodetection to try several alternatives and -# pick the one that works. -# -# Any number of jobs is supported. -# -# All python.d.plugin JOBS (for all its modules) support a set of -# predefined parameters. 
These are: -# -# job_name: -# name: myname # the JOB's name as it will appear at the -# # dashboard (by default is the job_name) -# # JOBs sharing a name are mutually exclusive -# update_every: 1 # the JOB's data collection frequency -# priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts -# autodetection_retry: 0 # the JOB's re-check interval in seconds -# -# Additionally to the above, the couchdb plugin also supports the following: -# -# host: 'ipaddress' # Server ip address or hostname. Default: 127.0.0.1 -# port: 'port' # CouchDB port. Default: 15672 -# scheme: 'scheme' # http or https. Default: http -# node: 'couchdb@127.0.0.1' # CouchDB node name. Same as -name vm.args argument. -# -# if the URL is password protected, the following are supported: -# -# user: 'username' -# pass: 'password' -# -# if db-specific stats are desired, place their names in databases: -# databases: 'npm-registry animaldb' -# -# ---------------------------------------------------------------------- -# AUTO-DETECTION JOBS -# only one of them will run (they have the same name) -# -localhost: - name: 'local' - host: '127.0.0.1' - port: '5984' - node: 'couchdb@127.0.0.1' - scheme: 'http' -# user: 'admin' -# pass: 'password' diff --git a/conf.d/python.d/cpufreq.conf b/conf.d/python.d/cpufreq.conf deleted file mode 100644 index 0890245d9..000000000 --- a/conf.d/python.d/cpufreq.conf +++ /dev/null @@ -1,43 +0,0 @@ -# netdata python.d.plugin configuration for cpufreq -# -# This file is in YaML format. Generally the format is: -# -# name: value -# -# There are 2 sections: -# - global variables -# - one or more JOBS -# -# JOBS allow you to collect values from multiple sources. -# Each source will have its own set of charts. -# -# JOB parameters have to be indented (using spaces only, example below). - -# ---------------------------------------------------------------------- -# Global Variables -# These variables set the defaults for all JOBs, however each JOB -# may define its own, overriding the defaults. - -# update_every sets the default data collection frequency. -# If unset, the python.d.plugin default is used. -# update_every: 1 - -# priority controls the order of charts at the netdata dashboard. -# Lower numbers move the charts towards the top of the page. -# If unset, the default for python.d.plugin is used. -# priority: 60000 - -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 - -# autodetection_retry sets the job re-check interval in seconds. -# The job is not deleted if check fails. -# Attempts to start the job are made once every autodetection_retry. -# This feature is disabled by default. -# autodetection_retry: 0 - -# The directory to search for the file scaling_cur_freq -sys_dir: "/sys/devices" diff --git a/conf.d/python.d/dns_query_time.conf b/conf.d/python.d/dns_query_time.conf deleted file mode 100644 index d32c6db83..000000000 --- a/conf.d/python.d/dns_query_time.conf +++ /dev/null @@ -1,71 +0,0 @@ -# netdata python.d.plugin configuration for dns_query_time -# -# This file is in YaML format. Generally the format is: -# -# name: value -# -# There are 2 sections: -# - global variables -# - one or more JOBS -# -# JOBS allow you to collect values from multiple sources. -# Each source will have its own set of charts. 
-# -# JOB parameters have to be indented (using spaces only, example below). - -# ---------------------------------------------------------------------- -# Global Variables -# These variables set the defaults for all JOBs, however each JOB -# may define its own, overriding the defaults. - -# update_every sets the default data collection frequency. -# If unset, the python.d.plugin default is used. -# update_every: 1 - -# priority controls the order of charts at the netdata dashboard. -# Lower numbers move the charts towards the top of the page. -# If unset, the default for python.d.plugin is used. -# priority: 60000 - -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 - -# autodetection_retry sets the job re-check interval in seconds. -# The job is not deleted if check fails. -# Attempts to start the job are made once every autodetection_retry. -# This feature is disabled by default. -# autodetection_retry: 0 - -# ---------------------------------------------------------------------- -# JOBS (data collection sources) -# -# The default JOBS share the same *name*. JOBS with the same name -# are mutually exclusive. Only one of them will be allowed running at -# any time. This allows autodetection to try several alternatives and -# pick the one that works. -# -# Any number of jobs is supported. -# -# All python.d.plugin JOBS (for all its modules) support a set of -# predefined parameters. These are: -# -# job_name: -# name: myname # the JOB's name as it will appear at the -# # dashboard (by default is the job_name) -# # JOBs sharing a name are mutually exclusive -# update_every: 1 # the JOB's data collection frequency -# priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts -# autodetection_retry: 0 # the JOB's re-check interval in seconds -# -# Additionally to the above, dns_query_time also supports the following: -# -# dns_servers: 'dns servers' # List of dns servers to query -# domains: 'domains' # List of domains -# aggregate: yes/no # Aggregate all servers in one chart or not -# response_timeout: 4 # Dns query response timeout (query = -100 if response time > response_time) -# -# ----------------------------------------------------------------------
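#
# Illustrative job sketch (not part of the original file): a single job that
# combines the parameters documented above. The server addresses and domains
# below are placeholders, not defaults.
#
# my_dns_check:
#   name: 'dns_latency'
#   update_every: 10
#   dns_servers: '8.8.8.8 1.1.1.1'
#   domains: 'example.com example.org'
#   aggregate: yes
#   response_timeout: 4
#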
\ No newline at end of file diff --git a/conf.d/python.d/dnsdist.conf b/conf.d/python.d/dnsdist.conf deleted file mode 100644 index aec58b8e1..000000000 --- a/conf.d/python.d/dnsdist.conf +++ /dev/null @@ -1,85 +0,0 @@ -# netdata python.d.plugin configuration for dnsdist -# -# This file is in YaML format. Generally the format is: -# -# name: value -# -# There are 2 sections: -# - global variables -# - one or more JOBS -# -# JOBS allow you to collect values from multiple sources. -# Each source will have its own set of charts. -# -# JOB parameters have to be indented (using spaces only, example below). - -# ---------------------------------------------------------------------- -# Global Variables -# These variables set the defaults for all JOBs, however each JOB -# may define its own, overriding the defaults. - -# update_every sets the default data collection frequency. -# If unset, the python.d.plugin default is used. -#update_every: 1 - -# priority controls the order of charts at the netdata dashboard. -# Lower numbers move the charts towards the top of the page. -# If unset, the default for python.d.plugin is used. -# priority: 60000 - -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -#retries: 600000 - -# autodetection_retry sets the job re-check interval in seconds. -# The job is not deleted if check fails. -# Attempts to start the job are made once every autodetection_retry. -# This feature is disabled by default. -#autodetection_retry: 1 - -# ---------------------------------------------------------------------- -# JOBS (data collection sources) -# -# The default JOBS share the same *name*. JOBS with the same name -# are mutually exclusive. Only one of them will be allowed running at -# any time. This allows autodetection to try several alternatives and -# pick the one that works. -# -# Any number of jobs is supported. -# -# All python.d.plugin JOBS (for all its modules) support a set of -# predefined parameters. These are: -# -# job_name: -# name: myname # the JOB's name as it will appear at the -# # dashboard (by default is the job_name) -# # JOBs sharing a name are mutually exclusive -# update_every: 1 # the JOB's data collection frequency -# priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts -# autodetection_retry: 0 # the JOB's re-check interval in seconds -# -# -# Additionally to the above, dnsdist also supports the following: -# -# url: 'URL' # the URL to fetch dnsdist performance statistics -# user: 'username' # username for basic auth -# pass: 'password' # password for basic auth -# header: -# X-API-Key: 'Key' # API key -# -# ---------------------------------------------------------------------- -# AUTO-DETECTION JOBS -# only one of them will run (they have the same name) - -# localhost: -# name : 'local' -# url : 'http://127.0.0.1:5053/jsonstat?command=stats' -# user : 'username' -# pass : 'password' -# header: -# X-API-Key: 'dnsdist-api-key' - - diff --git a/conf.d/python.d/dovecot.conf b/conf.d/python.d/dovecot.conf deleted file mode 100644 index 56c394991..000000000 --- a/conf.d/python.d/dovecot.conf +++ /dev/null @@ -1,96 +0,0 @@ -# netdata python.d.plugin configuration for dovecot -# -# This file is in YaML format. 
Generally the format is: -# -# name: value -# -# There are 2 sections: -# - global variables -# - one or more JOBS -# -# JOBS allow you to collect values from multiple sources. -# Each source will have its own set of charts. -# -# JOB parameters have to be indented (using spaces only, example below). - -# ---------------------------------------------------------------------- -# Global Variables -# These variables set the defaults for all JOBs, however each JOB -# may define its own, overriding the defaults. - -# update_every sets the default data collection frequency. -# If unset, the python.d.plugin default is used. -# update_every: 1 - -# priority controls the order of charts at the netdata dashboard. -# Lower numbers move the charts towards the top of the page. -# If unset, the default for python.d.plugin is used. -# priority: 60000 - -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 - -# autodetection_retry sets the job re-check interval in seconds. -# The job is not deleted if check fails. -# Attempts to start the job are made once every autodetection_retry. -# This feature is disabled by default. -# autodetection_retry: 0 - -# ---------------------------------------------------------------------- -# JOBS (data collection sources) -# -# The default JOBS share the same *name*. JOBS with the same name -# are mutually exclusive. Only one of them will be allowed running at -# any time. This allows autodetection to try several alternatives and -# pick the one that works. -# -# Any number of jobs is supported. -# -# All python.d.plugin JOBS (for all its modules) support a set of -# predefined parameters. These are: -# -# job_name: -# name: myname # the JOB's name as it will appear at the -# # dashboard (by default is the job_name) -# # JOBs sharing a name are mutually exclusive -# update_every: 1 # the JOB's data collection frequency -# priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts -# autodetection_retry: 0 # the JOB's re-check interval in seconds -# -# Additionally to the above, dovecot also supports the following: -# -# socket: 'path/to/dovecot/stats' -# -# or -# host: 'IP or HOSTNAME' # the host to connect to -# port: PORT # the port to connect to -# -# - -# ---------------------------------------------------------------------- -# AUTO-DETECTION JOBS -# only one of them will run (they have the same name) - -localhost: - name : 'local' - host : 'localhost' - port : 24242 - -localipv4: - name : 'local' - host : '127.0.0.1' - port : 24242 - -localipv6: - name : 'local' - host : '::1' - port : 24242 - -localsocket: - name : 'local' - socket : '/var/run/dovecot/stats' - diff --git a/conf.d/python.d/elasticsearch.conf b/conf.d/python.d/elasticsearch.conf deleted file mode 100644 index 213843bf9..000000000 --- a/conf.d/python.d/elasticsearch.conf +++ /dev/null @@ -1,83 +0,0 @@ -# netdata python.d.plugin configuration for elasticsearch stats -# -# This file is in YaML format. Generally the format is: -# -# name: value -# -# There are 2 sections: -# - global variables -# - one or more JOBS -# -# JOBS allow you to collect values from multiple sources. -# Each source will have its own set of charts. -# -# JOB parameters have to be indented (using spaces only, example below). 
- -# ---------------------------------------------------------------------- -# Global Variables -# These variables set the defaults for all JOBs, however each JOB -# may define its own, overriding the defaults. - -# update_every sets the default data collection frequency. -# If unset, the python.d.plugin default is used. -# update_every: 1 - -# priority controls the order of charts at the netdata dashboard. -# Lower numbers move the charts towards the top of the page. -# If unset, the default for python.d.plugin is used. -# priority: 60000 - -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 - -# autodetection_retry sets the job re-check interval in seconds. -# The job is not deleted if check fails. -# Attempts to start the job are made once every autodetection_retry. -# This feature is disabled by default. -# autodetection_retry: 0 - -# ---------------------------------------------------------------------- -# JOBS (data collection sources) -# -# The default JOBS share the same *name*. JOBS with the same name -# are mutually exclusive. Only one of them will be allowed running at -# any time. This allows autodetection to try several alternatives and -# pick the one that works. -# -# Any number of jobs is supported. -# -# All python.d.plugin JOBS (for all its modules) support a set of -# predefined parameters. These are: -# -# job_name: -# name: myname # the JOB's name as it will appear at the -# # dashboard (by default is the job_name) -# # JOBs sharing a name are mutually exclusive -# update_every: 1 # the JOB's data collection frequency -# priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts -# autodetection_retry: 0 # the JOB's re-check interval in seconds -# -# Additionally to the above, elasticsearch plugin also supports the following: -# -# host: 'ipaddress' # Server ip address or hostname. -# port: 'port' # Port on which elasticsearch listen. -# cluster_health: False/True # Calls to cluster health elasticsearch API. Enabled by default. -# cluster_stats: False/True # Calls to cluster stats elasticsearch API. Enabled by default. -# -# -# if the URL is password protected, the following are supported: -# -# user: 'username' -# pass: 'password' -# -# ---------------------------------------------------------------------- -# AUTO-DETECTION JOBS -# only one of them will run (they have the same name) -# -local: - host: '127.0.0.1' - port: '9200' diff --git a/conf.d/python.d/example.conf b/conf.d/python.d/example.conf deleted file mode 100644 index e7fed9b50..000000000 --- a/conf.d/python.d/example.conf +++ /dev/null @@ -1,70 +0,0 @@ -# netdata python.d.plugin configuration for example -# -# This file is in YaML format. Generally the format is: -# -# name: value -# -# There are 2 sections: -# - global variables -# - one or more JOBS -# -# JOBS allow you to collect values from multiple sources. -# Each source will have its own set of charts. -# -# JOB parameters have to be indented (using spaces only, example below). - -# ---------------------------------------------------------------------- -# Global Variables -# These variables set the defaults for all JOBs, however each JOB -# may define its own, overriding the defaults. - -# update_every sets the default data collection frequency. 
-# If unset, the python.d.plugin default is used. -# update_every: 1 - -# priority controls the order of charts at the netdata dashboard. -# Lower numbers move the charts towards the top of the page. -# If unset, the default for python.d.plugin is used. -# priority: 60000 - -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 - -# autodetection_retry sets the job re-check interval in seconds. -# The job is not deleted if check fails. -# Attempts to start the job are made once every autodetection_retry. -# This feature is disabled by default. -# autodetection_retry: 0 - -# ---------------------------------------------------------------------- -# JOBS (data collection sources) -# -# The default JOBS share the same *name*. JOBS with the same name -# are mutually exclusive. Only one of them will be allowed running at -# any time. This allows autodetection to try several alternatives and -# pick the one that works. -# -# Any number of jobs is supported. -# -# All python.d.plugin JOBS (for all its modules) support a set of -# predefined parameters. These are: -# -# job_name: -# name: myname # the JOB's name as it will appear at the -# # dashboard (by default is the job_name) -# # JOBs sharing a name are mutually exclusive -# update_every: 1 # the JOB's data collection frequency -# priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts -# autodetection_retry: 0 # the JOB's re-check interval in seconds -# -# Additionally to the above, example also supports the following: -# -# - none -# -# ---------------------------------------------------------------------- -# AUTO-DETECTION JOBS -# only one of them will run (they have the same name) diff --git a/conf.d/python.d/exim.conf b/conf.d/python.d/exim.conf deleted file mode 100644 index 2add7b2cb..000000000 --- a/conf.d/python.d/exim.conf +++ /dev/null @@ -1,93 +0,0 @@ -# netdata python.d.plugin configuration for exim -# -# This file is in YaML format. Generally the format is: -# -# name: value -# -# There are 2 sections: -# - global variables -# - one or more JOBS -# -# JOBS allow you to collect values from multiple sources. -# Each source will have its own set of charts. -# -# JOB parameters have to be indented (using spaces only, example below). - -# ---------------------------------------------------------------------- -# Global Variables -# These variables set the defaults for all JOBs, however each JOB -# may define its own, overriding the defaults. - -# update_every sets the default data collection frequency. -# If unset, the python.d.plugin default is used. -# exim is slow, so once every 10 seconds -update_every: 10 - -# priority controls the order of charts at the netdata dashboard. -# Lower numbers move the charts towards the top of the page. -# If unset, the default for python.d.plugin is used. -# priority: 60000 - -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 - -# autodetection_retry sets the job re-check interval in seconds. -# The job is not deleted if check fails. -# Attempts to start the job are made once every autodetection_retry. 
-# This feature is disabled by default. -# autodetection_retry: 0 - -# ---------------------------------------------------------------------- -# JOBS (data collection sources) -# -# The default JOBS share the same *name*. JOBS with the same name -# are mutually exclusive. Only one of them will be allowed running at -# any time. This allows autodetection to try several alternatives and -# pick the one that works. -# -# Any number of jobs is supported. -# -# All python.d.plugin JOBS (for all its modules) support a set of -# predefined parameters. These are: -# -# job_name: -# name: myname # the JOB's name as it will appear at the -# # dashboard (by default is the job_name) -# # JOBs sharing a name are mutually exclusive -# update_every: 1 # the JOB's data collection frequency -# priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts -# autodetection_retry: 0 # the JOB's re-check interval in seconds -# -# Additionally to the above, exim also supports the following: -# -# command: 'exim -bpc' # the command to run -# - -# ---------------------------------------------------------------------- -# REQUIRED exim CONFIGURATION -# -# netdata will query exim as user netdata. -# By default exim will refuse to respond. -# -# To allow querying exim as non-admin user, please set the following -# to your exim configuration: -# -# queue_list_requires_admin = false -# -# Your exim configuration should be in -# -# /etc/exim/exim4.conf -# or -# /etc/exim4/conf.d/main/000_local_options -# -# Please consult your distribution information to find the exact file. - -# ---------------------------------------------------------------------- -# AUTO-DETECTION JOBS - -local: - command: 'exim -bpc' diff --git a/conf.d/python.d/fail2ban.conf b/conf.d/python.d/fail2ban.conf deleted file mode 100644 index 60ca87231..000000000 --- a/conf.d/python.d/fail2ban.conf +++ /dev/null @@ -1,70 +0,0 @@ -# netdata python.d.plugin configuration for fail2ban -# -# This file is in YaML format. Generally the format is: -# -# name: value -# -# There are 2 sections: -# - global variables -# - one or more JOBS -# -# JOBS allow you to collect values from multiple sources. -# Each source will have its own set of charts. -# -# JOB parameters have to be indented (using spaces only, example below). - -# ---------------------------------------------------------------------- -# Global Variables -# These variables set the defaults for all JOBs, however each JOB -# may define its own, overriding the defaults. - -# update_every sets the default data collection frequency. -# If unset, the python.d.plugin default is used. -# update_every: 1 - -# priority controls the order of charts at the netdata dashboard. -# Lower numbers move the charts towards the top of the page. -# If unset, the default for python.d.plugin is used. -# priority: 60000 - -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 - -# autodetection_retry sets the job re-check interval in seconds. -# The job is not deleted if check fails. -# Attempts to start the job are made once every autodetection_retry. -# This feature is disabled by default. -# autodetection_retry: 0 - -# ---------------------------------------------------------------------- -# JOBS (data collection sources) -# -# The default JOBS share the same *name*. 
JOBS with the same name -# are mutually exclusive. Only one of them will be allowed running at -# any time. This allows autodetection to try several alternatives and -# pick the one that works. -# -# Any number of jobs is supported. -# -# All python.d.plugin JOBS (for all its modules) support a set of -# predefined parameters. These are: -# -# job_name: -# name: myname # the JOB's name as it will appear at the -# # dashboard (by default is the job_name) -# # JOBs sharing a name are mutually exclusive -# update_every: 1 # the JOB's data collection frequency -# priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts -# autodetection_retry: 0 # the JOB's re-check interval in seconds -# -# Additionally to the above, fail2ban also supports the following: -# -# log_path: 'path to fail2ban.log' # Default: '/var/log/fail2ban.log' -# conf_path: 'path to jail.local/jail.conf' # Default: '/etc/fail2ban/jail.local' -# conf_dir: 'path to jail.d/' # Default: '/etc/fail2ban/jail.d/' -# exclude: 'jails you want to exclude from autodetection' # Default: none -#------------------------------------------------------------------------------------------------------------------ diff --git a/conf.d/python.d/freeradius.conf b/conf.d/python.d/freeradius.conf deleted file mode 100644 index 3336d4c49..000000000 --- a/conf.d/python.d/freeradius.conf +++ /dev/null @@ -1,82 +0,0 @@ -# netdata python.d.plugin configuration for freeradius -# -# This file is in YaML format. Generally the format is: -# -# name: value -# -# There are 2 sections: -# - global variables -# - one or more JOBS -# -# JOBS allow you to collect values from multiple sources. -# Each source will have its own set of charts. -# -# JOB parameters have to be indented (using spaces only, example below). - -# ---------------------------------------------------------------------- -# Global Variables -# These variables set the defaults for all JOBs, however each JOB -# may define its own, overriding the defaults. - -# update_every sets the default data collection frequency. -# If unset, the python.d.plugin default is used. -# update_every: 1 - -# priority controls the order of charts at the netdata dashboard. -# Lower numbers move the charts towards the top of the page. -# If unset, the default for python.d.plugin is used. -# priority: 60000 - -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 - -# autodetection_retry sets the job re-check interval in seconds. -# The job is not deleted if check fails. -# Attempts to start the job are made once every autodetection_retry. -# This feature is disabled by default. -# autodetection_retry: 0 - -# ---------------------------------------------------------------------- -# JOBS (data collection sources) -# -# The default JOBS share the same *name*. JOBS with the same name -# are mutually exclusive. Only one of them will be allowed running at -# any time. This allows autodetection to try several alternatives and -# pick the one that works. -# -# Any number of jobs is supported. -# -# All python.d.plugin JOBS (for all its modules) support a set of -# predefined parameters. 
These are: -# -# job_name: -# name: myname # the JOB's name as it will appear at the -# # dashboard (by default is the job_name) -# # JOBs sharing a name are mutually exclusive -# update_every: 1 # the JOB's data collection frequency -# priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts -# autodetection_retry: 0 # the JOB's re-check interval in seconds -# -# Additionally to the above, freeradius also supports the following: -# -# host: 'host' # Default: 'localhost'. Server ip address or hostname. -# port: 'port' # Default: '18121'. Port on which freeradius server listen (type = status). -# secret: 'secret' # Default: 'adminsecret'. -# acct: yes/no # Default: no. Freeradius accounting statistics. -# proxy_auth: yes/no # Default: no. Freeradius proxy authentication statistics. -# proxy_acct: yes/no # Default: no. Freeradius proxy accounting statistics. -# -# ------------------------------------------------------------------------------------------------------------------ -# Freeradius server configuration: -# The configuration for the status server is automatically created in the sites-available directory. -# By default, server is enabled and can be queried from every client. -# FreeRADIUS will only respond to status-server messages, if the status-server virtual server has been enabled. -# To do this, create a link from the sites-enabled directory to the status file in the sites-available directory: -# cd sites-enabled -# ln -s ../sites-available/status status -# and restart/reload your FREERADIUS server. -# ------------------------------------------------------------------------------------------------------------------ diff --git a/conf.d/python.d/go_expvar.conf b/conf.d/python.d/go_expvar.conf deleted file mode 100644 index c352b1674..000000000 --- a/conf.d/python.d/go_expvar.conf +++ /dev/null @@ -1,110 +0,0 @@ -# netdata python.d.plugin configuration for go_expvar -# -# This file is in YaML format. Generally the format is: -# -# name: value -# -# There are 2 sections: -# - global variables -# - one or more JOBS -# -# JOBS allow you to collect values from multiple sources. -# Each source will have its own set of charts. -# -# JOB parameters have to be indented (using spaces only, example below). - -# ---------------------------------------------------------------------- -# Global Variables -# These variables set the defaults for all JOBs, however each JOB -# may define its own, overriding the defaults. - -# update_every sets the default data collection frequency. -# If unset, the python.d.plugin default is used. -# update_every: 1 - -# priority controls the order of charts at the netdata dashboard. -# Lower numbers move the charts towards the top of the page. -# If unset, the default for python.d.plugin is used. -# priority: 60000 - -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 - -# autodetection_retry sets the job re-check interval in seconds. -# The job is not deleted if check fails. -# Attempts to start the job are made once every autodetection_retry. -# This feature is disabled by default. -# autodetection_retry: 0 - -# ---------------------------------------------------------------------- -# JOBS (data collection sources) -# -# Any number of jobs is supported. 
-# -# All python.d.plugin JOBS (for all its modules) support a set of -# predefined parameters. These are: -# -# job_name: -# name: myname # the JOB's name as it will appear at the -# # dashboard (by default is the job_name) -# # JOBs sharing a name are mutually exclusive -# update_every: 1 # the JOB's data collection frequency -# priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts -# autodetection_retry: 0 # the JOB's re-check interval in seconds -# -# Additionally to the above, this plugin also supports the following: -# -# url: 'http://127.0.0.1/debug/vars' # the URL of the expvar endpoint -# -# As the plugin cannot possibly know the port your application listens on, there is no default value. Please include -# the whole path of the endpoint, as the expvar handler can be installed in a non-standard location. -# -# if the URL is password protected, the following are supported: -# -# user: 'username' -# pass: 'password' -# -# collect_memstats: true # enables charts for Go runtime's memory statistics -# extra_charts: {} # defines extra data/charts to monitor, please see the example below -# -# If collect_memstats is disabled and no extra charts are defined, this module will disable itself, as it has no data to -# collect. -# -# Please visit the module wiki page for more information on how to use the extra_charts variable: -# -# https://github.com/firehol/netdata/wiki/Monitoring-Go-Applications#monitoring-custom-vars-with-go_expvar -# -# Configuration example -# --------------------- - -#app1: -# name : 'app1' -# url : 'http://127.0.0.1:8080/debug/vars' -# collect_memstats: true -# extra_charts: -# - id: "runtime_goroutines" -# options: -# name: num_goroutines -# title: "runtime: number of goroutines" -# units: goroutines -# family: runtime -# context: expvar.runtime.goroutines -# chart_type: line -# lines: -# - {expvar_key: 'runtime.goroutines', expvar_type: int, id: runtime_goroutines} -# - id: "foo_counters" -# options: -# name: counters -# title: "some random counters" -# units: awesomeness -# family: counters -# context: expvar.foo.counters -# chart_type: line -# lines: -# - {expvar_key: 'counters.cnt1', expvar_type: int, id: counters_cnt1} -# - {expvar_key: 'counters.cnt2', expvar_type: float, id: counters_cnt2} - diff --git a/conf.d/python.d/haproxy.conf b/conf.d/python.d/haproxy.conf deleted file mode 100644 index a40dd76a5..000000000 --- a/conf.d/python.d/haproxy.conf +++ /dev/null @@ -1,85 +0,0 @@ -# netdata python.d.plugin configuration for haproxy -# -# This file is in YaML format. Generally the format is: -# -# name: value -# -# There are 2 sections: -# - global variables -# - one or more JOBS -# -# JOBS allow you to collect values from multiple sources. -# Each source will have its own set of charts. -# -# JOB parameters have to be indented (using spaces only, example below). - -# ---------------------------------------------------------------------- -# Global Variables -# These variables set the defaults for all JOBs, however each JOB -# may define its own, overriding the defaults. - -# update_every sets the default data collection frequency. -# If unset, the python.d.plugin default is used. -# update_every: 1 - -# priority controls the order of charts at the netdata dashboard. -# Lower numbers move the charts towards the top of the page. -# If unset, the default for python.d.plugin is used. -# priority: 60000 - -# retries sets the number of retries to be made in case of failures. 
-# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 - -# autodetection_retry sets the job re-check interval in seconds. -# The job is not deleted if check fails. -# Attempts to start the job are made once every autodetection_retry. -# This feature is disabled by default. -# autodetection_retry: 0 - -# ---------------------------------------------------------------------- -# JOBS (data collection sources) -# -# The default JOBS share the same *name*. JOBS with the same name -# are mutually exclusive. Only one of them will be allowed running at -# any time. This allows autodetection to try several alternatives and -# pick the one that works. -# -# Any number of jobs is supported. -# -# All python.d.plugin JOBS (for all its modules) support a set of -# predefined parameters. These are: -# -# job_name: -# name: myname # the JOB's name as it will appear at the -# # dashboard (by default is the job_name) -# # JOBs sharing a name are mutually exclusive -# update_every: 1 # the JOB's data collection frequency -# priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts -# autodetection_retry: 0 # the JOB's re-check interval in seconds -# -# Additionally to the above, haproxy also supports the following: -# -# IMPORTANT: socket MUST BE readable AND writable by netdata user -# -# socket: 'path/to/haproxy/sock' -# -# OR -# url: 'http://<ip.address>:<port>/<url>;csv;norefresh' -# [user: USERNAME] only if stats auth is used -# [pass: PASSWORD] only if stats auth is used - -# ---------------------------------------------------------------------- -# AUTO-DETECTION JOBS -# only one of them will run (they have the same name) - -#via_url: -# user : 'admin' -# pass : 'password' -# url : 'http://127.0.0.1:7000/haproxy_stats;csv;norefresh' - -#via_socket: -# socket: '/var/run/haproxy/admin.sock' diff --git a/conf.d/python.d/hddtemp.conf b/conf.d/python.d/hddtemp.conf deleted file mode 100644 index 9165798a2..000000000 --- a/conf.d/python.d/hddtemp.conf +++ /dev/null @@ -1,97 +0,0 @@ -# netdata python.d.plugin configuration for hddtemp -# -# This file is in YaML format. Generally the format is: -# -# name: value -# -# There are 2 sections: -# - global variables -# - one or more JOBS -# -# JOBS allow you to collect values from multiple sources. -# Each source will have its own set of charts. -# -# JOB parameters have to be indented (using spaces only, example below). - -# ---------------------------------------------------------------------- -# Global Variables -# These variables set the defaults for all JOBs, however each JOB -# may define its own, overriding the defaults. - -# update_every sets the default data collection frequency. -# If unset, the python.d.plugin default is used. -# update_every: 1 - -# priority controls the order of charts at the netdata dashboard. -# Lower numbers move the charts towards the top of the page. -# If unset, the default for python.d.plugin is used. -# priority: 60000 - -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 - -# autodetection_retry sets the job re-check interval in seconds. -# The job is not deleted if check fails. 
-# Attempts to start the job are made once every autodetection_retry. -# This feature is disabled by default. -# autodetection_retry: 0 - -# ---------------------------------------------------------------------- -# JOBS (data collection sources) -# -# The default JOBS share the same *name*. JOBS with the same name -# are mutually exclusive. Only one of them will be allowed running at -# any time. This allows autodetection to try several alternatives and -# pick the one that works. -# -# Any number of jobs is supported. -# -# All python.d.plugin JOBS (for all its modules) support a set of -# predefined parameters. These are: -# -# job_name: -# name: myname # the JOB's name as it will appear at the -# # dashboard (by default is the job_name) -# # JOBs sharing a name are mutually exclusive -# update_every: 1 # the JOB's data collection frequency -# priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts -# autodetection_retry: 0 # the JOB's re-check interval in seconds -# -# Additionally to the above, hddtemp also supports the following: -# -# host: 'IP or HOSTNAME' # the host to connect to -# port: PORT # the port to connect to -# - -# By default this module will try to autodetect disks -# (autodetection works only for disk which names start with "sd"). -# However this can be overridden by setting variable `disks` to -# array of desired disks. Example for two disks: -# -# devices: -# - sda -# - sdb -# - -# ---------------------------------------------------------------------- -# AUTO-DETECTION JOBS -# only one of them will run (they have the same name) - -localhost: - name: 'local' - host: 'localhost' - port: 7634 - -localipv4: - name: 'local' - host: '127.0.0.1' - port: 7634 - -localipv6: - name: 'local' - host: '::1' - port: 7634 diff --git a/conf.d/python.d/httpcheck.conf b/conf.d/python.d/httpcheck.conf deleted file mode 100644 index 058e057a6..000000000 --- a/conf.d/python.d/httpcheck.conf +++ /dev/null @@ -1,99 +0,0 @@ -# netdata python.d.plugin configuration for httpcheck -# -# This file is in YaML format. Generally the format is: -# -# name: value -# -# There are 2 sections: -# - global variables -# - one or more JOBS -# -# JOBS allow you to collect values from multiple sources. -# Each source will have its own set of charts. -# -# JOB parameters have to be indented (using spaces only, example below). - -# ---------------------------------------------------------------------- -# Global Variables -# These variables set the defaults for all JOBs, however each JOB -# may define its own, overriding the defaults. - -# update_every sets the default data collection frequency. -# If unset, the httpcheck default is used, which is at 3 seconds. -# update_every: 1 - -# priority controls the order of charts at the netdata dashboard. -# Lower numbers move the charts towards the top of the page. -# If unset, the default for python.d.plugin is used. -# priority: 60000 - -# chart_cleanup sets the default chart cleanup interval in iterations. -# A chart is marked as obsolete if it has not been updated -# 'chart_cleanup' iterations in a row. -# They will be hidden immediately (not offered to dashboard viewer, -# streamed upstream and archived to backends) and deleted one hour -# later (configurable from netdata.conf). 
-# -- For this plugin, cleanup MUST be disabled, otherwise we lose response -# time charts -chart_cleanup: 0 - -# Autodetection and retries do not work for this plugin - -# ---------------------------------------------------------------------- -# JOBS (data collection sources) -# -# The default JOBS share the same *name*. JOBS with the same name -# are mutually exclusive. Only one of them will be allowed running at -# any time. This allows autodetection to try several alternatives and -# pick the one that works. -# -# Any number of jobs is supported. -# -# ------------------------------- -# ATTENTION: Any valid configuration will be accepted, even if initial connection fails! -# ------------------------------- -# -# There is intentionally no default config, e.g. for 'localhost' - -# job_name: -# name: myname # [optional] the JOB's name as it will appear at the -# # dashboard (by default is the job_name) -# # JOBs sharing a name are mutually exclusive -# update_every: 3 # [optional] the JOB's data collection frequency -# priority: 60000 # [optional] the JOB's order on the dashboard -# retries: 60 # [optional] the JOB's number of restoration attempts -# timeout: 1 # [optional] the timeout when connecting, supports decimals (e.g. 0.5s) -# url: 'http[s]://host-ip-or-dns[:port][path]' -# # [required] the remote host url to connect to. If [:port] is missing, it defaults to 80 -# # for HTTP and 443 for HTTPS. [path] is optional too, defaults to / -# redirect: yes # [optional] If the remote host returns 3xx status codes, the redirection url will be -# # followed (default). -# status_accepted: # [optional] By default, 200 is accepted. Anything else will result in 'bad status' in the -# # status chart, however: The response time will still be > 0, since the -# # host responded with something. -# # If redirect is enabled, the accepted status will be checked against the redirected page. -# - 200 # Multiple status codes are possible. If you specify 'status_accepted', you would still -# # need to add '200'. E.g. 'status_accepted: [301]' will trigger an error in 'bad status' -# # if code is 200. Do specify numerical entries such as 200, not 'OK'. -# regex: None # [optional] If the status code is accepted, the content of the response will be searched for this -# # regex (if defined). Be aware that you may need to escape the regex string. If redirect is enabled, -# # the regex will be matched to the redirected page, not the initial 3xx response. - -# Simple example: -# -# jira: -# url: 'https://jira.localdomain/' - - -# Complex example: -# -# cool_website: -# url: 'http://cool.website:8080/home' -# status_accepted: -# - 200 -# - 204 -# regex: <title>My cool website!<\/title> -# timeout: 2 - -# This plugin is intended for simple cases. Currently, the accuracy of the response time is low and should be used as reference only. - diff --git a/conf.d/python.d/icecast.conf b/conf.d/python.d/icecast.conf deleted file mode 100644 index a900d06d3..000000000 --- a/conf.d/python.d/icecast.conf +++ /dev/null @@ -1,83 +0,0 @@ -# netdata python.d.plugin configuration for icecast -# -# This file is in YaML format. Generally the format is: -# -# name: value -# -# There are 2 sections: -# - global variables -# - one or more JOBS -# -# JOBS allow you to collect values from multiple sources. -# Each source will have its own set of charts. -# -# JOB parameters have to be indented (using spaces only, example below). 
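To make that layout concrete, here is a minimal sketch of the two sections described above (the job name, value and URL are placeholders for illustration, not part of the shipped file):

# update_every: 5                      # a global variable, applies to all JOBs
#
# my_station:                          # a JOB
#     name: 'station-1'                #     JOB parameters are indented with spaces
#     url: 'http://127.0.0.1:8443/status-json.xsl'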
- -# ---------------------------------------------------------------------- -# Global Variables -# These variables set the defaults for all JOBs, however each JOB -# may define its own, overriding the defaults. - -# update_every sets the default data collection frequency. -# If unset, the python.d.plugin default is used. -# update_every: 1 - -# priority controls the order of charts at the netdata dashboard. -# Lower numbers move the charts towards the top of the page. -# If unset, the default for python.d.plugin is used. -# priority: 60000 - -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 - -# autodetection_retry sets the job re-check interval in seconds. -# The job is not deleted if check fails. -# Attempts to start the job are made once every autodetection_retry. -# This feature is disabled by default. -# autodetection_retry: 0 - -# ---------------------------------------------------------------------- -# JOBS (data collection sources) -# -# The default JOBS share the same *name*. JOBS with the same name -# are mutually exclusive. Only one of them will be allowed running at -# any time. This allows autodetection to try several alternatives and -# pick the one that works. -# -# Any number of jobs is supported. -# -# All python.d.plugin JOBS (for all its modules) support a set of -# predefined parameters. These are: -# -# job_name: -# name: myname # the JOB's name as it will appear at the -# # dashboard (by default is the job_name) -# # JOBs sharing a name are mutually exclusive -# update_every: 1 # the JOB's data collection frequency -# priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts -# autodetection_retry: 0 # the JOB's re-check interval in seconds -# -# Additionally to the above, icecast also supports the following: -# -# url: 'URL' # the URL to fetch icecast's stats -# -# if the URL is password protected, the following are supported: -# -# user: 'username' -# pass: 'password' - -# ---------------------------------------------------------------------- -# AUTO-DETECTION JOBS -# only one of them will run (they have the same name) - -localhost: - name : 'local' - url : 'http://localhost:8443/status-json.xsl' - -localipv4: - name : 'local' - url : 'http://127.0.0.1:8443/status-json.xsl'
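Since the url/user/pass options above also cover protected endpoints, a job for a password-protected icecast status page could look like the following sketch (hostname and credentials are placeholders):

#remote_auth:
#  name : 'remote'
#  url  : 'https://icecast.example.com:8443/status-json.xsl'
#  user : 'admin'
#  pass : 'hackme'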
\ No newline at end of file diff --git a/conf.d/python.d/ipfs.conf b/conf.d/python.d/ipfs.conf deleted file mode 100644 index c247c1b7a..000000000 --- a/conf.d/python.d/ipfs.conf +++ /dev/null @@ -1,74 +0,0 @@ -# netdata python.d.plugin configuration for ipfs -# -# This file is in YaML format. Generally the format is: -# -# name: value -# -# There are 2 sections: -# - global variables -# - one or more JOBS -# -# JOBS allow you to collect values from multiple sources. -# Each source will have its own set of charts. -# -# JOB parameters have to be indented (using spaces only, example below). - -# ---------------------------------------------------------------------- -# Global Variables -# These variables set the defaults for all JOBs, however each JOB -# may define its own, overriding the defaults. - -# update_every sets the default data collection frequency. -# If unset, the python.d.plugin default is used. -# update_every: 1 - -# priority controls the order of charts at the netdata dashboard. -# Lower numbers move the charts towards the top of the page. -# If unset, the default for python.d.plugin is used. -# priority: 60000 - -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 - -# autodetection_retry sets the job re-check interval in seconds. -# The job is not deleted if check fails. -# Attempts to start the job are made once every autodetection_retry. -# This feature is disabled by default. -# autodetection_retry: 0 - -# ---------------------------------------------------------------------- -# JOBS (data collection sources) -# -# The default JOBS share the same *name*. JOBS with the same name -# are mutually exclusive. Only one of them will be allowed running at -# any time. This allows autodetection to try several alternatives and -# pick the one that works. -# -# Any number of jobs is supported. -# -# All python.d.plugin JOBS (for all its modules) support a set of -# predefined parameters. These are: -# -# job_name: -# name: myname # the JOB's name as it will appear at the -# # dashboard (by default is the job_name) -# # JOBs sharing a name are mutually exclusive -# update_every: 1 # the JOB's data collection frequency -# priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts -# autodetection_retry: 0 # the JOB's re-check interval in seconds -# -# Additionally to the above, ipfs also supports the following: -# -# url: 'URL' # URL to the IPFS API -# -# ---------------------------------------------------------------------- -# AUTO-DETECTION JOBS -# only one of them will run (they have the same name) - -localhost: - name : 'local' - url : 'http://localhost:5001' diff --git a/conf.d/python.d/isc_dhcpd.conf b/conf.d/python.d/isc_dhcpd.conf deleted file mode 100644 index 4a4c4a5e3..000000000 --- a/conf.d/python.d/isc_dhcpd.conf +++ /dev/null @@ -1,81 +0,0 @@ -# netdata python.d.plugin configuration for isc dhcpd leases -# -# This file is in YaML format. Generally the format is: -# -# name: value -# -# There are 2 sections: -# - global variables -# - one or more JOBS -# -# JOBS allow you to collect values from multiple sources. -# Each source will have its own set of charts. -# -# JOB parameters have to be indented (using spaces only, example below). 
- -# ---------------------------------------------------------------------- -# Global Variables -# These variables set the defaults for all JOBs, however each JOB -# may define its own, overriding the defaults. - -# update_every sets the default data collection frequency. -# If unset, the python.d.plugin default is used. -# update_every: 1 - -# priority controls the order of charts at the netdata dashboard. -# Lower numbers move the charts towards the top of the page. -# If unset, the default for python.d.plugin is used. -# priority: 60000 - -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 - -# autodetection_retry sets the job re-check interval in seconds. -# The job is not deleted if check fails. -# Attempts to start the job are made once every autodetection_retry. -# This feature is disabled by default. -# autodetection_retry: 0 - -# ---------------------------------------------------------------------- -# JOBS (data collection sources) -# -# The default JOBS share the same *name*. JOBS with the same name -# are mutually exclusive. Only one of them will be allowed running at -# any time. This allows autodetection to try several alternatives and -# pick the one that works. -# -# Any number of jobs is supported. -# -# All python.d.plugin JOBS (for all its modules) support a set of -# predefined parameters. These are: -# -# job_name: -# name: myname # the JOB's name as it will appear at the -# # dashboard (by default is the job_name) -# # JOBs sharing a name are mutually exclusive -# update_every: 1 # the JOB's data collection frequency -# priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts -# autodetection_retry: 0 # the JOB's re-check interval in seconds -# -# Additionally to the above, isc_dhcpd supports the following: -# -# leases_path: 'PATH' # the path to dhcpd.leases file -# pools: -# office: '192.168.2.0/24' # name(dimension): pool in CIDR format -# wifi: '192.168.3.0/24' # name(dimension): pool in CIDR format -# 192.168.4.0/24: '192.168.4.0/24' # name(dimension): pool in CIDR format -# -#----------------------------------------------------------------------- -# IMPORTANT notes -# -# 1. Make sure leases file is readable by netdata. -# 2. Current implementation works only with 'default' db-time-format -# (weekday year/month/day hour:minute:second). -# This is the default, so it will work in most cases. -# 3. Pools MUST BE in CIDR format. -# -# ---------------------------------------------------------------------- diff --git a/conf.d/python.d/mdstat.conf b/conf.d/python.d/mdstat.conf deleted file mode 100644 index 66a2f153c..000000000 --- a/conf.d/python.d/mdstat.conf +++ /dev/null @@ -1,32 +0,0 @@ -# netdata python.d.plugin configuration for mdstat -# -# This file is in YaML format. Generally the format is: -# -# name: value -# - -# ---------------------------------------------------------------------- -# Global Variables -# These variables set the defaults for all JOBs, however each JOB -# may define its own, overriding the defaults. - -# update_every sets the default data collection frequency. -# If unset, the python.d.plugin default is used. -# update_every: 1 - -# priority controls the order of charts at the netdata dashboard. -# Lower numbers move the charts towards the top of the page. 
-# If unset, the default for python.d.plugin is used. -# priority: 60000 - -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 - -# autodetection_retry sets the job re-check interval in seconds. -# The job is not deleted if check fails. -# Attempts to start the job are made once every autodetection_retry. -# This feature is disabled by default. -# autodetection_retry: 0 diff --git a/conf.d/python.d/memcached.conf b/conf.d/python.d/memcached.conf deleted file mode 100644 index 85c3daf65..000000000 --- a/conf.d/python.d/memcached.conf +++ /dev/null @@ -1,92 +0,0 @@ -# netdata python.d.plugin configuration for memcached -# -# This file is in YaML format. Generally the format is: -# -# name: value -# -# There are 2 sections: -# - global variables -# - one or more JOBS -# -# JOBS allow you to collect values from multiple sources. -# Each source will have its own set of charts. -# -# JOB parameters have to be indented (using spaces only, example below). - -# ---------------------------------------------------------------------- -# Global Variables -# These variables set the defaults for all JOBs, however each JOB -# may define its own, overriding the defaults. - -# update_every sets the default data collection frequency. -# If unset, the python.d.plugin default is used. -# update_every: 1 - -# priority controls the order of charts at the netdata dashboard. -# Lower numbers move the charts towards the top of the page. -# If unset, the default for python.d.plugin is used. -# priority: 60000 - -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 - -# autodetection_retry sets the job re-check interval in seconds. -# The job is not deleted if check fails. -# Attempts to start the job are made once every autodetection_retry. -# This feature is disabled by default. -# autodetection_retry: 0 - -# ---------------------------------------------------------------------- -# JOBS (data collection sources) -# -# The default JOBS share the same *name*. JOBS with the same name -# are mutually exclusive. Only one of them will be allowed running at -# any time. This allows autodetection to try several alternatives and -# pick the one that works. -# -# Any number of jobs is supported. -# -# All python.d.plugin JOBS (for all its modules) support a set of -# predefined parameters. 
These are: -# -# job_name: -# name: myname # the JOB's name as it will appear at the -# # dashboard (by default is the job_name) -# # JOBs sharing a name are mutually exclusive -# update_every: 1 # the JOB's data collection frequency -# priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts -# autodetection_retry: 0 # the JOB's re-check interval in seconds -# -# Additionally to the above, memcached also supports the following: -# -# socket: 'path/to/memcached.sock' -# -# or -# host: 'IP or HOSTNAME' # the host to connect to -# port: PORT # the port to connect to -# -# - -# ---------------------------------------------------------------------- -# AUTO-DETECTION JOBS -# only one of them will run (they have the same name) - -localhost: - name : 'local' - host : 'localhost' - port : 11211 - -localipv4: - name : 'local' - host : '127.0.0.1' - port : 11211 - -localipv6: - name : 'local' - host : '::1' - port : 11211 - diff --git a/conf.d/python.d/mongodb.conf b/conf.d/python.d/mongodb.conf deleted file mode 100644 index 62faef68d..000000000 --- a/conf.d/python.d/mongodb.conf +++ /dev/null @@ -1,84 +0,0 @@ -# netdata python.d.plugin configuration for mongodb -# -# This file is in YaML format. Generally the format is: -# -# name: value -# -# There are 2 sections: -# - global variables -# - one or more JOBS -# -# JOBS allow you to collect values from multiple sources. -# Each source will have its own set of charts. -# -# JOB parameters have to be indented (using spaces only, example below). - -# ---------------------------------------------------------------------- -# Global Variables -# These variables set the defaults for all JOBs, however each JOB -# may define its own, overriding the defaults. - -# update_every sets the default data collection frequency. -# If unset, the python.d.plugin default is used. -# update_every: 1 - -# priority controls the order of charts at the netdata dashboard. -# Lower numbers move the charts towards the top of the page. -# If unset, the default for python.d.plugin is used. -# priority: 60000 - -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 - -# autodetection_retry sets the job re-check interval in seconds. -# The job is not deleted if check fails. -# Attempts to start the job are made once every autodetection_retry. -# This feature is disabled by default. -# autodetection_retry: 0 - -# ---------------------------------------------------------------------- -# JOBS (data collection sources) -# -# The default JOBS share the same *name*. JOBS with the same name -# are mutually exclusive. Only one of them will be allowed running at -# any time. This allows autodetection to try several alternatives and -# pick the one that works. -# -# Any number of jobs is supported. -# -# All python.d.plugin JOBS (for all its modules) support a set of -# predefined parameters. 
These are: -# -# job_name: -# name: myname # the JOB's name as it will appear at the -# # dashboard (by default is the job_name) -# # JOBs sharing a name are mutually exclusive -# update_every: 1 # the JOB's data collection frequency -# priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts -# autodetection_retry: 0 # the JOB's re-check interval in seconds -# -# Additionally to the above, mongodb also supports the following: -# -# host: 'IP or HOSTNAME' # type <str> the host to connect to -# port: PORT # type <int> the port to connect to -# -# in all cases, the following can also be set: -# -# user: 'username' # the mongodb username to use -# pass: 'password' # the mongodb password to use -# - -# ---------------------------------------------------------------------- -# to connect to the mongodb on localhost, without a password: -# ---------------------------------------------------------------------- -# AUTO-DETECTION JOBS -# only one of them will run (they have the same name) - -local: - name : 'local' - host : '127.0.0.1' - port : 27017 diff --git a/conf.d/python.d/mysql.conf b/conf.d/python.d/mysql.conf deleted file mode 100644 index b5956a2c6..000000000 --- a/conf.d/python.d/mysql.conf +++ /dev/null @@ -1,286 +0,0 @@ -# netdata python.d.plugin configuration for mysql -# -# This file is in YaML format. Generally the format is: -# -# name: value -# -# There are 2 sections: -# - global variables -# - one or more JOBS -# -# JOBS allow you to collect values from multiple sources. -# Each source will have its own set of charts. -# -# JOB parameters have to be indented (using spaces only, example below). - -# ---------------------------------------------------------------------- -# Global Variables -# These variables set the defaults for all JOBs, however each JOB -# may define its own, overriding the defaults. - -# update_every sets the default data collection frequency. -# If unset, the python.d.plugin default is used. -# update_every: 1 - -# priority controls the order of charts at the netdata dashboard. -# Lower numbers move the charts towards the top of the page. -# If unset, the default for python.d.plugin is used. -# priority: 60000 - -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 - -# autodetection_retry sets the job re-check interval in seconds. -# The job is not deleted if check fails. -# Attempts to start the job are made once every autodetection_retry. -# This feature is disabled by default. -# autodetection_retry: 0 - -# ---------------------------------------------------------------------- -# JOBS (data collection sources) -# -# The default JOBS share the same *name*. JOBS with the same name -# are mutually exclusive. Only one of them will be allowed running at -# any time. This allows autodetection to try several alternatives and -# pick the one that works. -# -# Any number of jobs is supported. -# -# All python.d.plugin JOBS (for all its modules) support a set of -# predefined parameters. 
These are: -# -# job_name: -# name: myname # the JOB's name as it will appear at the -# # dashboard (by default is the job_name) -# # JOBs sharing a name are mutually exclusive -# update_every: 1 # the JOB's data collection frequency -# priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts -# autodetection_retry: 0 # the JOB's re-check interval in seconds -# -# Additionally to the above, mysql also supports the following: -# -# socket: 'path/to/mysql.sock' -# -# or -# host: 'IP or HOSTNAME' # the host to connect to -# port: PORT # the port to connect to -# -# in all cases, the following can also be set: -# -# user: 'username' # the mysql username to use -# pass: 'password' # the mysql password to use -# - -# ---------------------------------------------------------------------- -# mySQL CONFIGURATION -# -# netdata does not need any privilege - only the ability to connect -# to the mysql server (netdata will not be able to see any data). -# -# Execute these commands to give the local user 'netdata' the ability -# to connect to the mysql server on localhost, without a password: -# -# > create user 'netdata'@'localhost'; -# > grant usage on *.* to 'netdata'@'localhost'; -# > flush privileges; -# -# with the above statements, netdata will be able to gather mysql -# statistics, without the ability to see or alter any data or affect -# mysql operation in any way. No change is required below. -# -# If you need to monitor mysql replication too, use this instead: -# -# > create user 'netdata'@'localhost'; -# > grant replication client on *.* to 'netdata'@'localhost'; -# > flush privileges; -# - -# ---------------------------------------------------------------------- -# AUTO-DETECTION JOBS -# only one of them will run (they have the same name) - -mycnf1: - name : 'local' - 'my.cnf' : '/etc/my.cnf' - -mycnf2: - name : 'local' - 'my.cnf' : '/etc/mysql/my.cnf' - -debiancnf: - name : 'local' - 'my.cnf' : '/etc/mysql/debian.cnf' - -socket1: - name : 'local' - # user : '' - # pass : '' - socket : '/var/run/mysqld/mysqld.sock' - -socket2: - name : 'local' - # user : '' - # pass : '' - socket : '/var/run/mysqld/mysql.sock' - -socket3: - name : 'local' - # user : '' - # pass : '' - socket : '/var/lib/mysql/mysql.sock' - -socket4: - name : 'local' - # user : '' - # pass : '' - socket : '/tmp/mysql.sock' - -tcp: - name : 'local' - # user : '' - # pass : '' - host : 'localhost' - port : '3306' - # keep in mind port might be ignored by mysql, if host = 'localhost' - # http://serverfault.com/questions/337818/how-to-force-mysql-to-connect-by-tcp-instead-of-a-unix-socket/337844#337844 - -tcpipv4: - name : 'local' - # user : '' - # pass : '' - host : '127.0.0.1' - port : '3306' - -tcpipv6: - name : 'local' - # user : '' - # pass : '' - host : '::1' - port : '3306' - - -# Now we try the same as above with user: root -# A few systems configure mysql to accept passwordless -# root access. 
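If passwordless access is not acceptable on your system, a dedicated netdata user with a password is a reasonable alternative to the root fallbacks below; a hedged sketch using standard MySQL statements (the password is a placeholder):

# > create user 'netdata'@'localhost' identified by 'NETDATA_PASSWORD';
# > grant usage on *.* to 'netdata'@'localhost';
# > flush privileges;
#
# and a matching job:
#
#local_with_password:
#  name   : 'local'
#  user   : 'netdata'
#  pass   : 'NETDATA_PASSWORD'
#  socket : '/var/run/mysqld/mysqld.sock'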
- -mycnf1_root: - name : 'local' - user : 'root' - 'my.cnf' : '/etc/my.cnf' - -mycnf2_root: - name : 'local' - user : 'root' - 'my.cnf' : '/etc/mysql/my.cnf' - -socket1_root: - name : 'local' - user : 'root' - # pass : '' - socket : '/var/run/mysqld/mysqld.sock' - -socket2_root: - name : 'local' - user : 'root' - # pass : '' - socket : '/var/run/mysqld/mysql.sock' - -socket3_root: - name : 'local' - user : 'root' - # pass : '' - socket : '/var/lib/mysql/mysql.sock' - -socket4_root: - name : 'local' - user : 'root' - # pass : '' - socket : '/tmp/mysql.sock' - -tcp_root: - name : 'local' - user : 'root' - # pass : '' - host : 'localhost' - port : '3306' - # keep in mind port might be ignored by mysql, if host = 'localhost' - # http://serverfault.com/questions/337818/how-to-force-mysql-to-connect-by-tcp-instead-of-a-unix-socket/337844#337844 - -tcpipv4_root: - name : 'local' - user : 'root' - # pass : '' - host : '127.0.0.1' - port : '3306' - -tcpipv6_root: - name : 'local' - user : 'root' - # pass : '' - host : '::1' - port : '3306' - - -# Now we try the same as above with user: netdata - -mycnf1_netdata: - name : 'local' - user : 'netdata' - 'my.cnf' : '/etc/my.cnf' - -mycnf2_netdata: - name : 'local' - user : 'netdata' - 'my.cnf' : '/etc/mysql/my.cnf' - -socket1_netdata: - name : 'local' - user : 'netdata' - # pass : '' - socket : '/var/run/mysqld/mysqld.sock' - -socket2_netdata: - name : 'local' - user : 'netdata' - # pass : '' - socket : '/var/run/mysqld/mysql.sock' - -socket3_netdata: - name : 'local' - user : 'netdata' - # pass : '' - socket : '/var/lib/mysql/mysql.sock' - -socket4_netdata: - name : 'local' - user : 'netdata' - # pass : '' - socket : '/tmp/mysql.sock' - -tcp_netdata: - name : 'local' - user : 'netdata' - # pass : '' - host : 'localhost' - port : '3306' - # keep in mind port might be ignored by mysql, if host = 'localhost' - # http://serverfault.com/questions/337818/how-to-force-mysql-to-connect-by-tcp-instead-of-a-unix-socket/337844#337844 - -tcpipv4_netdata: - name : 'local' - user : 'netdata' - # pass : '' - host : '127.0.0.1' - port : '3306' - -tcpipv6_netdata: - name : 'local' - user : 'netdata' - # pass : '' - host : '::1' - port : '3306' - diff --git a/conf.d/python.d/nginx.conf b/conf.d/python.d/nginx.conf deleted file mode 100644 index 71c521066..000000000 --- a/conf.d/python.d/nginx.conf +++ /dev/null @@ -1,109 +0,0 @@ -# netdata python.d.plugin configuration for nginx -# -# You must have ngx_http_stub_status_module configured on your nginx server for this -# plugin to work. The following is an example config. -# It must be located inside a server { } block. -# -# location /stub_status { -# stub_status; -# # Security: Only allow access from the IP below. -# allow 192.168.1.200; -# # Deny anyone else -# deny all; -# } -# -# This file is in YaML format. Generally the format is: -# -# name: value -# -# There are 2 sections: -# - global variables -# - one or more JOBS -# -# JOBS allow you to collect values from multiple sources. -# Each source will have its own set of charts. -# -# JOB parameters have to be indented (using spaces only, example below). - -# ---------------------------------------------------------------------- -# Global Variables -# These variables set the defaults for all JOBs, however each JOB -# may define its own, overriding the defaults. - -# update_every sets the default data collection frequency. -# If unset, the python.d.plugin default is used. -# update_every: 1 - -# priority controls the order of charts at the netdata dashboard. 
-# Lower numbers move the charts towards the top of the page. -# If unset, the default for python.d.plugin is used. -# priority: 60000 - -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 - -# autodetection_retry sets the job re-check interval in seconds. -# The job is not deleted if check fails. -# Attempts to start the job are made once every autodetection_retry. -# This feature is disabled by default. -# autodetection_retry: 0 - -# ---------------------------------------------------------------------- -# JOBS (data collection sources) -# -# The default JOBS share the same *name*. JOBS with the same name -# are mutually exclusive. Only one of them will be allowed running at -# any time. This allows autodetection to try several alternatives and -# pick the one that works. -# -# Any number of jobs is supported. -# -# All python.d.plugin JOBS (for all its modules) support a set of -# predefined parameters. These are: -# -# job_name: -# name: myname # the JOB's name as it will appear at the -# # dashboard (by default is the job_name) -# # JOBs sharing a name are mutually exclusive -# update_every: 1 # the JOB's data collection frequency -# priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts -# autodetection_retry: 0 # the JOB's re-check interval in seconds -# -# Additionally to the above, this plugin also supports the following: -# -# url: 'URL' # the URL to fetch nginx's status stats -# -# if the URL is password protected, the following are supported: -# -# user: 'username' -# pass: 'password' -# -# Example -# -# RemoteNginx: -# name : 'Reverse_Proxy' -# url : 'http://yourdomain.com/stub_status' -# -# "RemoteNginx" will show up in Netdata logs. "Reverse Proxy" will show up in the menu -# in the nginx section. - -# ---------------------------------------------------------------------- -# AUTO-DETECTION JOBS -# only one of them will run (they have the same name) - -localhost: - name : 'local' - url : 'http://localhost/stub_status' - -localipv4: - name : 'local' - url : 'http://127.0.0.1/stub_status' - -localipv6: - name : 'local' - url : 'http://[::1]/stub_status' - diff --git a/conf.d/python.d/nginx_plus.conf b/conf.d/python.d/nginx_plus.conf deleted file mode 100644 index 7b5c8f43f..000000000 --- a/conf.d/python.d/nginx_plus.conf +++ /dev/null @@ -1,87 +0,0 @@ -# netdata python.d.plugin configuration for nginx_plus -# -# This file is in YaML format. Generally the format is: -# -# name: value -# -# There are 2 sections: -# - global variables -# - one or more JOBS -# -# JOBS allow you to collect values from multiple sources. -# Each source will have its own set of charts. -# -# JOB parameters have to be indented (using spaces only, example below). - -# ---------------------------------------------------------------------- -# Global Variables -# These variables set the defaults for all JOBs, however each JOB -# may define its own, overriding the defaults. - -# update_every sets the default data collection frequency. -# If unset, the python.d.plugin default is used. -# update_every: 1 - -# priority controls the order of charts at the netdata dashboard. -# Lower numbers move the charts towards the top of the page. -# If unset, the default for python.d.plugin is used. 
-# priority: 60000 - -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 - -# autodetection_retry sets the job re-check interval in seconds. -# The job is not deleted if check fails. -# Attempts to start the job are made once every autodetection_retry. -# This feature is disabled by default. -# autodetection_retry: 0 - -# ---------------------------------------------------------------------- -# JOBS (data collection sources) -# -# The default JOBS share the same *name*. JOBS with the same name -# are mutually exclusive. Only one of them will be allowed running at -# any time. This allows autodetection to try several alternatives and -# pick the one that works. -# -# Any number of jobs is supported. -# -# All python.d.plugin JOBS (for all its modules) support a set of -# predefined parameters. These are: -# -# job_name: -# name: myname # the JOB's name as it will appear at the -# # dashboard (by default is the job_name) -# # JOBs sharing a name are mutually exclusive -# update_every: 1 # the JOB's data collection frequency -# priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts -# autodetection_retry: 0 # the JOB's re-check interval in seconds -# -# Additionally to the above, nginx_plus also supports the following: -# -# url: 'URL' # the URL to fetch nginx_plus's stats -# -# if the URL is password protected, the following are supported: -# -# user: 'username' -# pass: 'password' - -# ---------------------------------------------------------------------- -# AUTO-DETECTION JOBS -# only one of them will run (they have the same name) - -localhost: - name : 'local' - url : 'http://localhost/status' - -localipv4: - name : 'local' - url : 'http://127.0.0.1/status' - -localipv6: - name : 'local' - url : 'http://[::1]/status' diff --git a/conf.d/python.d/nsd.conf b/conf.d/python.d/nsd.conf deleted file mode 100644 index 078e97216..000000000 --- a/conf.d/python.d/nsd.conf +++ /dev/null @@ -1,93 +0,0 @@ -# netdata python.d.plugin configuration for nsd -# -# This file is in YaML format. Generally the format is: -# -# name: value -# -# There are 2 sections: -# - global variables -# - one or more JOBS -# -# JOBS allow you to collect values from multiple sources. -# Each source will have its own set of charts. -# -# JOB parameters have to be indented (using spaces only, example below). - -# ---------------------------------------------------------------------- -# Global Variables -# These variables set the defaults for all JOBs, however each JOB -# may define its own, overriding the defaults. - -# update_every sets the default data collection frequency. -# If unset, the python.d.plugin default is used. -# nsd-control is slow, so once every 30 seconds -# update_every: 30 - -# priority controls the order of charts at the netdata dashboard. -# Lower numbers move the charts towards the top of the page. -# If unset, the default for python.d.plugin is used. -# priority: 60000 - -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 - -# autodetection_retry sets the job re-check interval in seconds. 
-# The job is not deleted if check fails. -# Attempts to start the job are made once every autodetection_retry. -# This feature is disabled by default. -# autodetection_retry: 0 - -# ---------------------------------------------------------------------- -# JOBS (data collection sources) -# -# The default JOBS share the same *name*. JOBS with the same name -# are mutually exclusive. Only one of them will be allowed running at -# any time. This allows autodetection to try several alternatives and -# pick the one that works. -# -# Any number of jobs is supported. -# -# All python.d.plugin JOBS (for all its modules) support a set of -# predefined parameters. These are: -# -# job_name: -# name: myname # the JOB's name as it will appear at the -# # dashboard (by default is the job_name) -# # JOBs sharing a name are mutually exclusive -# update_every: 1 # the JOB's data collection frequency -# priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts -# autodetection_retry: 0 # the JOB's re-check interval in seconds -# -# Additionally to the above, nsd also supports the following: -# -# command: 'nsd-control stats_noreset' # the command to run -# - -# ---------------------------------------------------------------------- -# IMPORTANT Information -# -# Netdata must have permissions to run `nsd-control stats_noreset` command -# -# - Example-1 (use "sudo") -# 1. sudoers (e.g. visudo -f /etc/sudoers.d/netdata) -# Defaults:netdata !requiretty -# netdata ALL=(ALL) NOPASSWD: /usr/sbin/nsd-control stats_noreset -# 2. etc/netdata/python.d/nsd.conf -# local: -# update_every: 30 -# command: 'sudo /usr/sbin/nsd-control stats_noreset' -# -# - Example-2 (add "netdata" user to "nsd" group) -# usermod -aG nsd netdata -# - -# ---------------------------------------------------------------------- -# AUTO-DETECTION JOBS - -local: - update_every: 30 - command: 'nsd-control stats_noreset' diff --git a/conf.d/python.d/ntpd.conf b/conf.d/python.d/ntpd.conf deleted file mode 100644 index 7adc4074b..000000000 --- a/conf.d/python.d/ntpd.conf +++ /dev/null @@ -1,91 +0,0 @@ -# netdata python.d.plugin configuration for ntpd -# -# This file is in YaML format. Generally the format is: -# -# name: value -# -# There are 2 sections: -# - global variables -# - one or more JOBS -# -# JOBS allow you to collect values from multiple sources. -# Each source will have its own set of charts. -# -# JOB parameters have to be indented (using spaces only, example below). - -# ---------------------------------------------------------------------- -# Global Variables -# These variables set the defaults for all JOBs, however each JOB -# may define its own, overriding the defaults. - -# update_every sets the default data collection frequency. -# If unset, the python.d.plugin default is used. -# update_every: 1 - -# priority controls the order of charts at the netdata dashboard. -# Lower numbers move the charts towards the top of the page. -# If unset, the default for python.d.plugin is used. -# priority: 60000 - -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 - -# ---------------------------------------------------------------------- -# JOBS (data collection sources) -# -# The default JOBS share the same *name*. JOBS with the same name -# are mutually exclusive. 
Only one of them will be allowed running at -# any time. This allows autodetection to try several alternatives and -# pick the one that works. -# -# Any number of jobs is supported. -# -# All python.d.plugin JOBS (for all its modules) support a set of -# predefined parameters. These are: -# -# job_name: -# name: myname # the JOB's name as it will appear at the -# # dashboard (by default is the job_name) -# # JOBs sharing a name are mutually exclusive -# update_every: 1 # the JOB's data collection frequency -# priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts -# -# Additionally to the above, ntp also supports the following: -# -# host: 'localhost' # the host to query -# port: '123' # the UDP port where `ntpd` listens -# show_peers: no # use `yes` to show peer charts. enabling this -# # option is recommended only for debugging, as -# # it could possibly imply memory leaks if the -# # peers change frequently. -# peer_filter: '127\..*' # regex to exclude peers -# # by default local peers are hidden -# # use `''` to show all peers. -# peer_rescan: 60 # interval (>0) to check for new/changed peers -# # use `1` to check on every update -# -# ---------------------------------------------------------------------- -# AUTO-DETECTION JOBS -# only one of them will run (they have the same name) - -localhost: - name: 'local' - host: 'localhost' - port: '123' - show_peers: no - -localhost_ipv4: - name: 'local' - host: '127.0.0.1' - port: '123' - show_peers: no - -localhost_ipv6: - name: 'local' - host: '::1' - port: '123' - show_peers: no diff --git a/conf.d/python.d/ovpn_status_log.conf b/conf.d/python.d/ovpn_status_log.conf deleted file mode 100644 index 907f014f5..000000000 --- a/conf.d/python.d/ovpn_status_log.conf +++ /dev/null @@ -1,95 +0,0 @@ -# netdata python.d.plugin configuration for openvpn status log -# -# This file is in YaML format. Generally the format is: -# -# name: value -# -# There are 2 sections: -# - global variables -# - one or more JOBS -# -# JOBS allow you to collect values from multiple sources. -# Each source will have its own set of charts. -# -# JOB parameters have to be indented (using spaces only, example below). - -# ---------------------------------------------------------------------- -# Global Variables -# These variables set the defaults for all JOBs, however each JOB -# may define its own, overriding the defaults. - -# update_every sets the default data collection frequency. -# If unset, the python.d.plugin default is used. -# update_every: 1 - -# priority controls the order of charts at the netdata dashboard. -# Lower numbers move the charts towards the top of the page. -# If unset, the default for python.d.plugin is used. -# priority: 60000 - -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 - -# autodetection_retry sets the job re-check interval in seconds. -# The job is not deleted if check fails. -# Attempts to start the job are made once every autodetection_retry. -# This feature is disabled by default. -# autodetection_retry: 0 - -# ---------------------------------------------------------------------- -# JOBS (data collection sources) -# -# The default JOBS share the same *name*. JOBS with the same name -# are mutually exclusive. Only one of them will be allowed running at -# any time. 
This allows autodetection to try several alternatives and -# pick the one that works. -# -# Any number of jobs is supported. -# -# All python.d.plugin JOBS (for all its modules) support a set of -# predefined parameters. These are: -# -# job_name: -# name: myname # the JOB's name as it will appear at the -# # dashboard (by default is the job_name) -# # JOBs sharing a name are mutually exclusive -# update_every: 1 # the JOB's data collection frequency -# priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts -# autodetection_retry: 0 # the JOB's re-check interval in seconds -# -# Additionally to the above, openvpn status log also supports the following: -# -# log_path: 'PATH' # the path to openvpn status log file -# -# ---------------------------------------------------------------------- -# AUTO-DETECTION JOBS -# only one of them will run (they have the same name) -# -# IMPORTANT information -# -# 1. If you are running multiple OpenVPN instances out of the same directory, MAKE SURE TO EDIT DIRECTIVES which create output files -# so that multiple instances do not overwrite each other's output files. -# 2. Make sure NETDATA USER CAN READ openvpn-status.log -# -# * cd into directory with openvpn-status.log and run the following commands as root -# * #chown :netdata openvpn-status.log && chmod 640 openvpn-status.log -# * To check permission and group membership run -# * #ls -l openvpn-status.log -# -rw-r----- 1 root netdata 359 dec 21 21:22 openvpn-status.log -# -# 3. Update_every interval MUST MATCH interval on which OpenVPN writes operational status to log file. -# If its not true traffic chart WILL DISPLAY WRONG values -# -# Default OpenVPN update interval is 10 second on Debian 8 -# # ps -C openvpn -o command= -# /usr/sbin/openvpn --daemon ovpn-server --status /run/openvpn/server.status 10 --cd /etc/openvpn --config /etc/openvpn/server.conf -# -# -#default: -# log_path: '/var/log/openvpn-status.log' -# -# ----------------------------------------------------------------------
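Putting the notes above together, a minimal job could look like the sketch below (assuming the default Debian log path and the 10 second status interval shown in the ps output above, so that update_every matches the OpenVPN write interval):

#default:
#  log_path     : '/var/log/openvpn-status.log'
#  update_every : 10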
\ No newline at end of file diff --git a/conf.d/python.d/phpfpm.conf b/conf.d/python.d/phpfpm.conf deleted file mode 100644 index 08688e2fa..000000000 --- a/conf.d/python.d/phpfpm.conf +++ /dev/null @@ -1,90 +0,0 @@ -# netdata python.d.plugin configuration for PHP-FPM -# -# This file is in YaML format. Generally the format is: -# -# name: value -# -# There are 2 sections: -# - global variables -# - one or more JOBS -# -# JOBS allow you to collect values from multiple sources. -# Each source will have its own set of charts. -# -# JOB parameters have to be indented (using spaces only, example below). - -# ---------------------------------------------------------------------- -# Global Variables -# These variables set the defaults for all JOBs, however each JOB -# may define its own, overriding the defaults. - -# update_every sets the default data collection frequency. -# If unset, the python.d.plugin default is used. -# update_every: 1 - -# priority controls the order of charts at the netdata dashboard. -# Lower numbers move the charts towards the top of the page. -# If unset, the default for python.d.plugin is used. -# priority: 60000 - -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 - -# autodetection_retry sets the job re-check interval in seconds. -# The job is not deleted if check fails. -# Attempts to start the job are made once every autodetection_retry. -# This feature is disabled by default. -# autodetection_retry: 0 - -# ---------------------------------------------------------------------- -# JOBS (data collection sources) -# -# The default JOBS share the same *name*. JOBS with the same name -# are mutually exclusive. Only one of them will be allowed running at -# any time. This allows autodetection to try several alternatives and -# pick the one that works. -# -# Any number of jobs is supported. -# -# All python.d.plugin JOBS (for all its modules) support a set of -# predefined parameters. These are: -# -# job_name: -# name: myname # the JOB's name as it will appear at the -# # dashboard (by default is the job_name) -# # JOBs sharing a name are mutually exclusive -# update_every: 1 # the JOB's data collection frequency -# priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts -# autodetection_retry: 0 # the JOB's re-check interval in seconds -# -# Additionally to the above, PHP-FPM also supports the following: -# -# url: 'URL' # the URL to fetch PHP-FPM's status stats -# # Be sure to include ?full&status at the end of the url -# -# if the URL is password protected, the following are supported: -# -# user: 'username' -# pass: 'password' -# - -# ---------------------------------------------------------------------- -# AUTO-DETECTION JOBS -# only one of them will run (they have the same name) - -localhost: - name : 'local' - url : "http://localhost/status?full&json" - -localipv4: - name : 'local' - url : "http://127.0.0.1/status?full&json" - -localipv6: - name : 'local' - url : "http://::1/status?full&json" - diff --git a/conf.d/python.d/portcheck.conf b/conf.d/python.d/portcheck.conf deleted file mode 100644 index b3dd8bd3f..000000000 --- a/conf.d/python.d/portcheck.conf +++ /dev/null @@ -1,70 +0,0 @@ -# netdata python.d.plugin configuration for portcheck -# -# This file is in YaML format.
Generally the format is: -# -# name: value -# -# There are 2 sections: -# - global variables -# - one or more JOBS -# -# JOBS allow you to collect values from multiple sources. -# Each source will have its own set of charts. -# -# JOB parameters have to be indented (using spaces only, example below). - -# ---------------------------------------------------------------------- -# Global Variables -# These variables set the defaults for all JOBs, however each JOB -# may define its own, overriding the defaults. - -# update_every sets the default data collection frequency. -# If unset, the python.d.plugin default is used. -# update_every: 1 - -# priority controls the order of charts at the netdata dashboard. -# Lower numbers move the charts towards the top of the page. -# If unset, the default for python.d.plugin is used. -# priority: 60000 - -# chart_cleanup sets the default chart cleanup interval in iterations. -# A chart is marked as obsolete if it has not been updated -# 'chart_cleanup' iterations in a row. -# They will be hidden immediately (not offered to dashboard viewer, -# streamed upstream and archived to backends) and deleted one hour -# later (configurable from netdata.conf). -# -- For this plugin, cleanup MUST be disabled, otherwise we lose latency chart -chart_cleanup: 0 - -# Autodetection and retries do not work for this plugin - -# ---------------------------------------------------------------------- -# JOBS (data collection sources) -# -# The default JOBS share the same *name*. JOBS with the same name -# are mutually exclusive. Only one of them will be allowed running at -# any time. This allows autodetection to try several alternatives and -# pick the one that works. -# -# Any number of jobs is supported. -# -# ------------------------------- -# ATTENTION: Any valid configuration will be accepted, even if initial connection fails! -# ------------------------------- -# -# There is intentionally no default config for 'localhost' - -# job_name: -# name: myname # [optional] the JOB's name as it will appear at the -# # dashboard (by default is the job_name) -# # JOBs sharing a name are mutually exclusive -# update_every: 1 # [optional] the JOB's data collection frequency -# priority: 60000 # [optional] the JOB's order on the dashboard -# retries: 60 # [optional] the JOB's number of restoration attempts -# timeout: 1 # [optional] the socket timeout when connecting -# host: 'dns or ip' # [required] the remote host address in either IPv4, IPv6 or as DNS name. -# port: 22 # [required] the port number to check. Specify an integer, not service name. - -# You just have been warned about possible portscan blocking. The portcheck plugin is meant for simple use cases. -# Currently, the accuracy of the latency is low and should be used as reference only. - diff --git a/conf.d/python.d/postfix.conf b/conf.d/python.d/postfix.conf deleted file mode 100644 index e0d5a5f83..000000000 --- a/conf.d/python.d/postfix.conf +++ /dev/null @@ -1,74 +0,0 @@ -# netdata python.d.plugin configuration for postfix -# -# This file is in YaML format. Generally the format is: -# -# name: value -# -# There are 2 sections: -# - global variables -# - one or more JOBS -# -# JOBS allow you to collect values from multiple sources. -# Each source will have its own set of charts. -# -# JOB parameters have to be indented (using spaces only, example below). 
- -# ---------------------------------------------------------------------- -# Global Variables -# These variables set the defaults for all JOBs, however each JOB -# may define its own, overriding the defaults. - -# update_every sets the default data collection frequency. -# If unset, the python.d.plugin default is used. -# postfix is slow, so once every 10 seconds -update_every: 10 - -# priority controls the order of charts at the netdata dashboard. -# Lower numbers move the charts towards the top of the page. -# If unset, the default for python.d.plugin is used. -# priority: 60000 - -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 - -# autodetection_retry sets the job re-check interval in seconds. -# The job is not deleted if check fails. -# Attempts to start the job are made once every autodetection_retry. -# This feature is disabled by default. -# autodetection_retry: 0 - -# ---------------------------------------------------------------------- -# JOBS (data collection sources) -# -# The default JOBS share the same *name*. JOBS with the same name -# are mutually exclusive. Only one of them will be allowed running at -# any time. This allows autodetection to try several alternatives and -# pick the one that works. -# -# Any number of jobs is supported. -# -# All python.d.plugin JOBS (for all its modules) support a set of -# predefined parameters. These are: -# -# job_name: -# name: myname # the JOB's name as it will appear at the -# # dashboard (by default is the job_name) -# # JOBs sharing a name are mutually exclusive -# update_every: 1 # the JOB's data collection frequency -# priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts -# autodetection_retry: 0 # the JOB's re-check interval in seconds -# -# Additionally to the above, postfix also supports the following: -# -# command: 'postqueue -p' # the command to run -# - -# ---------------------------------------------------------------------- -# AUTO-DETECTION JOBS - -local: - command: 'postqueue -p' diff --git a/conf.d/python.d/postgres.conf b/conf.d/python.d/postgres.conf deleted file mode 100644 index b69ca3717..000000000 --- a/conf.d/python.d/postgres.conf +++ /dev/null @@ -1,124 +0,0 @@ -# netdata python.d.plugin configuration for postgresql -# -# This file is in YaML format. Generally the format is: -# -# name: value -# -# There are 2 sections: -# - global variables -# - one or more JOBS -# -# JOBS allow you to collect values from multiple sources. -# Each source will have its own set of charts. -# -# JOB parameters have to be indented (using spaces only, example below). - -# ---------------------------------------------------------------------- -# Global Variables -# These variables set the defaults for all JOBs, however each JOB -# may define its own, overriding the defaults. - -# update_every sets the default data collection frequency. -# If unset, the python.d.plugin default is used. -# update_every: 1 - -# priority controls the order of charts at the netdata dashboard. -# Lower numbers move the charts towards the top of the page. -# If unset, the default for python.d.plugin is used. -# priority: 60000 - -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. 
-# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 - -# autodetection_retry sets the job re-check interval in seconds. -# The job is not deleted if check fails. -# Attempts to start the job are made once every autodetection_retry. -# This feature is disabled by default. -# autodetection_retry: 0 - -# ---------------------------------------------------------------------- -# JOBS (data collection sources) -# -# The default JOBS share the same *name*. JOBS with the same name -# are mutually exclusive. Only one of them will be allowed running at -# any time. This allows autodetection to try several alternatives and -# pick the one that works. -# -# Any number of jobs is supported. -# -# All python.d.plugin JOBS (for all its modules) support a set of -# predefined parameters. These are: -# -# job_name: -# name: myname # the JOB's name as it will appear at the -# # dashboard (by default is the job_name) -# # JOBs sharing a name are mutually exclusive -# update_every: 1 # the JOB's data collection frequency -# priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts -# autodetection_retry: 0 # the JOB's re-check interval in seconds -# -# A single connection is required in order to pull statistics. -# -# Connections can be configured with the following options: -# -# database : 'example_db_name' -# user : 'example_user' -# password : 'example_pass' -# host : 'localhost' -# port : 5432 -# -# Additionally, the following options allow selective disabling of charts -# -# table_stats : false -# index_stats : false -# database_poll : 'dbase_name1 dbase_name2' # poll only specified databases (all others will be excluded from charts) -# -# Postgres permissions are configured in its pg_hba.conf file. You can -# "trust" local clients to allow netdata to connect, or you can create -# a postgres user for netdata and add its password below to allow -# netdata to connect. -# -# Postgres supported versions are: -# - 9.3 (without autovacuum) -# - 9.4 -# - 9.5 -# - 9.6 -# - 10 -# -# Superuser access is needed for these charts: -# Write-Ahead Logs -# Archive Write-Ahead Logs -# -# Autovacuum charts are available since Postgres 9.4 -# ---------------------------------------------------------------------- - -socket: - name : 'local' - user : 'postgres' - database : 'postgres' - -tcp: - name : 'local' - database : 'postgres' - user : 'postgres' - host : 'localhost' - port : 5432 - -tcpipv4: - name : 'local' - database : 'postgres' - user : 'postgres' - host : '127.0.0.1' - port : 5432 - -tcpipv6: - name : 'local' - database : 'postgres' - user : 'postgres' - host : '::1' - port : 5432 - diff --git a/conf.d/python.d/powerdns.conf b/conf.d/python.d/powerdns.conf deleted file mode 100644 index ca6200df1..000000000 --- a/conf.d/python.d/powerdns.conf +++ /dev/null @@ -1,78 +0,0 @@ -# netdata python.d.plugin configuration for powerdns -# -# This file is in YaML format. Generally the format is: -# -# name: value -# -# There are 2 sections: -# - global variables -# - one or more JOBS -# -# JOBS allow you to collect values from multiple sources. -# Each source will have its own set of charts. -# -# JOB parameters have to be indented (using spaces only, example below). - -# ---------------------------------------------------------------------- -# Global Variables -# These variables set the defaults for all JOBs, however each JOB -# may define its own, overriding the defaults.
- -# update_every sets the default data collection frequency. -# If unset, the python.d.plugin default is used. -# update_every: 1 - -# priority controls the order of charts at the netdata dashboard. -# Lower numbers move the charts towards the top of the page. -# If unset, the default for python.d.plugin is used. -# priority: 60000 - -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 - -# autodetection_retry sets the job re-check interval in seconds. -# The job is not deleted if check fails. -# Attempts to start the job are made once every autodetection_retry. -# This feature is disabled by default. -# autodetection_retry: 0 - -# ---------------------------------------------------------------------- -# JOBS (data collection sources) -# -# The default JOBS share the same *name*. JOBS with the same name -# are mutually exclusive. Only one of them will be allowed running at -# any time. This allows autodetection to try several alternatives and -# pick the one that works. -# -# Any number of jobs is supported. -# -# All python.d.plugin JOBS (for all its modules) support a set of -# predefined parameters. These are: -# -# job_name: -# name: myname # the JOB's name as it will appear at the -# # dashboard (by default is the job_name) -# # JOBs sharing a name are mutually exclusive -# update_every: 1 # the JOB's data collection frequency -# priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts -# autodetection_retry: 0 # the JOB's re-check interval in seconds -# -# Additionally to the above, powerdns also supports the following: -# -# url: 'URL' # the URL to fetch powerdns performance statistics -# header: -# X-API-Key: 'Key' # API key -# -# ---------------------------------------------------------------------- -# AUTO-DETECTION JOBS -# only one of them will run (they have the same name) - -# localhost: -# name : 'local' -# url : 'http://127.0.0.1:8081/api/v1/servers/localhost/statistics' -# header: -# X-API-Key: 'change_me' diff --git a/conf.d/python.d/rabbitmq.conf b/conf.d/python.d/rabbitmq.conf deleted file mode 100644 index 3f90da8a2..000000000 --- a/conf.d/python.d/rabbitmq.conf +++ /dev/null @@ -1,82 +0,0 @@ -# netdata python.d.plugin configuration for rabbitmq -# -# This file is in YaML format. Generally the format is: -# -# name: value -# -# There are 2 sections: -# - global variables -# - one or more JOBS -# -# JOBS allow you to collect values from multiple sources. -# Each source will have its own set of charts. -# -# JOB parameters have to be indented (using spaces only, example below). - -# ---------------------------------------------------------------------- -# Global Variables -# These variables set the defaults for all JOBs, however each JOB -# may define its own, overriding the defaults. - -# update_every sets the default data collection frequency. -# If unset, the python.d.plugin default is used. -# update_every: 1 - -# priority controls the order of charts at the netdata dashboard. -# Lower numbers move the charts towards the top of the page. -# If unset, the default for python.d.plugin is used. -# priority: 60000 - -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 - -# autodetection_retry sets the job re-check interval in seconds. -# The job is not deleted if check fails. -# Attempts to start the job are made once every autodetection_retry. -# This feature is disabled by default. -# autodetection_retry: 0 - -# ---------------------------------------------------------------------- -# JOBS (data collection sources) -# -# The default JOBS share the same *name*. JOBS with the same name -# are mutually exclusive. Only one of them will be allowed running at -# any time. This allows autodetection to try several alternatives and -# pick the one that works. -# -# Any number of jobs is supported. -# -# All python.d.plugin JOBS (for all its modules) support a set of -# predefined parameters. These are: -# -# job_name: -# name: myname # the JOB's name as it will appear at the -# # dashboard (by default is the job_name) -# # JOBs sharing a name are mutually exclusive -# update_every: 1 # the JOB's data collection frequency -# priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts -# autodetection_retry: 0 # the JOB's re-check interval in seconds -# -# Additionally to the above, rabbitmq plugin also supports the following: -# -# host: 'ipaddress' # Server ip address or hostname. Default: 127.0.0.1 -# port: 'port' # Rabbitmq port. Default: 15672 -# scheme: 'scheme' # http or https. Default: http -# -# if the URL is password protected, the following are supported: -# -# user: 'username' -# pass: 'password' -# -# ---------------------------------------------------------------------- -# AUTO-DETECTION JOBS -# only one of them will run (they have the same name) -# -local: - host: '127.0.0.1' - user: 'guest' - pass: 'guest' diff --git a/conf.d/python.d/redis.conf b/conf.d/python.d/redis.conf deleted file mode 100644 index 6363f6da7..000000000 --- a/conf.d/python.d/redis.conf +++ /dev/null @@ -1,112 +0,0 @@ -# netdata python.d.plugin configuration for redis -# -# This file is in YaML format. Generally the format is: -# -# name: value -# -# There are 2 sections: -# - global variables -# - one or more JOBS -# -# JOBS allow you to collect values from multiple sources. -# Each source will have its own set of charts. -# -# JOB parameters have to be indented (using spaces only, example below). - -# ---------------------------------------------------------------------- -# Global Variables -# These variables set the defaults for all JOBs, however each JOB -# may define its own, overriding the defaults. - -# update_every sets the default data collection frequency. -# If unset, the python.d.plugin default is used. -# update_every: 1 - -# priority controls the order of charts at the netdata dashboard. -# Lower numbers move the charts towards the top of the page. -# If unset, the default for python.d.plugin is used. -# priority: 60000 - -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 - -# autodetection_retry sets the job re-check interval in seconds. -# The job is not deleted if check fails. -# Attempts to start the job are made once every autodetection_retry. -# This feature is disabled by default. 
-# autodetection_retry: 0 - -# ---------------------------------------------------------------------- -# JOBS (data collection sources) -# -# The default JOBS share the same *name*. JOBS with the same name -# are mutually exclusive. Only one of them will be allowed running at -# any time. This allows autodetection to try several alternatives and -# pick the one that works. -# -# Any number of jobs is supported. -# -# All python.d.plugin JOBS (for all its modules) support a set of -# predefined parameters. These are: -# -# job_name: -# name: myname # the JOB's name as it will appear at the -# # dashboard (by default is the job_name) -# # JOBs sharing a name are mutually exclusive -# update_every: 1 # the JOB's data collection frequency -# priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts -# autodetection_retry: 0 # the JOB's re-check interval in seconds -# -# Additionally to the above, redis also supports the following: -# -# socket: 'path/to/redis.sock' -# -# or -# host: 'IP or HOSTNAME' # the host to connect to -# port: PORT # the port to connect to -# -# and -# pass: 'password' # the redis password to use for AUTH command -# - -# ---------------------------------------------------------------------- -# AUTO-DETECTION JOBS -# only one of them will run (they have the same name) - -socket1: - name : 'local' - socket : '/tmp/redis.sock' - # pass : '' - -socket2: - name : 'local' - socket : '/var/run/redis/redis.sock' - # pass : '' - -socket3: - name : 'local' - socket : '/var/lib/redis/redis.sock' - # pass : '' - -localhost: - name : 'local' - host : 'localhost' - port : 6379 - # pass : '' - -localipv4: - name : 'local' - host : '127.0.0.1' - port : 6379 - # pass : '' - -localipv6: - name : 'local' - host : '::1' - port : 6379 - # pass : '' - diff --git a/conf.d/python.d/retroshare.conf b/conf.d/python.d/retroshare.conf deleted file mode 100644 index 9c92583f7..000000000 --- a/conf.d/python.d/retroshare.conf +++ /dev/null @@ -1,74 +0,0 @@ -# netdata python.d.plugin configuration for RetroShare -# -# This file is in YaML format. Generally the format is: -# -# name: value -# -# There are 2 sections: -# - global variables -# - one or more JOBS -# -# JOBS allow you to collect values from multiple sources. -# Each source will have its own set of charts. -# -# JOB parameters have to be indented (using spaces only, example below). - -# ---------------------------------------------------------------------- -# Global Variables -# These variables set the defaults for all JOBs, however each JOB -# may define its own, overriding the defaults. - -# update_every sets the default data collection frequency. -# If unset, the python.d.plugin default is used. -# update_every: 1 - -# priority controls the order of charts at the netdata dashboard. -# Lower numbers move the charts towards the top of the page. -# If unset, the default for python.d.plugin is used. -# priority: 60000 - -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 - -# autodetection_retry sets the job re-check interval in seconds. -# The job is not deleted if check fails. -# Attempts to start the job are made once every autodetection_retry. -# This feature is disabled by default. 
-# autodetection_retry: 0 - -# ---------------------------------------------------------------------- -# JOBS (data collection sources) -# -# The default JOBS share the same *name*. JOBS with the same name -# are mutually exclusive. Only one of them will be allowed running at -# any time. This allows autodetection to try several alternatives and -# pick the one that works. -# -# Any number of jobs is supported. -# -# All python.d.plugin JOBS (for all its modules) support a set of -# predefined parameters. These are: -# -# job_name: -# name: myname # the JOB's name as it will appear at the -# # dashboard (by default is the job_name) -# # JOBs sharing a name are mutually exclusive -# update_every: 1 # the JOB's data collection frequency -# priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts -# autodetection_retry: 0 # the JOB's re-check interval in seconds -# -# Additionally to the above, RetroShare also supports the following: -# -# - url: 'url' # the URL to the WebUI -# -# ---------------------------------------------------------------------- -# AUTO-DETECTION JOBS -# only one of them will run (they have the same name) - -localhost: - name: 'local' - url: 'http://localhost:9090' diff --git a/conf.d/python.d/samba.conf b/conf.d/python.d/samba.conf deleted file mode 100644 index ee513c60f..000000000 --- a/conf.d/python.d/samba.conf +++ /dev/null @@ -1,62 +0,0 @@ -# netdata python.d.plugin configuration for samba -# -# This file is in YaML format. Generally the format is: -# -# name: value -# -# There are 2 sections: -# - global variables -# - one or more JOBS -# -# JOBS allow you to collect values from multiple sources. -# Each source will have its own set of charts. -# -# JOB parameters have to be indented (using spaces only, example below). - -# ---------------------------------------------------------------------- -# Global Variables -# These variables set the defaults for all JOBs, however each JOB -# may define its own, overriding the defaults. - -# update_every sets the default data collection frequency. -# If unset, the python.d.plugin default is used. -update_every: 5 - -# priority controls the order of charts at the netdata dashboard. -# Lower numbers move the charts towards the top of the page. -# If unset, the default for python.d.plugin is used. -# priority: 60000 - -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 - -# autodetection_retry sets the job re-check interval in seconds. -# The job is not deleted if check fails. -# Attempts to start the job are made once every autodetection_retry. -# This feature is disabled by default. -# autodetection_retry: 0 - -# ---------------------------------------------------------------------- -# JOBS (data collection sources) -# -# The default JOBS share the same *name*. JOBS with the same name -# are mutually exclusive. Only one of them will be allowed running at -# any time. This allows autodetection to try several alternatives and -# pick the one that works. -# -# Any number of jobs is supported. -# -# All python.d.plugin JOBS (for all its modules) support a set of -# predefined parameters. 
These are: -# -# job_name: -# name: myname # the JOB's name as it will appear at the -# # dashboard (by default is the job_name) -# # JOBs sharing a name are mutually exclusive -# update_every: 1 # the JOB's data collection frequency -# priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts -# autodetection_retry: 0 # the JOB's re-check interval in seconds
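The samba configuration above documents only the generic JOB parameters and ships no module-specific options or jobs. If an explicit job is wanted, a minimal sketch using only the parameters documented above could look like this (the job name `local_samba` is illustrative, not a shipped default):

local_samba:
  name: 'local'
  update_every: 10   # overrides the 5-second global default set above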
\ No newline at end of file diff --git a/conf.d/python.d/sensors.conf b/conf.d/python.d/sensors.conf deleted file mode 100644 index 83bbffd7d..000000000 --- a/conf.d/python.d/sensors.conf +++ /dev/null @@ -1,63 +0,0 @@ -# netdata python.d.plugin configuration for sensors -# -# This file is in YaML format. Generally the format is: -# -# name: value -# - -# ---------------------------------------------------------------------- -# Global Variables -# These variables set the defaults for all JOBs, however each JOB -# may define its own, overriding the defaults. - -# update_every sets the default data collection frequency. -# If unset, the python.d.plugin default is used. -# update_every: 1 - -# priority controls the order of charts at the netdata dashboard. -# Lower numbers move the charts towards the top of the page. -# If unset, the default for python.d.plugin is used. -# priority: 60000 - -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 - -# autodetection_retry sets the job re-check interval in seconds. -# The job is not deleted if check fails. -# Attempts to start the job are made once every autodetection_retry. -# This feature is disabled by default. -# autodetection_retry: 0 - -# ---------------------------------------------------------------------- -# Limit the number of sensors types. -# Comment the ones you want to disable. -# Also, re-arranging this list controls the order of the charts at the -# netdata dashboard. - -types: - - temperature - - fan - - voltage - - current - - power - - energy - - humidity - -# ---------------------------------------------------------------------- -# Limit the number of sensors chips. -# Uncomment the first line (chips:) and add chip names below it. -# The chip names that start with like that will be matched. -# You can find the chip names using the sensors command. - -#chips: -# - i8k -# - coretemp -# -# chip names can be found using the sensors shell command -# the prefix is matched (anything that starts like that) -# -#---------------------------------------------------------------------- - diff --git a/conf.d/python.d/smartd_log.conf b/conf.d/python.d/smartd_log.conf deleted file mode 100644 index 3fab3f1c0..000000000 --- a/conf.d/python.d/smartd_log.conf +++ /dev/null @@ -1,90 +0,0 @@ -# netdata python.d.plugin configuration for smartd log -# -# This file is in YaML format. Generally the format is: -# -# name: value -# -# There are 2 sections: -# - global variables -# - one or more JOBS -# -# JOBS allow you to collect values from multiple sources. -# Each source will have its own set of charts. -# -# JOB parameters have to be indented (using spaces only, example below). - -# ---------------------------------------------------------------------- -# Global Variables -# These variables set the defaults for all JOBs, however each JOB -# may define its own, overriding the defaults. - -# update_every sets the default data collection frequency. -# If unset, the python.d.plugin default is used. -# update_every: 1 - -# priority controls the order of charts at the netdata dashboard. -# Lower numbers move the charts towards the top of the page. -# If unset, the default for python.d.plugin is used. -# priority: 60000 - -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. 
-# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 - -# autodetection_retry sets the job re-check interval in seconds. -# The job is not deleted if check fails. -# Attempts to start the job are made once every autodetection_retry. -# This feature is disabled by default. -# autodetection_retry: 0 - -# ---------------------------------------------------------------------- -# JOBS (data collection sources) -# -# The default JOBS share the same *name*. JOBS with the same name -# are mutually exclusive. Only one of them will be allowed running at -# any time. This allows autodetection to try several alternatives and -# pick the one that works. -# -# Any number of jobs is supported. -# -# All python.d.plugin JOBS (for all its modules) support a set of -# predefined parameters. These are: -# -# job_name: -# name: myname # the JOB's name as it will appear at the -# # dashboard (by default is the job_name) -# # JOBs sharing a name are mutually exclusive -# update_every: 1 # the JOB's data collection frequency -# priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts -# autodetection_retry: 0 # the JOB's re-check interval in seconds -# -# Additionally to the above, smartd_log also supports the following: -# -# log_path: '/path/to/smartdlogs' # path to smartd log files. Default is /var/log/smartd -# raw_values: yes # enable/disable raw values charts. Enabled by default. -# smart_attributes: '1 2 3 4 44' # smart attributes charts. Default are ['1', '4', '5', '7', '9', '12', '193', '194', '197', '198', '200']. -# exclude_disks: 'PATTERN1 PATTERN2' # space separated patterns. If the pattern is in the drive name, the module will not collect data for it. -# -# ---------------------------------------------------------------------- -# Additional information -# Plugin reads smartd log files (-A option). -# You need to add (man smartd) to /etc/default/smartmontools '-i 600 -A /var/log/smartd/' to pass additional options to smartd on startup -# Then restart smartd service and check /path/log/smartdlogs -# ls /var/log/smartd/ -# CDC_WD10EZEX_00BN5A0-WD_WCC3F7FLVZS9.ata.csv WDC_WD10EZEX_00BN5A0-WD_WCC3F7FLVZS9.ata.csv ZDC_WD10EZEX_00BN5A0-WD_WCC3F7FLVZS9.ata.csv -# -# Smartd APPEND logs at every run. Its NOT RECOMMENDED to set '-i' option below 60 sec. -# STRONGLY RECOMMENDED to create smartd conf file for logrotate -# -# RAW vs NORMALIZED values -# "Normalized value", commonly referred to as just "value". This is a most universal measurement, on the scale from 0 (bad) to some maximum (good) value. -# Maximum values are typically 100, 200 or 253. Rule of thumb is: high values are good, low values are bad. -# -# "Raw value" - the value of the attribute as it is tracked by the device, before any normalization takes place. -# Some raw numbers provide valuable insight when properly interpreted. These cases will be discussed later on. -# Raw values are typically listed in hexadecimal numbers. The raw value has different structure for different vendors and is often not meaningful as a decimal number. -# -# ---------------------------------------------------------------------- diff --git a/conf.d/python.d/springboot.conf b/conf.d/python.d/springboot.conf deleted file mode 100644 index 40b5fb437..000000000 --- a/conf.d/python.d/springboot.conf +++ /dev/null @@ -1,120 +0,0 @@ -# netdata python.d.plugin configuration for springboot -# -# This file is in YaML format. 
Generally the format is: -# -# name: value -# -# There are 2 sections: -# - global variables -# - one or more JOBS -# -# JOBS allow you to collect values from multiple sources. -# Each source will have its own set of charts. -# -# JOB parameters have to be indented (using spaces only, example below). - -# ---------------------------------------------------------------------- -# Global Variables -# These variables set the defaults for all JOBs, however each JOB -# may define its own, overriding the defaults. - -# update_every sets the default data collection frequency. -# If unset, the python.d.plugin default is used. -# update_every: 1 - -# priority controls the order of charts at the netdata dashboard. -# Lower numbers move the charts towards the top of the page. -# If unset, the default for python.d.plugin is used. -# priority: 60000 - -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 - -# autodetection_retry sets the job re-check interval in seconds. -# The job is not deleted if check fails. -# Attempts to start the job are made once every autodetection_retry. -# This feature is disabled by default. -# autodetection_retry: 0 - -# ---------------------------------------------------------------------- -# JOBS (data collection sources) -# -# Any number of jobs is supported. -# -# All python.d.plugin JOBS (for all its modules) support a set of -# predefined parameters. These are: -# -# job_name: -# name: myname # the JOB's name as it will appear at the -# # dashboard (by default is the job_name) -# # JOBs sharing a name are mutually exclusive -# update_every: 1 # the JOB's data collection frequency -# priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts -# autodetection_retry: 0 # the JOB's re-check interval in seconds -# -# Additionally to the above, this plugin also supports the following: -# -# url: 'http://127.0.0.1/metrics' # the URL of the spring boot actuator metrics -# -# if the URL is password protected, the following are supported: -# -# user: 'username' -# pass: 'password' -# -# defaults: -# [chart_id]: true | false # enables/disables default charts, defaults true. -# extras: {} # defines extra charts to monitor, please see the example below -# - id: [chart_id] -# options: {} -# lines: [] -# -# If all defaults is disabled and no extra charts are defined, this module will disable itself, as it has no data to -# collect. 
-# -# Configuration example -# --------------------- -# example: -# name: 'example' -# url: 'http://localhost:8080/metrics' -# defaults: -# response_code: true -# threads: true -# gc_time: true -# gc_ope: true -# heap: false -# extras: -# - id: 'heap' -# options: { title: 'Heap Memory Usage', units: 'KB', family: 'heap memory', context: 'springboot.heap', charttype: 'stacked' } -# lines: -# - { dimension: 'mem_free', name: 'free'} -# - { dimension: 'mempool_eden_used', name: 'eden', algorithm: 'absolute', multiplier: 1, divisor: 1} -# - { dimension: 'mempool_survivor_used', name: 'survivor', algorithm: 'absolute', multiplier: 1, divisor: 1} -# - { dimension: 'mempool_tenured_used', name: 'tenured', algorithm: 'absolute', multiplier: 1, divisor: 1} -# - id: 'heap_eden' -# options: { title: 'Eden Memory Usage', units: 'KB', family: 'heap memory', context: 'springboot.heap_eden', charttype: 'area' } -# lines: -# - { dimension: 'mempool_eden_used', name: 'used'} -# - { dimension: 'mempool_eden_committed', name: 'committed'} -# - id: 'heap_survivor' -# options: { title: 'Survivor Memory Usage', units: 'KB', family: 'heap memory', context: 'springboot.heap_survivor', charttype: 'area' } -# lines: -# - { dimension: 'mempool_survivor_used', name: 'used'} -# - { dimension: 'mempool_survivor_committed', name: 'committed'} -# - id: 'heap_tenured' -# options: { title: 'Tenured Memory Usage', units: 'KB', family: 'heap memory', context: 'springboot.heap_tenured', charttype: 'area' } -# lines: -# - { dimension: 'mempool_tenured_used', name: 'used'} -# - { dimension: 'mempool_tenured_committed', name: 'committed'} - - -local: - name: 'local' - url: 'http://localhost:8080/metrics' - -local_ip: - name: 'local' - url: 'http://127.0.0.1:8080/metrics' diff --git a/conf.d/python.d/squid.conf b/conf.d/python.d/squid.conf deleted file mode 100644 index 564187f00..000000000 --- a/conf.d/python.d/squid.conf +++ /dev/null @@ -1,169 +0,0 @@ -# netdata python.d.plugin configuration for squid -# -# This file is in YaML format. Generally the format is: -# -# name: value -# -# There are 2 sections: -# - global variables -# - one or more JOBS -# -# JOBS allow you to collect values from multiple sources. -# Each source will have its own set of charts. -# -# JOB parameters have to be indented (using spaces only, example below). - -# ---------------------------------------------------------------------- -# Global Variables -# These variables set the defaults for all JOBs, however each JOB -# may define its own, overriding the defaults. - -# update_every sets the default data collection frequency. -# If unset, the python.d.plugin default is used. -# update_every: 1 - -# priority controls the order of charts at the netdata dashboard. -# Lower numbers move the charts towards the top of the page. -# If unset, the default for python.d.plugin is used. -# priority: 60000 - -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 - -# autodetection_retry sets the job re-check interval in seconds. -# The job is not deleted if check fails. -# Attempts to start the job are made once every autodetection_retry. -# This feature is disabled by default. -# autodetection_retry: 0 - -# ---------------------------------------------------------------------- -# JOBS (data collection sources) -# -# The default JOBS share the same *name*.
JOBS with the same name -# are mutually exclusive. Only one of them will be allowed running at -# any time. This allows autodetection to try several alternatives and -# pick the one that works. -# -# Any number of jobs is supported. -# -# All python.d.plugin JOBS (for all its modules) support a set of -# predefined parameters. These are: -# -# job_name: -# name: myname # the JOB's name as it will appear at the -# # dashboard (by default is the job_name) -# # JOBs sharing a name are mutually exclusive -# update_every: 1 # the JOB's data collection frequency -# priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts -# autodetection_retry: 0 # the JOB's re-check interval in seconds -# -# Additionally to the above, squid also supports the following: -# -# host : 'IP or HOSTNAME' # the host to connect to -# port : PORT # the port to connect to -# request: 'URL' # the URL to request from squid -# - -# ---------------------------------------------------------------------- -# SQUID CONFIGURATION -# -# See: -# http://wiki.squid-cache.org/Features/CacheManager -# -# In short, add to your squid configuration these: -# -# http_access allow localhost manager -# http_access deny manager -# -# To remotely monitor a squid: -# -# acl managerAdmin src 192.0.2.1 -# http_access allow localhost manager -# http_access allow managerAdmin manager -# http_access deny manager -# - -# ---------------------------------------------------------------------- -# AUTO-DETECTION JOBS -# only one of them will run (they have the same name) - -tcp3128old: - name : 'local' - host : 'localhost' - port : 3128 - request : 'cache_object://localhost:3128/counters' - -tcp8080old: - name : 'local' - host : 'localhost' - port : 8080 - request : 'cache_object://localhost:3128/counters' - -tcp3128new: - name : 'local' - host : 'localhost' - port : 3128 - request : '/squid-internal-mgr/counters' - -tcp8080new: - name : 'local' - host : 'localhost' - port : 8080 - request : '/squid-internal-mgr/counters' - -# IPv4 - -tcp3128oldipv4: - name : 'local' - host : '127.0.0.1' - port : 3128 - request : 'cache_object://127.0.0.1:3128/counters' - -tcp8080oldipv4: - name : 'local' - host : '127.0.0.1' - port : 8080 - request : 'cache_object://127.0.0.1:3128/counters' - -tcp3128newipv4: - name : 'local' - host : '127.0.0.1' - port : 3128 - request : '/squid-internal-mgr/counters' - -tcp8080newipv4: - name : 'local' - host : '127.0.0.1' - port : 8080 - request : '/squid-internal-mgr/counters' - -# IPv6 - -tcp3128oldipv6: - name : 'local' - host : '::1' - port : 3128 - request : 'cache_object://[::1]:3128/counters' - -tcp8080oldipv6: - name : 'local' - host : '::1' - port : 8080 - request : 'cache_object://[::1]:3128/counters' - -tcp3128newipv6: - name : 'local' - host : '::1' - port : 3128 - request : '/squid-internal-mgr/counters' - -tcp8080newipv6: - name : 'local' - host : '::1' - port : 8080 - request : '/squid-internal-mgr/counters' - diff --git a/conf.d/python.d/tomcat.conf b/conf.d/python.d/tomcat.conf deleted file mode 100644 index c63f06cfa..000000000 --- a/conf.d/python.d/tomcat.conf +++ /dev/null @@ -1,91 +0,0 @@ -# netdata python.d.plugin configuration for tomcat -# -# This file is in YaML format. Generally the format is: -# -# name: value -# -# There are 2 sections: -# - global variables -# - one or more JOBS -# -# JOBS allow you to collect values from multiple sources. -# Each source will have its own set of charts. 
-# -# JOB parameters have to be indented (using spaces only, example below). - -# ---------------------------------------------------------------------- -# Global Variables -# These variables set the defaults for all JOBs, however each JOB -# may define its own, overriding the defaults. - -# update_every sets the default data collection frequency. -# If unset, the python.d.plugin default is used. -# update_every: 1 - -# priority controls the order of charts at the netdata dashboard. -# Lower numbers move the charts towards the top of the page. -# If unset, the default for python.d.plugin is used. -# priority: 60000 - -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 - -# autodetection_retry sets the job re-check interval in seconds. -# The job is not deleted if check fails. -# Attempts to start the job are made once every autodetection_retry. -# This feature is disabled by default. -# autodetection_retry: 0 - -# ---------------------------------------------------------------------- -# JOBS (data collection sources) -# -# The default JOBS share the same *name*. JOBS with the same name -# are mutually exclusive. Only one of them will be allowed running at -# any time. This allows autodetection to try several alternatives and -# pick the one that works. -# -# Any number of jobs is supported. -# -# All python.d.plugin JOBS (for all its modules) support a set of -# predefined parameters. These are: -# -# job_name: -# name: myname # the JOB's name as it will appear at the -# # dashboard (by default is the job_name) -# # JOBs sharing a name are mutually exclusive -# update_every: 1 # the JOB's data collection frequency -# priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts -# autodetection_retry: 0 # the JOB's re-check interval in seconds -# -# Additionally to the above, tomcat also supports the following: -# -# url: 'URL' # the URL to fetch tomcat's status stats -# -# if the URL is password protected, the following are supported: -# -# user: 'username' -# pass: 'password' -# -# if you have multiple connectors, the following are supported: -# -# connector_name: 'ajp-bio-8009' # default is null, which uses the first connector in the status XML -# -# ---------------------------------------------------------------------- -# AUTO-DETECTION JOBS -# only one of them will run (they have the same name) - -localhost: - name : 'local' - url : 'http://localhost:8080/manager/status?XML=true' - -localipv4: - name : 'local' - url : 'http://127.0.0.1:8080/manager/status?XML=true' - -localipv6: - name : 'local' - url : 'http://[::1]:8080/manager/status?XML=true' diff --git a/conf.d/python.d/traefik.conf b/conf.d/python.d/traefik.conf deleted file mode 100644 index 909b9e549..000000000 --- a/conf.d/python.d/traefik.conf +++ /dev/null @@ -1,79 +0,0 @@ -# netdata python.d.plugin configuration for traefik health data API -# -# This file is in YaML format. Generally the format is: -# -# name: value -# -# There are 2 sections: -# - global variables -# - one or more JOBS -# -# JOBS allow you to collect values from multiple sources. -# Each source will have its own set of charts. -# -# JOB parameters have to be indented (using spaces only, example below).
- -# ---------------------------------------------------------------------- -# Global Variables -# These variables set the defaults for all JOBs, however each JOB -# may define its own, overriding the defaults. - -# update_every sets the default data collection frequency. -# If unset, the python.d.plugin default is used. -# update_every: 1 - -# priority controls the order of charts at the netdata dashboard. -# Lower numbers move the charts towards the top of the page. -# If unset, the default for python.d.plugin is used. -# priority: 60000 - -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 - -# autodetection_retry sets the job re-check interval in seconds. -# The job is not deleted if check fails. -# Attempts to start the job are made once every autodetection_retry. -# This feature is disabled by default. -# autodetection_retry: 0 - -# ---------------------------------------------------------------------- -# JOBS (data collection sources) -# -# The default JOBS share the same *name*. JOBS with the same name -# are mutually exclusive. Only one of them will be allowed running at -# any time. This allows autodetection to try several alternatives and -# pick the one that works. -# -# Any number of jobs is supported. -# -# All python.d.plugin JOBS (for all its modules) support a set of -# predefined parameters. These are: -# -# job_name: -# name: myname # the JOB's name as it will appear at the -# # dashboard (by default is the job_name) -# # JOBs sharing a name are mutually exclusive -# update_every: 1 # the JOB's data collection frequency -# priority: 60000 # the JOB's order on the dashboard -# retries: 10 # the JOB's number of restoration attempts -# autodetection_retry: 0 # the JOB's re-check interval in seconds -# -# Additionally to the above, traefik plugin also supports the following: -# -# url: '<scheme>://<host>:<port>/<health_page_api>' -# # http://localhost:8080/health -# -# if the URL is password protected, the following are supported: -# -# user: 'username' -# pass: 'password' -# -# ---------------------------------------------------------------------- -# AUTO-DETECTION JOBS -# only one of them will run (they have the same name) -# -local: - url: 'http://localhost:8080/health' diff --git a/conf.d/python.d/varnish.conf b/conf.d/python.d/varnish.conf deleted file mode 100644 index 4b069d514..000000000 --- a/conf.d/python.d/varnish.conf +++ /dev/null @@ -1,64 +0,0 @@ -# netdata python.d.plugin configuration for varnish -# -# This file is in YaML format. Generally the format is: -# -# name: value -# -# There are 2 sections: -# - global variables -# - one or more JOBS -# -# JOBS allow you to collect values from multiple sources. -# Each source will have its own set of charts. -# -# JOB parameters have to be indented (using spaces only, example below). - -# ---------------------------------------------------------------------- -# Global Variables -# These variables set the defaults for all JOBs, however each JOB -# may define its own, overriding the defaults. - -# update_every sets the default data collection frequency. -# If unset, the python.d.plugin default is used. -# update_every: 1 - -# priority controls the order of charts at the netdata dashboard. -# Lower numbers move the charts towards the top of the page. -# If unset, the default for python.d.plugin is used. 
-# priority: 60000 - -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 - -# autodetection_retry sets the job re-check interval in seconds. -# The job is not deleted if check fails. -# Attempts to start the job are made once every autodetection_retry. -# This feature is disabled by default. -# autodetection_retry: 0 - -# ---------------------------------------------------------------------- -# JOBS (data collection sources) -# -# The default JOBS share the same *name*. JOBS with the same name -# are mutually exclusive. Only one of them will be allowed running at -# any time. This allows autodetection to try several alternatives and -# pick the one that works. -# -# Any number of jobs is supported. -# -# All python.d.plugin JOBS (for all its modules) support a set of -# predefined parameters. These are: -# -# job_name: -# name: myname # the JOB's name as it will appear at the -# # dashboard (by default is the job_name) -# # JOBs sharing a name are mutually exclusive -# update_every: 1 # the JOB's data collection frequency -# priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts -# autodetection_retry: 0 # the JOB's re-check interval in seconds -# -# ---------------------------------------------------------------------- diff --git a/conf.d/python.d/web_log.conf b/conf.d/python.d/web_log.conf deleted file mode 100644 index c185f8d85..000000000 --- a/conf.d/python.d/web_log.conf +++ /dev/null @@ -1,195 +0,0 @@ -# netdata python.d.plugin configuration for web log -# -# This file is in YaML format. Generally the format is: -# -# name: value -# -# There are 2 sections: -# - global variables -# - one or more JOBS -# -# JOBS allow you to collect values from multiple sources. -# Each source will have its own set of charts. -# -# JOB parameters have to be indented (using spaces only, example below). - -# ---------------------------------------------------------------------- -# Global Variables -# These variables set the defaults for all JOBs, however each JOB -# may define its own, overriding the defaults. - -# update_every sets the default data collection frequency. -# If unset, the python.d.plugin default is used. -# update_every: 1 - -# priority controls the order of charts at the netdata dashboard. -# Lower numbers move the charts towards the top of the page. -# If unset, the default for python.d.plugin is used. -# priority: 60000 - -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 - -# autodetection_retry sets the job re-check interval in seconds. -# The job is not deleted if check fails. -# Attempts to start the job are made once every autodetection_retry. -# This feature is disabled by default. -# autodetection_retry: 0 - -# ---------------------------------------------------------------------- -# JOBS (data collection sources) -# -# The default JOBS share the same *name*. JOBS with the same name -# are mutually exclusive. Only one of them will be allowed running at -# any time. This allows autodetection to try several alternatives and -# pick the one that works. -# -# Any number of jobs is supported. 
- -# ---------------------------------------------------------------------- -# PLUGIN CONFIGURATION -# -# All python.d.plugin JOBS (for all its modules) support a set of -# predefined parameters. These are: -# -# job_name: -# name: myname # the JOB's name as it will appear at the -# # dashboard (by default is the job_name) -# # JOBs sharing a name are mutually exclusive -# update_every: 1 # the JOB's data collection frequency -# priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts -# autodetection_retry: 0 # the JOB's re-check interval in seconds -# -# Additionally to the above, web_log also supports the following: -# -# path: 'PATH' # the path to web server log file -# path: 'PATH[0-9]*[0-9]' # log files with date suffix are also supported -# detailed_response_codes: yes/no # default: yes. Additional chart where response codes are not grouped -# detailed_response_aggregate: yes/no # default: yes. Not aggregated detailed response codes charts -# all_time : yes/no # default: yes. All time unique client IPs chart (50000 addresses ~ 400KB) -# filter: # filter with regex -# include: 'REGEX' # only those rows that matches the regex -# exclude: 'REGEX' # all rows except those that matches the regex -# categories: # requests per url chart configuration -# cacti: 'cacti.*' # name(dimension): REGEX to match -# observium: 'observium.*' # name(dimension): REGEX to match -# stub_status: 'stub_status' # name(dimension): REGEX to match -# user_defined: # requests per pattern in <user_defined> field (custom_log_format) -# cacti: 'cacti.*' # name(dimension): REGEX to match -# observium: 'observium.*' # name(dimension): REGEX to match -# stub_status: 'stub_status' # name(dimension): REGEX to match -# custom_log_format: # define a custom log format -# pattern: '(?P<address>[\da-f.:]+) -.*?"(?P<method>[A-Z]+) (?P<url>.*?)" (?P<code>[1-9]\d{2}) (?P<bytes_sent>\d+) (?P<resp_length>\d+) (?P<resp_time>\d+\.\d+) ' -# time_multiplier: 1000000 # type <int> - convert time to microseconds -# histogram: [1,3,10,30,100, ...] # type list of int - Cumulative histogram of response time in milli seconds - -# ---------------------------------------------------------------------- -# WEB SERVER CONFIGURATION -# -# Make sure the web server log directory and the web server log files -# can be read by user 'netdata'. -# -# To enable the timings chart and the requests size dimension, the -# web server needs to log them. 
This is how to add them: -# -# nginx: -# log_format netdata '$remote_addr - $remote_user [$time_local] ' -# '"$request" $status $body_bytes_sent ' -# '$request_length $request_time $upstream_response_time ' -# '"$http_referer" "$http_user_agent"'; -# access_log /var/log/nginx/access.log netdata; -# -# apache (you need mod_logio enabled): -# LogFormat "%h %l %u %t \"%r\" %>s %O %I %D \"%{Referer}i\" \"%{User-Agent}i\"" vhost_netdata -# LogFormat "%h %l %u %t \"%r\" %>s %O %I %D \"%{Referer}i\" \"%{User-Agent}i\"" netdata -# CustomLog "/var/log/apache2/access.log" netdata - -# ---------------------------------------------------------------------- -# AUTO-DETECTION JOBS -# only one of them per web server will run (when they have the same name) - - -# ------------------------------------------- -# nginx log on various distros - -# debian, arch -nginx_log: - name: 'nginx' - path: '/var/log/nginx/access.log' - -# gentoo -nginx_log2: - name: 'nginx' - path: '/var/log/nginx/localhost.access_log' - - -# ------------------------------------------- -# apache log on various distros - -# debian -apache_log: - name: 'apache' - path: '/var/log/apache2/access.log' - -# gentoo -apache_log2: - name: 'apache' - path: '/var/log/apache2/access_log' - -# arch -apache_log3: - name: 'apache' - path: '/var/log/httpd/access_log' - -# debian -apache_vhosts_log: - name: 'apache_vhosts' - path: '/var/log/apache2/other_vhosts_access.log' - - -# ------------------------------------------- -# gunicorn log on various distros - -gunicorn_log: - name: 'gunicorn' - path: '/var/log/gunicorn/access.log' - -gunicorn_log2: - name: 'gunicorn' - path: '/var/log/gunicorn/gunicorn-access.log' - -# ------------------------------------------- -# Apache Cache -apache_cache: - name: 'apache_cache' - type: 'apache_cache' - path: '/var/log/apache/cache.log' - -apache2_cache: - name: 'apache_cache' - type: 'apache_cache' - path: '/var/log/apache2/cache.log' - -httpd_cache: - name: 'apache_cache' - type: 'apache_cache' - path: '/var/log/httpd/cache.log' - -# ------------------------------------------- -# Squid - -# debian/ubuntu -squid_log1: - name: 'squid' - type: 'squid' - path: '/var/log/squid3/access.log' - -#gentoo -squid_log2: - name: 'squid' - type: 'squid' - path: '/var/log/squid/access.log' diff --git a/conf.d/statsd.d/example.conf b/conf.d/statsd.d/example.conf deleted file mode 100644 index 0af9dd27d..000000000 --- a/conf.d/statsd.d/example.conf +++ /dev/null @@ -1,65 +0,0 @@ -# statsd synthetic charts configuration - -# You can add many .conf files, one for each of your apps - -# start a new app - you can add many apps in the same file -[app] - # give a name for this app - # this controls the main menu on the dashboard - # and will be the prefix for all charts of the app - name = myapp - - # match all the metrics of the app - metrics = myapp.* - - # shall private charts of these metrics be created? - private charts = no - - # shall gaps be shown when metrics are not collected? 
-    gaps when not collected = no
-
-    # the memory mode for the charts of this app: none|map|save
-    # the default is to use the global memory mode
-    #memory mode = ram
-
-    # the history size for the charts of this app, in seconds
-    # the default is to use the global history
-    #history = 3600
-
-
-
-# create a chart
-# this is its id - the chart will be named myapp.mychart
-[mychart]
-    # a name for the chart, similar to the id (2 names for each chart)
-    name = mychart
-
-    # the chart title
-    title = my chart title
-
-    # the submenu of the dashboard
-    family = my family
-
-    # the context for alarm templates
-    context = chart.context
-
-    # the units of the chart
-    units = tests/s
-
-    # the sorting priority of the chart on the dashboard
-    priority = 91000
-
-    # the type of chart to create: line | area | stacked
-    type = area
-
-    # one or more dimensions for the chart
-    # type = events | last | min | max | sum | average | percentile | median | stddev
-    #        events = the number of events for this metric
-    #        last = the last value collected
-    #        all the others are only valid for histograms and timers
-    dimension = myapp.metric1 avg average 1 1
-    dimension = myapp.metric1 lower min 1 1
-    dimension = myapp.metric1 upper max 1 1
-    dimension = myapp.metric2 other last 1 1
-
-# You can add as many charts as needed
diff --git a/conf.d/stream.conf b/conf.d/stream.conf
deleted file mode 100644
index d0c9a8b18..000000000
--- a/conf.d/stream.conf
+++ /dev/null
@@ -1,179 +0,0 @@
-# netdata configuration for aggregating data from remote hosts
-#
-# API keys authorize a pair of sending-receiving netdata servers.
-# Once their communication is authorized, they can exchange metrics for any
-# number of hosts.
-#
-# You can generate API keys with the Linux command: uuidgen
-
-
-# -----------------------------------------------------------------------------
-# 1. ON SLAVE NETDATA - THE ONE THAT WILL BE SENDING METRICS
-
-[stream]
-    # Enable this on slaves, to have them send metrics.
-    enabled = no
-
-    # Where is the receiving netdata?
-    # A space separated list of:
-    #
-    #      [PROTOCOL:]HOST[%INTERFACE][:PORT]
-    #
-    # If many are given, the first available will get the metrics.
-    #
-    # PROTOCOL  = tcp, udp, or unix (only tcp and unix are supported by masters)
-    # HOST      = an IPv4 or IPv6 address, a hostname, or a unix domain socket path.
-    #             IPv6 addresses should be given in brackets [ip:address]
-    # INTERFACE = the network interface to use (only for IPv6)
-    # PORT      = the port number or service name (/etc/services)
-    #
-    # This communication is not HTTP (it cannot be proxied by web proxies).
-    destination =
-
-    # The API_KEY to use (as the sender)
-    api key =
-
-    # The timeout to connect and send metrics
-    timeout seconds = 60
-
-    # If the destination line above does not specify a port, use this
-    default port = 19999
-
-    # The buffer to use for sending metrics.
-    # 1MB is good for 10-20 seconds of data, so increase this
-    # if you expect latencies.
-    buffer size bytes = 1048576
-
-    # If the connection fails, or it disconnects,
-    # retry after that many seconds.
-    reconnect delay seconds = 5
-
-    # Attempt to sync the clock of the master with the clock of the
-    # slave for that many iterations, when starting.
-    initial clock resync iterations = 60
-
-
-# -----------------------------------------------------------------------------
-# 2. ON MASTER NETDATA - THE ONE THAT WILL BE RECEIVING METRICS
-
-# You can have one API key per slave,
-# or the same API key for all slaves.
-# -# netdata searches for options in this order: -# -# a) master netdata settings (netdata.conf) -# b) [API_KEY] section (below, settings for the API key) -# c) [MACHINE_GUID] section (below, settings for each machine) -# -# You can combine the above (the more specific setting will be used). - -# API key authentication -# If the key is not listed here, it will not be able to push metrics. - -# [API_KEY] is [YOUR-API-KEY], i.e [11111111-2222-3333-4444-555555555555] -[API_KEY] - # Default settings for this API key - - # You can disable the API key, by setting this to: no - # The default (for unknown API keys) is: no - enabled = no - - # A list of simple patterns matching the IPs of the servers that - # will be pushing metrics using this API key. - # The metrics are received via the API port, so the same IPs - # should also be matched at netdata.conf [web].allow connections from - allow from = * - - # The default history in entries, for all hosts using this API key. - # You can also set it per host below. - # If you don't set it here, the history size of the central netdata - # will be used. - default history = 3600 - - # The default memory mode to be used for all hosts using this API key. - # You can also set it per host below. - # If you don't set it here, the memory mode of netdata.conf will be used. - # Valid modes: - # save save on exit, load on start - # map like swap (continuously syncing to disks) - # ram keep it in RAM, don't touch the disk - # none no database at all (use this on headless proxies) - default memory mode = ram - - # Shall we enable health monitoring for the hosts using this API key? - # 3 possible values: - # yes enable alarms - # no do not enable alarms - # auto enable alarms, only when the sending netdata is connected - # You can also set it per host, below. - # The default is the same as to netdata.conf - health enabled by default = auto - - # postpone alarms for a short period after the sender is connected - default postpone alarms on connect seconds = 60 - - # allow or deny multiple connections for the same host? - # If you are sure all your netdata have their own machine GUID, - # set this to 'allow', since it allows faster reconnects. - # When set to 'deny', new connections for a host will not be - # accepted until an existing connection is cleared. - multiple connections = allow - - # need to route metrics differently? set these. - # the defaults are the ones at the [stream] section - #default proxy enabled = yes | no - #default proxy destination = IP:PORT IP:PORT ... - #default proxy api key = API_KEY - - -# ----------------------------------------------------------------------------- -# 3. PER SENDING HOST SETTINGS, ON MASTER NETDATA -# THIS IS OPTIONAL - YOU DON'T NEED IT - -# This section exists to give you finer control of the master settings for each -# slave host, when the same API key is used by many netdata slaves / proxies. -# -# Each netdata has a unique GUID - generated the first time netdata starts. -# You can find it at /var/lib/netdata/registry/netdata.public.unique.id -# (at the slave). -# -# The host sending data will have one. If the host is not ephemeral, -# you can give settings for each sending host here. - -[MACHINE_GUID] - # enable this host: yes | no - # When disabled, the master will not receive metrics for this host. - # THIS IS NOT A SECURITY MECHANISM - AN ATTACKER CAN SET ANY OTHER GUID. - # Use only the API key for security. 
-    enabled = no
-
-    # A list of simple patterns matching the IPs of the servers that
-    # will be pushing metrics using this MACHINE GUID.
-    # The metrics are received via the API port, so the same IPs
-    # should also be matched at netdata.conf [web].allow connections from
-    # and at stream.conf [API_KEY].allow from
-    allow from = *
-
-    # The number of entries in the database
-    history = 3600
-
-    # The memory mode of the database: save | map | ram | none
-    memory mode = save
-
-    # Health / alarms control: yes | no | auto
-    health enabled = yes
-
-    # postpone alarms when the sender connects
-    postpone alarms on connect seconds = 60
-
-    # allow or deny multiple connections for the same host?
-    # If you are sure all your netdata have their own machine GUID,
-    # set this to 'allow', since it allows faster reconnects.
-    # When set to 'deny', new connections for a host will not be
-    # accepted until an existing connection is cleared.
-    multiple connections = allow
-
-    # need to route metrics differently?
-    #proxy enabled = yes | no
-    #proxy destination = IP:PORT IP:PORT ...
-    #proxy api key = API_KEY
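
As a point of reference for the web_log.conf options removed above, the JOB parameters compose into a single YAML mapping per job. The sketch below is illustrative only and is not part of the shipped file: the job name, log path, category regexes and histogram buckets are assumptions chosen for the example, while the custom_log_format pattern is the one documented in the removed file.

    # hypothetical job for a custom-formatted access log (not shipped with netdata)
    myapp_log:
      name: 'myapp'                                  # chart menu name on the dashboard
      path: '/var/log/nginx/myapp.access.log'        # assumed log location
      categories:                                    # requests per URL chart
        static: '^/(css|js|images)/'                 # name(dimension): REGEX to match
        api: '^/api/'
      custom_log_format:
        pattern: '(?P<address>[\da-f.:]+) -.*?"(?P<method>[A-Z]+) (?P<url>.*?)" (?P<code>[1-9]\d{2}) (?P<bytes_sent>\d+) (?P<resp_length>\d+) (?P<resp_time>\d+\.\d+) '
        time_multiplier: 1000000                     # response times logged in seconds -> microseconds
      histogram: [1, 3, 10, 30, 100, 300, 1000]      # cumulative response-time buckets in milliseconds

With such a job in place, web_log would chart requests per category and a cumulative response-time histogram for that one log file, alongside the default charts.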
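The removed stream.conf splits streaming into a sending side ([stream]) and a receiving side ([API_KEY]). A minimal sketch of how the two sides pair up, assuming a master reachable at 10.11.12.2:19999 and reusing the placeholder API key shown in the file; both values are assumptions to be replaced with a real address and a uuidgen-generated key:

    # on the slave (sender), stream.conf
    [stream]
        enabled = yes
        destination = 10.11.12.2:19999                        # assumed master address
        api key = 11111111-2222-3333-4444-555555555555        # placeholder key from the example above

    # on the master (receiver), stream.conf
    [11111111-2222-3333-4444-555555555555]
        enabled = yes
        allow from = 10.11.12.*                               # assumed slave subnet
        default memory mode = ram
        health enabled by default = auto

As noted in the removed file, the same slave IPs must also be allowed at netdata.conf [web].allow connections from on the master, since the metrics arrive over the API port.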
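For finer control, the optional per-host [MACHINE_GUID] section documented above overrides the API-key defaults for a single sender. A sketch with an assumed GUID; each slave's real GUID is found at /var/lib/netdata/registry/netdata.public.unique.id on the slave:

    # on the master, stream.conf - per-host overrides (hypothetical GUID)
    [aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee]
        enabled = yes
        allow from = 10.11.12.7        # assumed IP of this particular slave
        history = 7200                 # keep twice the default history for this host
        memory mode = save
        health enabled = auto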