summaryrefslogtreecommitdiffstats
path: root/conf.d
diff options
context:
space:
mode:
Diffstat (limited to 'conf.d')
-rw-r--r--conf.d/Makefile.am48
-rw-r--r--conf.d/Makefile.in176
-rw-r--r--conf.d/apps_groups.conf209
-rw-r--r--conf.d/charts.d.conf43
-rw-r--r--conf.d/health.d/apache.conf13
-rw-r--r--conf.d/health.d/cpu.conf24
-rw-r--r--conf.d/health.d/disks.conf85
-rw-r--r--conf.d/health.d/entropy.conf13
-rw-r--r--conf.d/health.d/memcached.conf46
-rw-r--r--conf.d/health.d/named.conf12
-rw-r--r--conf.d/health.d/net.conf27
-rw-r--r--conf.d/health.d/nginx.conf12
-rw-r--r--conf.d/health.d/qos.conf12
-rw-r--r--conf.d/health.d/ram.conf9
-rw-r--r--conf.d/health.d/redis.conf12
-rw-r--r--conf.d/health.d/squid.conf12
-rw-r--r--conf.d/health.d/swap.conf20
-rw-r--r--conf.d/python.d.conf42
-rw-r--r--conf.d/python.d/apache.conf80
-rw-r--r--conf.d/python.d/apache_cache.conf76
-rw-r--r--conf.d/python.d/cpufreq.conf37
-rw-r--r--conf.d/python.d/dovecot.conf89
-rw-r--r--conf.d/python.d/example.conf63
-rw-r--r--conf.d/python.d/exim.conf86
-rw-r--r--conf.d/python.d/hddtemp.conf90
-rw-r--r--conf.d/python.d/ipfs.conf67
-rw-r--r--conf.d/python.d/memcached.conf85
-rw-r--r--conf.d/python.d/mysql.conf175
-rw-r--r--conf.d/python.d/nginx.conf82
-rw-r--r--conf.d/python.d/nginx_log.conf72
-rw-r--r--conf.d/python.d/phpfpm.conf82
-rw-r--r--conf.d/python.d/postfix.conf67
-rw-r--r--conf.d/python.d/redis.conf97
-rw-r--r--conf.d/python.d/sensors.conf54
-rw-r--r--conf.d/python.d/squid.conf162
-rw-r--r--conf.d/python.d/tomcat.conf81
36 files changed, 2292 insertions, 68 deletions
diff --git a/conf.d/Makefile.am b/conf.d/Makefile.am
index 381b546e3..02fe86b01 100644
--- a/conf.d/Makefile.am
+++ b/conf.d/Makefile.am
@@ -6,4 +6,52 @@ MAINTAINERCLEANFILES= $(srcdir)/Makefile.in
dist_config_DATA = \
apps_groups.conf \
charts.d.conf \
+ python.d.conf \
+ $(NULL)
+
+chartsconfigdir=$(configdir)/charts.d
+dist_chartsconfig_DATA = \
+ $(NULL)
+
+nodeconfigdir=$(configdir)/node.d
+dist_nodeconfig_DATA = \
+ $(NULL)
+
+pythonconfigdir=$(configdir)/python.d
+dist_pythonconfig_DATA = \
+ python.d/apache.conf \
+ python.d/apache_cache.conf \
+ python.d/cpufreq.conf \
+ python.d/dovecot.conf \
+ python.d/example.conf \
+ python.d/exim.conf \
+ python.d/hddtemp.conf \
+ python.d/ipfs.conf \
+ python.d/memcached.conf \
+ python.d/mysql.conf \
+ python.d/nginx.conf \
+ python.d/nginx_log.conf \
+ python.d/phpfpm.conf \
+ python.d/postfix.conf \
+ python.d/redis.conf \
+ python.d/sensors.conf \
+ python.d/squid.conf \
+ python.d/tomcat.conf \
+ $(NULL)
+
+healthconfigdir=$(configdir)/health.d
+dist_healthconfig_DATA = \
+ health.d/apache.conf \
+ health.d/cpu.conf \
+ health.d/disks.conf \
+ health.d/entropy.conf \
+ health.d/memcached.conf \
+ health.d/named.conf \
+ health.d/net.conf \
+ health.d/nginx.conf \
+ health.d/qos.conf \
+ health.d/ram.conf \
+ health.d/redis.conf \
+ health.d/swap.conf \
+ health.d/squid.conf \
$(NULL)
diff --git a/conf.d/Makefile.in b/conf.d/Makefile.in
index 1938bd940..9356f60e2 100644
--- a/conf.d/Makefile.in
+++ b/conf.d/Makefile.in
@@ -80,7 +80,9 @@ build_triplet = @build@
host_triplet = @host@
subdir = conf.d
DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \
- $(dist_config_DATA)
+ $(dist_chartsconfig_DATA) $(dist_config_DATA) \
+ $(dist_healthconfig_DATA) $(dist_nodeconfig_DATA) \
+ $(dist_pythonconfig_DATA)
ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
am__aclocal_m4_deps = $(top_srcdir)/m4/ax_pthread.m4 \
$(top_srcdir)/configure.ac
@@ -136,8 +138,12 @@ am__uninstall_files_from_dir = { \
|| { echo " ( cd '$$dir' && rm -f" $$files ")"; \
$(am__cd) "$$dir" && rm -f $$files; }; \
}
-am__installdirs = "$(DESTDIR)$(configdir)"
-DATA = $(dist_config_DATA)
+am__installdirs = "$(DESTDIR)$(chartsconfigdir)" \
+ "$(DESTDIR)$(configdir)" "$(DESTDIR)$(healthconfigdir)" \
+ "$(DESTDIR)$(nodeconfigdir)" "$(DESTDIR)$(pythonconfigdir)"
+DATA = $(dist_chartsconfig_DATA) $(dist_config_DATA) \
+ $(dist_healthconfig_DATA) $(dist_nodeconfig_DATA) \
+ $(dist_pythonconfig_DATA)
am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
@@ -261,6 +267,8 @@ pluginsdir = @pluginsdir@
prefix = @prefix@
program_transform_name = @program_transform_name@
psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
sbindir = @sbindir@
sharedstatedir = @sharedstatedir@
srcdir = @srcdir@
@@ -279,6 +287,54 @@ MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
dist_config_DATA = \
apps_groups.conf \
charts.d.conf \
+ python.d.conf \
+ $(NULL)
+
+chartsconfigdir = $(configdir)/charts.d
+dist_chartsconfig_DATA = \
+ $(NULL)
+
+nodeconfigdir = $(configdir)/node.d
+dist_nodeconfig_DATA = \
+ $(NULL)
+
+pythonconfigdir = $(configdir)/python.d
+dist_pythonconfig_DATA = \
+ python.d/apache.conf \
+ python.d/apache_cache.conf \
+ python.d/cpufreq.conf \
+ python.d/dovecot.conf \
+ python.d/example.conf \
+ python.d/exim.conf \
+ python.d/hddtemp.conf \
+ python.d/ipfs.conf \
+ python.d/memcached.conf \
+ python.d/mysql.conf \
+ python.d/nginx.conf \
+ python.d/nginx_log.conf \
+ python.d/phpfpm.conf \
+ python.d/postfix.conf \
+ python.d/redis.conf \
+ python.d/sensors.conf \
+ python.d/squid.conf \
+ python.d/tomcat.conf \
+ $(NULL)
+
+healthconfigdir = $(configdir)/health.d
+dist_healthconfig_DATA = \
+ health.d/apache.conf \
+ health.d/cpu.conf \
+ health.d/disks.conf \
+ health.d/entropy.conf \
+ health.d/memcached.conf \
+ health.d/named.conf \
+ health.d/net.conf \
+ health.d/nginx.conf \
+ health.d/qos.conf \
+ health.d/ram.conf \
+ health.d/redis.conf \
+ health.d/swap.conf \
+ health.d/squid.conf \
$(NULL)
all: all-am
@@ -314,6 +370,27 @@ $(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
$(am__aclocal_m4_deps):
+install-dist_chartsconfigDATA: $(dist_chartsconfig_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_chartsconfig_DATA)'; test -n "$(chartsconfigdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(chartsconfigdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(chartsconfigdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(chartsconfigdir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(chartsconfigdir)" || exit $$?; \
+ done
+
+uninstall-dist_chartsconfigDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_chartsconfig_DATA)'; test -n "$(chartsconfigdir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(chartsconfigdir)'; $(am__uninstall_files_from_dir)
install-dist_configDATA: $(dist_config_DATA)
@$(NORMAL_INSTALL)
@list='$(dist_config_DATA)'; test -n "$(configdir)" || list=; \
@@ -335,6 +412,69 @@ uninstall-dist_configDATA:
@list='$(dist_config_DATA)'; test -n "$(configdir)" || list=; \
files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
dir='$(DESTDIR)$(configdir)'; $(am__uninstall_files_from_dir)
+install-dist_healthconfigDATA: $(dist_healthconfig_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_healthconfig_DATA)'; test -n "$(healthconfigdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(healthconfigdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(healthconfigdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(healthconfigdir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(healthconfigdir)" || exit $$?; \
+ done
+
+uninstall-dist_healthconfigDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_healthconfig_DATA)'; test -n "$(healthconfigdir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(healthconfigdir)'; $(am__uninstall_files_from_dir)
+install-dist_nodeconfigDATA: $(dist_nodeconfig_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_nodeconfig_DATA)'; test -n "$(nodeconfigdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(nodeconfigdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(nodeconfigdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(nodeconfigdir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(nodeconfigdir)" || exit $$?; \
+ done
+
+uninstall-dist_nodeconfigDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_nodeconfig_DATA)'; test -n "$(nodeconfigdir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(nodeconfigdir)'; $(am__uninstall_files_from_dir)
+install-dist_pythonconfigDATA: $(dist_pythonconfig_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_pythonconfig_DATA)'; test -n "$(pythonconfigdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(pythonconfigdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(pythonconfigdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pythonconfigdir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(pythonconfigdir)" || exit $$?; \
+ done
+
+uninstall-dist_pythonconfigDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_pythonconfig_DATA)'; test -n "$(pythonconfigdir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(pythonconfigdir)'; $(am__uninstall_files_from_dir)
tags TAGS:
ctags CTAGS:
@@ -376,7 +516,7 @@ check-am: all-am
check: check-am
all-am: Makefile $(DATA)
installdirs:
- for dir in "$(DESTDIR)$(configdir)"; do \
+ for dir in "$(DESTDIR)$(chartsconfigdir)" "$(DESTDIR)$(configdir)" "$(DESTDIR)$(healthconfigdir)" "$(DESTDIR)$(nodeconfigdir)" "$(DESTDIR)$(pythonconfigdir)"; do \
test -z "$$dir" || $(MKDIR_P) "$$dir"; \
done
install: install-am
@@ -430,7 +570,9 @@ info: info-am
info-am:
-install-data-am: install-dist_configDATA
+install-data-am: install-dist_chartsconfigDATA install-dist_configDATA \
+ install-dist_healthconfigDATA install-dist_nodeconfigDATA \
+ install-dist_pythonconfigDATA
install-dvi: install-dvi-am
@@ -474,21 +616,27 @@ ps: ps-am
ps-am:
-uninstall-am: uninstall-dist_configDATA
+uninstall-am: uninstall-dist_chartsconfigDATA \
+ uninstall-dist_configDATA uninstall-dist_healthconfigDATA \
+ uninstall-dist_nodeconfigDATA uninstall-dist_pythonconfigDATA
.MAKE: install-am install-strip
.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
ctags-am distclean distclean-generic distdir dvi dvi-am html \
html-am info info-am install install-am install-data \
- install-data-am install-dist_configDATA install-dvi \
- install-dvi-am install-exec install-exec-am install-html \
- install-html-am install-info install-info-am install-man \
- install-pdf install-pdf-am install-ps install-ps-am \
- install-strip installcheck installcheck-am installdirs \
- maintainer-clean maintainer-clean-generic mostlyclean \
- mostlyclean-generic pdf pdf-am ps ps-am tags-am uninstall \
- uninstall-am uninstall-dist_configDATA
+ install-data-am install-dist_chartsconfigDATA \
+ install-dist_configDATA install-dist_healthconfigDATA \
+ install-dist_nodeconfigDATA install-dist_pythonconfigDATA \
+ install-dvi install-dvi-am install-exec install-exec-am \
+ install-html install-html-am install-info install-info-am \
+ install-man install-pdf install-pdf-am install-ps \
+ install-ps-am install-strip installcheck installcheck-am \
+ installdirs maintainer-clean maintainer-clean-generic \
+ mostlyclean mostlyclean-generic pdf pdf-am ps ps-am tags-am \
+ uninstall uninstall-am uninstall-dist_chartsconfigDATA \
+ uninstall-dist_configDATA uninstall-dist_healthconfigDATA \
+ uninstall-dist_nodeconfigDATA uninstall-dist_pythonconfigDATA
# Tell versions [3.59,3.63) of GNU make to not export all variables.
diff --git a/conf.d/apps_groups.conf b/conf.d/apps_groups.conf
index 887563c44..0a6f55cd7 100644
--- a/conf.d/apps_groups.conf
+++ b/conf.d/apps_groups.conf
@@ -28,7 +28,7 @@
# *name* substring mode: will search for 'name' in the whole command line (/proc/PID/cmdline)
#
# If you enter even just one *name* (substring), apps.plugin will process
-# /proc/PID/cmdline for all processes, on every iteration.
+# /proc/PID/cmdline for all processes, just once (when they are first seen).
#
# To add process names with single quotes, enclose them in double quotes
# example: "process with this ' single quote"
@@ -44,53 +44,176 @@
# You can add any number of groups you like. Only the ones found running will
# affect the charts generated. However, producing charts with hundreds of
# dimensions may slow down your web browser.
+#
+# The order of the entries in this list is important: the first that matches
+# a process is used, so put important ones at the top. Processes not matched
+# by any row, will inherit it from their parents or children.
+#
+# The order also controls the order of the dimensions on the generated charts
+# (although applications started after apps.plugin is started, will be appended
+# to the existing list of dimensions the netdata daemon maintains).
+
+# -----------------------------------------------------------------------------
+# NETDATA processes accounting
+
+# netdata main process
+netdata: netdata
+
+# netdata known plugins
+# plugins not defined here will be accumulated in netdata, above
+apps.plugin: apps.plugin
+charts.d.plugin: *charts.d.plugin*
+node.d.plugin: *node.d.plugin*
+python.d.plugin: *python.d.plugin*
+tc-qos-helper: *tc-qos-helper.sh*
-compile: cc1 cc1plus as gcc* ld make automake autoconf git
-rsync: rsync
-media: mplayer vlc xine mediatomb omxplayer* kodi* xbmc* mediacenter eventlircd
-squid: squid* c-icap
-apache: apache* httpd
-mysql: mysql*
-asterisk: asterisk
-opensips: opensips* stund
-radius: radius*
+# -----------------------------------------------------------------------------
+# authentication/authorization related servers
+
+auth: radius* openldap* ldap*
fail2ban: fail2ban*
-mail: dovecot imapd pop3d
-postfix: master
-nginx: nginx
+
+# -----------------------------------------------------------------------------
+# web/ftp servers
+
+httpd: apache* httpd nginx* lighttpd
+proxy: squid* c-icap squidGuard varnish*
+php: php*
+ftpd: proftpd in.tftpd vsftpd
+uwsgi: uwsgi
+unicorn: *unicorn*
+
+# -----------------------------------------------------------------------------
+# database servers
+
+sql: mysqld* mariad* postgres*
+nosql: mongod redis*
+
+# -----------------------------------------------------------------------------
+# email servers
+
+email: dovecot imapd pop3d amavis* master zmstat* zmmailboxdmgr qmgr oqmgr
+
+# -----------------------------------------------------------------------------
+# networking and VPN servers
+
+ppp: ppp*
+vpn: openvpn pptp* cjdroute
+wifi: hostapd wpa_supplicant
+
+# -----------------------------------------------------------------------------
+# high availability and balancers
+
+camo: *camo*
+balancer: ipvs_* haproxy
+ha: corosync hs_logd ha_logd stonithd
+
+# -----------------------------------------------------------------------------
+# telephony
+
+pbx: asterisk safe_asterisk *vicidial*
+sip: opensips* stund
+murmur: murmurd
+vines: *vines*
+
+# -----------------------------------------------------------------------------
+# monitoring
+
+logs: ulogd* syslog* rsyslog* logrotate
+nms: snmpd vnstatd smokeping zabbix* monit munin* mon openhpid watchdog tailon nrpe
splunk: splunkd
-mongo: mongod
-lighttpd: lighttpd
-ftpd: proftpd in.tftpd
+
+# -----------------------------------------------------------------------------
+# file systems and file servers
+
samba: smbd nmbd winbindd
nfs: rpcbind rpc.* nfs*
+zfs: spl_* z_* txg_* zil_* arc_* l2arc*
+btrfs: btrfs*
+
+# -----------------------------------------------------------------------------
+# containers & virtual machines
+
+containers: lxc* docker*
+VMs: vbox* VBox* qemu*
+
+# -----------------------------------------------------------------------------
+# ssh servers and clients
+
ssh: ssh* scp
-X: X lightdm xdm pulseaudio gkrellm
-xfce: xfwm4 xfdesktop xfce* Thunar xfsettingsd
-gnome: gnome-* gdm gconfd-2
-named: named rncd
-clam: clam* *clam
-cups: cups*
-ntp: ntp*
-deluge: deluge*
-vbox: vbox* VBox*
-log: ulogd syslog* rsyslog* logrotate
-nms: snmpd vnstatd smokeping zabbix* monit munin* mon openhpid
-ppp: ppp* pptp*
-inetd: inetd xinetd
-openvpn: openvpn
-cjdns: cjdroute
-cron: cron atd
-ha: corosync hs_logd ha_logd stonithd
-ipvs: ipvs_*
+
+# -----------------------------------------------------------------------------
+# print servers and clients
+
+print: cups* lpd lpq
+
+# -----------------------------------------------------------------------------
+# time servers and clients
+
+time: ntp*
+
+# -----------------------------------------------------------------------------
+# dhcp servers and clients
+
+dhcp: *dhcp*
+
+# -----------------------------------------------------------------------------
+# name servers and clients
+
+named: named rncd dig
+
+# -----------------------------------------------------------------------------
+# installation / compilation / debugging
+
+build: cc1 cc1plus as gcc* ld make automake autoconf autoreconf git valgrind*
+
+# -----------------------------------------------------------------------------
+# antivirus
+
+antivirus: clam* *clam
+
+# -----------------------------------------------------------------------------
+# torrent clients
+
+torrents: *deluge* transmission* *SickBeard*
+
+# -----------------------------------------------------------------------------
+# backup servers and clients
+
+backup: rsync bacula*
+
+# -----------------------------------------------------------------------------
+# cron
+
+cron: cron atd anacron
+
+# -----------------------------------------------------------------------------
+# UPS
+
+ups: upsmon upsd */nut/*
+
+# -----------------------------------------------------------------------------
+# Kernel / System
+
+system: systemd* udisks* udevd* *udevd connmand ipv6_addrconf dbus-* inetd xinetd mdadm
kernel: kthreadd kauditd lockd khelper kdevtmpfs khungtaskd rpciod fsnotify_mark kthrotld iscsi_eh deferwq
-netdata: netdata
-crsproxy: crsproxy
-wifi: hostapd wpa_supplicant
-system: systemd* udisks* udevd connmand ipv6_addrconf dbus-*
ksmd: ksmd
-lxc: lxc*
-zfs-spl: spl_*
-zfs-posix: z_*
-zfs-txg: txg_* zil_*
-zfs-arc: arc_* l2arc*
+
+# -----------------------------------------------------------------------------
+# media players, servers, clients
+
+media: mplayer vlc xine mediatomb omxplayer* kodi* xbmc* mediacenter eventlircd mpd minidlnad mt-daapd avahi*
+
+# -----------------------------------------------------------------------------
+# X
+
+X: X lightdm xdm pulseaudio gkrellm xfwm4 xfdesktop xfce* Thunar xfsettingsd xfconfd gnome-* gdm gconfd-2 *gvfsd gvfsd* kdm slim
+
+# -----------------------------------------------------------------------------
+# other application servers
+
+crsproxy: crsproxy
+sidekiq: *sidekiq*
+java: java
+chat: irssi
+ipfs: ipfs
diff --git a/conf.d/charts.d.conf b/conf.d/charts.d.conf
index daec33251..acb2a6fae 100644
--- a/conf.d/charts.d.conf
+++ b/conf.d/charts.d.conf
@@ -29,14 +29,35 @@
# -----------------------------------------------------------------------------
# the default enable/disable for all charts.d collectors
-#enable_all_charts="yes"
-
-# per charts.d collector enable/disable
-#nut=yes
-#squid=yes
-#postfix=yes
-#sensors=yes
-#cpufreq=yes
-#mysql=yes
-#example=yes
-#load_average=yes
+# the default is "yes"
+# enable_all_charts="yes"
+
+# BY DEFAULT ENABLED MODULES
+# ap=yes
+# nut=yes
+# opensips=yes
+
+# -----------------------------------------------------------------------------
+# THESE NEED TO BE SET TO "force" TO BE ENABLED
+
+# Nothing useful.
+# Just an example charts.d plugin you can use as a template.
+# example=force
+
+# OLD MODULES THAT ARE NOW SERVED BY python.d.plugin
+# apache=force
+# cpufreq=force
+# exim=force
+# hddtemp=force
+# mysql=force
+# nginx=force
+# phpfpm=force
+# postfix=force
+# sensors=force
+# squid=force
+# tomcat=force
+
+# OLD MODULES THAT ARE NOW SERVED BY NETDATA DAEMON
+# cpu_apps=force
+# mem_apps=force
+# load_average=force
diff --git a/conf.d/health.d/apache.conf b/conf.d/health.d/apache.conf
new file mode 100644
index 000000000..1fddbc99f
--- /dev/null
+++ b/conf.d/health.d/apache.conf
@@ -0,0 +1,13 @@
+
+# make sure apache is running
+
+template: apache_last_collected_secs
+ on: apache.requests
+ calc: $now - $last_collected_t
+ every: 10s
+ warn: $this > ( 5 * $update_every)
+ crit: $this > (10 * $update_every)
+ units: seconds ago
+ info: number of seconds since the last successful data collection
+
+
diff --git a/conf.d/health.d/cpu.conf b/conf.d/health.d/cpu.conf
new file mode 100644
index 000000000..9332e508a
--- /dev/null
+++ b/conf.d/health.d/cpu.conf
@@ -0,0 +1,24 @@
+
+template: 5min_cpu_pcent
+ on: system.cpu
+ lookup: average -5m unaligned of user,system,nice,softirq,irq,guest,guest_nice
+ every: 1m
+ warn: $this > 90
+ units: %
+ info: average cpu utilization for the last 5 minutes
+
+template: 5min_iowait_cpu_pcent
+ on: system.cpu
+ lookup: average -5m unaligned of iowait
+ every: 1m
+ warn: $this > 10
+ units: %
+ info: average wait I/O for the last 5 minutes
+
+template: 20min_steal_cpu_pcent
+ on: system.cpu
+ lookup: average -20m unaligned of steal
+ every: 5m
+ warn: $this > 10
+ units: %
+ info: average stolen CPU time for the last 20 minutes
diff --git a/conf.d/health.d/disks.conf b/conf.d/health.d/disks.conf
new file mode 100644
index 000000000..c38f1a0a0
--- /dev/null
+++ b/conf.d/health.d/disks.conf
@@ -0,0 +1,85 @@
+# -----------------------------------------------------------------------------
+# low disk space
+
+# checking the latest collected values
+# raise an alarm if the disk is low on
+# available disk space
+
+template: disk_full_percent
+ on: disk.space
+ calc: $used * 100 / ($avail + $used)
+ every: 1m
+ warn: $this > 80
+ crit: $this > 95
+ units: %
+ info: current disk space usage
+
+
+# -----------------------------------------------------------------------------
+# disk fill rate
+
+# calculate the rate the disk fills
+# use as base, the available space change
+# during the last 30 minutes
+
+# this is just a calculation - it has no alarm
+# we will use it in the next template to find
+# the hours remaining
+
+template: disk_fill_rate
+ on: disk.space
+ lookup: max -1s at -30m unaligned of avail
+ calc: ($this - $avail) / ($now - $after)
+ every: 15s
+ units: MB/s
+ info: average rate the disk fills up (positive), or frees up (negative) space, for the last 30 minutes
+
+
+# calculate the hours remaining
+# if the disk continues to fill
+# in this rate
+
+template: disk_full_after_hours
+ on: disk.space
+ calc: $avail / $disk_fill_rate / 3600
+ every: 10s
+ warn: $this > 0 and $this < 48
+ crit: $this > 0 and $this < 24
+ units: hours
+ info: estimated time the disk will run out of space, if the system continues to add data with the rate of the last 30 minutes
+
+
+# -----------------------------------------------------------------------------
+# disk congestion
+
+# raise an alarm if the disk is congested
+# by calculating the average disk utilization
+# for the last 10 minutes
+
+template: 10min_disk_utilization
+ on: disk.util
+ lookup: average -10m unaligned
+ every: 1m
+ green: 90
+ red: 98
+ warn: $this > $green
+ crit: $this > $red
+ units: %
+ info: the percentage of time the disk was busy, during the last 10 minutes
+
+
+# raise an alarm if the disk backlog
+# is above 1000ms (1s) per second
+# for 10 minutes
+# (i.e. the disk cannot catch up)
+
+template: 10min_disk_backlog
+ on: disk.backlog
+ lookup: average -10m unaligned
+ every: 1m
+ green: 1000
+ red: 2000
+ warn: $this > $green
+ crit: $this > $red
+ units: ms
+ info: average of the kernel estimated disk backlog, for the last 10 minutes
diff --git a/conf.d/health.d/entropy.conf b/conf.d/health.d/entropy.conf
new file mode 100644
index 000000000..6f8b6e851
--- /dev/null
+++ b/conf.d/health.d/entropy.conf
@@ -0,0 +1,13 @@
+
+# check if entropy is too low
+# the alarm is checked every 1 minute
+# and examines the last 30 minutes of data
+
+ alarm: min_30min_entropy
+ on: system.entropy
+ lookup: min -30m unaligned
+ every: 1m
+ warn: $this < 200
+ crit: $this < 100
+ units: entries
+ info: minimum entries in the random numbers pool (entropy), for the last 30 minutes
diff --git a/conf.d/health.d/memcached.conf b/conf.d/health.d/memcached.conf
new file mode 100644
index 000000000..05ff14711
--- /dev/null
+++ b/conf.d/health.d/memcached.conf
@@ -0,0 +1,46 @@
+
+# make sure memcached is running
+
+template: memcached_last_collected_secs
+ on: memcached.cache
+ calc: $now - $last_collected_t
+ every: 10s
+ warn: $this > ( 5 * $update_every)
+ crit: $this > (10 * $update_every)
+ units: seconds ago
+ info: number of seconds since the last successful data collection
+
+
+# detect if memcached cache is full
+
+template: cache_full_pcent
+ on: memcached.cache
+ calc: $used * 100 / ($used + $available)
+ every: 10s
+ warn: $this > 80
+ crit: $this > 90
+ units: %
+ info: current cache memory usage
+
+
+# find the rate memcached cache is filling
+
+template: cache_fill_rate
+ on: memcached.cache
+ lookup: max -1s at -30m unaligned of available
+ calc: ($this - $available) / ($now - $after)
+ every: 15s
+ units: KB/s
+ info: average rate the cache fills up (positive), or frees up (negative) space, for the last 30 minutes
+
+
+# find the hours remaining until memcached cache is full
+
+template: cache_full_after_hours
+ on: memcached.cache
+ calc: $available / $cache_fill_rate / 3600
+ every: 10s
+ warn: $this > 0 and $this < 48
+ crit: $this > 0 and $this < 24
+ units: hours
+ info: estimated time the cache will run out of space, if the system continues to add data with the rate of the last 30 minutes
diff --git a/conf.d/health.d/named.conf b/conf.d/health.d/named.conf
new file mode 100644
index 000000000..e46d1d330
--- /dev/null
+++ b/conf.d/health.d/named.conf
@@ -0,0 +1,12 @@
+
+# make sure named is running
+
+template: named_last_collected_secs
+ on: named.global_queries
+ calc: $now - $last_collected_t
+ every: 10s
+ warn: $this > ( 5 * $update_every)
+ crit: $this > (10 * $update_every)
+ units: seconds ago
+ info: number of seconds since the last successful data collection
+
diff --git a/conf.d/health.d/net.conf b/conf.d/health.d/net.conf
new file mode 100644
index 000000000..f65bc4fcb
--- /dev/null
+++ b/conf.d/health.d/net.conf
@@ -0,0 +1,27 @@
+
+# check if an interface is dropping packets
+# the alarm is checked every 10 seconds
+# and examines the last 30 minutes of data
+
+template: 30min_packet_drops
+ on: net.drops
+ lookup: sum -30m unaligned absolute
+ every: 1m
+ crit: $this > 0
+ units: packets
+ info: dropped packets in the last 30 minutes
+
+
+# check if an interface is having FIFO
+# buffer errors
+# the alarm is checked every 10 seconds
+# and examines the last 30 minutes of data
+
+template: 30min_fifo_errors
+ on: net.fifo
+ lookup: sum -30m unaligned absolute
+ every: 1m
+ crit: $this > 0
+ units: errors
+ info: network interface fifo errors in the last 30 minutes
+
diff --git a/conf.d/health.d/nginx.conf b/conf.d/health.d/nginx.conf
new file mode 100644
index 000000000..da13008e3
--- /dev/null
+++ b/conf.d/health.d/nginx.conf
@@ -0,0 +1,12 @@
+
+# make sure nginx is running
+
+template: nginx_last_collected_secs
+ on: nginx.requests
+ calc: $now - $last_collected_t
+ every: 10s
+ warn: $this > ( 5 * $update_every)
+ crit: $this > (10 * $update_every)
+ units: seconds ago
+ info: number of seconds since the last successful data collection
+
diff --git a/conf.d/health.d/qos.conf b/conf.d/health.d/qos.conf
new file mode 100644
index 000000000..ac3bf8ff4
--- /dev/null
+++ b/conf.d/health.d/qos.conf
@@ -0,0 +1,12 @@
+
+# check if a QoS class is dropping packets
+# the alarm is checked every 10 seconds
+# and examines the last minute of data
+
+#template: 10min_qos_packet_drops
+# on: tc.qos_dropped
+# lookup: sum -10m unaligned absolute
+# every: 30s
+# warn: $this > 0
+# units: packets
+# info: dropped packets in the last 30 minutes
diff --git a/conf.d/health.d/ram.conf b/conf.d/health.d/ram.conf
new file mode 100644
index 000000000..1d3681128
--- /dev/null
+++ b/conf.d/health.d/ram.conf
@@ -0,0 +1,9 @@
+
+ alarm: used_ram_pcent
+ on: system.ram
+ calc: $used * 100 / ($used + $cached + $free)
+ every: 10s
+ warn: $this > 80
+ crit: $this > 90
+ units: %
+ info: system RAM usage
diff --git a/conf.d/health.d/redis.conf b/conf.d/health.d/redis.conf
new file mode 100644
index 000000000..3750176c5
--- /dev/null
+++ b/conf.d/health.d/redis.conf
@@ -0,0 +1,12 @@
+
+# make sure redis is running
+
+template: redis_last_collected_secs
+ on: redis.operations
+ calc: $now - $last_collected_t
+ every: 10s
+ warn: $this > ( 5 * $update_every)
+ crit: $this > (10 * $update_every)
+ units: seconds ago
+ info: number of seconds since the last successful data collection
+
diff --git a/conf.d/health.d/squid.conf b/conf.d/health.d/squid.conf
new file mode 100644
index 000000000..cc5ce1c3a
--- /dev/null
+++ b/conf.d/health.d/squid.conf
@@ -0,0 +1,12 @@
+
+# make sure squid is running
+
+template: squid_last_collected_secs
+ on: squid.clients_requests
+ calc: $now - $last_collected_t
+ every: 10s
+ warn: $this > ( 5 * $update_every)
+ crit: $this > (10 * $update_every)
+ units: seconds ago
+ info: number of seconds since the last successful data collection
+
diff --git a/conf.d/health.d/swap.conf b/conf.d/health.d/swap.conf
new file mode 100644
index 000000000..552dd310a
--- /dev/null
+++ b/conf.d/health.d/swap.conf
@@ -0,0 +1,20 @@
+
+ alarm: 30min_ram_swapped_out
+ on: system.swapio
+ lookup: sum -30m unaligned absolute of out
+ # we have to convert KB to MB by dividing $this (i.e. the result of the lookup) with 1024
+ calc: $this / 1024 * 100 / ( $system.ram.used + $system.ram.cached + $system.ram.free )
+ every: 1m
+ warn: $this > 1
+ crit: $this > 10
+ units: % of RAM
+ info: the sum of all memory swapped out during the last 30 minutes, as a percentage of the available RAM
+
+ alarm: pcent_of_ram_in_swap
+ on: system.swap
+ calc: $used * 100 / ( $system.ram.used + $system.ram.cached + $system.ram.free )
+ every: 10s
+ warn: $this > 10
+ crit: $this > 50
+ units: % of RAM
+ info: the currently used swap space, as a percentage of the available RAM
diff --git a/conf.d/python.d.conf b/conf.d/python.d.conf
new file mode 100644
index 000000000..940bd9183
--- /dev/null
+++ b/conf.d/python.d.conf
@@ -0,0 +1,42 @@
+# netdata python.d.plugin configuration
+#
+# This file is in YaML format.
+# Generally the format is:
+#
+# name: value
+#
+
+# Enable / disable the whole python.d.plugin (all its modules)
+enabled: yes
+
+# Prevent log flood
+# Define how many log messages can be written to log file in one log_interval
+logs_per_interval: 200
+
+# Define how long is one logging interval (in seconds)
+log_interval: 3600
+
+# ----------------------------------------------------------------------
+# Enable / Disable python.d.plugin modules
+#
+# The default for all modules is enabled (yes).
+# Setting any of these to no will disable it.
+
+# apache: yes
+# apache_cache: yes
+# cpufreq: yes
+# dovecot: yes
+example: no
+# exim: yes
+# hddtemp: yes
+# ipfs: yes
+# memcached: yes
+# mysql: yes
+# nginx: yes
+# nginx_log: yes
+# phpfpm: yes
+# postfix: yes
+# redis: yes
+# sensors: yes
+# squid: yes
+# tomcat: yes
diff --git a/conf.d/python.d/apache.conf b/conf.d/python.d/apache.conf
new file mode 100644
index 000000000..5b151ef70
--- /dev/null
+++ b/conf.d/python.d/apache.conf
@@ -0,0 +1,80 @@
+# netdata python.d.plugin configuration for apache
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 5
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 5 # the JOB's number of restoration attempts
+#
+# Additionally to the above, apache also supports the following:
+#
+# url: 'URL' # the URL to fetch apache's mod_status stats
+#
+# if the URL is password protected, the following are supported:
+#
+# user: 'username'
+# pass: 'password'
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+localhost:
+ name : 'local'
+ url : 'http://localhost/server-status?auto'
+
+localipv4:
+ name : 'local'
+ url : 'http://127.0.0.1/server-status?auto'
+
+localipv6:
+ name : 'local'
+ url : 'http://[::1]/server-status?auto'
diff --git a/conf.d/python.d/apache_cache.conf b/conf.d/python.d/apache_cache.conf
new file mode 100644
index 000000000..98eecd0e8
--- /dev/null
+++ b/conf.d/python.d/apache_cache.conf
@@ -0,0 +1,76 @@
+# netdata python.d.plugin configuration for apache cache
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 5
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 5 # the JOB's number of restoration attempts
+#
+# Additionally to the above, apache_cache also supports the following:
+#
+# path: 'PATH' # the path to apache's cache.log
+#
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+apache:
+ name: 'local'
+ path: '/var/log/apache/cache.log'
+
+apache2:
+ name: 'local'
+ path: '/var/log/apache2/cache.log'
+
+httpd:
+ name: 'local'
+ path: '/var/log/httpd/cache.log'
diff --git a/conf.d/python.d/cpufreq.conf b/conf.d/python.d/cpufreq.conf
new file mode 100644
index 000000000..10c96917f
--- /dev/null
+++ b/conf.d/python.d/cpufreq.conf
@@ -0,0 +1,37 @@
+# netdata python.d.plugin configuration for cpufreq
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 5
+
+# The directory to search for the file scaling_cur_freq
+sys_dir: "/sys/devices"
diff --git a/conf.d/python.d/dovecot.conf b/conf.d/python.d/dovecot.conf
new file mode 100644
index 000000000..917c5272e
--- /dev/null
+++ b/conf.d/python.d/dovecot.conf
@@ -0,0 +1,89 @@
+# netdata python.d.plugin configuration for dovecot
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 5
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 5 # the JOB's number of restoration attempts
+#
+# Additionally to the above, dovecot also supports the following:
+#
+# socket: 'path/to/dovecot/stats'
+#
+# or
+# host: 'IP or HOSTNAME' # the host to connect to
+# port: PORT # the port to connect to
+#
+#
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+localhost:
+ name : 'local'
+ host : 'localhost'
+ port : 24242
+
+localipv4:
+ name : 'local'
+ host : '127.0.0.1'
+ port : 24242
+
+localipv6:
+ name : 'local'
+ host : '::1'
+ port : 24242
+
+localsocket:
+ name : 'local'
+ socket : '/var/run/dovecot/stats'
+
diff --git a/conf.d/python.d/example.conf b/conf.d/python.d/example.conf
new file mode 100644
index 000000000..31f9a49a0
--- /dev/null
+++ b/conf.d/python.d/example.conf
@@ -0,0 +1,63 @@
+# netdata python.d.plugin configuration for example
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 5
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 5 # the JOB's number of restoration attempts
+#
+# Additionally to the above, example also supports the following:
+#
+# - none
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
diff --git a/conf.d/python.d/exim.conf b/conf.d/python.d/exim.conf
new file mode 100644
index 000000000..6aca13c34
--- /dev/null
+++ b/conf.d/python.d/exim.conf
@@ -0,0 +1,86 @@
+# netdata python.d.plugin configuration for exim
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# exim is slow, so once every 10 seconds
+update_every: 10
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 5
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 5 # the JOB's number of restoration attempts
+#
+# Additionally to the above, exim also supports the following:
+#
+# command: 'exim -bpc' # the command to run
+#
+
+# ----------------------------------------------------------------------
+# REQUIRED exim CONFIGURATION
+#
+# netdata will query exim as user netdata.
+# By default exim will refuse to respond.
+#
+# To allow querying exim as non-admin user, please set the following
+# to your exim configuration:
+#
+# queue_list_requires_admin = false
+#
+# Your exim configuration should be in
+#
+# /etc/exim/exim4.conf
+# or
+# /etc/exim4/conf.d/main/000_local_options
+#
+# Please consult your distribution information to find the exact file.
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+
+local:
+ command: 'exim -bpc'
diff --git a/conf.d/python.d/hddtemp.conf b/conf.d/python.d/hddtemp.conf
new file mode 100644
index 000000000..0c78449b4
--- /dev/null
+++ b/conf.d/python.d/hddtemp.conf
@@ -0,0 +1,90 @@
+# netdata python.d.plugin configuration for hddtemp
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 5
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 5 # the JOB's number of restoration attempts
+#
+# Additionally to the above, hddtemp also supports the following:
+#
+# host: 'IP or HOSTNAME' # the host to connect to
+# port: PORT # the port to connect to
+#
+
+# By default this module will try to autodetect disks
+# (autodetection works only for disks whose names start with "sd").
+# However this can be overridden by setting the variable `devices`
+# to an array of desired disks. Example for two disks:
+#
+# devices:
+# - sda
+# - sdb
+#
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+localhost:
+ name: 'local'
+ host: 'localhost'
+ port: 7634
+
+localipv4:
+ name: 'local'
+ host: '127.0.0.1'
+ port: 7634
+
+localipv6:
+ name: 'local'
+ host: '::1'
+ port: 7634
diff --git a/conf.d/python.d/ipfs.conf b/conf.d/python.d/ipfs.conf
new file mode 100644
index 000000000..e039026cc
--- /dev/null
+++ b/conf.d/python.d/ipfs.conf
@@ -0,0 +1,67 @@
+# netdata python.d.plugin configuration for ipfs
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 5
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 5 # the JOB's number of restoration attempts
+#
+# Additionally to the above, ipfs also supports the following:
+#
+# url: 'URL' # URL to the IPFS API
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+localhost:
+ name : 'local'
+ url : 'http://localhost:5001'
diff --git a/conf.d/python.d/memcached.conf b/conf.d/python.d/memcached.conf
new file mode 100644
index 000000000..f1723dc81
--- /dev/null
+++ b/conf.d/python.d/memcached.conf
@@ -0,0 +1,85 @@
+# netdata python.d.plugin configuration for memcached
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 5
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 5 # the JOB's number of restoration attempts
+#
+# Additionally to the above, memcached also supports the following:
+#
+# socket: 'path/to/memcached.sock'
+#
+# or
+# host: 'IP or HOSTNAME' # the host to connect to
+# port: PORT # the port to connect to
+#
+#
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+localhost:
+ name : 'local'
+ host : 'localhost'
+ port : 11211
+
+localipv4:
+ name : 'local'
+ host : '127.0.0.1'
+ port : 11211
+
+localipv6:
+ name : 'local'
+ host : '::1'
+ port : 11211
+
diff --git a/conf.d/python.d/mysql.conf b/conf.d/python.d/mysql.conf
new file mode 100644
index 000000000..d247b89a0
--- /dev/null
+++ b/conf.d/python.d/mysql.conf
@@ -0,0 +1,175 @@
+# netdata python.d.plugin configuration for mysql
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 5
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 5 # the JOB's number of restoration attempts
+#
+# Additionally to the above, mysql also supports the following:
+#
+# socket: 'path/to/mysql.sock'
+#
+# or
+# host: 'IP or HOSTNAME' # the host to connect to
+# port: PORT # the port to connect to
+#
+# in all cases, the following can also be set:
+#
+# user: 'username' # the mysql username to use
+# pass: 'password' # the mysql password to use
+#
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+mycnf1:
+ name : 'local'
+ 'my.cnf' : '/etc/my.cnf'
+
+mycnf2:
+ name : 'local'
+ 'my.cnf' : '/etc/mysql/my.cnf'
+
+socket1:
+ name : 'local'
+ # user : ''
+ # pass : ''
+ socket : '/var/run/mysqld/mysqld.sock'
+
+socket2:
+ name : 'local'
+ # user : ''
+ # pass : ''
+ socket : '/var/lib/mysql/mysql.sock'
+
+socket3:
+ name : 'local'
+ # user : ''
+ # pass : ''
+ socket : '/tmp/mysql.sock'
+
+tcp:
+ name : 'local'
+ # user : ''
+ # pass : ''
+ host : 'localhost'
+ port : '3306'
+
+tcpipv4:
+ name : 'local'
+ # user : ''
+ # pass : ''
+ host : '127.0.0.1'
+ port : '3306'
+
+tcpipv6:
+ name : 'local'
+ # user : ''
+ # pass : ''
+ host : '::1'
+ port : '3306'
+
+
+# Now we try the same as above with user: root
+# A few systems configure mysql to accept passwordless
+# root access.
+
+mycnf1_root:
+ name : 'local'
+ user : 'root'
+ 'my.cnf' : '/etc/my.cnf'
+
+mycnf2_root:
+ name : 'local'
+ user : 'root'
+ 'my.cnf' : '/etc/mysql/my.cnf'
+
+socket1_root:
+ name : 'local'
+ user : 'root'
+ # pass : ''
+ socket : '/var/run/mysqld/mysqld.sock'
+
+socket2_root:
+ name : 'local'
+ user : 'root'
+ # pass : ''
+ socket : '/var/lib/mysql/mysql.sock'
+
+socket3_root:
+ name : 'local'
+ user : 'root'
+ # pass : ''
+ socket : '/tmp/mysql.sock'
+
+tcp_root:
+ name : 'local'
+ user : 'root'
+ # pass : ''
+ host : 'localhost'
+ port : '3306'
+
+tcpipv4_root:
+ name : 'local'
+ user : 'root'
+ # pass : ''
+ host : '127.0.0.1'
+ port : '3306'
+
+tcpipv6_root:
+ name : 'local'
+ user : 'root'
+ # pass : ''
+ host : '::1'
+ port : '3306'
+
diff --git a/conf.d/python.d/nginx.conf b/conf.d/python.d/nginx.conf
new file mode 100644
index 000000000..1a27d67c5
--- /dev/null
+++ b/conf.d/python.d/nginx.conf
@@ -0,0 +1,82 @@
+# netdata python.d.plugin configuration for nginx
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 5
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 5 # the JOB's number of restoration attempts
+#
+# Additionally to the above, nginx also supports the following:
+#
+# url: 'URL' # the URL to fetch nginx's status stats
+#
+# if the URL is password protected, the following are supported:
+#
+# user: 'username'
+# pass: 'password'
+#
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+localhost:
+ name : 'local'
+ url : 'http://localhost/stub_status'
+
+localipv4:
+ name : 'local'
+ url : 'http://127.0.0.1/stub_status'
+
+localipv6:
+ name : 'local'
+ url : 'http://[::1]/stub_status'
+
diff --git a/conf.d/python.d/nginx_log.conf b/conf.d/python.d/nginx_log.conf
new file mode 100644
index 000000000..6a53c5204
--- /dev/null
+++ b/conf.d/python.d/nginx_log.conf
@@ -0,0 +1,72 @@
+# netdata python.d.plugin configuration for nginx log
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 5
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 5 # the JOB's number of restoration attempts
+#
+# Additionally to the above, nginx_log also supports the following:
+#
+# path: 'PATH' # the path to nginx's access.log
+#
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+nginx_log:
+ name: 'local'
+ path: '/var/log/nginx/access.log'
+
+nginx_log2:
+ name: 'local'
+ path: '/var/log/nginx/nginx-access.log'
diff --git a/conf.d/python.d/phpfpm.conf b/conf.d/python.d/phpfpm.conf
new file mode 100644
index 000000000..06d2367ae
--- /dev/null
+++ b/conf.d/python.d/phpfpm.conf
@@ -0,0 +1,82 @@
+# netdata python.d.plugin configuration for PHP-FPM
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 5
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 5 # the JOB's number of restoration attempts
+#
+# Additionally to the above, PHP-FPM also supports the following:
+#
+# url: 'URL' # the URL to fetch PHP-FPM's status stats
+#
+# if the URL is password protected, the following are supported:
+#
+# user: 'username'
+# pass: 'password'
+#
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+localhost:
+ name : 'local'
+ url : "http://localhost/status"
+
+localipv4:
+ name : 'local'
+ url : "http://127.0.0.1/status"
+
+localipv6:
+ name : 'local'
+ url : "http://[::1]/status"
+
diff --git a/conf.d/python.d/postfix.conf b/conf.d/python.d/postfix.conf
new file mode 100644
index 000000000..ca9d8fada
--- /dev/null
+++ b/conf.d/python.d/postfix.conf
@@ -0,0 +1,67 @@
+# netdata python.d.plugin configuration for postfix
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# postfix is slow, so once every 10 seconds
+update_every: 10
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 5
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 5 # the JOB's number of restoration attempts
+#
+# Additionally to the above, postfix also supports the following:
+#
+# command: 'postqueue -p' # the command to run
+#
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+
+local:
+ command: 'postqueue -p'
diff --git a/conf.d/python.d/redis.conf b/conf.d/python.d/redis.conf
new file mode 100644
index 000000000..9935bff77
--- /dev/null
+++ b/conf.d/python.d/redis.conf
@@ -0,0 +1,97 @@
+# netdata python.d.plugin configuration for redis
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 5
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 5 # the JOB's number of restoration attempts
+#
+# Additionally to the above, redis also supports the following:
+#
+# socket: 'path/to/redis.sock'
+#
+# or
+# host: 'IP or HOSTNAME' # the host to connect to
+# port: PORT # the port to connect to
+#
+#
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+socket1:
+ name : 'local'
+ socket : '/tmp/redis.sock'
+
+socket2:
+ name : 'local'
+ socket : '/var/run/redis/redis.sock'
+
+socket3:
+ name : 'local'
+ socket : '/var/lib/redis/redis.sock'
+
+localhost:
+ name : 'local'
+ host : 'localhost'
+ port : 6379
+
+localipv4:
+ name : 'local'
+ host : '127.0.0.1'
+ port : 6379
+
+localipv6:
+ name : 'local'
+ host : '::1'
+ port : 6379
+
diff --git a/conf.d/python.d/sensors.conf b/conf.d/python.d/sensors.conf
new file mode 100644
index 000000000..7d895c348
--- /dev/null
+++ b/conf.d/python.d/sensors.conf
@@ -0,0 +1,54 @@
+# netdata python.d.plugin configuration for sensors
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 5
+
+# ----------------------------------------------------------------------
+# Limit the number of sensors types.
+# Comment the ones you want to disable.
+# Also, re-arranging this list controls the order of the charts at the
+# netdata dashboard.
+
+types:
+ - temperature
+ - fan
+ - voltage
+ - current
+ - power
+ - energy
+ - humidity
+
+# ----------------------------------------------------------------------
+# Limit the number of sensors chips.
+# Uncomment the first line (chips:) and add chip names below it.
+# Chip names are matched by prefix (anything that starts like that).
+# You can find the chip names using the sensors command.
+
+#chips:
+# - i8k
+# - coretemp
+#
+# chip names can be found using the sensors shell command
+# the prefix is matched (anything that starts like that)
diff --git a/conf.d/python.d/squid.conf b/conf.d/python.d/squid.conf
new file mode 100644
index 000000000..27800bde7
--- /dev/null
+++ b/conf.d/python.d/squid.conf
@@ -0,0 +1,162 @@
+# netdata python.d.plugin configuration for squid
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 5
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 5 # the JOB's number of restoration attempts
+#
+# Additionally to the above, squid also supports the following:
+#
+# host : 'IP or HOSTNAME' # the host to connect to
+# port : PORT # the port to connect to
+# request: 'URL' # the URL to request from squid
+#
+
+# ----------------------------------------------------------------------
+# SQUID CONFIGURATION
+#
+# See:
+# http://wiki.squid-cache.org/Features/CacheManager
+#
+# In short, add to your squid configuration these:
+#
+# http_access allow localhost manager
+# http_access deny manager
+#
+# To remotely monitor a squid:
+#
+# acl managerAdmin src 192.0.2.1
+# http_access allow localhost manager
+# http_access allow managerAdmin manager
+# http_access deny manager
+#
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+tcp3128old:
+ name : 'local'
+ host : 'localhost'
+ port : 3128
+ request : 'cache_object://localhost:3128/counters'
+
+tcp8080old:
+ name : 'local'
+ host : 'localhost'
+ port : 8080
+ request : 'cache_object://localhost:8080/counters'
+
+tcp3128new:
+ name : 'local'
+ host : 'localhost'
+ port : 3128
+ request : '/squid-internal-mgr/counters'
+
+tcp8080new:
+ name : 'local'
+ host : 'localhost'
+ port : 8080
+ request : '/squid-internal-mgr/counters'
+
+# IPv4
+
+tcp3128oldipv4:
+ name : 'local'
+ host : '127.0.0.1'
+ port : 3128
+ request : 'cache_object://127.0.0.1:3128/counters'
+
+tcp8080oldipv4:
+ name : 'local'
+ host : '127.0.0.1'
+ port : 8080
+ request : 'cache_object://127.0.0.1:8080/counters'
+
+tcp3128newipv4:
+ name : 'local'
+ host : '127.0.0.1'
+ port : 3128
+ request : '/squid-internal-mgr/counters'
+
+tcp8080newipv4:
+ name : 'local'
+ host : '127.0.0.1'
+ port : 8080
+ request : '/squid-internal-mgr/counters'
+
+# IPv6
+
+tcp3128oldipv6:
+ name : 'local'
+ host : '::1'
+ port : 3128
+ request : 'cache_object://[::1]:3128/counters'
+
+tcp8080oldipv6:
+ name : 'local'
+ host : '::1'
+ port : 8080
+ request : 'cache_object://[::1]:8080/counters'
+
+tcp3128newipv6:
+ name : 'local'
+ host : '::1'
+ port : 3128
+ request : '/squid-internal-mgr/counters'
+
+tcp8080newipv6:
+ name : 'local'
+ host : '::1'
+ port : 8080
+ request : '/squid-internal-mgr/counters'
+
diff --git a/conf.d/python.d/tomcat.conf b/conf.d/python.d/tomcat.conf
new file mode 100644
index 000000000..aef9631b9
--- /dev/null
+++ b/conf.d/python.d/tomcat.conf
@@ -0,0 +1,81 @@
+# netdata python.d.plugin configuration for tomcat
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 5
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 5 # the JOB's number of restoration attempts
+#
+# Additionally to the above, tomcat also supports the following:
+#
+# url: 'URL' # the URL to fetch tomcat's status stats
+#
+# if the URL is password protected, the following are supported:
+#
+# user: 'username'
+# pass: 'password'
+#
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+localhost:
+ name : 'local'
+ url : 'http://localhost:8080/manager/status?XML=true'
+
+localipv4:
+ name : 'local'
+ url : 'http://127.0.0.1:8080/manager/status?XML=true'
+
+localipv6:
+ name : 'local'
+ url : 'http://[::1]:8080/manager/status?XML=true'