author     Federico Ceratto <federico.ceratto@gmail.com>    2017-12-19 23:39:21 +0000
committer  Federico Ceratto <federico.ceratto@gmail.com>    2017-12-19 23:39:21 +0000
commit     61aedf201c2c4bf0e5aa4db32e74f4d860b88593 (patch)
tree       bcf4f9a0cd8bc2daf38b2ff9f29bfcc1e5ed8968 /conf.d/python.d
parent     New upstream version 1.8.0+dfsg (diff)
download   netdata-61aedf201c2c4bf0e5aa4db32e74f4d860b88593.tar.xz
           netdata-61aedf201c2c4bf0e5aa4db32e74f4d860b88593.zip

New upstream version 1.9.0+dfsg (upstream/1.9.0+dfsg)
Diffstat (limited to 'conf.d/python.d')
-rw-r--r--  conf.d/python.d.conf                 |  14
-rw-r--r--  conf.d/python.d/apache.conf          |  21
-rw-r--r--  conf.d/python.d/beanstalk.conf       |  80
-rw-r--r--  conf.d/python.d/bind_rndc.conf       |  29
-rw-r--r--  conf.d/python.d/chrony.conf          |  21
-rw-r--r--  conf.d/python.d/couchdb.conf         |  91
-rw-r--r--  conf.d/python.d/cpufreq.conf         |   8
-rw-r--r--  conf.d/python.d/dns_query_time.conf  |  39
-rw-r--r--  conf.d/python.d/dnsdist.conf         |  85
-rw-r--r--  conf.d/python.d/dovecot.conf         |  21
-rw-r--r--  conf.d/python.d/elasticsearch.conf   |  21
-rw-r--r--  conf.d/python.d/example.conf         |  21
-rw-r--r--  conf.d/python.d/exim.conf            |  21
-rw-r--r--  conf.d/python.d/fail2ban.conf        |  29
-rw-r--r--  conf.d/python.d/freeradius.conf      |  42
-rw-r--r--  conf.d/python.d/go_expvar.conf       |  24
-rw-r--r--  conf.d/python.d/haproxy.conf         |  21
-rw-r--r--  conf.d/python.d/hddtemp.conf         |  24
-rw-r--r--  conf.d/python.d/ipfs.conf            |  21
-rw-r--r--  conf.d/python.d/isc_dhcpd.conf       |  32
-rw-r--r--  conf.d/python.d/mdstat.conf          |   8
-rw-r--r--  conf.d/python.d/memcached.conf       |  21
-rw-r--r--  conf.d/python.d/mongodb.conf         |  21
-rw-r--r--  conf.d/python.d/mysql.conf           |  21
-rw-r--r--  conf.d/python.d/nginx.conf           |  24
-rw-r--r--  conf.d/python.d/nsd.conf             |  21
-rw-r--r--  conf.d/python.d/ovpn_status_log.conf |  23
-rw-r--r--  conf.d/python.d/phpfpm.conf          |  21
-rw-r--r--  conf.d/python.d/postfix.conf         |  21
-rw-r--r--  conf.d/python.d/postgres.conf        |  23
-rw-r--r--  conf.d/python.d/powerdns.conf (renamed from conf.d/python.d/apache_cache.conf) |  46
-rw-r--r--  conf.d/python.d/rabbitmq.conf        |  21
-rw-r--r--  conf.d/python.d/redis.conf           |  21
-rw-r--r--  conf.d/python.d/retroshare.conf      |  21
-rw-r--r--  conf.d/python.d/samba.conf           |  24
-rw-r--r--  conf.d/python.d/sensors.conf         |  11
-rw-r--r--  conf.d/python.d/smartd_log.conf      |  33
-rw-r--r--  conf.d/python.d/squid.conf           |  21
-rw-r--r--  conf.d/python.d/tomcat.conf          |  21
-rw-r--r--  conf.d/python.d/varnish.conf         |  23
-rw-r--r--  conf.d/python.d/web_log.conf         |  21
41 files changed, 797 insertions, 335 deletions
diff --git a/conf.d/python.d.conf b/conf.d/python.d.conf
index 741d49914..2c3d400ca 100644
--- a/conf.d/python.d.conf
+++ b/conf.d/python.d.conf
@@ -9,13 +9,6 @@
# Enable / disable the whole python.d.plugin (all its modules)
enabled: yes
-# Prevent log flood
-# Define how many log messages can be written to log file in one log_interval
-logs_per_interval: 200
-
-# Define how long is one logging interval (in seconds)
-log_interval: 3600
-
# ----------------------------------------------------------------------
# Enable / Disable python.d.plugin modules
#default_run: yes
@@ -29,11 +22,14 @@ log_interval: 3600
# apache_cache has been replaced by web_log
apache_cache: no
# apache: yes
+# beanstalk: yes
# bind_rndc: yes
-# chrony: yes
+chrony: no
+# couchdb: yes
# cpufreq: yes
# cpuidle: yes
# dns_query_time: yes
+# dnsdist: yes
# dovecot: yes
# elasticsearch: yes
@@ -53,6 +49,7 @@ go_expvar: no
# isc_dhcpd: yes
# mdstat: yes
# memcached: yes
+# mongodb: yes
# mysql: yes
# nginx: yes
# nsd: yes
@@ -64,6 +61,7 @@ nginx_log: no
# phpfpm: yes
# postfix: yes
# postgres: yes
+# powerdns: yes
# rabbitmq: yes
# redis: yes
# retroshare: yes
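
As a concrete illustration of the enable/disable syntax above, a python.d.conf fragment that turns on the new mongodb and powerdns modules while keeping chrony off (module names taken from the list above; this is a sketch, not the shipped default) could look like:

    enabled: yes      # keep python.d.plugin itself running
    chrony: no        # chrony stays disabled, as shipped
    mongodb: yes      # explicitly enable the new mongodb module
    powerdns: yes     # likewise for the new powerdns module
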
diff --git a/conf.d/python.d/apache.conf b/conf.d/python.d/apache.conf
index 5b151ef70..3bbc3f786 100644
--- a/conf.d/python.d/apache.conf
+++ b/conf.d/python.d/apache.conf
@@ -31,7 +31,13 @@
# If unset, the default for python.d.plugin is used.
# Attempts to restore the service are made once every update_every
# and only if the module has collected values in the past.
-# retries: 5
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
# ----------------------------------------------------------------------
# JOBS (data collection sources)
@@ -47,12 +53,13 @@
# predefined parameters. These are:
#
# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# retries: 5 # the JOB's number of restoration attempts
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, apache also supports the following:
#
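
To show how the newly documented autodetection_retry interacts with the other generic JOB parameters, a minimal override might look like the sketch below (job name and values are illustrative; module-specific keys such as the apache status URL are omitted here):

    myserver:
      name: 'local'              # chart name on the dashboard
      update_every: 3            # collect every 3 seconds instead of the default
      retries: 60                # restoration attempts once the job has collected data
      autodetection_retry: 30    # re-run the check every 30 seconds until it succeeds
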
diff --git a/conf.d/python.d/beanstalk.conf b/conf.d/python.d/beanstalk.conf
new file mode 100644
index 000000000..940801877
--- /dev/null
+++ b/conf.d/python.d/beanstalk.conf
@@ -0,0 +1,80 @@
+# netdata python.d.plugin configuration for beanstalk
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# chart_cleanup sets the default chart cleanup interval in iterations.
+# A chart is marked as obsolete if it has not been updated
+# 'chart_cleanup' iterations in a row.
+# When a plugin sends the obsolete flag, the charts are not deleted
+# from netdata immediately.
+# They will be hidden immediately (not offered to dashboard viewer,
+# streamed upstream and archived to backends) and deleted one hour
+# later (configurable from netdata.conf).
+# chart_cleanup: 10
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+# chart_cleanup: 10 # the JOB's chart cleanup interval in iterations
+#
+# Additionally to the above, beanstalk also supports the following:
+#
+# host: 'host' # Server ip address or hostname. Default: 127.0.0.1
+# port: port # Beanstalkd port. Default:
+#
+# ----------------------------------------------------------------------
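
A minimal beanstalk job using the two module-specific keys documented above might look like this sketch (beanstalkd conventionally listens on port 11300; adjust host and port for your setup):

    local:
      name: 'local'
      host: '127.0.0.1'
      port: 11300
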
diff --git a/conf.d/python.d/bind_rndc.conf b/conf.d/python.d/bind_rndc.conf
index e4f7ac825..71958ff98 100644
--- a/conf.d/python.d/bind_rndc.conf
+++ b/conf.d/python.d/bind_rndc.conf
@@ -31,7 +31,13 @@
# If unset, the default for python.d.plugin is used.
# Attempts to restore the service are made once every update_every
# and only if the module has collected values in the past.
-# retries: 5
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
# ----------------------------------------------------------------------
# JOBS (data collection sources)
@@ -47,12 +53,13 @@
# predefined parameters. These are:
#
# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# retries: 5 # the JOB's number of restoration attempts
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, bind_rndc also supports the following:
#
@@ -75,7 +82,7 @@
# chown bind bind/
#
# 4. RELOAD (NOT restart) BIND
-# systemctl reload bind9.serice
+# systemctl reload bind9.service
#
# 5. Run as a root 'rndc stats' to dump (BIND will create named.stats in new directory)
#
@@ -101,9 +108,5 @@
# To test your logrotate conf file run as root:
#
# logrotate /etc/logrotate.d/bind-rndc -d (debug dry-run mode)
-# ------------------------------------------------------------------------------------------------------------------
-# AUTO-DETECTION JOBS
-# only one of them will run (they have the same name)
#
-#local:
-# named_stats_path: '/var/log/bind/named.stats'
+# ----------------------------------------------------------------------
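
Since the shipped auto-detection job was dropped from this file, a user following the setup steps above can declare one explicitly; a sketch, assuming the stats path created in the steps above:

    local:
      name: 'local'
      named_stats_path: '/var/log/bind/named.stats'
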
diff --git a/conf.d/python.d/chrony.conf b/conf.d/python.d/chrony.conf
index 46229687b..9ac906b5f 100644
--- a/conf.d/python.d/chrony.conf
+++ b/conf.d/python.d/chrony.conf
@@ -31,7 +31,13 @@ update_every: 5
# If unset, the default for python.d.plugin is used.
# Attempts to restore the service are made once every update_every
# and only if the module has collected values in the past.
-# retries: 5
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
# ----------------------------------------------------------------------
# JOBS (data collection sources)
@@ -47,12 +53,13 @@ update_every: 5
# predefined parameters. These are:
#
# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# retries: 5 # the JOB's number of restoration attempts
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, chrony also supports the following:
#
diff --git a/conf.d/python.d/couchdb.conf b/conf.d/python.d/couchdb.conf
new file mode 100644
index 000000000..5f6e75cff
--- /dev/null
+++ b/conf.d/python.d/couchdb.conf
@@ -0,0 +1,91 @@
+# netdata python.d.plugin configuration for couchdb
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# By default, CouchDB only updates its stats every 10 seconds.
+update_every: 10
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, the couchdb plugin also supports the following:
+#
+# host: 'ipaddress' # Server ip address or hostname. Default: 127.0.0.1
+# port: 'port' # CouchDB port. Default: 5984
+# scheme: 'scheme' # http or https. Default: http
+# node: 'couchdb@127.0.0.1' # CouchDB node name. Same as -name vm.args argument.
+#
+# if the URL is password protected, the following are supported:
+#
+# user: 'username'
+# pass: 'password'
+#
+# if db-specific stats are desired, place their names in databases:
+# databases: 'npm-registry animaldb'
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+#
+localhost:
+ name: 'local'
+ host: '127.0.0.1'
+ port: '5984'
+ node: 'couchdb@127.0.0.1'
+ scheme: 'http'
+# user: 'admin'
+# pass: 'password'
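
If per-database charts are wanted as well, the databases key documented above can be added to the same job; a sketch with placeholder database names:

    localhost:
      name: 'local'
      host: '127.0.0.1'
      port: '5984'
      node: 'couchdb@127.0.0.1'
      scheme: 'http'
      databases: 'mydb1 mydb2'   # space-separated list; names here are illustrative
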
diff --git a/conf.d/python.d/cpufreq.conf b/conf.d/python.d/cpufreq.conf
index 10c96917f..0890245d9 100644
--- a/conf.d/python.d/cpufreq.conf
+++ b/conf.d/python.d/cpufreq.conf
@@ -31,7 +31,13 @@
# If unset, the default for python.d.plugin is used.
# Attempts to restore the service are made once every update_every
# and only if the module has collected values in the past.
-# retries: 5
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
# The directory to search for the file scaling_cur_freq
sys_dir: "/sys/devices"
diff --git a/conf.d/python.d/dns_query_time.conf b/conf.d/python.d/dns_query_time.conf
index f4d4dbf92..d32c6db83 100644
--- a/conf.d/python.d/dns_query_time.conf
+++ b/conf.d/python.d/dns_query_time.conf
@@ -31,7 +31,13 @@
# If unset, the default for python.d.plugin is used.
# Attempts to restore the service are made once every update_every
# and only if the module has collected values in the past.
-# retries: 5
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
# ----------------------------------------------------------------------
# JOBS (data collection sources)
@@ -47,26 +53,19 @@
# predefined parameters. These are:
#
# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# retries: 5 # the JOB's number of restoration attempts
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, dns_query_time also supports the following:
#
-# dns_servers: 'dns servers' # list of dns servers to query
-# domains: 'domains' # list of domains
-# aggregate: yes/no # Default: yes. Aggregate all servers in one chart or not
-# response_timeout: 4 # Defalt: 4. Dns query response timeout (query = -100 if response time > response_time)
-#
-#
-# ----------------------------------------------------------------------
-# AUTO-DETECTION JOBS
-# only one of them will run (they have the same name)
-#
+# dns_servers: 'dns servers' # List of dns servers to query
+# domains: 'domains' # List of domains
+# aggregate: yes/no # Aggregate all servers in one chart or not
+# response_timeout: 4 # DNS query response timeout (a query is scored -100 if its response time exceeds response_timeout)
#
-#aggregate: yes
-#dns_servers: '8.8.8.8 8.8.4.4'
-#domains: 'python.org distrowatch.com linuxmint.com linux.com rsyslog.com liblognorm.com archlinux.org cisco.com debian.org kernel.org gns3.com opera.com github.com youtube.com amazon.co.uk kde.org netdata.firehol.org ubuntu.com redhat.com opensuse.org wireshark.org vmware.com microsoft.com elastic.co'
+# ----------------------------------------------------------------------
\ No newline at end of file
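
The shipped auto-detection job was removed here, so a job supplying the parameters above must be defined by hand to run the module. A sketch reusing the values from the removed example (server and domain lists are examples, not requirements):

    my_dns_check:
      aggregate: yes
      dns_servers: '8.8.8.8 8.8.4.4'
      domains: 'python.org kernel.org debian.org'
      response_timeout: 4
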
diff --git a/conf.d/python.d/dnsdist.conf b/conf.d/python.d/dnsdist.conf
new file mode 100644
index 000000000..aec58b8e1
--- /dev/null
+++ b/conf.d/python.d/dnsdist.conf
@@ -0,0 +1,85 @@
+# netdata python.d.plugin configuration for dnsdist
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+#update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+#retries: 600000
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+#autodetection_retry: 1
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+#
+# Additionally to the above, dnsdist also supports the following:
+#
+# url: 'URL' # the URL to fetch dnsdist performance statistics
+# user: 'username' # username for basic auth
+# pass: 'password' # password for basic auth
+# header:
+# X-API-Key: 'Key' # API key
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+# localhost:
+# name : 'local'
+# url : 'http://127.0.0.1:5053/jsonstat?command=stats'
+# user : 'username'
+# pass : 'password'
+# header:
+# X-API-Key: 'dnsdist-api-key'
+
+
diff --git a/conf.d/python.d/dovecot.conf b/conf.d/python.d/dovecot.conf
index 917c5272e..56c394991 100644
--- a/conf.d/python.d/dovecot.conf
+++ b/conf.d/python.d/dovecot.conf
@@ -31,7 +31,13 @@
# If unset, the default for python.d.plugin is used.
# Attempts to restore the service are made once every update_every
# and only if the module has collected values in the past.
-# retries: 5
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
# ----------------------------------------------------------------------
# JOBS (data collection sources)
@@ -47,12 +53,13 @@
# predefined parameters. These are:
#
# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# retries: 5 # the JOB's number of restoration attempts
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, dovecot also supports the following:
#
diff --git a/conf.d/python.d/elasticsearch.conf b/conf.d/python.d/elasticsearch.conf
index 7c35df229..213843bf9 100644
--- a/conf.d/python.d/elasticsearch.conf
+++ b/conf.d/python.d/elasticsearch.conf
@@ -31,7 +31,13 @@
# If unset, the default for python.d.plugin is used.
# Attempts to restore the service are made once every update_every
# and only if the module has collected values in the past.
-# retries: 5
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
# ----------------------------------------------------------------------
# JOBS (data collection sources)
@@ -47,12 +53,13 @@
# predefined parameters. These are:
#
# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# retries: 5 # the JOB's number of restoration attempts
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, elasticsearch plugin also supports the following:
#
diff --git a/conf.d/python.d/example.conf b/conf.d/python.d/example.conf
index 31f9a49a0..e7fed9b50 100644
--- a/conf.d/python.d/example.conf
+++ b/conf.d/python.d/example.conf
@@ -31,7 +31,13 @@
# If unset, the default for python.d.plugin is used.
# Attempts to restore the service are made once every update_every
# and only if the module has collected values in the past.
-# retries: 5
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
# ----------------------------------------------------------------------
# JOBS (data collection sources)
@@ -47,12 +53,13 @@
# predefined parameters. These are:
#
# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# retries: 5 # the JOB's number of restoration attempts
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, example also supports the following:
#
diff --git a/conf.d/python.d/exim.conf b/conf.d/python.d/exim.conf
index 07d72c5a3..2add7b2cb 100644
--- a/conf.d/python.d/exim.conf
+++ b/conf.d/python.d/exim.conf
@@ -32,7 +32,13 @@ update_every: 10
# If unset, the default for python.d.plugin is used.
# Attempts to restore the service are made once every update_every
# and only if the module has collected values in the past.
-# retries: 5
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
# ----------------------------------------------------------------------
# JOBS (data collection sources)
@@ -48,12 +54,13 @@ update_every: 10
# predefined parameters. These are:
#
# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# retries: 5 # the JOB's number of restoration attempts
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, exim also supports the following:
#
diff --git a/conf.d/python.d/fail2ban.conf b/conf.d/python.d/fail2ban.conf
index 76277108b..60ca87231 100644
--- a/conf.d/python.d/fail2ban.conf
+++ b/conf.d/python.d/fail2ban.conf
@@ -31,7 +31,13 @@
# If unset, the default for python.d.plugin is used.
# Attempts to restore the service are made once every update_every
# and only if the module has collected values in the past.
-# retries: 5
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
# ----------------------------------------------------------------------
# JOBS (data collection sources)
@@ -47,17 +53,18 @@
# predefined parameters. These are:
#
# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# retries: 5 # the JOB's number of restoration attempts
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, fail2ban also supports the following:
#
-# log_path: 'path to fail2ban.log' # Default: '/var/log/fail2ban.log'
-# conf_path: 'path to jail.local/jail.conf' # Default: '/etc/fail2ban/jail.local'
-# conf_dir: 'path to jail.d/' # Default: '/etc/fail2ban/jail.d/'
-# exclude: 'jails you want to exclude from autodetection' # Default: '[]' empty list
+# log_path: 'path to fail2ban.log' # Default: '/var/log/fail2ban.log'
+# conf_path: 'path to jail.local/jail.conf' # Default: '/etc/fail2ban/jail.local'
+# conf_dir: 'path to jail.d/' # Default: '/etc/fail2ban/jail.d/'
+# exclude: 'jails you want to exclude from autodetection' # Default: none
#------------------------------------------------------------------------------------------------------------------
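
A fail2ban job overriding the documented defaults might look like the following sketch (paths shown are the defaults listed above; the excluded jail name is illustrative):

    local:
      log_path: '/var/log/fail2ban.log'
      conf_path: '/etc/fail2ban/jail.local'
      conf_dir: '/etc/fail2ban/jail.d/'
      exclude: 'dropbear'        # illustrative jail to skip during autodetection
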
diff --git a/conf.d/python.d/freeradius.conf b/conf.d/python.d/freeradius.conf
index b2c8abf6b..3336d4c49 100644
--- a/conf.d/python.d/freeradius.conf
+++ b/conf.d/python.d/freeradius.conf
@@ -31,7 +31,13 @@
# If unset, the default for python.d.plugin is used.
# Attempts to restore the service are made once every update_every
# and only if the module has collected values in the past.
-# retries: 5
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
# ----------------------------------------------------------------------
# JOBS (data collection sources)
@@ -47,21 +53,22 @@
# predefined parameters. These are:
#
# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# retries: 5 # the JOB's number of restoration attempts
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, freeradius also supports the following:
#
-# host: 'host' # Default: 'localhost'. Server ip address or hostname.
-# port: 'port' # Default: '18121'. Port on which freeradius server listen (type = status).
+# host: 'host' # Default: 'localhost'. Server ip address or hostname.
+# port: 'port' # Default: '18121'. Port on which the freeradius server listens (type = status).
# secret: 'secret' # Default: 'adminsecret'.
-# acct: True/False # Defalt: False. Freeradius accounting statistics.
-# proxy_auth: True/False # Default: False. Freeradius proxy authentication statistics.
-# proxy_acct: True/False # Default: False. Freeradius proxy accounting statistics.
+# acct: yes/no # Default: no. Freeradius accounting statistics.
+# proxy_auth: yes/no # Default: no. Freeradius proxy authentication statistics.
+# proxy_acct: yes/no # Default: no. Freeradius proxy accounting statistics.
#
# ------------------------------------------------------------------------------------------------------------------
# Freeradius server configuration:
@@ -73,14 +80,3 @@
# ln -s ../sites-available/status status
# and restart/reload your FREERADIUS server.
# ------------------------------------------------------------------------------------------------------------------
-#
-# AUTO-DETECTION JOBS
-# only one of them will run (they have the same name)
-
-local:
- host: 'localhost'
- port: '18121'
- secret: 'adminsecret'
-#acct: False
-#proxy_auth: False
-#proxy_acct: False
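
The bundled 'local' job shown in the removed lines above is gone in this version; to restore the old behaviour, the same job can be declared manually, optionally switching on the accounting and proxy charts documented above:

    local:
      host: 'localhost'
      port: '18121'
      secret: 'adminsecret'
      acct: no            # set to yes for accounting statistics
      proxy_auth: no
      proxy_acct: no
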
diff --git a/conf.d/python.d/go_expvar.conf b/conf.d/python.d/go_expvar.conf
index 5be4890dc..c352b1674 100644
--- a/conf.d/python.d/go_expvar.conf
+++ b/conf.d/python.d/go_expvar.conf
@@ -31,7 +31,13 @@
# If unset, the default for python.d.plugin is used.
# Attempts to restore the service are made once every update_every
# and only if the module has collected values in the past.
-# retries: 5
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
# ----------------------------------------------------------------------
# JOBS (data collection sources)
@@ -42,19 +48,17 @@
# predefined parameters. These are:
#
# job_name:
-# name: my_name # the JOB's name as it will appear at the
-# # dashboard. If name: is not supplied the
-# # job_name: will be used (use _ for spaces)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# retries: 5 # the JOB's number of restoration attempts
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, this plugin also supports the following:
#
# url: 'http://127.0.0.1/debug/vars' # the URL of the expvar endpoint
-# ss_cert: # ignore HTTPS self-signed certificate
-# proxy: # use HTTP proxy
#
# As the plugin cannot possibly know the port your application listens on, there is no default value. Please include
# the whole path of the endpoint, as the expvar handler can be installed in a non-standard location.
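
Because there is no default port, a go_expvar job must spell out the full expvar endpoint; a sketch, assuming an application exporting expvars on port 8080 (the port and job name are assumptions):

    my_app:
      name: 'my_app'
      url: 'http://127.0.0.1:8080/debug/vars'
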
diff --git a/conf.d/python.d/haproxy.conf b/conf.d/python.d/haproxy.conf
index a9e048791..a40dd76a5 100644
--- a/conf.d/python.d/haproxy.conf
+++ b/conf.d/python.d/haproxy.conf
@@ -31,7 +31,13 @@
# If unset, the default for python.d.plugin is used.
# Attempts to restore the service are made once every update_every
# and only if the module has collected values in the past.
-# retries: 5
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
# ----------------------------------------------------------------------
# JOBS (data collection sources)
@@ -47,12 +53,13 @@
# predefined parameters. These are:
#
# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# retries: 5 # the JOB's number of restoration attempts
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, haproxy also supports the following:
#
diff --git a/conf.d/python.d/hddtemp.conf b/conf.d/python.d/hddtemp.conf
index f74a09803..9165798a2 100644
--- a/conf.d/python.d/hddtemp.conf
+++ b/conf.d/python.d/hddtemp.conf
@@ -31,7 +31,13 @@
# If unset, the default for python.d.plugin is used.
# Attempts to restore the service are made once every update_every
# and only if the module has collected values in the past.
-# retries: 5
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
# ----------------------------------------------------------------------
# JOBS (data collection sources)
@@ -47,18 +53,18 @@
# predefined parameters. These are:
#
# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# retries: 5 # the JOB's number of restoration attempts
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, hddtemp also supports the following:
#
# host: 'IP or HOSTNAME' # the host to connect to
# port: PORT # the port to connect to
-# fahrenheit: True/False # fahrenheit instead of celsius. Default is False
#
# By default this module will try to autodetect disks
@@ -78,13 +84,11 @@
localhost:
name: 'local'
host: 'localhost'
- fahrenheit: False
port: 7634
localipv4:
name: 'local'
host: '127.0.0.1'
- fahrenheit: False
port: 7634
localipv6:
diff --git a/conf.d/python.d/ipfs.conf b/conf.d/python.d/ipfs.conf
index e039026cc..c247c1b7a 100644
--- a/conf.d/python.d/ipfs.conf
+++ b/conf.d/python.d/ipfs.conf
@@ -31,7 +31,13 @@
# If unset, the default for python.d.plugin is used.
# Attempts to restore the service are made once every update_every
# and only if the module has collected values in the past.
-# retries: 5
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
# ----------------------------------------------------------------------
# JOBS (data collection sources)
@@ -47,12 +53,13 @@
# predefined parameters. These are:
#
# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# retries: 5 # the JOB's number of restoration attempts
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, ipfs also supports the following:
#
diff --git a/conf.d/python.d/isc_dhcpd.conf b/conf.d/python.d/isc_dhcpd.conf
index 938ca6e72..4a4c4a5e3 100644
--- a/conf.d/python.d/isc_dhcpd.conf
+++ b/conf.d/python.d/isc_dhcpd.conf
@@ -31,7 +31,13 @@
# If unset, the default for python.d.plugin is used.
# Attempts to restore the service are made once every update_every
# and only if the module has collected values in the past.
-# retries: 5
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
# ----------------------------------------------------------------------
# JOBS (data collection sources)
@@ -47,12 +53,13 @@
# predefined parameters. These are:
#
# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# retries: 5 # the JOB's number of restoration attempts
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, isc_dhcpd supports the following:
#
@@ -71,13 +78,4 @@
# This is the default, so it will work in most cases.
# 3. Pools MUST BE in CIDR format.
#
-#-----------------------------------------------------------------------
-# AUTO-DETECTION JOBS
-# This is disabled by default.
-# To enable it, uncomment the following.
-#
-#leases:
-# leases_path : '/var/lib/dhcp/dhcpd.leases'
-# pools:
-# office: '192.168.2.0/24'
-# wifi: '192.168.3.0/24'
+# ----------------------------------------------------------------------
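
The commented 'leases' example was dropped from this file; a job can still be declared manually, following the notes above (pool names and ranges below are illustrative and must match your dhcpd.conf, in CIDR form):

    leases:
      leases_path: '/var/lib/dhcp/dhcpd.leases'
      pools:
        office: '192.168.2.0/24'
        wifi:   '192.168.3.0/24'
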
diff --git a/conf.d/python.d/mdstat.conf b/conf.d/python.d/mdstat.conf
index c89d463be..66a2f153c 100644
--- a/conf.d/python.d/mdstat.conf
+++ b/conf.d/python.d/mdstat.conf
@@ -23,4 +23,10 @@
# If unset, the default for python.d.plugin is used.
# Attempts to restore the service are made once every update_every
# and only if the module has collected values in the past.
-# retries: 5
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
diff --git a/conf.d/python.d/memcached.conf b/conf.d/python.d/memcached.conf
index f1723dc81..85c3daf65 100644
--- a/conf.d/python.d/memcached.conf
+++ b/conf.d/python.d/memcached.conf
@@ -31,7 +31,13 @@
# If unset, the default for python.d.plugin is used.
# Attempts to restore the service are made once every update_every
# and only if the module has collected values in the past.
-# retries: 5
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
# ----------------------------------------------------------------------
# JOBS (data collection sources)
@@ -47,12 +53,13 @@
# predefined parameters. These are:
#
# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# retries: 5 # the JOB's number of restoration attempts
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, memcached also supports the following:
#
diff --git a/conf.d/python.d/mongodb.conf b/conf.d/python.d/mongodb.conf
index a19b6570b..62faef68d 100644
--- a/conf.d/python.d/mongodb.conf
+++ b/conf.d/python.d/mongodb.conf
@@ -31,7 +31,13 @@
# If unset, the default for python.d.plugin is used.
# Attempts to restore the service are made once every update_every
# and only if the module has collected values in the past.
-# retries: 5
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
# ----------------------------------------------------------------------
# JOBS (data collection sources)
@@ -47,12 +53,13 @@
# predefined parameters. These are:
#
# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# retries: 5 # the JOB's number of restoration attempts
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, mongodb also supports the following:
#
diff --git a/conf.d/python.d/mysql.conf b/conf.d/python.d/mysql.conf
index 63d635174..def9f7e96 100644
--- a/conf.d/python.d/mysql.conf
+++ b/conf.d/python.d/mysql.conf
@@ -31,7 +31,13 @@
# If unset, the default for python.d.plugin is used.
# Attempts to restore the service are made once every update_every
# and only if the module has collected values in the past.
-# retries: 5
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
# ----------------------------------------------------------------------
# JOBS (data collection sources)
@@ -47,12 +53,13 @@
# predefined parameters. These are:
#
# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# retries: 5 # the JOB's number of restoration attempts
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, mysql also supports the following:
#
diff --git a/conf.d/python.d/nginx.conf b/conf.d/python.d/nginx.conf
index 645925a55..71c521066 100644
--- a/conf.d/python.d/nginx.conf
+++ b/conf.d/python.d/nginx.conf
@@ -43,7 +43,13 @@
# If unset, the default for python.d.plugin is used.
# Attempts to restore the service are made once every update_every
# and only if the module has collected values in the past.
-# retries: 5
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
# ----------------------------------------------------------------------
# JOBS (data collection sources)
@@ -59,13 +65,13 @@
# predefined parameters. These are:
#
# job_name:
-# name: my_name # the JOB's name as it will appear at the
-# # dashboard. If name: is not supplied the
-# # job_name: will be used (use _ for spaces)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# retries: 5 # the JOB's number of restoration attempts
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, this plugin also supports the following:
#
@@ -99,5 +105,5 @@ localipv4:
localipv6:
name : 'local'
- url : 'http://::1/stub_status'
+ url : 'http://[::1]/stub_status'
diff --git a/conf.d/python.d/nsd.conf b/conf.d/python.d/nsd.conf
index 7566fe85e..078e97216 100644
--- a/conf.d/python.d/nsd.conf
+++ b/conf.d/python.d/nsd.conf
@@ -32,7 +32,13 @@
# If unset, the default for python.d.plugin is used.
# Attempts to restore the service are made once every update_every
# and only if the module has collected values in the past.
-# retries: 5
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
# ----------------------------------------------------------------------
# JOBS (data collection sources)
@@ -48,12 +54,13 @@
# predefined parameters. These are:
#
# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# retries: 5 # the JOB's number of restoration attempts
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, nsd also supports the following:
#
diff --git a/conf.d/python.d/ovpn_status_log.conf b/conf.d/python.d/ovpn_status_log.conf
index 39bc8e9d4..907f014f5 100644
--- a/conf.d/python.d/ovpn_status_log.conf
+++ b/conf.d/python.d/ovpn_status_log.conf
@@ -31,7 +31,13 @@
# If unset, the default for python.d.plugin is used.
# Attempts to restore the service are made once every update_every
# and only if the module has collected values in the past.
-# retries: 5
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
# ----------------------------------------------------------------------
# JOBS (data collection sources)
@@ -47,12 +53,13 @@
# predefined parameters. These are:
#
# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# retries: 5 # the JOB's number of restoration attempts
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, openvpn status log also supports the following:
#
@@ -84,3 +91,5 @@
#
#default:
# log_path: '/var/log/openvpn-status.log'
+#
+# ----------------------------------------------------------------------
\ No newline at end of file
diff --git a/conf.d/python.d/phpfpm.conf b/conf.d/python.d/phpfpm.conf
index f5d067cc7..08688e2fa 100644
--- a/conf.d/python.d/phpfpm.conf
+++ b/conf.d/python.d/phpfpm.conf
@@ -31,7 +31,13 @@
# If unset, the default for python.d.plugin is used.
# Attempts to restore the service are made once every update_every
# and only if the module has collected values in the past.
-# retries: 5
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
# ----------------------------------------------------------------------
# JOBS (data collection sources)
@@ -47,12 +53,13 @@
# predefined parameters. These are:
#
# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# retries: 5 # the JOB's number of restoration attempts
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, PHP-FPM also supports the following:
#
diff --git a/conf.d/python.d/postfix.conf b/conf.d/python.d/postfix.conf
index ca9d8fada..e0d5a5f83 100644
--- a/conf.d/python.d/postfix.conf
+++ b/conf.d/python.d/postfix.conf
@@ -32,7 +32,13 @@ update_every: 10
# If unset, the default for python.d.plugin is used.
# Attempts to restore the service are made once every update_every
# and only if the module has collected values in the past.
-# retries: 5
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
# ----------------------------------------------------------------------
# JOBS (data collection sources)
@@ -48,12 +54,13 @@ update_every: 10
# predefined parameters. These are:
#
# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# retries: 5 # the JOB's number of restoration attempts
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, postfix also supports the following:
#
diff --git a/conf.d/python.d/postgres.conf b/conf.d/python.d/postgres.conf
index 1dbb64f40..3a70a7184 100644
--- a/conf.d/python.d/postgres.conf
+++ b/conf.d/python.d/postgres.conf
@@ -31,7 +31,13 @@
# If unset, the default for python.d.plugin is used.
# Attempts to restore the service are made once every update_every
# and only if the module has collected values in the past.
-# retries: 5
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
# ----------------------------------------------------------------------
# JOBS (data collection sources)
@@ -47,12 +53,13 @@
# predefined parameters. These are:
#
# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# retries: 5 # the JOB's number of restoration attempts
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# A single connection is required in order to pull statistics.
#
@@ -70,7 +77,7 @@
# index_stats : false
# database_poll : 'dbase_name1 dbase_name2' # poll only specified databases (all others will be excluded from charts)
#
-# Postfix permissions are configured at its pg_hba.conf file. You can
+# Postgres permissions are configured in its pg_hba.conf file. You can
# "trust" local clients to allow netdata to connect, or you can create
# a postgres user for netdata and add its password below to allow
# netdata to connect.
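A TCP job matching the description above might look like the sketch below; password is the option the text refers to, while host, port, user and database are assumed key names and the credentials are placeholders.

  tcp:
    name: 'local'
    host: 'localhost'
    port: 5432
    user: 'netdata'
    password: 'change_me'
    database: 'postgres'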
diff --git a/conf.d/python.d/apache_cache.conf b/conf.d/python.d/powerdns.conf
index 98eecd0e8..ca6200df1 100644
--- a/conf.d/python.d/apache_cache.conf
+++ b/conf.d/python.d/powerdns.conf
@@ -1,4 +1,4 @@
-# netdata python.d.plugin configuration for apache cache
+# netdata python.d.plugin configuration for powerdns
#
# This file is in YaML format. Generally the format is:
#
@@ -31,7 +31,13 @@
# If unset, the default for python.d.plugin is used.
# Attempts to restore the service are made once every update_every
# and only if the module has collected values in the past.
-# retries: 5
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
# ----------------------------------------------------------------------
# JOBS (data collection sources)
@@ -47,30 +53,26 @@
# predefined parameters. These are:
#
# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# retries: 5 # the JOB's number of restoration attempts
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
-# Additionally to the above, apache_cache also supports the following:
+# Additionally to the above, powerdns also supports the following:
#
-# path: 'PATH' # the path to apache's cache.log
+# url: 'URL' # the URL to fetch powerdns performance statistics
+# header:
+# X-API-Key: 'Key' # API key
#
-
# ----------------------------------------------------------------------
# AUTO-DETECTION JOBS
# only one of them will run (they have the same name)
-apache:
- name: 'local'
- path: '/var/log/apache/cache.log'
-
-apache2:
- name: 'local'
- path: '/var/log/apache2/cache.log'
-
-httpd:
- name: 'local'
- path: '/var/log/httpd/cache.log'
+# localhost:
+# name : 'local'
+# url : 'http://127.0.0.1:8081/api/v1/servers/localhost/statistics'
+# header:
+# X-API-Key: 'change_me'
diff --git a/conf.d/python.d/rabbitmq.conf b/conf.d/python.d/rabbitmq.conf
index eccf65df9..3f90da8a2 100644
--- a/conf.d/python.d/rabbitmq.conf
+++ b/conf.d/python.d/rabbitmq.conf
@@ -31,7 +31,13 @@
# If unset, the default for python.d.plugin is used.
# Attempts to restore the service are made once every update_every
# and only if the module has collected values in the past.
-# retries: 5
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
# ----------------------------------------------------------------------
# JOBS (data collection sources)
@@ -47,12 +53,13 @@
# predefined parameters. These are:
#
# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# retries: 5 # the JOB's number of restoration attempts
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, rabbitmq plugin also supports the following:
#
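The rabbitmq-specific options fall outside this hunk; assuming the module talks to the management plugin through host, port, user and pass keys, a local job could be sketched as:

  localhost:
    name: 'local'
    host: '127.0.0.1'
    port: 15672
    user: 'guest'
    pass: 'guest'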
diff --git a/conf.d/python.d/redis.conf b/conf.d/python.d/redis.conf
index 983fbfbdb..6363f6da7 100644
--- a/conf.d/python.d/redis.conf
+++ b/conf.d/python.d/redis.conf
@@ -31,7 +31,13 @@
# If unset, the default for python.d.plugin is used.
# Attempts to restore the service are made once every update_every
# and only if the module has collected values in the past.
-# retries: 5
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
# ----------------------------------------------------------------------
# JOBS (data collection sources)
@@ -47,12 +53,13 @@
# predefined parameters. These are:
#
# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# retries: 5 # the JOB's number of restoration attempts
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, redis also supports the following:
#
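The redis options are likewise outside the hunk; assuming host and port are the supported keys, a local job might be:

  localhost:
    name: 'local'
    host: 'localhost'
    port: 6379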
diff --git a/conf.d/python.d/retroshare.conf b/conf.d/python.d/retroshare.conf
index 79614373b..9c92583f7 100644
--- a/conf.d/python.d/retroshare.conf
+++ b/conf.d/python.d/retroshare.conf
@@ -31,7 +31,13 @@
# If unset, the default for python.d.plugin is used.
# Attempts to restore the service are made once every update_every
# and only if the module has collected values in the past.
-# retries: 5
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
# ----------------------------------------------------------------------
# JOBS (data collection sources)
@@ -47,12 +53,13 @@
# predefined parameters. These are:
#
# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# retries: 5 # the JOB's number of restoration attempts
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, RetroShare also supports the following:
#
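The RetroShare options are not shown here either; assuming the module takes a url pointing at the local RetroShare web API, a sketch would be:

  localhost:
    name: 'local'
    url: 'http://localhost:9090'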
diff --git a/conf.d/python.d/samba.conf b/conf.d/python.d/samba.conf
index 865281cd6..ee513c60f 100644
--- a/conf.d/python.d/samba.conf
+++ b/conf.d/python.d/samba.conf
@@ -31,7 +31,13 @@ update_every: 5
# If unset, the default for python.d.plugin is used.
# Attempts to restore the service are made once every update_every
# and only if the module has collected values in the past.
-# retries: 5
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
# ----------------------------------------------------------------------
# JOBS (data collection sources)
@@ -47,12 +53,10 @@ update_every: 5
# predefined parameters. These are:
#
# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# retries: 5 # the JOB's number of restoration attempts
-#
-#
-
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
\ No newline at end of file
diff --git a/conf.d/python.d/sensors.conf b/conf.d/python.d/sensors.conf
index 2e9a41338..83bbffd7d 100644
--- a/conf.d/python.d/sensors.conf
+++ b/conf.d/python.d/sensors.conf
@@ -23,7 +23,13 @@
# If unset, the default for python.d.plugin is used.
# Attempts to restore the service are made once every update_every
# and only if the module has collected values in the past.
-# retries: 5
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
# ----------------------------------------------------------------------
# Limit the number of sensors types.
@@ -54,5 +60,4 @@ types:
# the prefix is matched (anything that starts like that)
#
#----------------------------------------------------------------------
-# To change celsius to fahrenheit uncomment line below
-#fahrenheit: True
+
diff --git a/conf.d/python.d/smartd_log.conf b/conf.d/python.d/smartd_log.conf
index 8764ffd3e..3fab3f1c0 100644
--- a/conf.d/python.d/smartd_log.conf
+++ b/conf.d/python.d/smartd_log.conf
@@ -31,7 +31,13 @@
# If unset, the default for python.d.plugin is used.
# Attempts to restore the service are made once every update_every
# and only if the module has collected values in the past.
-# retries: 5
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
# ----------------------------------------------------------------------
# JOBS (data collection sources)
@@ -47,18 +53,20 @@
# predefined parameters. These are:
#
# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# retries: 5 # the JOB's number of restoration attempts
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, smartd_log also supports the following:
#
-# log_path: '/path/to/smartdlogs' # path to smartd log files. Default is /var/log/smartd
-# raw_values: no # raw or normalized values on charts. Default is normalized.
-# smart_attributes: '1 2 3 4 44' # smart attributes charts. Default are ['1', '4', '5', '7', '9', '12', '193', '194', '197', '198', '200'].
+# log_path: '/path/to/smartdlogs' # path to smartd log files. Default is /var/log/smartd
+# raw_values: yes # enable/disable raw values charts. Enabled by default.
+# smart_attributes: '1 2 3 4 44' # smart attributes charts. Defaults are ['1', '4', '5', '7', '9', '12', '193', '194', '197', '198', '200'].
+# exclude_disks: 'PATTERN1 PATTERN2' # space-separated patterns. If the pattern is in the drive name, the module will not collect data for it.
#
# ----------------------------------------------------------------------
# Additional information
@@ -79,7 +87,4 @@
# Some raw numbers provide valuable insight when properly interpreted. These cases will be discussed later on.
# Raw values are typically listed in hexadecimal numbers. The raw value has different structure for different vendors and is often not meaningful as a decimal number.
#
-#
-# JOB configuration
-#
-log_path: '/var/log/smartd'
+# ----------------------------------------------------------------------
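All four smartd_log options are documented in this hunk, so a job can be sketched directly from them (the values themselves are illustrative):

  custom:
    name: 'local'
    log_path: '/var/log/smartd'
    raw_values: no
    exclude_disks: 'USB FLASH'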
diff --git a/conf.d/python.d/squid.conf b/conf.d/python.d/squid.conf
index 27800bde7..564187f00 100644
--- a/conf.d/python.d/squid.conf
+++ b/conf.d/python.d/squid.conf
@@ -31,7 +31,13 @@
# If unset, the default for python.d.plugin is used.
# Attempts to restore the service are made once every update_every
# and only if the module has collected values in the past.
-# retries: 5
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
# ----------------------------------------------------------------------
# JOBS (data collection sources)
@@ -47,12 +53,13 @@
# predefined parameters. These are:
#
# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# retries: 5 # the JOB's number of restoration attempts
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, squid also supports the following:
#
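The squid options are cut off by the hunk; under the assumption that the module accepts host, port and a cache-manager request string, a local job might look like:

  local:
    name: 'local'
    host: 'localhost'
    port: 3128
    request: 'cache_object://localhost:3128/counters'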
diff --git a/conf.d/python.d/tomcat.conf b/conf.d/python.d/tomcat.conf
index ce89175f6..c63f06cfa 100644
--- a/conf.d/python.d/tomcat.conf
+++ b/conf.d/python.d/tomcat.conf
@@ -31,7 +31,13 @@
# If unset, the default for python.d.plugin is used.
# Attempts to restore the service are made once every update_every
# and only if the module has collected values in the past.
-# retries: 5
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
# ----------------------------------------------------------------------
# JOBS (data collection sources)
@@ -47,12 +53,13 @@
# predefined parameters. These are:
#
# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# retries: 5 # the JOB's number of restoration attempts
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, tomcat also supports the following:
#
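The tomcat-specific options are also truncated here; assuming the module takes a url to the manager status page plus user/pass credentials, a sketch would be:

  localhost:
    name: 'local'
    url: 'http://127.0.0.1:8080/manager/status?XML=true'
    user: 'tomcat'
    pass: 'change_me'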
diff --git a/conf.d/python.d/varnish.conf b/conf.d/python.d/varnish.conf
index c25f3010f..4b069d514 100644
--- a/conf.d/python.d/varnish.conf
+++ b/conf.d/python.d/varnish.conf
@@ -31,7 +31,13 @@
# If unset, the default for python.d.plugin is used.
# Attempts to restore the service are made once every update_every
# and only if the module has collected values in the past.
-# retries: 5
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
# ----------------------------------------------------------------------
# JOBS (data collection sources)
@@ -47,11 +53,12 @@
# predefined parameters. These are:
#
# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# retries: 5 # the JOB's number of restoration attempts
-#
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
+# ----------------------------------------------------------------------
diff --git a/conf.d/python.d/web_log.conf b/conf.d/python.d/web_log.conf
index cd1f1af00..dd1fff075 100644
--- a/conf.d/python.d/web_log.conf
+++ b/conf.d/python.d/web_log.conf
@@ -31,7 +31,13 @@
# If unset, the default for python.d.plugin is used.
# Attempts to restore the service are made once every update_every
# and only if the module has collected values in the past.
-# retries: 5
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
# ----------------------------------------------------------------------
# JOBS (data collection sources)
@@ -50,12 +56,13 @@
# predefined parameters. These are:
#
# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# retries: 5 # the JOB's number of restoration attempts
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, web_log also supports the following:
#
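The web_log options do not appear in this hunk; assuming path is the key pointing at an access log, a minimal job could be:

  nginx_log:
    name: 'nginx'
    path: '/var/log/nginx/access.log'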