From 1e6c93250172946eeb38e94a92a1fd12c9d3011e Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Wed, 7 Nov 2018 13:22:44 +0100
Subject: Merging upstream version 1.11.0+dfsg.

Signed-off-by: Daniel Baumann
---
 conf.d/python.d/haproxy.conf | 85 --------------------------------------------
 1 file changed, 85 deletions(-)
 delete mode 100644 conf.d/python.d/haproxy.conf

diff --git a/conf.d/python.d/haproxy.conf b/conf.d/python.d/haproxy.conf
deleted file mode 100644
index a40dd76a5..000000000
--- a/conf.d/python.d/haproxy.conf
+++ /dev/null
@@ -1,85 +0,0 @@
-# netdata python.d.plugin configuration for haproxy
-#
-# This file is in YaML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-#  - global variables
-#  - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 1
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed running at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-#     name: myname            # the JOB's name as it will appear at the
-#                             # dashboard (by default is the job_name)
-#                             # JOBs sharing a name are mutually exclusive
-#     update_every: 1         # the JOB's data collection frequency
-#     priority: 60000         # the JOB's order on the dashboard
-#     retries: 60             # the JOB's number of restoration attempts
-#     autodetection_retry: 0  # the JOB's re-check interval in seconds
-#
-# Additionally to the above, haproxy also supports the following:
-#
-# IMPORTANT: socket MUST BE readable AND writable by netdata user
-#
-# socket: 'path/to/haproxy/sock'
-#
-# OR
-# url: 'http://<ip.address>:<port>/<url>;csv;norefresh'
-#   [user: USERNAME] only if stats auth is used
-#   [pass: PASSWORD] only if stats auth is used
-
-# ----------------------------------------------------------------------
-# AUTO-DETECTION JOBS
-# only one of them will run (they have the same name)
-
-#via_url:
-#  user       : 'admin'
-#  pass       : 'password'
-#  url        : 'http://127.0.0.1:7000/haproxy_stats;csv;norefresh'
-
-#via_socket:
-#  socket: '/var/run/haproxy/admin.sock'
--
cgit v1.2.3
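For reference, the file removed above documented two mutually exclusive ways
for the collector to reach HAProxy: the admin socket, or the HTTP stats
endpoint in CSV form. A minimal sketch of such a job file follows; it simply
uncomments the file's own autodetection examples, so the address, credentials,
and socket path are the placeholder values from that file, not a real
deployment.

# Poll the HTTP stats endpoint; user/pass are needed only if stats auth
# is enabled in the haproxy frontend.
via_url:
  user: 'admin'
  pass: 'password'
  url: 'http://127.0.0.1:7000/haproxy_stats;csv;norefresh'

# Or poll the admin socket, which must be readable AND writable by the
# netdata user.
via_socket:
  socket: '/var/run/haproxy/admin.sock'

Per the file's own comments, jobs sharing a name are mutually exclusive:
autodetection tries the alternatives and keeps the one that works, so only
one of the two jobs ends up running.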