# netdata python.d.plugin configuration for haproxy
#
# This file is in YAML format. Generally, the format is:
#
# name: value
#
# There are 2 sections:
#  - global variables
#  - one or more JOBS
#
# JOBS allow you to collect values from multiple sources.
# Each source will have its own set of charts.
#
# JOB parameters have to be indented (using spaces only, example below).
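#
# For instance, a minimal job (the name and value here are hypothetical)
# would look like this, with its parameters indented using spaces:
#
# myjob:
#     update_every: 5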

# ----------------------------------------------------------------------
# Global Variables
# These variables set the defaults for all JOBs; however, each JOB
# may define its own, overriding the defaults.

# update_every sets the default data collection frequency.
# If unset, the python.d.plugin default is used.
# update_every: 1

# priority controls the order of charts on the netdata dashboard.
# Lower numbers move the charts towards the top of the page.
# If unset, the default for python.d.plugin is used.
# priority: 60000

# retries sets the number of retries to be made in case of failures.
# If unset, the default for python.d.plugin is used.
# Attempts to restore the service are made once every update_every
# seconds, and only if the module has collected values in the past.
# retries: 60

# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if the check fails; attempts to start it
# are made once every autodetection_retry seconds.
# This feature is disabled by default.
# autodetection_retry: 0

# ----------------------------------------------------------------------
# JOBS (data collection sources)
#
# The default JOBS share the same *name*. JOBS with the same name
# are mutually exclusive. Only one of them will be allowed to run at
# any given time. This allows autodetection to try several alternatives
# and pick the one that works.
#
# Any number of jobs is supported.
#
# All python.d.plugin JOBS (for all its modules) support a set of
# predefined parameters. These are:
#
# job_name:
#     name: myname            # the JOB's name as it will appear on the
#                             # dashboard (defaults to the job_name)
#                             # JOBs sharing a name are mutually exclusive
#     update_every: 1         # the JOB's data collection frequency
#     priority: 60000         # the JOB's order on the dashboard
#     retries: 60             # the JOB's number of restoration attempts
#     autodetection_retry: 0  # the JOB's re-check interval in seconds
#
# In addition to the above, haproxy also supports the following:
#
# IMPORTANT: the socket MUST BE readable AND writable by the netdata user
#
#     socket: 'path/to/haproxy/sock'
#
#  OR
#     url: 'http://<ip.address>:<port>/<url>;csv;norefresh'
#     [user: USERNAME] only if stats auth is used
#     [pass: PASSWORD] only if stats auth is used
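#
# For illustration, the haproxy side of either method could be set up
# roughly like this in haproxy.cfg; the socket path, port, stats URI,
# and group below are assumptions chosen to match the auto-detection
# jobs further down, not values this module requires:
#
#   global
#       stats socket /var/run/haproxy/admin.sock mode 660 level admin group netdata
#
#   listen stats
#       bind 127.0.0.1:7000
#       stats enable
#       stats uri /haproxy_stats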

# ----------------------------------------------------------------------
# AUTO-DETECTION JOBS
# only one of them will run (they have the same name)

#via_url:
#    user: 'admin'
#    pass: 'password'
#    url: 'http://127.0.0.1:7000/haproxy_stats;csv;norefresh'

#via_socket:
#    socket: '/var/run/haproxy/admin.sock'
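
# ----------------------------------------------------------------------
# ADDITIONAL EXAMPLE
# A sketch combining the predefined JOB parameters with the haproxy
# parameters; the job name, credentials, and address are placeholders,
# not defaults shipped with this module:

#remote_lb:
#    name: 'remote_lb'
#    update_every: 5
#    autodetection_retry: 30
#    user: 'admin'
#    pass: 'password'
#    url: 'http://10.0.0.2:7000/haproxy_stats;csv;norefresh'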