path: root/collectors/python.d.plugin/haproxy
author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-05 11:19:16 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-07-24 09:53:24 +0000
commit     b5f8ee61a7f7e9bd291dd26b0585d03eb686c941 (patch)
tree       d4d31289c39fc00da064a825df13a0b98ce95b10 /collectors/python.d.plugin/haproxy
parent     Adding upstream version 1.44.3. (diff)
download   netdata-b5f8ee61a7f7e9bd291dd26b0585d03eb686c941.tar.xz
           netdata-b5f8ee61a7f7e9bd291dd26b0585d03eb686c941.zip
Adding upstream version 1.46.3.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'collectors/python.d.plugin/haproxy')
-rw-r--r--  collectors/python.d.plugin/haproxy/Makefile.inc      |  13
-rw-r--r--  collectors/python.d.plugin/haproxy/README.md         |  90
-rw-r--r--  collectors/python.d.plugin/haproxy/haproxy.chart.py  | 368
-rw-r--r--  collectors/python.d.plugin/haproxy/haproxy.conf      |  83
-rw-r--r--  collectors/python.d.plugin/haproxy/metadata.yaml     | 322
5 files changed, 0 insertions, 876 deletions
diff --git a/collectors/python.d.plugin/haproxy/Makefile.inc b/collectors/python.d.plugin/haproxy/Makefile.inc
deleted file mode 100644
index ad24deaa0..000000000
--- a/collectors/python.d.plugin/haproxy/Makefile.inc
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# install these files
-dist_python_DATA += haproxy/haproxy.chart.py
-dist_pythonconfig_DATA += haproxy/haproxy.conf
-
-# do not install these files, but include them in the distribution
-dist_noinst_DATA += haproxy/README.md haproxy/Makefile.inc
-
diff --git a/collectors/python.d.plugin/haproxy/README.md b/collectors/python.d.plugin/haproxy/README.md
deleted file mode 100644
index 2fa203f60..000000000
--- a/collectors/python.d.plugin/haproxy/README.md
+++ /dev/null
@@ -1,90 +0,0 @@
-<!--
-title: "HAProxy monitoring with Netdata"
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/haproxy/README.md"
-sidebar_label: "haproxy-python.d.plugin"
-learn_status: "Published"
-learn_topic_type: "References"
-learn_rel_path: "Integrations/Monitor/Webapps"
--->
-
-# HAProxy collector
-
-Monitors frontend and backend metrics such as bytes in, bytes out, current sessions, and current sessions in queue,
-as well as health metrics such as backend server status (a server health check should be configured).
-
-The plugin can obtain data from a URL or a Unix socket.
-
-Requirements:
-
-- The socket must be readable and writable by the `netdata` user.
-- The URL must have `stats uri <path>` present in the haproxy config; otherwise you will get HTTP 503 in the haproxy logs.
-
-It produces:
-
-1. **Frontend** family charts
-
- - Kilobytes in/s
- - Kilobytes out/s
- - Sessions current
- - Sessions in queue current
-
-2. **Backend** family charts
-
- - Kilobytes in/s
- - Kilobytes out/s
- - Sessions current
- - Sessions in queue current
-
-3. **Health** chart
-
- Number of failed servers for every backend (in DOWN state)
-
-## Configuration
-
-Edit the `python.d/haproxy.conf` configuration file using `edit-config` from the Netdata [config
-directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`.
-
-```bash
-cd /etc/netdata # Replace this path with your Netdata config directory, if different
-sudo ./edit-config python.d/haproxy.conf
-```
-
-Sample:
-
-```yaml
-via_url:
- user: 'username' # ONLY IF stats auth is used
- pass: 'password' # ONLY IF stats auth is used
- url: 'http://ip.address:port/url;csv;norefresh'
-```
-
-OR
-
-```yaml
-via_socket:
- socket: 'path/to/haproxy/sock'
-```
-
-If no configuration is given, the module will fail to run.
-
-
-### Troubleshooting
-
-To troubleshoot issues with the `haproxy` module, run the `python.d.plugin` with the debug option enabled. The
-output will show the results of the data collection job, or error messages explaining why the collector isn't working.
-
-First, navigate to your plugins directory; it is usually located at `/usr/libexec/netdata/plugins.d/`. If that's
-not the case on your system, open `netdata.conf` and look for the `plugins directory` setting. Once you're in the
-plugins directory, switch to the `netdata` user.
-
-```bash
-cd /usr/libexec/netdata/plugins.d/
-sudo su -s /bin/bash netdata
-```
-
-Now you can manually run the `haproxy` module in debug mode:
-
-```bash
-./python.d.plugin haproxy debug trace
-```
-
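
As a quick check of the socket requirement above, the following is a minimal sketch (not part of the collector) that mimics what the module does over the Unix socket: it sends `show stat` and reads the CSV reply until haproxy closes the connection. The socket path is a placeholder; use the `stats socket` path from your haproxy config.

```python
import socket

SOCKET_PATH = '/var/run/haproxy/admin.sock'  # placeholder; use your haproxy `stats socket` path


def probe_haproxy_socket(path=SOCKET_PATH):
    """Send 'show stat' and read the CSV reply until haproxy closes the socket."""
    sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    try:
        sock.connect(path)
        sock.sendall(b'show stat\n')  # the same command the collector issues
        chunks = []
        while True:
            chunk = sock.recv(4096)
            if not chunk:  # haproxy closes the connection after replying
                break
            chunks.append(chunk)
        return b''.join(chunks).decode()
    finally:
        sock.close()


if __name__ == '__main__':
    # The first CSV line is the header, starting with '# pxname,svname,...'
    print(probe_haproxy_socket().splitlines()[0])
```

If this fails with a permission error, the socket is not readable and writable by the `netdata` user.
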
diff --git a/collectors/python.d.plugin/haproxy/haproxy.chart.py b/collectors/python.d.plugin/haproxy/haproxy.chart.py
deleted file mode 100644
index f412febb7..000000000
--- a/collectors/python.d.plugin/haproxy/haproxy.chart.py
+++ /dev/null
@@ -1,368 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: haproxy netdata python.d module
-# Author: ilyam8, ktarasz
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from collections import defaultdict
-from re import compile as re_compile
-
-try:
- from urlparse import urlparse
-except ImportError:
- from urllib.parse import urlparse
-
-from bases.FrameworkServices.SocketService import SocketService
-from bases.FrameworkServices.UrlService import UrlService
-
-# charts order (can be overridden if you want fewer charts, or a different order)
-ORDER = [
- 'fbin',
- 'fbout',
- 'fscur',
- 'fqcur',
- 'fhrsp_1xx',
- 'fhrsp_2xx',
- 'fhrsp_3xx',
- 'fhrsp_4xx',
- 'fhrsp_5xx',
- 'fhrsp_other',
- 'fhrsp_total',
- 'bbin',
- 'bbout',
- 'bscur',
- 'bqcur',
- 'bhrsp_1xx',
- 'bhrsp_2xx',
- 'bhrsp_3xx',
- 'bhrsp_4xx',
- 'bhrsp_5xx',
- 'bhrsp_other',
- 'bhrsp_total',
- 'bqtime',
- 'bttime',
- 'brtime',
- 'bctime',
- 'health_sup',
- 'health_sdown',
- 'health_smaint',
- 'health_bdown',
- 'health_idle'
-]
-
-CHARTS = {
- 'fbin': {
- 'options': [None, 'Kilobytes In', 'KiB/s', 'frontend', 'haproxy_f.bin', 'line'],
- 'lines': []
- },
- 'fbout': {
- 'options': [None, 'Kilobytes Out', 'KiB/s', 'frontend', 'haproxy_f.bout', 'line'],
- 'lines': []
- },
- 'fscur': {
- 'options': [None, 'Sessions Active', 'sessions', 'frontend', 'haproxy_f.scur', 'line'],
- 'lines': []
- },
- 'fqcur': {
- 'options': [None, 'Sessions In Queue', 'sessions', 'frontend', 'haproxy_f.qcur', 'line'],
- 'lines': []
- },
- 'fhrsp_1xx': {
- 'options': [None, 'HTTP responses with 1xx code', 'responses/s', 'frontend', 'haproxy_f.hrsp_1xx', 'line'],
- 'lines': []
- },
- 'fhrsp_2xx': {
- 'options': [None, 'HTTP responses with 2xx code', 'responses/s', 'frontend', 'haproxy_f.hrsp_2xx', 'line'],
- 'lines': []
- },
- 'fhrsp_3xx': {
- 'options': [None, 'HTTP responses with 3xx code', 'responses/s', 'frontend', 'haproxy_f.hrsp_3xx', 'line'],
- 'lines': []
- },
- 'fhrsp_4xx': {
- 'options': [None, 'HTTP responses with 4xx code', 'responses/s', 'frontend', 'haproxy_f.hrsp_4xx', 'line'],
- 'lines': []
- },
- 'fhrsp_5xx': {
- 'options': [None, 'HTTP responses with 5xx code', 'responses/s', 'frontend', 'haproxy_f.hrsp_5xx', 'line'],
- 'lines': []
- },
- 'fhrsp_other': {
- 'options': [None, 'HTTP responses with other codes (protocol error)', 'responses/s', 'frontend',
- 'haproxy_f.hrsp_other', 'line'],
- 'lines': []
- },
- 'fhrsp_total': {
- 'options': [None, 'HTTP responses (total)', 'responses/s', 'frontend', 'haproxy_f.hrsp_total', 'line'],
- 'lines': []
- },
- 'bbin': {
- 'options': [None, 'Kilobytes In', 'KiB/s', 'backend', 'haproxy_b.bin', 'line'],
- 'lines': []
- },
- 'bbout': {
- 'options': [None, 'Kilobytes Out', 'KiB/s', 'backend', 'haproxy_b.bout', 'line'],
- 'lines': []
- },
- 'bscur': {
- 'options': [None, 'Sessions Active', 'sessions', 'backend', 'haproxy_b.scur', 'line'],
- 'lines': []
- },
- 'bqcur': {
- 'options': [None, 'Sessions In Queue', 'sessions', 'backend', 'haproxy_b.qcur', 'line'],
- 'lines': []
- },
- 'bhrsp_1xx': {
- 'options': [None, 'HTTP responses with 1xx code', 'responses/s', 'backend', 'haproxy_b.hrsp_1xx', 'line'],
- 'lines': []
- },
- 'bhrsp_2xx': {
- 'options': [None, 'HTTP responses with 2xx code', 'responses/s', 'backend', 'haproxy_b.hrsp_2xx', 'line'],
- 'lines': []
- },
- 'bhrsp_3xx': {
- 'options': [None, 'HTTP responses with 3xx code', 'responses/s', 'backend', 'haproxy_b.hrsp_3xx', 'line'],
- 'lines': []
- },
- 'bhrsp_4xx': {
- 'options': [None, 'HTTP responses with 4xx code', 'responses/s', 'backend', 'haproxy_b.hrsp_4xx', 'line'],
- 'lines': []
- },
- 'bhrsp_5xx': {
- 'options': [None, 'HTTP responses with 5xx code', 'responses/s', 'backend', 'haproxy_b.hrsp_5xx', 'line'],
- 'lines': []
- },
- 'bhrsp_other': {
- 'options': [None, 'HTTP responses with other codes (protocol error)', 'responses/s', 'backend',
- 'haproxy_b.hrsp_other', 'line'],
- 'lines': []
- },
- 'bhrsp_total': {
- 'options': [None, 'HTTP responses (total)', 'responses/s', 'backend', 'haproxy_b.hrsp_total', 'line'],
- 'lines': []
- },
- 'bqtime': {
- 'options': [None, 'The average queue time over the 1024 last requests', 'milliseconds', 'backend',
- 'haproxy_b.qtime', 'line'],
- 'lines': []
- },
- 'bctime': {
- 'options': [None, 'The average connect time over the 1024 last requests', 'milliseconds', 'backend',
- 'haproxy_b.ctime', 'line'],
- 'lines': []
- },
- 'brtime': {
- 'options': [None, 'The average response time over the 1024 last requests', 'milliseconds', 'backend',
- 'haproxy_b.rtime', 'line'],
- 'lines': []
- },
- 'bttime': {
- 'options': [None, 'The average total session time over the 1024 last requests', 'milliseconds', 'backend',
- 'haproxy_b.ttime', 'line'],
- 'lines': []
- },
- 'health_sdown': {
- 'options': [None, 'Backend Servers In DOWN State', 'failed servers', 'health', 'haproxy_hs.down', 'line'],
- 'lines': []
- },
- 'health_sup': {
- 'options': [None, 'Backend Servers In UP State', 'healthy servers', 'health', 'haproxy_hs.up', 'line'],
- 'lines': []
- },
- 'health_smaint': {
- 'options': [None, 'Backend Servers In MAINT State', 'maintenance servers', 'health', 'haproxy_hs.maint', 'line'],
- 'lines': []
- },
- 'health_bdown': {
- 'options': [None, 'Is Backend Failed?', 'boolean', 'health', 'haproxy_hb.down', 'line'],
- 'lines': []
- },
- 'health_idle': {
- 'options': [None, 'The Ratio Of Polling Time Vs Total Time', 'percentage', 'health', 'haproxy.idle', 'line'],
- 'lines': [
- ['idle', None, 'absolute']
- ]
- }
-}
-
-METRICS = {
- 'bin': {'algorithm': 'incremental', 'divisor': 1024},
- 'bout': {'algorithm': 'incremental', 'divisor': 1024},
- 'scur': {'algorithm': 'absolute', 'divisor': 1},
- 'qcur': {'algorithm': 'absolute', 'divisor': 1},
- 'hrsp_1xx': {'algorithm': 'incremental', 'divisor': 1},
- 'hrsp_2xx': {'algorithm': 'incremental', 'divisor': 1},
- 'hrsp_3xx': {'algorithm': 'incremental', 'divisor': 1},
- 'hrsp_4xx': {'algorithm': 'incremental', 'divisor': 1},
- 'hrsp_5xx': {'algorithm': 'incremental', 'divisor': 1},
- 'hrsp_other': {'algorithm': 'incremental', 'divisor': 1}
-}
-
-BACKEND_METRICS = {
- 'qtime': {'algorithm': 'absolute', 'divisor': 1},
- 'ctime': {'algorithm': 'absolute', 'divisor': 1},
- 'rtime': {'algorithm': 'absolute', 'divisor': 1},
- 'ttime': {'algorithm': 'absolute', 'divisor': 1}
-}
-
-REGEX = dict(url=re_compile(r'idle = (?P<idle>[0-9]+)'),
- socket=re_compile(r'Idle_pct: (?P<idle>[0-9]+)'))
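
The two patterns above extract the idle percentage: `Idle_pct` from the socket's `show info` reply, and `idle = N` from the stats page fetched over HTTP. A minimal sketch of what they match, using illustrative excerpts rather than output captured from a live instance:

```python
from re import compile as re_compile

REGEX = dict(url=re_compile(r'idle = (?P<idle>[0-9]+)'),
             socket=re_compile(r'Idle_pct: (?P<idle>[0-9]+)'))

sample_info = 'Name: HAProxy\nIdle_pct: 98\n'  # illustrative 'show info' excerpt
sample_page = '<p>idle = 97 %</p>'             # illustrative stats-page excerpt

print(REGEX['socket'].search(sample_info).groupdict())  # {'idle': '98'}
print(REGEX['url'].search(sample_page).groupdict())     # {'idle': '97'}
```
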
-
-
-# TODO: the code is unreadable
-class Service(UrlService, SocketService):
- def __init__(self, configuration=None, name=None):
- if 'socket' in configuration:
- SocketService.__init__(self, configuration=configuration, name=name)
- self.poll = SocketService
- self.options_ = dict(regex=REGEX['socket'],
- stat='show stat\n'.encode(),
- info='show info\n'.encode())
- else:
- UrlService.__init__(self, configuration=configuration, name=name)
- self.poll = UrlService
- self.options_ = dict(regex=REGEX['url'],
- stat=self.url,
- info=url_remove_params(self.url))
- self.order = ORDER
- self.definitions = CHARTS
-
- def check(self):
- if self.poll.check(self):
- self.create_charts()
- self.info('We are using %s.' % self.poll.__name__)
- return True
- return False
-
- def _get_data(self):
- to_netdata = dict()
- self.request, self.url = self.options_['stat'], self.options_['stat']
- stat_data = self._get_stat_data()
- self.request, self.url = self.options_['info'], self.options_['info']
- info_data = self._get_info_data(regex=self.options_['regex'])
-
- to_netdata.update(stat_data)
- to_netdata.update(info_data)
- return to_netdata or None
-
- def _get_stat_data(self):
- """
- :return: dict
- """
- raw_data = self.poll._get_raw_data(self)
-
- if not raw_data:
- return dict()
-
- raw_data = raw_data.splitlines()
- self.data = parse_data_([dict(zip(raw_data[0].split(','), raw_data[_].split(',')))
- for _ in range(1, len(raw_data))])
- if not self.data:
- return dict()
-
- stat_data = dict()
-
- for frontend in self.data['frontend']:
- for metric in METRICS:
- idx = frontend['# pxname'].replace('.', '_')
- stat_data['_'.join(['frontend', metric, idx])] = frontend.get(metric) or 0
-
- for backend in self.data['backend']:
- name, idx = backend['# pxname'], backend['# pxname'].replace('.', '_')
- stat_data['hsup_' + idx] = len([server for server in self.data['servers']
- if server_status(server, name, 'UP')])
- stat_data['hsdown_' + idx] = len([server for server in self.data['servers']
- if server_status(server, name, 'DOWN')])
- stat_data['hsmaint_' + idx] = len([server for server in self.data['servers']
- if server_status(server, name, 'MAINT')])
- stat_data['hbdown_' + idx] = 1 if backend.get('status') == 'DOWN' else 0
- for metric in BACKEND_METRICS:
- stat_data['_'.join(['backend', metric, idx])] = backend.get(metric) or 0
- hrsp_total = 0
- for metric in METRICS:
- stat_data['_'.join(['backend', metric, idx])] = backend.get(metric) or 0
- if metric.startswith('hrsp_'):
- hrsp_total += int(backend.get(metric) or 0)
- stat_data['_'.join(['backend', 'hrsp_total', idx])] = hrsp_total
- return stat_data
-
- def _get_info_data(self, regex):
- """
- :return: dict
- """
- raw_data = self.poll._get_raw_data(self)
- if not raw_data:
- return dict()
-
- match = regex.search(raw_data)
- return match.groupdict() if match else dict()
-
- @staticmethod
- def _check_raw_data(data):
- """
- Check if all data has been gathered from the socket.
- haproxy closes the connection after replying, so keep reading until recv() returns an empty string.
- :param data: str
- :return: boolean
- """
- return not bool(data)
-
- def create_charts(self):
- for front in self.data['frontend']:
- name, idx = front['# pxname'], front['# pxname'].replace('.', '_')
- for metric in METRICS:
- self.definitions['f' + metric]['lines'].append(['_'.join(['frontend', metric, idx]),
- name, METRICS[metric]['algorithm'], 1,
- METRICS[metric]['divisor']])
- self.definitions['fhrsp_total']['lines'].append(['_'.join(['frontend', 'hrsp_total', idx]),
- name, 'incremental', 1, 1])
- for back in self.data['backend']:
- name, idx = back['# pxname'], back['# pxname'].replace('.', '_')
- for metric in METRICS:
- self.definitions['b' + metric]['lines'].append(['_'.join(['backend', metric, idx]),
- name, METRICS[metric]['algorithm'], 1,
- METRICS[metric]['divisor']])
- self.definitions['bhrsp_total']['lines'].append(['_'.join(['backend', 'hrsp_total', idx]),
- name, 'incremental', 1, 1])
- for metric in BACKEND_METRICS:
- self.definitions['b' + metric]['lines'].append(['_'.join(['backend', metric, idx]),
- name, BACKEND_METRICS[metric]['algorithm'], 1,
- BACKEND_METRICS[metric]['divisor']])
- self.definitions['health_sup']['lines'].append(['hsup_' + idx, name, 'absolute'])
- self.definitions['health_sdown']['lines'].append(['hsdown_' + idx, name, 'absolute'])
- self.definitions['health_smaint']['lines'].append(['hsmaint_' + idx, name, 'absolute'])
- self.definitions['health_bdown']['lines'].append(['hbdown_' + idx, name, 'absolute'])
-
-
-def parse_data_(data):
- def is_backend(backend):
- return backend.get('svname') == 'BACKEND' and backend.get('# pxname') != 'stats'
-
- def is_frontend(frontend):
- return frontend.get('svname') == 'FRONTEND' and frontend.get('# pxname') != 'stats'
-
- def is_server(server):
- return not server.get('svname', '').startswith(('FRONTEND', 'BACKEND'))
-
- if not data:
- return None
-
- result = defaultdict(list)
- for elem in data:
- if is_backend(elem):
- result['backend'].append(elem)
- continue
- elif is_frontend(elem):
- result['frontend'].append(elem)
- continue
- elif is_server(elem):
- result['servers'].append(elem)
-
- return result or None
-
-
-def server_status(server, backend_name, status='DOWN'):
- return server.get('# pxname') == backend_name and server.get('status').partition(' ')[0] == status
-
-
-def url_remove_params(url):
- parsed = urlparse(url or str())
- return '{scheme}://{netloc}{path}'.format(scheme=parsed.scheme, netloc=parsed.netloc, path=parsed.path)
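
To illustrate the two helpers above (assuming they are in scope): `parse_data_` buckets the parsed CSV rows by `svname`, and `url_remove_params` strips the `;csv;norefresh` suffix so the plain stats page can be fetched for the idle metric. The rows below are made up for the example:

```python
# Made-up rows shaped like haproxy's 'show stat' CSV after zipping with the header.
rows = [
    {'# pxname': 'web', 'svname': 'FRONTEND', 'scur': '3'},
    {'# pxname': 'app', 'svname': 'BACKEND', 'scur': '1'},
    {'# pxname': 'app', 'svname': 'srv1', 'status': 'UP'},
]

parsed = parse_data_(rows)
print(sorted(parsed))                  # ['backend', 'frontend', 'servers']
print(parsed['servers'][0]['status'])  # UP

print(url_remove_params('http://127.0.0.1:7000/haproxy_stats;csv;norefresh'))
# http://127.0.0.1:7000/haproxy_stats
```
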
diff --git a/collectors/python.d.plugin/haproxy/haproxy.conf b/collectors/python.d.plugin/haproxy/haproxy.conf
deleted file mode 100644
index 10a0df3c3..000000000
--- a/collectors/python.d.plugin/haproxy/haproxy.conf
+++ /dev/null
@@ -1,83 +0,0 @@
-# netdata python.d.plugin configuration for haproxy
-#
-# This file is in YAML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 1
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed running at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# penalty: yes # the JOB's penalty
-# autodetection_retry: 0 # the JOB's re-check interval in seconds
-#
-# In addition to the above, haproxy also supports the following:
-#
-# IMPORTANT: socket MUST BE readable AND writable by netdata user
-#
-# socket: 'path/to/haproxy/sock'
-#
-# OR
-# url: 'http://<ip.address>:<port>/<url>;csv;norefresh'
-# [user: USERNAME] only if stats auth is used
-# [pass: PASSWORD] only if stats auth is used
-
-# ----------------------------------------------------------------------
-# AUTO-DETECTION JOBS
-# only one of them will run (they have the same name)
-
-#via_url:
-# user : 'admin'
-# pass : 'password'
-# url : 'http://127.0.0.1:7000/haproxy_stats;csv;norefresh'
-
-#via_socket:
-# socket: '/var/run/haproxy/admin.sock'
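
For reference, a `via_url` job fetches the stats page in CSV form, which is what the `;csv;norefresh` suffix requests. A minimal standard-library sketch of that fetch, using the placeholder address and credentials from the commented example above:

```python
from base64 import b64encode
from urllib.request import Request, urlopen

url = 'http://127.0.0.1:7000/haproxy_stats;csv;norefresh'  # placeholder endpoint
req = Request(url)
# Only needed if `stats auth` is enabled in haproxy:
req.add_header('Authorization', 'Basic ' + b64encode(b'admin:password').decode())

with urlopen(req, timeout=5) as resp:
    csv_text = resp.read().decode()

# A haproxy stats CSV starts with the header line '# pxname,svname,...'
print(csv_text.splitlines()[0].startswith('# pxname'))
```
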
diff --git a/collectors/python.d.plugin/haproxy/metadata.yaml b/collectors/python.d.plugin/haproxy/metadata.yaml
deleted file mode 100644
index 82ab37d26..000000000
--- a/collectors/python.d.plugin/haproxy/metadata.yaml
+++ /dev/null
@@ -1,322 +0,0 @@
-# This collector will not appear in documentation, as the go version is preferred,
-# https://github.com/netdata/go.d.plugin/blob/master/modules/haproxy/README.md
-#
-#
-# meta:
-# plugin_name: python.d.plugin
-# module_name: haproxy
-# monitored_instance:
-# name: HAProxy
-# link: 'https://www.haproxy.org/'
-# categories:
-# - data-collection.web-servers-and-web-proxies
-# icon_filename: 'haproxy.png'
-# related_resources:
-# integrations:
-# list: []
-# info_provided_to_referring_integrations:
-# description: ''
-# keywords:
-# - haproxy
-# - tcp
-# - balancer
-# most_popular: false
-# overview:
-# data_collection:
-# metrics_description: 'This collector monitors HAProxy metrics about frontend servers, backend servers, responses and more.'
-# method_description: 'It connects to the HAProxy instance via URL or UNIX socket.'
-# supported_platforms:
-# include: []
-# exclude: []
-# multi_instance: true
-# additional_permissions:
-# description: ''
-# default_behavior:
-# auto_detection:
-# description: ''
-# limits:
-# description: ''
-# performance_impact:
-# description: ''
-# setup:
-# prerequisites:
-# list:
-# - title: 'HAProxy setup for socket'
-# description: 'Socket must be readable and writable by the netdata user.'
-# - title: 'HAProxy setup for URL'
-# description: 'URL must have `stats uri <path>` present in the haproxy config, otherwise you will get HTTP 503 in the haproxy logs.'
-# configuration:
-# file:
-# name: python.d/haproxy.conf
-# options:
-# description: |
-# There are 2 sections:
-
-# * Global variables
-# * One or more JOBS that can define multiple different instances to monitor.
-
-# The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
-# Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
-# Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
-# folding:
-# title: "Config options"
-# enabled: true
-# list:
-# - name: update_every
-# description: Sets the default data collection frequency.
-# default_value: 5
-# required: false
-# - name: priority
-# description: Controls the order of charts at the netdata dashboard.
-# default_value: 60000
-# required: false
-# - name: autodetection_retry
-# description: Sets the job re-check interval in seconds.
-# default_value: 0
-# required: false
-# - name: penalty
-# description: Indicates whether to apply penalty to update_every in case of failures.
-# default_value: yes
-# required: false
-# - name: name
-# description: Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works.
-# default_value: ''
-# required: false
-# - name: user
-# description: Username if stats auth is used.
-# default_value: ''
-# required: false
-# - name: pass
-# description: Password if stats auth is used.
-# default_value: ''
-# required: false
-# - name: url
-# description: URL to the haproxy_stats endpoint. Also make sure the parameters `csv` and `norefresh` are provided.
-# default_value: ''
-# required: false
-# - name: socket
-# description: Unix socket path to the haproxy sock file.
-# default_value: ''
-# required: false
-# examples:
-# folding:
-# enabled: true
-# title: "Config"
-# list:
-# - name: URL method
-# description: Use a URL to specify the endpoint to check for haproxy statistics.
-# config: |
-# via_url:
-# user: 'username' # ONLY IF stats auth is used
-# pass: 'password' # ONLY IF stats auth is used
-# url: 'http://ip.address:port/url;csv;norefresh'
-# - name: Local socket
-# description: Use a local socket to check for haproxy statistics.
-# config: |
-# via_socket:
-# socket: 'path/to/haproxy/sock'
-# troubleshooting:
-# problems:
-# list: []
-# alerts:
-# - name: haproxy_backend_server_status
-# link: https://github.com/netdata/netdata/blob/master/health/health.d/haproxy.conf
-# metric: haproxy_hs.down
-# info: average number of failed haproxy backend servers over the last 10 seconds
-# - name: haproxy_backend_status
-# link: https://github.com/netdata/netdata/blob/master/health/health.d/haproxy.conf
-# metric: haproxy_hb.down
-# info: average number of failed haproxy backends over the last 10 seconds
-# metrics:
-# folding:
-# title: Metrics
-# enabled: false
-# description: ""
-# availability: []
-# scopes:
-# - name: global
-# description: 'These metrics refer to the entire monitored application.'
-# labels: []
-# metrics:
-# - name: haproxy_f.bin
-# description: Kilobytes In
-# unit: "KiB/s"
-# chart_type: line
-# dimensions:
-# - name: a dimension per frontend server
-# - name: haproxy_f.bout
-# description: Kilobytes Out
-# unit: "KiB/s"
-# chart_type: line
-# dimensions:
-# - name: a dimension per frontend server
-# - name: haproxy_f.scur
-# description: Sessions Active
-# unit: "sessions"
-# chart_type: line
-# dimensions:
-# - name: a dimension per frontend server
-# - name: haproxy_f.qcur
-# description: Sessions In Queue
-# unit: "sessions"
-# chart_type: line
-# dimensions:
-# - name: a dimension per frontend server
-# - name: haproxy_f.hrsp_1xx
-# description: HTTP responses with 1xx code
-# unit: "responses/s"
-# chart_type: line
-# dimensions:
-# - name: a dimension per frontend server
-# - name: haproxy_f.hrsp_2xx
-# description: HTTP responses with 2xx code
-# unit: "responses/s"
-# chart_type: line
-# dimensions:
-# - name: a dimension per frontend server
-# - name: haproxy_f.hrsp_3xx
-# description: HTTP responses with 3xx code
-# unit: "responses/s"
-# chart_type: line
-# dimensions:
-# - name: a dimension per frontend server
-# - name: haproxy_f.hrsp_4xx
-# description: HTTP responses with 4xx code
-# unit: "responses/s"
-# chart_type: line
-# dimensions:
-# - name: a dimension per frontend server
-# - name: haproxy_f.hrsp_5xx
-# description: HTTP responses with 5xx code
-# unit: "responses/s"
-# chart_type: line
-# dimensions:
-# - name: a dimension per frontend server
-# - name: haproxy_f.hrsp_other
-# description: HTTP responses with other codes (protocol error)
-# unit: "responses/s"
-# chart_type: line
-# dimensions:
-# - name: a dimension per frontend server
-# - name: haproxy_f.hrsp_total
-# description: HTTP responses (total)
-# unit: "responses/s"
-# chart_type: line
-# dimensions:
-# - name: a dimension per frontend server
-# - name: haproxy_b.bin
-# description: Kilobytes In
-# unit: "KiB/s"
-# chart_type: line
-# dimensions:
-# - name: a dimension per backend server
-# - name: haproxy_b.bout
-# description: Kilobytes Out
-# unit: "KiB/s"
-# chart_type: line
-# dimensions:
-# - name: a dimension per backend server
-# - name: haproxy_b.scur
-# description: Sessions Active
-# unit: "sessions"
-# chart_type: line
-# dimensions:
-# - name: a dimension per backend server
-# - name: haproxy_b.qcur
-# description: Sessions In Queue
-# unit: "sessions"
-# chart_type: line
-# dimensions:
-# - name: a dimension per backend server
-# - name: haproxy_b.hrsp_1xx
-# description: HTTP responses with 1xx code
-# unit: "responses/s"
-# chart_type: line
-# dimensions:
-# - name: a dimension per backend server
-# - name: haproxy_b.hrsp_2xx
-# description: HTTP responses with 2xx code
-# unit: "responses/s"
-# chart_type: line
-# dimensions:
-# - name: a dimension per backend server
-# - name: haproxy_b.hrsp_3xx
-# description: HTTP responses with 3xx code
-# unit: "responses/s"
-# chart_type: line
-# dimensions:
-# - name: a dimension per backend server
-# - name: haproxy_b.hrsp_4xx
-# description: HTTP responses with 4xx code
-# unit: "responses/s"
-# chart_type: line
-# dimensions:
-# - name: a dimension per backend server
-# - name: haproxy_b.hrsp_5xx
-# description: HTTP responses with 5xx code
-# unit: "responses/s"
-# chart_type: line
-# dimensions:
-# - name: a dimension per backend server
-# - name: haproxy_b.hrsp_other
-# description: HTTP responses with other codes (protocol error)
-# unit: "responses/s"
-# chart_type: line
-# dimensions:
-# - name: a dimension per backend server
-# - name: haproxy_b.hrsp_total
-# description: HTTP responses (total)
-# unit: "responses/s"
-# chart_type: line
-# dimensions:
-# - name: a dimension per backend server
-# - name: haproxy_b.qtime
-# description: The average queue time over the 1024 last requests
-# unit: "milliseconds"
-# chart_type: line
-# dimensions:
-# - name: a dimension per backend server
-# - name: haproxy_b.ctime
-# description: The average connect time over the 1024 last requests
-# unit: "milliseconds"
-# chart_type: line
-# dimensions:
-# - name: a dimension per backend server
-# - name: haproxy_b.rtime
-# description: The average response time over the 1024 last requests
-# unit: "milliseconds"
-# chart_type: line
-# dimensions:
-# - name: a dimension per backend server
-# - name: haproxy_b.ttime
-# description: The average total session time over the 1024 last requests
-# unit: "milliseconds"
-# chart_type: line
-# dimensions:
-# - name: a dimension per backend server
-# - name: haproxy_hs.down
-# description: Backend Servers In DOWN State
-# unit: "failed servers"
-# chart_type: line
-# dimensions:
-# - name: a dimension per backend server
-# - name: haproxy_hs.up
-# description: Backend Servers In UP State
-# unit: "health servers"
-# chart_type: line
-# dimensions:
-# - name: a dimension per backend server
-# - name: haproxy_hb.down
-# description: Is Backend Failed?
-# unit: "boolean"
-# chart_type: line
-# dimensions:
-# - name: a dimension per backend server
-# - name: haproxy.idle
-# description: The Ratio Of Polling Time Vs Total Time
-# unit: "percentage"
-# chart_type: line
-# dimensions:
-# - name: idle