author     Daniel Baumann <daniel.baumann@progress-linux.org>  2022-06-09 04:52:47 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2022-06-09 04:52:57 +0000
commit     00151562145df50cc65e9902d52d5fa77f89fe50 (patch)
tree       2737716802f6725a5074d606ec8fe5422c58a83c /collectors/python.d.plugin
parent     Releasing debian version 1.34.1-1. (diff)
Merging upstream version 1.35.0.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'collectors/python.d.plugin')
-rw-r--r--  collectors/python.d.plugin/Makefile.am  16
-rw-r--r--  collectors/python.d.plugin/README.md  3
-rw-r--r--  collectors/python.d.plugin/alarms/README.md  5
-rw-r--r--  collectors/python.d.plugin/alarms/alarms.chart.py  7
-rw-r--r--  collectors/python.d.plugin/alarms/alarms.conf  3
-rw-r--r--  collectors/python.d.plugin/anomalies/README.md  2
-rw-r--r--  collectors/python.d.plugin/apache/Makefile.inc  13
-rw-r--r--  collectors/python.d.plugin/apache/README.md  82
-rw-r--r--  collectors/python.d.plugin/apache/apache.chart.py  159
-rw-r--r--  collectors/python.d.plugin/apache/apache.conf  85
-rw-r--r--  collectors/python.d.plugin/couchdb/Makefile.inc  13
-rw-r--r--  collectors/python.d.plugin/couchdb/README.md  53
-rw-r--r--  collectors/python.d.plugin/couchdb/couchdb.chart.py  398
-rw-r--r--  collectors/python.d.plugin/couchdb/couchdb.conf  89
-rw-r--r--  collectors/python.d.plugin/dns_query_time/Makefile.inc  13
-rw-r--r--  collectors/python.d.plugin/dns_query_time/README.md  29
-rw-r--r--  collectors/python.d.plugin/dns_query_time/dns_query_time.chart.py  149
-rw-r--r--  collectors/python.d.plugin/dns_query_time/dns_query_time.conf  69
-rw-r--r--  collectors/python.d.plugin/dnsdist/Makefile.inc  13
-rw-r--r--  collectors/python.d.plugin/dnsdist/README.md  72
-rw-r--r--  collectors/python.d.plugin/dnsdist/dnsdist.chart.py  131
-rw-r--r--  collectors/python.d.plugin/dnsdist/dnsdist.conf  83
-rw-r--r--  collectors/python.d.plugin/elasticsearch/Makefile.inc  13
-rw-r--r--  collectors/python.d.plugin/elasticsearch/README.md  94
-rw-r--r--  collectors/python.d.plugin/elasticsearch/elasticsearch.chart.py  808
-rw-r--r--  collectors/python.d.plugin/elasticsearch/elasticsearch.conf  83
-rw-r--r--  collectors/python.d.plugin/energid/Makefile.inc  13
-rw-r--r--  collectors/python.d.plugin/energid/README.md  77
-rw-r--r--  collectors/python.d.plugin/energid/energid.chart.py  163
-rw-r--r--  collectors/python.d.plugin/energid/energid.conf  90
-rw-r--r--  collectors/python.d.plugin/freeradius/Makefile.inc  13
-rw-r--r--  collectors/python.d.plugin/freeradius/README.md  90
-rw-r--r--  collectors/python.d.plugin/freeradius/freeradius.chart.py  177
-rw-r--r--  collectors/python.d.plugin/freeradius/freeradius.conf  80
-rw-r--r--  collectors/python.d.plugin/httpcheck/Makefile.inc  13
-rw-r--r--  collectors/python.d.plugin/httpcheck/README.md  59
-rw-r--r--  collectors/python.d.plugin/httpcheck/httpcheck.chart.py  125
-rw-r--r--  collectors/python.d.plugin/httpcheck/httpcheck.conf  107
-rw-r--r--  collectors/python.d.plugin/isc_dhcpd/Makefile.inc  13
-rw-r--r--  collectors/python.d.plugin/isc_dhcpd/README.md  57
-rw-r--r--  collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.chart.py  269
-rw-r--r--  collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.conf  80
-rw-r--r--  collectors/python.d.plugin/mysql/Makefile.inc  13
-rw-r--r--  collectors/python.d.plugin/mysql/README.md  396
-rw-r--r--  collectors/python.d.plugin/mysql/mysql.chart.py  976
-rw-r--r--  collectors/python.d.plugin/mysql/mysql.conf  293
-rw-r--r--  collectors/python.d.plugin/nginx/Makefile.inc  13
-rw-r--r--  collectors/python.d.plugin/nginx/README.md  65
-rw-r--r--  collectors/python.d.plugin/nginx/nginx.chart.py  71
-rw-r--r--  collectors/python.d.plugin/nginx/nginx.conf  107
-rw-r--r--  collectors/python.d.plugin/phpfpm/Makefile.inc  13
-rw-r--r--  collectors/python.d.plugin/phpfpm/README.md  51
-rw-r--r--  collectors/python.d.plugin/phpfpm/phpfpm.chart.py  174
-rw-r--r--  collectors/python.d.plugin/phpfpm/phpfpm.conf  88
-rw-r--r--  collectors/python.d.plugin/portcheck/Makefile.inc  13
-rw-r--r--  collectors/python.d.plugin/portcheck/README.md  52
-rw-r--r--  collectors/python.d.plugin/portcheck/portcheck.chart.py  157
-rw-r--r--  collectors/python.d.plugin/portcheck/portcheck.conf  74
-rw-r--r--  collectors/python.d.plugin/powerdns/Makefile.inc  13
-rw-r--r--  collectors/python.d.plugin/powerdns/README.md  104
-rw-r--r--  collectors/python.d.plugin/powerdns/powerdns.chart.py  153
-rw-r--r--  collectors/python.d.plugin/powerdns/powerdns.conf  76
-rw-r--r--  collectors/python.d.plugin/redis/Makefile.inc  13
-rw-r--r--  collectors/python.d.plugin/redis/README.md  64
-rw-r--r--  collectors/python.d.plugin/redis/redis.chart.py  268
-rw-r--r--  collectors/python.d.plugin/redis/redis.conf  110
-rw-r--r--  collectors/python.d.plugin/web_log/Makefile.inc  13
-rw-r--r--  collectors/python.d.plugin/web_log/README.md  219
-rw-r--r--  collectors/python.d.plugin/web_log/web_log.chart.py  1194
-rw-r--r--  collectors/python.d.plugin/web_log/web_log.conf  219
70 files changed, 17 insertions, 8896 deletions
diff --git a/collectors/python.d.plugin/Makefile.am b/collectors/python.d.plugin/Makefile.am
index 38eb90f7..667f1627 100644
--- a/collectors/python.d.plugin/Makefile.am
+++ b/collectors/python.d.plugin/Makefile.am
@@ -43,41 +43,30 @@ include adaptec_raid/Makefile.inc
include alarms/Makefile.inc
include am2320/Makefile.inc
include anomalies/Makefile.inc
-include apache/Makefile.inc
include beanstalk/Makefile.inc
include bind_rndc/Makefile.inc
include boinc/Makefile.inc
include ceph/Makefile.inc
include changefinder/Makefile.inc
include chrony/Makefile.inc
-include couchdb/Makefile.inc
-include dnsdist/Makefile.inc
-include dns_query_time/Makefile.inc
include dockerd/Makefile.inc
include dovecot/Makefile.inc
-include elasticsearch/Makefile.inc
-include energid/Makefile.inc
include example/Makefile.inc
include exim/Makefile.inc
include fail2ban/Makefile.inc
-include freeradius/Makefile.inc
include gearman/Makefile.inc
include go_expvar/Makefile.inc
include haproxy/Makefile.inc
include hddtemp/Makefile.inc
-include httpcheck/Makefile.inc
include hpssa/Makefile.inc
include icecast/Makefile.inc
include ipfs/Makefile.inc
-include isc_dhcpd/Makefile.inc
include litespeed/Makefile.inc
include logind/Makefile.inc
include megacli/Makefile.inc
include memcached/Makefile.inc
include mongodb/Makefile.inc
include monit/Makefile.inc
-include mysql/Makefile.inc
-include nginx/Makefile.inc
include nginx_plus/Makefile.inc
include nvidia_smi/Makefile.inc
include nsd/Makefile.inc
@@ -85,15 +74,11 @@ include ntpd/Makefile.inc
include ovpn_status_log/Makefile.inc
include openldap/Makefile.inc
include oracledb/Makefile.inc
-include phpfpm/Makefile.inc
-include portcheck/Makefile.inc
include postfix/Makefile.inc
include postgres/Makefile.inc
-include powerdns/Makefile.inc
include proxysql/Makefile.inc
include puppet/Makefile.inc
include rabbitmq/Makefile.inc
-include redis/Makefile.inc
include rethinkdbs/Makefile.inc
include retroshare/Makefile.inc
include riakkv/Makefile.inc
@@ -109,7 +94,6 @@ include traefik/Makefile.inc
include uwsgi/Makefile.inc
include varnish/Makefile.inc
include w1sensor/Makefile.inc
-include web_log/Makefile.inc
include zscores/Makefile.inc
pythonmodulesdir=$(pythondir)/python_modules
diff --git a/collectors/python.d.plugin/README.md b/collectors/python.d.plugin/README.md
index 7c060f81..2f5ebfcb 100644
--- a/collectors/python.d.plugin/README.md
+++ b/collectors/python.d.plugin/README.md
@@ -227,8 +227,7 @@ For additional security it uses python `subprocess.Popen` (without `shell=True`
_Examples: `apache`, `nginx`, `tomcat`_
-_Multiple Endpoints (urls) Examples: [`rabbitmq`](/collectors/python.d.plugin/rabbitmq/README.md) (simpler) ,
-[`elasticsearch`](/collectors/python.d.plugin/elasticsearch/README.md) (threaded)_
+_Multiple Endpoints (urls) Examples: [`rabbitmq`](/collectors/python.d.plugin/rabbitmq/README.md) (simpler).
_Variables from config file_: `url`, `user`, `pass`.
diff --git a/collectors/python.d.plugin/alarms/README.md b/collectors/python.d.plugin/alarms/README.md
index cd5e1b81..ee1e5997 100644
--- a/collectors/python.d.plugin/alarms/README.md
+++ b/collectors/python.d.plugin/alarms/README.md
@@ -53,6 +53,11 @@ local:
CRITICAL: 2
# set to true to include a chart with calculated alarm values over time
collect_alarm_values: false
+ # define the type of chart for plotting status over time e.g. 'line' or 'stacked'
+ alarm_status_chart_type: 'line'
+ # a "," separated list of words you want to filter alarm names for. For example 'cpu,load' would filter for only
+ # alarms with "cpu" or "load" in alarm name. Default includes all.
+ alarm_contains_words: ''
```
It will default to pulling all alarms at each time step from the Netdata rest api at `http://127.0.0.1:19999/api/v1/alarms?all`
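For reference, a job that combines the options documented above might look like the following sketch. The URL is the documented default endpoint; the chart type and filter values are illustrative only, not taken from a real deployment:

```yaml
local:
  url: 'http://127.0.0.1:19999/api/v1/alarms?all'
  collect_alarm_values: false
  alarm_status_chart_type: 'stacked'
  alarm_contains_words: 'cpu,load'
```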
diff --git a/collectors/python.d.plugin/alarms/alarms.chart.py b/collectors/python.d.plugin/alarms/alarms.chart.py
index 1eec4045..314b0e7a 100644
--- a/collectors/python.d.plugin/alarms/alarms.chart.py
+++ b/collectors/python.d.plugin/alarms/alarms.chart.py
@@ -38,7 +38,7 @@ DEFAULT_STATUS_MAP = {'CLEAR': 0, 'WARNING': 1, 'CRITICAL': 2}
DEFAULT_URL = 'http://127.0.0.1:19999/api/v1/alarms?all'
DEFAULT_COLLECT_ALARM_VALUES = False
DEFAULT_ALARM_STATUS_CHART_TYPE = 'line'
-
+DEFAULT_ALARM_CONTAINS_WORDS = ''
class Service(UrlService):
def __init__(self, configuration=None, name=None):
@@ -49,6 +49,8 @@ class Service(UrlService):
self.url = self.configuration.get('url', DEFAULT_URL)
self.collect_alarm_values = bool(self.configuration.get('collect_alarm_values', DEFAULT_COLLECT_ALARM_VALUES))
self.collected_dims = {'alarms': set(), 'values': set()}
+ self.alarm_contains_words = self.configuration.get('alarm_contains_words', DEFAULT_ALARM_CONTAINS_WORDS)
+ self.alarm_contains_words_list = [alarm_contains_word.lstrip(' ').rstrip(' ') for alarm_contains_word in self.alarm_contains_words.split(',')]
def _get_data(self):
raw_data = self._get_raw_data()
@@ -57,6 +59,9 @@ class Service(UrlService):
raw_data = loads(raw_data)
alarms = raw_data.get('alarms', {})
+ if self.alarm_contains_words != '':
+ alarms = {alarm_name: alarms[alarm_name] for alarm_name in alarms for alarm_contains_word in
+ self.alarm_contains_words_list if alarm_contains_word in alarm_name}
data = {a: self.sm[alarms[a]['status']] for a in alarms if alarms[a]['status'] in self.sm}
self.update_charts('alarms', data)
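The filtering comprehension added above is dense; the standalone sketch below reproduces the same word-matching behaviour outside the collector. `filter_alarms` is a hypothetical helper name and the alarm names and statuses are made up for illustration:

```python
# Minimal sketch of the alarm-name filter shown in the hunk above; sample data is illustrative.
def filter_alarms(alarms, contains_words):
    """Keep alarms whose name contains any of the comma-separated words; '' keeps everything."""
    if contains_words == '':
        return alarms
    words = [word.strip(' ') for word in contains_words.split(',')]
    return {name: alarms[name] for name in alarms for word in words if word in name}


alarms = {
    'cpu_usage': {'status': 'WARNING'},
    'load_average_15': {'status': 'CLEAR'},
    'disk_space_usage': {'status': 'CLEAR'},
}
print(filter_alarms(alarms, 'cpu,load'))  # keeps 'cpu_usage' and 'load_average_15'
```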
diff --git a/collectors/python.d.plugin/alarms/alarms.conf b/collectors/python.d.plugin/alarms/alarms.conf
index 5e83d8f5..cd48d441 100644
--- a/collectors/python.d.plugin/alarms/alarms.conf
+++ b/collectors/python.d.plugin/alarms/alarms.conf
@@ -52,3 +52,6 @@ local:
collect_alarm_values: false
# define the type of chart for plotting status over time e.g. 'line' or 'stacked'
alarm_status_chart_type: 'line'
+ # a "," separated list of words you want to filter alarm names for. For example 'cpu,load' would filter for only
+ # alarms with "cpu" or "load" in alarm name. Default includes all.
+ alarm_contains_words: ''
diff --git a/collectors/python.d.plugin/anomalies/README.md b/collectors/python.d.plugin/anomalies/README.md
index 32e79a82..aaf39ab9 100644
--- a/collectors/python.d.plugin/anomalies/README.md
+++ b/collectors/python.d.plugin/anomalies/README.md
@@ -7,6 +7,8 @@ sidebar_url: Anomalies
# Anomaly detection with Netdata
+**Note**: Check out the [Netdata Anomaly Advisor](https://learn.netdata.cloud/docs/cloud/insights/anomaly-advisor) for a more native anomaly detection experience within Netdata.
+
This collector uses the Python [PyOD](https://pyod.readthedocs.io/en/latest/index.html) library to perform unsupervised [anomaly detection](https://en.wikipedia.org/wiki/Anomaly_detection) on your Netdata charts and/or dimensions.
Instead of this collector just _collecting_ data, it also does some computation on the data it collects to return an anomaly probability and anomaly flag for each chart or custom model you define. This computation consists of a **train** function that runs every `train_n_secs` to train the ML models to learn what 'normal' typically looks like on your node. At each iteration there is also a **predict** function that uses the latest trained models and most recent metrics to produce an anomaly probability and anomaly flag for each chart or custom model you define.
diff --git a/collectors/python.d.plugin/apache/Makefile.inc b/collectors/python.d.plugin/apache/Makefile.inc
deleted file mode 100644
index 70a42155..00000000
--- a/collectors/python.d.plugin/apache/Makefile.inc
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# install these files
-dist_python_DATA += apache/apache.chart.py
-dist_pythonconfig_DATA += apache/apache.conf
-
-# do not install these files, but include them in the distribution
-dist_noinst_DATA += apache/README.md apache/Makefile.inc
-
diff --git a/collectors/python.d.plugin/apache/README.md b/collectors/python.d.plugin/apache/README.md
deleted file mode 100644
index c6086835..00000000
--- a/collectors/python.d.plugin/apache/README.md
+++ /dev/null
@@ -1,82 +0,0 @@
-<!--
-title: "Apache monitoring with Netdata"
-custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/apache/README.md
-sidebar_label: "Apache"
--->
-
-# Apache monitoring with Netdata
-
-Monitors one or more Apache servers depending on configuration.
-
-## Requirements
-
-- apache with enabled `mod_status`
-
-It produces the following charts:
-
-1. **Requests** in requests/s
-
- - requests
-
-2. **Connections**
-
- - connections
-
-3. **Async Connections**
-
- - keepalive
- - closing
- - writing
-
-4. **Bandwidth** in kilobytes/s
-
- - sent
-
-5. **Workers**
-
- - idle
- - busy
-
-6. **Lifetime Avg. Requests/s** in requests/s
-
- - requests_sec
-
-7. **Lifetime Avg. Bandwidth/s** in kilobytes/s
-
- - size_sec
-
-8. **Lifetime Avg. Response Size** in bytes/request
-
- - size_req
-
-## Configuration
-
-Edit the `python.d/apache.conf` configuration file using `edit-config` from the Netdata [config
-directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
-
-```bash
-cd /etc/netdata # Replace this path with your Netdata config directory, if different
-sudo ./edit-config python.d/apache.conf
-```
-
-Needs only `url` to server's `server-status?auto`
-
-Example for two servers:
-
-```yaml
-update_every : 10
-priority : 90100
-
-local:
- url : 'http://localhost/server-status?auto'
-
-remote:
- url : 'http://www.apache.org/server-status?auto'
- update_every : 5
-```
-
-Without configuration, module attempts to connect to `http://localhost/server-status?auto`
-
----
-
-
diff --git a/collectors/python.d.plugin/apache/apache.chart.py b/collectors/python.d.plugin/apache/apache.chart.py
deleted file mode 100644
index ceac9ecd..00000000
--- a/collectors/python.d.plugin/apache/apache.chart.py
+++ /dev/null
@@ -1,159 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: apache netdata python.d module
-# Author: Pawel Krupa (paulfantom)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from bases.FrameworkServices.UrlService import UrlService
-
-ORDER = [
- 'requests',
- 'connections',
- 'conns_async',
- 'net',
- 'workers',
- 'reqpersec',
- 'bytespersec',
- 'bytesperreq',
-]
-
-CHARTS = {
- 'bytesperreq': {
- 'options': [None, 'Lifetime Avg. Request Size', 'KiB',
- 'statistics', 'apache.bytesperreq', 'area'],
- 'lines': [
- ['size_req', 'size', 'absolute', 1, 1024 * 100000]
- ]},
- 'workers': {
- 'options': [None, 'Workers', 'workers', 'workers', 'apache.workers', 'stacked'],
- 'lines': [
- ['idle'],
- ['busy'],
- ]},
- 'reqpersec': {
- 'options': [None, 'Lifetime Avg. Requests/s', 'requests/s', 'statistics',
- 'apache.reqpersec', 'area'],
- 'lines': [
- ['requests_sec', 'requests', 'absolute', 1, 100000]
- ]},
- 'bytespersec': {
- 'options': [None, 'Lifetime Avg. Bandwidth/s', 'kilobits/s', 'statistics',
- 'apache.bytespersec', 'area'],
- 'lines': [
- ['size_sec', None, 'absolute', 8, 1000 * 100000]
- ]},
- 'requests': {
- 'options': [None, 'Requests', 'requests/s', 'requests', 'apache.requests', 'line'],
- 'lines': [
- ['requests', None, 'incremental']
- ]},
- 'net': {
- 'options': [None, 'Bandwidth', 'kilobits/s', 'bandwidth', 'apache.net', 'area'],
- 'lines': [
- ['sent', None, 'incremental', 8, 1]
- ]},
- 'connections': {
- 'options': [None, 'Connections', 'connections', 'connections', 'apache.connections', 'line'],
- 'lines': [
- ['connections']
- ]},
- 'conns_async': {
- 'options': [None, 'Async Connections', 'connections', 'connections', 'apache.conns_async', 'stacked'],
- 'lines': [
- ['keepalive'],
- ['closing'],
- ['writing']
- ]}
-}
-
-ASSIGNMENT = {
- 'BytesPerReq': 'size_req',
- 'IdleWorkers': 'idle',
- 'IdleServers': 'idle_servers',
- 'BusyWorkers': 'busy',
- 'BusyServers': 'busy_servers',
- 'ReqPerSec': 'requests_sec',
- 'BytesPerSec': 'size_sec',
- 'Total Accesses': 'requests',
- 'Total kBytes': 'sent',
- 'ConnsTotal': 'connections',
- 'ConnsAsyncKeepAlive': 'keepalive',
- 'ConnsAsyncClosing': 'closing',
- 'ConnsAsyncWriting': 'writing'
-}
-
-FLOAT_VALUES = [
- 'BytesPerReq',
- 'ReqPerSec',
- 'BytesPerSec',
-]
-
-LIGHTTPD_MARKER = 'idle_servers'
-
-
-class Service(UrlService):
- def __init__(self, configuration=None, name=None):
- UrlService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = CHARTS
- self.url = self.configuration.get('url', 'http://localhost/server-status?auto')
-
- def check(self):
- self._manager = self._build_manager()
-
- data = self._get_data()
-
- if not data:
- return None
-
- if LIGHTTPD_MARKER in data:
- self.turn_into_lighttpd()
-
- return True
-
- def _get_data(self):
- """
- Format data received from http request
- :return: dict
- """
- raw_data = self._get_raw_data()
-
- if not raw_data:
- return None
-
- data = dict()
-
- for line in raw_data.split('\n'):
- try:
- parse_line(line, data)
- except ValueError:
- continue
-
- return data or None
-
- def turn_into_lighttpd(self):
- self.module_name = 'lighttpd'
- for chart in self.definitions:
- if chart == 'workers':
- lines = self.definitions[chart]['lines']
- lines[0] = ['idle_servers', 'idle']
- lines[1] = ['busy_servers', 'busy']
- opts = self.definitions[chart]['options']
- opts[1] = opts[1].replace('apache', 'lighttpd')
- opts[4] = opts[4].replace('apache', 'lighttpd')
-
-
-def parse_line(line, data):
- parts = line.split(':')
-
- if len(parts) != 2:
- return
-
- key, value = parts[0], parts[1]
-
- if key not in ASSIGNMENT:
- return
-
- if key in FLOAT_VALUES:
- data[ASSIGNMENT[key]] = int((float(value) * 100000))
- else:
- data[ASSIGNMENT[key]] = int(value)
diff --git a/collectors/python.d.plugin/apache/apache.conf b/collectors/python.d.plugin/apache/apache.conf
deleted file mode 100644
index 84e12a57..00000000
--- a/collectors/python.d.plugin/apache/apache.conf
+++ /dev/null
@@ -1,85 +0,0 @@
-# netdata python.d.plugin configuration for apache
-#
-# This file is in YaML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 1
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed running at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# penalty: yes # the JOB's penalty
-# autodetection_retry: 0 # the JOB's re-check interval in seconds
-#
-# Additionally to the above, apache also supports the following:
-#
-# url: 'URL' # the URL to fetch apache's mod_status stats
-#
-# if the URL is password protected, the following are supported:
-#
-# user: 'username'
-# pass: 'password'
-
-# ----------------------------------------------------------------------
-# AUTO-DETECTION JOBS
-# only one of them will run (they have the same name)
-
-localhost:
- name : 'local'
- url : 'http://localhost/server-status?auto'
-
-localipv4:
- name : 'local'
- url : 'http://127.0.0.1/server-status?auto'
-
-localipv6:
- name : 'local'
- url : 'http://[::1]/server-status?auto'
diff --git a/collectors/python.d.plugin/couchdb/Makefile.inc b/collectors/python.d.plugin/couchdb/Makefile.inc
deleted file mode 100644
index 89dfb51c..00000000
--- a/collectors/python.d.plugin/couchdb/Makefile.inc
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# install these files
-dist_python_DATA += couchdb/couchdb.chart.py
-dist_pythonconfig_DATA += couchdb/couchdb.conf
-
-# do not install these files, but include them in the distribution
-dist_noinst_DATA += couchdb/README.md couchdb/Makefile.inc
-
diff --git a/collectors/python.d.plugin/couchdb/README.md b/collectors/python.d.plugin/couchdb/README.md
deleted file mode 100644
index d359c8f7..00000000
--- a/collectors/python.d.plugin/couchdb/README.md
+++ /dev/null
@@ -1,53 +0,0 @@
-<!--
-title: "Apache CouchDB monitoring with Netdata"
-custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/couchdb/README.md
-sidebar_label: "CouchDB"
--->
-
-# Apache CouchDB monitoring with Netdata
-
-Monitors vital statistics of a local Apache CouchDB 2.x server, including:
-
-- Overall server reads/writes
-- HTTP traffic breakdown
- - Request methods (`GET`, `PUT`, `POST`, etc.)
- - Response status codes (`200`, `201`, `4xx`, etc.)
-- Active server tasks
-- Replication status (CouchDB 2.1 and up only)
-- Erlang VM stats
-- Optional per-database statistics: sizes, # of docs, # of deleted docs
-
-## Configuration
-
-Edit the `python.d/couchdb.conf` configuration file using `edit-config` from the Netdata [config
-directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
-
-```bash
-cd /etc/netdata # Replace this path with your Netdata config directory, if different
-sudo ./edit-config python.d/couchdb.conf
-```
-
-Sample for a local server running on port 5984:
-
-```yaml
-local:
- user: 'admin'
- pass: 'password'
- node: 'couchdb@127.0.0.1'
-```
-
-Be sure to specify a correct admin-level username and password.
-
-You may also need to change the `node` name; this should match the value of `-name NODENAME` in your CouchDB's `etc/vm.args` file. Typically this is of the form `couchdb@fully.qualified.domain.name` in a cluster, or `couchdb@127.0.0.1` / `couchdb@localhost` for a single-node server.
-
-If you want per-database statistics, these need to be added to the configuration, separated by spaces:
-
-```yaml
-local:
- ...
- databases: 'db1 db2 db3 ...'
-```
-
----
-
-
diff --git a/collectors/python.d.plugin/couchdb/couchdb.chart.py b/collectors/python.d.plugin/couchdb/couchdb.chart.py
deleted file mode 100644
index a395f356..00000000
--- a/collectors/python.d.plugin/couchdb/couchdb.chart.py
+++ /dev/null
@@ -1,398 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: couchdb netdata python.d module
-# Author: wohali <wohali@apache.org>
-# Thanks to ilyam8 for good examples :)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from collections import namedtuple, defaultdict
-from json import loads
-from socket import gethostbyname, gaierror
-from threading import Thread
-
-try:
- from queue import Queue
-except ImportError:
- from Queue import Queue
-
-from bases.FrameworkServices.UrlService import UrlService
-
-update_every = 1
-
-METHODS = namedtuple('METHODS', ['get_data', 'url', 'stats'])
-
-OVERVIEW_STATS = [
- 'couchdb.database_reads.value',
- 'couchdb.database_writes.value',
- 'couchdb.httpd.view_reads.value',
- 'couchdb.httpd_request_methods.COPY.value',
- 'couchdb.httpd_request_methods.DELETE.value',
- 'couchdb.httpd_request_methods.GET.value',
- 'couchdb.httpd_request_methods.HEAD.value',
- 'couchdb.httpd_request_methods.OPTIONS.value',
- 'couchdb.httpd_request_methods.POST.value',
- 'couchdb.httpd_request_methods.PUT.value',
- 'couchdb.httpd_status_codes.200.value',
- 'couchdb.httpd_status_codes.201.value',
- 'couchdb.httpd_status_codes.202.value',
- 'couchdb.httpd_status_codes.204.value',
- 'couchdb.httpd_status_codes.206.value',
- 'couchdb.httpd_status_codes.301.value',
- 'couchdb.httpd_status_codes.302.value',
- 'couchdb.httpd_status_codes.304.value',
- 'couchdb.httpd_status_codes.400.value',
- 'couchdb.httpd_status_codes.401.value',
- 'couchdb.httpd_status_codes.403.value',
- 'couchdb.httpd_status_codes.404.value',
- 'couchdb.httpd_status_codes.405.value',
- 'couchdb.httpd_status_codes.406.value',
- 'couchdb.httpd_status_codes.409.value',
- 'couchdb.httpd_status_codes.412.value',
- 'couchdb.httpd_status_codes.413.value',
- 'couchdb.httpd_status_codes.414.value',
- 'couchdb.httpd_status_codes.415.value',
- 'couchdb.httpd_status_codes.416.value',
- 'couchdb.httpd_status_codes.417.value',
- 'couchdb.httpd_status_codes.500.value',
- 'couchdb.httpd_status_codes.501.value',
- 'couchdb.open_os_files.value',
- 'couch_replicator.jobs.running.value',
- 'couch_replicator.jobs.pending.value',
- 'couch_replicator.jobs.crashed.value',
-]
-
-SYSTEM_STATS = [
- 'context_switches',
- 'run_queue',
- 'ets_table_count',
- 'reductions',
- 'memory.atom',
- 'memory.atom_used',
- 'memory.binary',
- 'memory.code',
- 'memory.ets',
- 'memory.other',
- 'memory.processes',
- 'io_input',
- 'io_output',
- 'os_proc_count',
- 'process_count',
- 'internal_replication_jobs'
-]
-
-DB_STATS = [
- 'doc_count',
- 'doc_del_count',
- 'sizes.file',
- 'sizes.external',
- 'sizes.active'
-]
-
-ORDER = [
- 'activity',
- 'request_methods',
- 'response_codes',
- 'active_tasks',
- 'replicator_jobs',
- 'open_files',
- 'db_sizes_file',
- 'db_sizes_external',
- 'db_sizes_active',
- 'db_doc_counts',
- 'db_doc_del_counts',
- 'erlang_memory',
- 'erlang_proc_counts',
- 'erlang_peak_msg_queue',
- 'erlang_reductions'
-]
-
-CHARTS = {
- 'activity': {
- 'options': [None, 'Overall Activity', 'requests/s',
- 'dbactivity', 'couchdb.activity', 'stacked'],
- 'lines': [
- ['couchdb_database_reads', 'DB reads', 'incremental'],
- ['couchdb_database_writes', 'DB writes', 'incremental'],
- ['couchdb_httpd_view_reads', 'View reads', 'incremental']
- ]
- },
- 'request_methods': {
- 'options': [None, 'HTTP request methods', 'requests/s',
- 'httptraffic', 'couchdb.request_methods',
- 'stacked'],
- 'lines': [
- ['couchdb_httpd_request_methods_COPY', 'COPY', 'incremental'],
- ['couchdb_httpd_request_methods_DELETE', 'DELETE', 'incremental'],
- ['couchdb_httpd_request_methods_GET', 'GET', 'incremental'],
- ['couchdb_httpd_request_methods_HEAD', 'HEAD', 'incremental'],
- ['couchdb_httpd_request_methods_OPTIONS', 'OPTIONS',
- 'incremental'],
- ['couchdb_httpd_request_methods_POST', 'POST', 'incremental'],
- ['couchdb_httpd_request_methods_PUT', 'PUT', 'incremental']
- ]
- },
- 'response_codes': {
- 'options': [None, 'HTTP response status codes', 'responses/s',
- 'httptraffic', 'couchdb.response_codes',
- 'stacked'],
- 'lines': [
- ['couchdb_httpd_status_codes_200', '200 OK', 'incremental'],
- ['couchdb_httpd_status_codes_201', '201 Created', 'incremental'],
- ['couchdb_httpd_status_codes_202', '202 Accepted', 'incremental'],
- ['couchdb_httpd_status_codes_2xx', 'Other 2xx Success',
- 'incremental'],
- ['couchdb_httpd_status_codes_3xx', '3xx Redirection',
- 'incremental'],
- ['couchdb_httpd_status_codes_4xx', '4xx Client error',
- 'incremental'],
- ['couchdb_httpd_status_codes_5xx', '5xx Server error',
- 'incremental']
- ]
- },
- 'open_files': {
- 'options': [None, 'Open files', 'files', 'ops', 'couchdb.open_files', 'line'],
- 'lines': [
- ['couchdb_open_os_files', '# files', 'absolute']
- ]
- },
- 'active_tasks': {
- 'options': [None, 'Active task breakdown', 'tasks', 'ops', 'couchdb.active_tasks', 'stacked'],
- 'lines': [
- ['activetasks_indexer', 'Indexer', 'absolute'],
- ['activetasks_database_compaction', 'DB Compaction', 'absolute'],
- ['activetasks_replication', 'Replication', 'absolute'],
- ['activetasks_view_compaction', 'View Compaction', 'absolute']
- ]
- },
- 'replicator_jobs': {
- 'options': [None, 'Replicator job breakdown', 'jobs', 'ops', 'couchdb.replicator_jobs', 'stacked'],
- 'lines': [
- ['couch_replicator_jobs_running', 'Running', 'absolute'],
- ['couch_replicator_jobs_pending', 'Pending', 'absolute'],
- ['couch_replicator_jobs_crashed', 'Crashed', 'absolute'],
- ['internal_replication_jobs', 'Internal replication jobs',
- 'absolute']
- ]
- },
- 'erlang_memory': {
- 'options': [None, 'Erlang VM memory usage', 'B', 'erlang', 'couchdb.erlang_vm_memory', 'stacked'],
- 'lines': [
- ['memory_atom', 'atom', 'absolute'],
- ['memory_binary', 'binaries', 'absolute'],
- ['memory_code', 'code', 'absolute'],
- ['memory_ets', 'ets', 'absolute'],
- ['memory_processes', 'procs', 'absolute'],
- ['memory_other', 'other', 'absolute']
- ]
- },
- 'erlang_reductions': {
- 'options': [None, 'Erlang reductions', 'count', 'erlang', 'couchdb.reductions', 'line'],
- 'lines': [
- ['reductions', 'reductions', 'incremental']
- ]
- },
- 'erlang_proc_counts': {
- 'options': [None, 'Process counts', 'count', 'erlang', 'couchdb.proccounts', 'line'],
- 'lines': [
- ['os_proc_count', 'OS procs', 'absolute'],
- ['process_count', 'erl procs', 'absolute']
- ]
- },
- 'erlang_peak_msg_queue': {
- 'options': [None, 'Peak message queue size', 'count', 'erlang', 'couchdb.peakmsgqueue',
- 'line'],
- 'lines': [
- ['peak_msg_queue', 'peak size', 'absolute']
- ]
- },
- # Lines for the following are added as part of check()
- 'db_sizes_file': {
- 'options': [None, 'Database sizes (file)', 'KiB', 'perdbstats', 'couchdb.db_sizes_file', 'line'],
- 'lines': []
- },
- 'db_sizes_external': {
- 'options': [None, 'Database sizes (external)', 'KiB', 'perdbstats', 'couchdb.db_sizes_external', 'line'],
- 'lines': []
- },
- 'db_sizes_active': {
- 'options': [None, 'Database sizes (active)', 'KiB', 'perdbstats', 'couchdb.db_sizes_active', 'line'],
- 'lines': []
- },
- 'db_doc_counts': {
- 'options': [None, 'Database # of docs', 'docs',
- 'perdbstats', 'couchdb_db_doc_count', 'line'],
- 'lines': []
- },
- 'db_doc_del_counts': {
- 'options': [None, 'Database # of deleted docs', 'docs', 'perdbstats', 'couchdb_db_doc_del_count', 'line'],
- 'lines': []
- }
-}
-
-
-class Service(UrlService):
- def __init__(self, configuration=None, name=None):
- UrlService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = CHARTS
- self.host = self.configuration.get('host', '127.0.0.1')
- self.port = self.configuration.get('port', 5984)
- self.node = self.configuration.get('node', 'couchdb@127.0.0.1')
- self.scheme = self.configuration.get('scheme', 'http')
- self.user = self.configuration.get('user')
- self.password = self.configuration.get('pass')
- try:
- self.dbs = self.configuration.get('databases').split(' ')
- except (KeyError, AttributeError):
- self.dbs = list()
-
- def check(self):
- if not (self.host and self.port):
- self.error('Host is not defined in the module configuration file')
- return False
- try:
- self.host = gethostbyname(self.host)
- except gaierror as error:
- self.error(str(error))
- return False
- self.url = '{scheme}://{host}:{port}'.format(scheme=self.scheme,
- host=self.host,
- port=self.port)
- stats = self.url + '/_node/{node}/_stats'.format(node=self.node)
- active_tasks = self.url + '/_active_tasks'
- system = self.url + '/_node/{node}/_system'.format(node=self.node)
- self.methods = [METHODS(get_data=self._get_overview_stats,
- url=stats,
- stats=OVERVIEW_STATS),
- METHODS(get_data=self._get_active_tasks_stats,
- url=active_tasks,
- stats=None),
- METHODS(get_data=self._get_overview_stats,
- url=system,
- stats=SYSTEM_STATS),
- METHODS(get_data=self._get_dbs_stats,
- url=self.url,
- stats=DB_STATS)]
- # must initialise manager before using _get_raw_data
- self._manager = self._build_manager()
- self.dbs = [db for db in self.dbs
- if self._get_raw_data(self.url + '/' + db)]
- for db in self.dbs:
- self.definitions['db_sizes_file']['lines'].append(
- ['db_' + db + '_sizes_file', db, 'absolute', 1, 1000]
- )
- self.definitions['db_sizes_external']['lines'].append(
- ['db_' + db + '_sizes_external', db, 'absolute', 1, 1000]
- )
- self.definitions['db_sizes_active']['lines'].append(
- ['db_' + db + '_sizes_active', db, 'absolute', 1, 1000]
- )
- self.definitions['db_doc_counts']['lines'].append(
- ['db_' + db + '_doc_count', db, 'absolute']
- )
- self.definitions['db_doc_del_counts']['lines'].append(
- ['db_' + db + '_doc_del_count', db, 'absolute']
- )
- return UrlService.check(self)
-
- def _get_data(self):
- threads = list()
- queue = Queue()
- result = dict()
-
- for method in self.methods:
- th = Thread(target=method.get_data,
- args=(queue, method.url, method.stats))
- th.start()
- threads.append(th)
-
- for thread in threads:
- thread.join()
- result.update(queue.get())
-
- # self.info('couchdb result = ' + str(result))
- return result or None
-
- def _get_overview_stats(self, queue, url, stats):
- raw_data = self._get_raw_data(url)
- if not raw_data:
- return queue.put(dict())
- data = loads(raw_data)
- to_netdata = self._fetch_data(raw_data=data, metrics=stats)
- if 'message_queues' in data:
- to_netdata['peak_msg_queue'] = get_peak_msg_queue(data)
- return queue.put(to_netdata)
-
- def _get_active_tasks_stats(self, queue, url, _):
- taskdict = defaultdict(int)
- taskdict["activetasks_indexer"] = 0
- taskdict["activetasks_database_compaction"] = 0
- taskdict["activetasks_replication"] = 0
- taskdict["activetasks_view_compaction"] = 0
- raw_data = self._get_raw_data(url)
- if not raw_data:
- return queue.put(dict())
- data = loads(raw_data)
- for task in data:
- taskdict["activetasks_" + task["type"]] += 1
- return queue.put(dict(taskdict))
-
- def _get_dbs_stats(self, queue, url, stats):
- to_netdata = {}
- for db in self.dbs:
- raw_data = self._get_raw_data(url + '/' + db)
- if not raw_data:
- continue
- data = loads(raw_data)
- for metric in stats:
- value = data
- metrics_list = metric.split('.')
- try:
- for m in metrics_list:
- value = value[m]
- except (KeyError, TypeError) as e:
- self.debug('cannot process ' + metric + ' for ' + db
- + ": " + str(e))
- continue
- metric_name = 'db_{0}_{1}'.format(db, '_'.join(metrics_list))
- to_netdata[metric_name] = value
- return queue.put(to_netdata)
-
- def _fetch_data(self, raw_data, metrics):
- data = dict()
- for metric in metrics:
- value = raw_data
- metrics_list = metric.split('.')
- try:
- for m in metrics_list:
- value = value[m]
- except (KeyError, TypeError) as e:
- self.debug('cannot process ' + metric + ': ' + str(e))
- continue
- # strip off .value from end of stat
- if metrics_list[-1] == 'value':
- metrics_list = metrics_list[:-1]
- # sum up 3xx/4xx/5xx
- if metrics_list[0:2] == ['couchdb', 'httpd_status_codes'] and \
- int(metrics_list[2]) > 202:
- metrics_list[2] = '{0}xx'.format(int(metrics_list[2]) // 100)
- if '_'.join(metrics_list) in data:
- data['_'.join(metrics_list)] += value
- else:
- data['_'.join(metrics_list)] = value
- else:
- data['_'.join(metrics_list)] = value
- return data
-
-
-def get_peak_msg_queue(data):
- maxsize = 0
- queues = data['message_queues']
- for queue in iter(queues.values()):
- if isinstance(queue, dict) and 'count' in queue:
- value = queue['count']
- elif isinstance(queue, int):
- value = queue
- else:
- continue
- maxsize = max(maxsize, value)
- return maxsize
diff --git a/collectors/python.d.plugin/couchdb/couchdb.conf b/collectors/python.d.plugin/couchdb/couchdb.conf
deleted file mode 100644
index 9c68be77..00000000
--- a/collectors/python.d.plugin/couchdb/couchdb.conf
+++ /dev/null
@@ -1,89 +0,0 @@
-# netdata python.d.plugin configuration for couchdb
-#
-# This file is in YaML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# By default, CouchDB only updates its stats every 10 seconds.
-update_every: 10
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed running at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# penalty: yes # the JOB's penalty
-# autodetection_retry: 0 # the JOB's re-check interval in seconds
-#
-# Additionally to the above, the couchdb plugin also supports the following:
-#
-# host: 'ipaddress' # Server ip address or hostname. Default: 127.0.0.1
-# port: 'port' # CouchDB port. Default: 15672
-# scheme: 'scheme' # http or https. Default: http
-# node: 'couchdb@127.0.0.1' # CouchDB node name. Same as -name vm.args argument.
-#
-# if the URL is password protected, the following are supported:
-#
-# user: 'username'
-# pass: 'password'
-#
-# if db-specific stats are desired, place their names in databases:
-# databases: 'npm-registry animaldb'
-#
-# ----------------------------------------------------------------------
-# AUTO-DETECTION JOBS
-# only one of them will run (they have the same name)
-#
-localhost:
- name: 'local'
- host: '127.0.0.1'
- port: '5984'
- node: 'couchdb@127.0.0.1'
- scheme: 'http'
-# user: 'admin'
-# pass: 'password'
diff --git a/collectors/python.d.plugin/dns_query_time/Makefile.inc b/collectors/python.d.plugin/dns_query_time/Makefile.inc
deleted file mode 100644
index 7eca3e0b..00000000
--- a/collectors/python.d.plugin/dns_query_time/Makefile.inc
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# install these files
-dist_python_DATA += dns_query_time/dns_query_time.chart.py
-dist_pythonconfig_DATA += dns_query_time/dns_query_time.conf
-
-# do not install these files, but include them in the distribution
-dist_noinst_DATA += dns_query_time/README.md dns_query_time/Makefile.inc
-
diff --git a/collectors/python.d.plugin/dns_query_time/README.md b/collectors/python.d.plugin/dns_query_time/README.md
deleted file mode 100644
index 365e2256..00000000
--- a/collectors/python.d.plugin/dns_query_time/README.md
+++ /dev/null
@@ -1,29 +0,0 @@
-<!--
-title: "DNS query RTT monitoring with Netdata"
-custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/dns_query_time/README.md
-sidebar_label: "DNS query RTT"
--->
-
-# DNS query RTT monitoring with Netdata
-
-Measures DNS query round trip time.
-
-**Requirement:**
-
-- `python-dnspython` package
-
-It produces one aggregate chart or one chart per DNS server, showing the query time.
-
-## Configuration
-
-Edit the `python.d/dns_query_time.conf` configuration file using `edit-config` from the Netdata [config
-directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
-
-```bash
-cd /etc/netdata # Replace this path with your Netdata config directory, if different
-sudo ./edit-config python.d/dns_query_time.conf
-```
-
----
-
-
diff --git a/collectors/python.d.plugin/dns_query_time/dns_query_time.chart.py b/collectors/python.d.plugin/dns_query_time/dns_query_time.chart.py
deleted file mode 100644
index 7e1cb32b..00000000
--- a/collectors/python.d.plugin/dns_query_time/dns_query_time.chart.py
+++ /dev/null
@@ -1,149 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: dns_query_time netdata python.d module
-# Author: ilyam8
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from random import choice
-from socket import getaddrinfo, gaierror
-from threading import Thread
-
-try:
- import dns.message
- import dns.query
- import dns.name
-
- DNS_PYTHON = True
-except ImportError:
- DNS_PYTHON = False
-
-try:
- from queue import Queue
-except ImportError:
- from Queue import Queue
-
-from bases.FrameworkServices.SimpleService import SimpleService
-
-update_every = 5
-
-
-class Service(SimpleService):
- def __init__(self, configuration=None, name=None):
- SimpleService.__init__(self, configuration=configuration, name=name)
- self.order = list()
- self.definitions = dict()
- self.timeout = self.configuration.get('response_timeout', 4)
- self.aggregate = self.configuration.get('aggregate', True)
- self.domains = self.configuration.get('domains')
- self.server_list = self.configuration.get('dns_servers')
-
- def check(self):
- if not DNS_PYTHON:
- self.error("'python-dnspython' package is needed to use dns_query_time.chart.py")
- return False
-
- self.timeout = self.timeout if isinstance(self.timeout, int) else 4
-
- if not all([self.domains, self.server_list,
- isinstance(self.server_list, str), isinstance(self.domains, str)]):
- self.error("server_list and domain_list can't be empty")
- return False
- else:
- self.domains, self.server_list = self.domains.split(), self.server_list.split()
-
- for ns in self.server_list:
- if not check_ns(ns):
- self.info('Bad NS: %s' % ns)
- self.server_list.remove(ns)
- if not self.server_list:
- return False
-
- data = self._get_data(timeout=1)
-
- down_servers = [s for s in data if data[s] == -100]
- for down in down_servers:
- down = down[3:].replace('_', '.')
- self.info('Removed due to non response %s' % down)
- self.server_list.remove(down)
- if not self.server_list:
- return False
-
- self.order, self.definitions = create_charts(aggregate=self.aggregate, server_list=self.server_list)
- return True
-
- def _get_data(self, timeout=None):
- return dns_request(self.server_list, timeout or self.timeout, self.domains)
-
-
-def dns_request(server_list, timeout, domains):
- threads = list()
- que = Queue()
- result = dict()
-
- def dns_req(ns, t, q):
- domain = dns.name.from_text(choice(domains))
- request = dns.message.make_query(domain, dns.rdatatype.A)
-
- try:
- resp = dns.query.udp(request, ns, timeout=t)
- if (resp.rcode() == dns.rcode.NOERROR and resp.answer):
- query_time = resp.time * 1000
- else:
- query_time = -100
- except dns.exception.Timeout:
- query_time = -100
- finally:
- q.put({'_'.join(['ns', ns.replace('.', '_')]): query_time})
-
- for server in server_list:
- th = Thread(target=dns_req, args=(server, timeout, que))
- th.start()
- threads.append(th)
-
- for th in threads:
- th.join()
- result.update(que.get())
-
- return result
-
-
-def check_ns(ns):
- try:
- return getaddrinfo(ns, 'domain')[0][4][0]
- except gaierror:
- return False
-
-
-def create_charts(aggregate, server_list):
- if aggregate:
- order = ['dns_group']
- definitions = {
- 'dns_group': {
- 'options': [None, 'DNS Response Time', 'ms', 'name servers', 'dns_query_time.response_time', 'line'],
- 'lines': []
- }
- }
- for ns in server_list:
- dim = [
- '_'.join(['ns', ns.replace('.', '_')]),
- ns,
- 'absolute',
- ]
- definitions['dns_group']['lines'].append(dim)
-
- return order, definitions
- else:
- order = [''.join(['dns_', ns.replace('.', '_')]) for ns in server_list]
- definitions = dict()
-
- for ns in server_list:
- definitions[''.join(['dns_', ns.replace('.', '_')])] = {
- 'options': [None, 'DNS Response Time', 'ms', ns, 'dns_query_time.response_time', 'area'],
- 'lines': [
- [
- '_'.join(['ns', ns.replace('.', '_')]),
- ns,
- 'absolute',
- ]
- ]
- }
- return order, definitions
diff --git a/collectors/python.d.plugin/dns_query_time/dns_query_time.conf b/collectors/python.d.plugin/dns_query_time/dns_query_time.conf
deleted file mode 100644
index 9c0838ee..00000000
--- a/collectors/python.d.plugin/dns_query_time/dns_query_time.conf
+++ /dev/null
@@ -1,69 +0,0 @@
-# netdata python.d.plugin configuration for dns_query_time
-#
-# This file is in YaML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 1
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed running at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# penalty: yes # the JOB's penalty
-# autodetection_retry: 0 # the JOB's re-check interval in seconds
-#
-# Additionally to the above, dns_query_time also supports the following:
-#
-# dns_servers: 'dns servers' # List of dns servers to query
-# domains: 'domains' # List of domains
-# aggregate: yes/no # Aggregate all servers in one chart or not
-# response_timeout: 4 # Dns query response timeout (query = -100 if response time > response_time)
-#
-# ----------------------------------------------------------------------
\ No newline at end of file
diff --git a/collectors/python.d.plugin/dnsdist/Makefile.inc b/collectors/python.d.plugin/dnsdist/Makefile.inc
deleted file mode 100644
index a53f518f..00000000
--- a/collectors/python.d.plugin/dnsdist/Makefile.inc
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# install these files
-dist_python_DATA += dnsdist/dnsdist.chart.py
-dist_pythonconfig_DATA += dnsdist/dnsdist.conf
-
-# do not install these files, but include them in the distribution
-dist_noinst_DATA += dnsdist/README.md dnsdist/Makefile.inc
-
diff --git a/collectors/python.d.plugin/dnsdist/README.md b/collectors/python.d.plugin/dnsdist/README.md
deleted file mode 100644
index 95b2efae..00000000
--- a/collectors/python.d.plugin/dnsdist/README.md
+++ /dev/null
@@ -1,72 +0,0 @@
-<!--
-title: "PowerDNS dnsdist monitoring with Netdata"
-custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/dnsdist/README.md
-sidebar_label: "PowerDNS dnsdist"
--->
-
-# PowerDNS dnsdist monitoring with Netdata
-
-Collects load-balancer performance and health metrics, and draws the following charts:
-
-1. **Response latency**
-
- - latency-slow
- - latency100-1000
- - latency50-100
- - latency10-50
- - latency1-10
- - latency0-1
-
-2. **Cache performance**
-
- - cache-hits
- - cache-misses
-
-3. **ACL events**
-
- - acl-drops
- - rule-drop
- - rule-nxdomain
- - rule-refused
-
-4. **Noncompliant data**
-
- - empty-queries
- - no-policy
- - noncompliant-queries
- - noncompliant-responses
-
-5. **Queries**
-
- - queries
- - rdqueries
- - rdqueries
-
-6. **Health**
-
- - downstream-send-errors
- - downstream-timeouts
- - servfail-responses
- - trunc-failures
-
-## Configuration
-
-Edit the `python.d/dnsdist.conf` configuration file using `edit-config` from the Netdata [config
-directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
-
-```bash
-cd /etc/netdata # Replace this path with your Netdata config directory, if different
-sudo ./edit-config python.d/dnsdist.conf
-```
-
-```yaml
-localhost:
- name : 'local'
- url : 'http://127.0.0.1:5053/jsonstat?command=stats'
- user : 'username'
- pass : 'password'
- header:
- X-API-Key: 'dnsdist-api-key'
-```
-
-
diff --git a/collectors/python.d.plugin/dnsdist/dnsdist.chart.py b/collectors/python.d.plugin/dnsdist/dnsdist.chart.py
deleted file mode 100644
index 7e947923..00000000
--- a/collectors/python.d.plugin/dnsdist/dnsdist.chart.py
+++ /dev/null
@@ -1,131 +0,0 @@
-# -*- coding: utf-8 -*-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from json import loads
-
-from bases.FrameworkServices.UrlService import UrlService
-
-ORDER = [
- 'queries',
- 'queries_dropped',
- 'packets_dropped',
- 'answers',
- 'backend_responses',
- 'backend_commerrors',
- 'backend_errors',
- 'cache',
- 'servercpu',
- 'servermem',
- 'query_latency',
- 'query_latency_avg'
-]
-
-CHARTS = {
- 'queries': {
- 'options': [None, 'Client queries received', 'queries/s', 'queries', 'dnsdist.queries', 'line'],
- 'lines': [
- ['queries', 'all', 'incremental'],
- ['rdqueries', 'recursive', 'incremental'],
- ['empty-queries', 'empty', 'incremental']
- ]
- },
- 'queries_dropped': {
- 'options': [None, 'Client queries dropped', 'queries/s', 'queries', 'dnsdist.queries_dropped', 'line'],
- 'lines': [
- ['rule-drop', 'rule drop', 'incremental'],
- ['dyn-blocked', 'dynamic block', 'incremental'],
- ['no-policy', 'no policy', 'incremental'],
- ['noncompliant-queries', 'non compliant', 'incremental']
- ]
- },
- 'packets_dropped': {
- 'options': [None, 'Packets dropped', 'packets/s', 'packets', 'dnsdist.packets_dropped', 'line'],
- 'lines': [
- ['acl-drops', 'acl', 'incremental']
- ]
- },
- 'answers': {
- 'options': [None, 'Answers statistics', 'answers/s', 'answers', 'dnsdist.answers', 'line'],
- 'lines': [
- ['self-answered', 'self answered', 'incremental'],
- ['rule-nxdomain', 'nxdomain', 'incremental', -1],
- ['rule-refused', 'refused', 'incremental', -1],
- ['trunc-failures', 'trunc failures', 'incremental', -1]
- ]
- },
- 'backend_responses': {
- 'options': [None, 'Backend responses', 'responses/s', 'backends', 'dnsdist.backend_responses', 'line'],
- 'lines': [
- ['responses', 'responses', 'incremental']
- ]
- },
- 'backend_commerrors': {
- 'options': [None, 'Backend Communication Errors', 'errors/s', 'backends', 'dnsdist.backend_commerrors', 'line'],
- 'lines': [
- ['downstream-send-errors', 'send errors', 'incremental']
- ]
- },
- 'backend_errors': {
- 'options': [None, 'Backend error responses', 'responses/s', 'backends', 'dnsdist.backend_errors', 'line'],
- 'lines': [
- ['downstream-timeouts', 'timeout', 'incremental'],
- ['servfail-responses', 'servfail', 'incremental'],
- ['noncompliant-responses', 'non compliant', 'incremental']
- ]
- },
- 'cache': {
- 'options': [None, 'Cache performance', 'answers/s', 'cache', 'dnsdist.cache', 'area'],
- 'lines': [
- ['cache-hits', 'hits', 'incremental'],
- ['cache-misses', 'misses', 'incremental', -1]
- ]
- },
- 'servercpu': {
- 'options': [None, 'DNSDIST server CPU utilization', 'ms/s', 'server', 'dnsdist.servercpu', 'stacked'],
- 'lines': [
- ['cpu-sys-msec', 'system state', 'incremental'],
- ['cpu-user-msec', 'user state', 'incremental']
- ]
- },
- 'servermem': {
- 'options': [None, 'DNSDIST server memory utilization', 'MiB', 'server', 'dnsdist.servermem', 'area'],
- 'lines': [
- ['real-memory-usage', 'memory usage', 'absolute', 1, 1 << 20]
- ]
- },
- 'query_latency': {
- 'options': [None, 'Query latency', 'queries/s', 'latency', 'dnsdist.query_latency', 'stacked'],
- 'lines': [
- ['latency0-1', '1ms', 'incremental'],
- ['latency1-10', '10ms', 'incremental'],
- ['latency10-50', '50ms', 'incremental'],
- ['latency50-100', '100ms', 'incremental'],
- ['latency100-1000', '1sec', 'incremental'],
- ['latency-slow', 'slow', 'incremental']
- ]
- },
- 'query_latency_avg': {
- 'options': [None, 'Average latency for the last N queries', 'microseconds', 'latency',
- 'dnsdist.query_latency_avg', 'line'],
- 'lines': [
- ['latency-avg100', '100', 'absolute'],
- ['latency-avg1000', '1k', 'absolute'],
- ['latency-avg10000', '10k', 'absolute'],
- ['latency-avg1000000', '1000k', 'absolute']
- ]
- }
-}
-
-
-class Service(UrlService):
- def __init__(self, configuration=None, name=None):
- UrlService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = CHARTS
-
- def _get_data(self):
- data = self._get_raw_data()
- if not data:
- return None
-
- return loads(data)
diff --git a/collectors/python.d.plugin/dnsdist/dnsdist.conf b/collectors/python.d.plugin/dnsdist/dnsdist.conf
deleted file mode 100644
index 324d65aa..00000000
--- a/collectors/python.d.plugin/dnsdist/dnsdist.conf
+++ /dev/null
@@ -1,83 +0,0 @@
-# netdata python.d.plugin configuration for dnsdist
-#
-# This file is in YaML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-#update_every: 1
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-#autodetection_retry: 1
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed running at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# penalty: yes # the JOB's penalty
-# autodetection_retry: 0 # the JOB's re-check interval in seconds
-#
-#
-# In addition to the above, dnsdist also supports the following:
-#
-# url: 'URL' # the URL to fetch dnsdist performance statistics
-# user: 'username' # username for basic auth
-# pass: 'password' # password for basic auth
-# header:
-# X-API-Key: 'Key' # API key
-#
-# ----------------------------------------------------------------------
-# AUTO-DETECTION JOBS
-# only one of them will run (they have the same name)
-
-# localhost:
-# name : 'local'
-# url : 'http://127.0.0.1:5053/jsonstat?command=stats'
-# user : 'username'
-# pass : 'password'
-# header:
-# X-API-Key: 'dnsdist-api-key'
-
-
diff --git a/collectors/python.d.plugin/elasticsearch/Makefile.inc b/collectors/python.d.plugin/elasticsearch/Makefile.inc
deleted file mode 100644
index 15c63c2f..00000000
--- a/collectors/python.d.plugin/elasticsearch/Makefile.inc
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# install these files
-dist_python_DATA += elasticsearch/elasticsearch.chart.py
-dist_pythonconfig_DATA += elasticsearch/elasticsearch.conf
-
-# do not install these files, but include them in the distribution
-dist_noinst_DATA += elasticsearch/README.md elasticsearch/Makefile.inc
-
diff --git a/collectors/python.d.plugin/elasticsearch/README.md b/collectors/python.d.plugin/elasticsearch/README.md
deleted file mode 100644
index a98eddf5..00000000
--- a/collectors/python.d.plugin/elasticsearch/README.md
+++ /dev/null
@@ -1,94 +0,0 @@
-<!--
-title: "Elasticsearch monitoring with Netdata"
-custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/elasticsearch/README.md
-sidebar_label: "Elasticsearch"
--->
-
-# Elasticsearch monitoring with Netdata
-
-Monitors [Elasticsearch](https://www.elastic.co/products/elasticsearch) performance and health metrics.
-
-It produces:
-
-1. **Search performance** charts:
-
- - Number of queries, fetches
- - Time spent on queries, fetches
- - Query and fetch latency
-
-2. **Indexing performance** charts:
-
- - Number of documents indexed, index refreshes, flushes
- - Time spent on indexing, refreshing, flushing
- - Indexing and flushing latency
-
-3. **Memory usage and garbage collection** charts:
-
- - JVM heap currently in use, committed
- - Count of garbage collections
- - Time spent on garbage collections
-
-4. **Host metrics** charts:
-
- - Available file descriptors in percent
- - Opened HTTP connections
- - Cluster communication transport metrics
-
-5. **Queues and rejections** charts:
-
- - Number of queued/rejected threads in thread pool
-
-6. **Fielddata cache** charts:
-
- - Fielddata cache size
- - Fielddata evictions and circuit breaker tripped count
-
-7. **Cluster health API** charts:
-
- - Cluster status
- - Nodes and tasks statistics
- - Shards statistics
-
-8. **Cluster stats API** charts:
-
- - Nodes statistics
- - Query cache statistics
- - Docs statistics
- - Store statistics
- - Indices and shards statistics
-
-9. **Indices** charts (per index statistics, disabled by default):
-
- - Docs count
- - Store size
- - Num of replicas
- - Health status
-
-## Configuration
-
-Edit the `python.d/elasticsearch.conf` configuration file using `edit-config` from the Netdata [config
-directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
-
-```bash
-cd /etc/netdata # Replace this path with your Netdata config directory, if different
-sudo ./edit-config python.d/elasticsearch.conf
-```
-
-Sample:
-
-```yaml
-local:
- host : 'ipaddress' # Elasticsearch server ip address or hostname.
- port : 'port' # Port on which elasticsearch listens.
- scheme : 'http' # URL scheme. Use 'https' if your elasticsearch uses TLS.
- node_status : yes/no # Get metrics from "/_nodes/_local/stats". Enabled by default.
- cluster_health : yes/no # Get metrics from "/_cluster/health". Enabled by default.
-  cluster_stats   : yes/no  # Get metrics from "/_cluster/stats". Enabled by default.
- indices_stats : yes/no # Get metrics from "/_cat/indices". Disabled by default.
-```
-
-If no configuration is given, the module will try to connect to `http://127.0.0.1:9200`.
-
----
-
-
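The four `yes/no` switches map one-to-one to Elasticsearch REST endpoints. A quick way to confirm that a node exposes them is sketched below (assumptions: an unauthenticated node on `127.0.0.1:9200`; this is not part of the collector):

```python
# Probe the endpoints the collector can scrape and report which ones respond.
import json
from urllib.request import urlopen

base = 'http://127.0.0.1:9200'  # assumed local, unauthenticated node
endpoints = [
    '/_nodes/_local/stats',      # node_status
    '/_cluster/health',          # cluster_health
    '/_cluster/stats',           # cluster_stats
    '/_cat/indices?format=json'  # indices_stats
]

for path in endpoints:
    try:
        with urlopen(base + path, timeout=2) as resp:
            json.loads(resp.read().decode('utf-8'))
        print('OK  ', path)
    except Exception as err:  # broad catch is fine for a one-off probe
        print('FAIL', path, err)
```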
diff --git a/collectors/python.d.plugin/elasticsearch/elasticsearch.chart.py b/collectors/python.d.plugin/elasticsearch/elasticsearch.chart.py
deleted file mode 100644
index 93614b08..00000000
--- a/collectors/python.d.plugin/elasticsearch/elasticsearch.chart.py
+++ /dev/null
@@ -1,808 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: elastic search node stats netdata python.d module
-# Author: ilyam8
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-import json
-import threading
-
-from collections import namedtuple
-from socket import gethostbyname, gaierror
-
-try:
- from queue import Queue
-except ImportError:
- from Queue import Queue
-
-from bases.FrameworkServices.UrlService import UrlService
-
-# default module values (can be overridden per job in `config`)
-update_every = 5
-
-METHODS = namedtuple('METHODS', ['get_data', 'url', 'run'])
-
-NODE_STATS = [
- 'indices.search.fetch_current',
- 'indices.search.fetch_total',
- 'indices.search.query_current',
- 'indices.search.query_total',
- 'indices.search.query_time_in_millis',
- 'indices.search.fetch_time_in_millis',
- 'indices.indexing.index_total',
- 'indices.indexing.index_current',
- 'indices.indexing.index_time_in_millis',
- 'indices.refresh.total',
- 'indices.refresh.total_time_in_millis',
- 'indices.flush.total',
- 'indices.flush.total_time_in_millis',
- 'indices.translog.operations',
- 'indices.translog.size_in_bytes',
- 'indices.translog.uncommitted_operations',
- 'indices.translog.uncommitted_size_in_bytes',
- 'indices.segments.count',
- 'indices.segments.terms_memory_in_bytes',
- 'indices.segments.stored_fields_memory_in_bytes',
- 'indices.segments.term_vectors_memory_in_bytes',
- 'indices.segments.norms_memory_in_bytes',
- 'indices.segments.points_memory_in_bytes',
- 'indices.segments.doc_values_memory_in_bytes',
- 'indices.segments.index_writer_memory_in_bytes',
- 'indices.segments.version_map_memory_in_bytes',
- 'indices.segments.fixed_bit_set_memory_in_bytes',
- 'jvm.gc.collectors.young.collection_count',
- 'jvm.gc.collectors.old.collection_count',
- 'jvm.gc.collectors.young.collection_time_in_millis',
- 'jvm.gc.collectors.old.collection_time_in_millis',
- 'jvm.mem.heap_used_percent',
- 'jvm.mem.heap_used_in_bytes',
- 'jvm.mem.heap_committed_in_bytes',
- 'jvm.buffer_pools.direct.count',
- 'jvm.buffer_pools.direct.used_in_bytes',
- 'jvm.buffer_pools.direct.total_capacity_in_bytes',
- 'jvm.buffer_pools.mapped.count',
- 'jvm.buffer_pools.mapped.used_in_bytes',
- 'jvm.buffer_pools.mapped.total_capacity_in_bytes',
- 'thread_pool.bulk.queue',
- 'thread_pool.bulk.rejected',
- 'thread_pool.write.queue',
- 'thread_pool.write.rejected',
- 'thread_pool.index.queue',
- 'thread_pool.index.rejected',
- 'thread_pool.search.queue',
- 'thread_pool.search.rejected',
- 'thread_pool.merge.queue',
- 'thread_pool.merge.rejected',
- 'indices.fielddata.memory_size_in_bytes',
- 'indices.fielddata.evictions',
- 'breakers.fielddata.tripped',
- 'http.current_open',
- 'transport.rx_size_in_bytes',
- 'transport.tx_size_in_bytes',
- 'process.max_file_descriptors',
- 'process.open_file_descriptors'
-]
-
-CLUSTER_STATS = [
- 'nodes.count.data',
- 'nodes.count.master',
- 'nodes.count.total',
- 'nodes.count.coordinating_only',
- 'nodes.count.ingest',
- 'indices.docs.count',
- 'indices.query_cache.hit_count',
- 'indices.query_cache.miss_count',
- 'indices.store.size_in_bytes',
- 'indices.count',
- 'indices.shards.total'
-]
-
-HEALTH_STATS = [
- 'number_of_nodes',
- 'number_of_data_nodes',
- 'number_of_pending_tasks',
- 'number_of_in_flight_fetch',
- 'active_shards',
- 'relocating_shards',
- 'unassigned_shards',
- 'delayed_unassigned_shards',
- 'initializing_shards',
- 'active_shards_percent_as_number'
-]
-
-LATENCY = {
- 'query_latency': {
- 'total': 'indices_search_query_total',
- 'spent_time': 'indices_search_query_time_in_millis'
- },
- 'fetch_latency': {
- 'total': 'indices_search_fetch_total',
- 'spent_time': 'indices_search_fetch_time_in_millis'
- },
- 'indexing_latency': {
- 'total': 'indices_indexing_index_total',
- 'spent_time': 'indices_indexing_index_time_in_millis'
- },
- 'flushing_latency': {
- 'total': 'indices_flush_total',
- 'spent_time': 'indices_flush_total_time_in_millis'
- }
-}
-
-# charts order (can be overridden if you want less charts, or different order)
-ORDER = [
- 'search_performance_total',
- 'search_performance_current',
- 'search_performance_time',
- 'search_latency',
- 'index_performance_total',
- 'index_performance_current',
- 'index_performance_time',
- 'index_latency',
- 'index_translog_operations',
- 'index_translog_size',
- 'index_segments_count',
- 'index_segments_memory_writer',
- 'index_segments_memory',
- 'jvm_mem_heap',
- 'jvm_mem_heap_bytes',
- 'jvm_buffer_pool_count',
- 'jvm_direct_buffers_memory',
- 'jvm_mapped_buffers_memory',
- 'jvm_gc_count',
- 'jvm_gc_time',
- 'host_metrics_file_descriptors',
- 'host_metrics_http',
- 'host_metrics_transport',
- 'thread_pool_queued',
- 'thread_pool_rejected',
- 'fielddata_cache',
- 'fielddata_evictions_tripped',
- 'cluster_health_status',
- 'cluster_health_nodes',
- 'cluster_health_pending_tasks',
- 'cluster_health_flight_fetch',
- 'cluster_health_shards',
- 'cluster_stats_nodes',
- 'cluster_stats_query_cache',
- 'cluster_stats_docs',
- 'cluster_stats_store',
- 'cluster_stats_indices',
- 'cluster_stats_shards_total',
- 'index_docs_count',
- 'index_store_size',
- 'index_replica',
- 'index_health',
-]
-
-CHARTS = {
- 'search_performance_total': {
- 'options': [None, 'Queries And Fetches', 'events/s', 'search performance',
- 'elastic.search_performance_total', 'stacked'],
- 'lines': [
- ['indices_search_query_total', 'queries', 'incremental'],
- ['indices_search_fetch_total', 'fetches', 'incremental']
- ]
- },
- 'search_performance_current': {
- 'options': [None, 'Queries and Fetches In Progress', 'events', 'search performance',
- 'elastic.search_performance_current', 'stacked'],
- 'lines': [
- ['indices_search_query_current', 'queries', 'absolute'],
- ['indices_search_fetch_current', 'fetches', 'absolute']
- ]
- },
- 'search_performance_time': {
- 'options': [None, 'Time Spent On Queries And Fetches', 'seconds', 'search performance',
- 'elastic.search_performance_time', 'stacked'],
- 'lines': [
- ['indices_search_query_time_in_millis', 'query', 'incremental', 1, 1000],
- ['indices_search_fetch_time_in_millis', 'fetch', 'incremental', 1, 1000]
- ]
- },
- 'search_latency': {
- 'options': [None, 'Query And Fetch Latency', 'milliseconds', 'search performance', 'elastic.search_latency',
- 'stacked'],
- 'lines': [
- ['query_latency', 'query', 'absolute', 1, 1000],
- ['fetch_latency', 'fetch', 'absolute', 1, 1000]
- ]
- },
- 'index_performance_total': {
- 'options': [None, 'Indexed Documents, Index Refreshes, Index Flushes To Disk', 'events/s',
- 'indexing performance', 'elastic.index_performance_total', 'stacked'],
- 'lines': [
- ['indices_indexing_index_total', 'indexed', 'incremental'],
- ['indices_refresh_total', 'refreshes', 'incremental'],
- ['indices_flush_total', 'flushes', 'incremental']
- ]
- },
- 'index_performance_current': {
- 'options': [None, 'Number Of Documents Currently Being Indexed', 'currently indexed',
- 'indexing performance', 'elastic.index_performance_current', 'stacked'],
- 'lines': [
- ['indices_indexing_index_current', 'documents', 'absolute']
- ]
- },
- 'index_performance_time': {
- 'options': [None, 'Time Spent On Indexing, Refreshing, Flushing', 'seconds', 'indexing performance',
- 'elastic.index_performance_time', 'stacked'],
- 'lines': [
- ['indices_indexing_index_time_in_millis', 'indexing', 'incremental', 1, 1000],
- ['indices_refresh_total_time_in_millis', 'refreshing', 'incremental', 1, 1000],
- ['indices_flush_total_time_in_millis', 'flushing', 'incremental', 1, 1000]
- ]
- },
- 'index_latency': {
- 'options': [None, 'Indexing And Flushing Latency', 'milliseconds', 'indexing performance',
- 'elastic.index_latency', 'stacked'],
- 'lines': [
- ['indexing_latency', 'indexing', 'absolute', 1, 1000],
- ['flushing_latency', 'flushing', 'absolute', 1, 1000]
- ]
- },
- 'index_translog_operations': {
- 'options': [None, 'Translog Operations', 'operations', 'translog',
- 'elastic.index_translog_operations', 'area'],
- 'lines': [
- ['indices_translog_operations', 'total', 'absolute'],
- ['indices_translog_uncommitted_operations', 'uncommitted', 'absolute']
- ]
- },
- 'index_translog_size': {
- 'options': [None, 'Translog Size', 'MiB', 'translog',
- 'elastic.index_translog_size', 'area'],
- 'lines': [
-            ['indices_translog_size_in_bytes', 'total', 'absolute', 1, 1048576],
-            ['indices_translog_uncommitted_size_in_bytes', 'uncommitted', 'absolute', 1, 1048576]
- ]
- },
- 'index_segments_count': {
- 'options': [None, 'Total Number Of Indices Segments', 'segments', 'indices segments',
- 'elastic.index_segments_count', 'line'],
- 'lines': [
- ['indices_segments_count', 'segments', 'absolute']
- ]
- },
- 'index_segments_memory_writer': {
- 'options': [None, 'Index Writer Memory Usage', 'MiB', 'indices segments',
- 'elastic.index_segments_memory_writer', 'area'],
- 'lines': [
-            ['indices_segments_index_writer_memory_in_bytes', 'total', 'absolute', 1, 1048576]
- ]
- },
- 'index_segments_memory': {
- 'options': [None, 'Indices Segments Memory Usage', 'MiB', 'indices segments',
- 'elastic.index_segments_memory', 'stacked'],
- 'lines': [
-            ['indices_segments_terms_memory_in_bytes', 'terms', 'absolute', 1, 1048576],
-            ['indices_segments_stored_fields_memory_in_bytes', 'stored fields', 'absolute', 1, 1048576],
-            ['indices_segments_term_vectors_memory_in_bytes', 'term vectors', 'absolute', 1, 1048576],
-            ['indices_segments_norms_memory_in_bytes', 'norms', 'absolute', 1, 1048576],
-            ['indices_segments_points_memory_in_bytes', 'points', 'absolute', 1, 1048576],
-            ['indices_segments_doc_values_memory_in_bytes', 'doc values', 'absolute', 1, 1048576],
-            ['indices_segments_version_map_memory_in_bytes', 'version map', 'absolute', 1, 1048576],
-            ['indices_segments_fixed_bit_set_memory_in_bytes', 'fixed bit set', 'absolute', 1, 1048576]
- ]
- },
- 'jvm_mem_heap': {
- 'options': [None, 'JVM Heap Percentage Currently in Use', 'percentage', 'memory usage and gc',
- 'elastic.jvm_heap', 'area'],
- 'lines': [
- ['jvm_mem_heap_used_percent', 'inuse', 'absolute']
- ]
- },
- 'jvm_mem_heap_bytes': {
- 'options': [None, 'JVM Heap Commit And Usage', 'MiB', 'memory usage and gc',
- 'elastic.jvm_heap_bytes', 'area'],
- 'lines': [
- ['jvm_mem_heap_committed_in_bytes', 'committed', 'absolute', 1, 1048576],
- ['jvm_mem_heap_used_in_bytes', 'used', 'absolute', 1, 1048576]
- ]
- },
- 'jvm_buffer_pool_count': {
- 'options': [None, 'JVM Buffers', 'pools', 'memory usage and gc',
- 'elastic.jvm_buffer_pool_count', 'line'],
- 'lines': [
- ['jvm_buffer_pools_direct_count', 'direct', 'absolute'],
- ['jvm_buffer_pools_mapped_count', 'mapped', 'absolute']
- ]
- },
- 'jvm_direct_buffers_memory': {
- 'options': [None, 'JVM Direct Buffers Memory', 'MiB', 'memory usage and gc',
- 'elastic.jvm_direct_buffers_memory', 'area'],
- 'lines': [
-            ['jvm_buffer_pools_direct_used_in_bytes', 'used', 'absolute', 1, 1048576],
-            ['jvm_buffer_pools_direct_total_capacity_in_bytes', 'total capacity', 'absolute', 1, 1048576]
- ]
- },
- 'jvm_mapped_buffers_memory': {
- 'options': [None, 'JVM Mapped Buffers Memory', 'MiB', 'memory usage and gc',
- 'elastic.jvm_mapped_buffers_memory', 'area'],
- 'lines': [
-            ['jvm_buffer_pools_mapped_used_in_bytes', 'used', 'absolute', 1, 1048576],
-            ['jvm_buffer_pools_mapped_total_capacity_in_bytes', 'total capacity', 'absolute', 1, 1048576]
- ]
- },
- 'jvm_gc_count': {
- 'options': [None, 'Garbage Collections', 'events/s', 'memory usage and gc', 'elastic.gc_count', 'stacked'],
- 'lines': [
- ['jvm_gc_collectors_young_collection_count', 'young', 'incremental'],
- ['jvm_gc_collectors_old_collection_count', 'old', 'incremental']
- ]
- },
- 'jvm_gc_time': {
- 'options': [None, 'Time Spent On Garbage Collections', 'milliseconds', 'memory usage and gc',
- 'elastic.gc_time', 'stacked'],
- 'lines': [
- ['jvm_gc_collectors_young_collection_time_in_millis', 'young', 'incremental'],
- ['jvm_gc_collectors_old_collection_time_in_millis', 'old', 'incremental']
- ]
- },
- 'thread_pool_queued': {
- 'options': [None, 'Number Of Queued Threads In Thread Pool', 'queued threads', 'queues and rejections',
- 'elastic.thread_pool_queued', 'stacked'],
- 'lines': [
- ['thread_pool_bulk_queue', 'bulk', 'absolute'],
- ['thread_pool_write_queue', 'write', 'absolute'],
- ['thread_pool_index_queue', 'index', 'absolute'],
- ['thread_pool_search_queue', 'search', 'absolute'],
- ['thread_pool_merge_queue', 'merge', 'absolute']
- ]
- },
- 'thread_pool_rejected': {
- 'options': [None, 'Rejected Threads In Thread Pool', 'rejected threads', 'queues and rejections',
- 'elastic.thread_pool_rejected', 'stacked'],
- 'lines': [
- ['thread_pool_bulk_rejected', 'bulk', 'absolute'],
- ['thread_pool_write_rejected', 'write', 'absolute'],
- ['thread_pool_index_rejected', 'index', 'absolute'],
- ['thread_pool_search_rejected', 'search', 'absolute'],
- ['thread_pool_merge_rejected', 'merge', 'absolute']
- ]
- },
- 'fielddata_cache': {
- 'options': [None, 'Fielddata Cache', 'MiB', 'fielddata cache', 'elastic.fielddata_cache', 'line'],
- 'lines': [
- ['indices_fielddata_memory_size_in_bytes', 'cache', 'absolute', 1, 1048576]
- ]
- },
- 'fielddata_evictions_tripped': {
- 'options': [None, 'Fielddata Evictions And Circuit Breaker Tripped Count', 'events/s',
- 'fielddata cache', 'elastic.fielddata_evictions_tripped', 'line'],
- 'lines': [
- ['indices_fielddata_evictions', 'evictions', 'incremental'],
- ['indices_fielddata_tripped', 'tripped', 'incremental']
- ]
- },
- 'cluster_health_nodes': {
- 'options': [None, 'Nodes Statistics', 'nodes', 'cluster health API',
- 'elastic.cluster_health_nodes', 'area'],
- 'lines': [
- ['number_of_nodes', 'nodes', 'absolute'],
- ['number_of_data_nodes', 'data_nodes', 'absolute'],
- ]
- },
- 'cluster_health_pending_tasks': {
- 'options': [None, 'Tasks Statistics', 'tasks', 'cluster health API',
- 'elastic.cluster_health_pending_tasks', 'line'],
- 'lines': [
- ['number_of_pending_tasks', 'pending_tasks', 'absolute'],
- ]
- },
- 'cluster_health_flight_fetch': {
- 'options': [None, 'In Flight Fetches Statistics', 'fetches', 'cluster health API',
- 'elastic.cluster_health_flight_fetch', 'line'],
- 'lines': [
- ['number_of_in_flight_fetch', 'in_flight_fetch', 'absolute']
- ]
- },
- 'cluster_health_status': {
- 'options': [None, 'Cluster Status', 'status', 'cluster health API',
- 'elastic.cluster_health_status', 'area'],
- 'lines': [
- ['status_green', 'green', 'absolute'],
- ['status_red', 'red', 'absolute'],
- ['status_yellow', 'yellow', 'absolute']
- ]
- },
- 'cluster_health_shards': {
- 'options': [None, 'Shards Statistics', 'shards', 'cluster health API',
- 'elastic.cluster_health_shards', 'stacked'],
- 'lines': [
- ['active_shards', 'active_shards', 'absolute'],
- ['relocating_shards', 'relocating_shards', 'absolute'],
- ['unassigned_shards', 'unassigned', 'absolute'],
- ['delayed_unassigned_shards', 'delayed_unassigned', 'absolute'],
- ['initializing_shards', 'initializing', 'absolute'],
- ['active_shards_percent_as_number', 'active_percent', 'absolute']
- ]
- },
- 'cluster_stats_nodes': {
- 'options': [None, 'Nodes Statistics', 'nodes', 'cluster stats API',
- 'elastic.cluster_nodes', 'area'],
- 'lines': [
- ['nodes_count_data', 'data', 'absolute'],
- ['nodes_count_master', 'master', 'absolute'],
- ['nodes_count_total', 'total', 'absolute'],
- ['nodes_count_ingest', 'ingest', 'absolute'],
- ['nodes_count_coordinating_only', 'coordinating_only', 'absolute']
- ]
- },
- 'cluster_stats_query_cache': {
- 'options': [None, 'Query Cache Statistics', 'queries', 'cluster stats API',
- 'elastic.cluster_query_cache', 'stacked'],
- 'lines': [
- ['indices_query_cache_hit_count', 'hit', 'incremental'],
- ['indices_query_cache_miss_count', 'miss', 'incremental']
- ]
- },
- 'cluster_stats_docs': {
- 'options': [None, 'Docs Statistics', 'docs', 'cluster stats API',
- 'elastic.cluster_docs', 'line'],
- 'lines': [
- ['indices_docs_count', 'docs', 'absolute']
- ]
- },
- 'cluster_stats_store': {
- 'options': [None, 'Store Statistics', 'MiB', 'cluster stats API',
- 'elastic.cluster_store', 'line'],
- 'lines': [
-            ['indices_store_size_in_bytes', 'size', 'absolute', 1, 1048576]
- ]
- },
- 'cluster_stats_indices': {
- 'options': [None, 'Indices Statistics', 'indices', 'cluster stats API',
- 'elastic.cluster_indices', 'line'],
- 'lines': [
- ['indices_count', 'indices', 'absolute'],
- ]
- },
- 'cluster_stats_shards_total': {
- 'options': [None, 'Total Shards Statistics', 'shards', 'cluster stats API',
- 'elastic.cluster_shards_total', 'line'],
- 'lines': [
- ['indices_shards_total', 'shards', 'absolute']
- ]
- },
- 'host_metrics_transport': {
- 'options': [None, 'Cluster Communication Transport Metrics', 'kilobit/s', 'host metrics',
- 'elastic.host_transport', 'area'],
- 'lines': [
- ['transport_rx_size_in_bytes', 'in', 'incremental', 8, 1000],
- ['transport_tx_size_in_bytes', 'out', 'incremental', -8, 1000]
- ]
- },
- 'host_metrics_file_descriptors': {
- 'options': [None, 'Available File Descriptors In Percent', 'percentage', 'host metrics',
- 'elastic.host_descriptors', 'area'],
- 'lines': [
- ['file_descriptors_used', 'used', 'absolute', 1, 10]
- ]
- },
- 'host_metrics_http': {
- 'options': [None, 'Opened HTTP Connections', 'connections', 'host metrics',
- 'elastic.host_http_connections', 'line'],
- 'lines': [
- ['http_current_open', 'opened', 'absolute', 1, 1]
- ]
- },
- 'index_docs_count': {
- 'options': [None, 'Docs Count', 'count', 'indices', 'elastic.index_docs', 'line'],
- 'lines': []
- },
- 'index_store_size': {
- 'options': [None, 'Store Size', 'bytes', 'indices', 'elastic.index_store_size', 'line'],
- 'lines': []
- },
- 'index_replica': {
- 'options': [None, 'Replica', 'count', 'indices', 'elastic.index_replica', 'line'],
- 'lines': []
- },
- 'index_health': {
- 'options': [None, 'Health', 'status', 'indices', 'elastic.index_health', 'line'],
- 'lines': []
- },
-}
-
-
-def convert_index_store_size_to_bytes(size):
-    # can be b, kb, mb, gb, tb or None
- if size is None:
- return -1
- if size.endswith('kb'):
- return round(float(size[:-2]) * 1024)
- elif size.endswith('mb'):
- return round(float(size[:-2]) * 1024 * 1024)
- elif size.endswith('gb'):
- return round(float(size[:-2]) * 1024 * 1024 * 1024)
- elif size.endswith('tb'):
- return round(float(size[:-2]) * 1024 * 1024 * 1024 * 1024)
- elif size.endswith('b'):
- return round(float(size[:-1]))
- return -1
-
-
-def convert_index_null_value(value):
- if value is None:
- return -1
- return value
-
-
-def convert_index_health(health):
- if health == 'green':
- return 0
- elif health == 'yellow':
- return 1
-    elif health == 'red':
- return 2
- return -1
-
-
-def get_survive_any(method):
- def w(*args):
- try:
- method(*args)
- except Exception as error:
- self, queue, url = args[0], args[1], args[2]
- self.error("error during '{0}' : {1}".format(url, error))
- queue.put(dict())
-
- return w
-
-
-class Service(UrlService):
- def __init__(self, configuration=None, name=None):
- UrlService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = CHARTS
- self.host = self.configuration.get('host')
- self.port = self.configuration.get('port', 9200)
- self.url = '{scheme}://{host}:{port}'.format(
- scheme=self.configuration.get('scheme', 'http'),
- host=self.host,
- port=self.port,
- )
- self.latency = dict()
- self.methods = list()
- self.collected_indices = set()
-
- def check(self):
- if not self.host:
- self.error('Host is not defined in the module configuration file')
- return False
-
- try:
- self.host = gethostbyname(self.host)
- except gaierror as error:
- self.error(repr(error))
- return False
-
- self.methods = [
- METHODS(
- get_data=self._get_node_stats,
- url=self.url + '/_nodes/_local/stats',
- run=self.configuration.get('node_stats', True),
- ),
- METHODS(
- get_data=self._get_cluster_health,
- url=self.url + '/_cluster/health',
- run=self.configuration.get('cluster_health', True)
- ),
- METHODS(
- get_data=self._get_cluster_stats,
- url=self.url + '/_cluster/stats',
- run=self.configuration.get('cluster_stats', True),
- ),
- METHODS(
- get_data=self._get_indices,
- url=self.url + '/_cat/indices?format=json',
- run=self.configuration.get('indices_stats', False),
- ),
- ]
- return UrlService.check(self)
-
- def _get_data(self):
- threads = list()
- queue = Queue()
- result = dict()
-
- for method in self.methods:
- if not method.run:
- continue
- th = threading.Thread(
- target=method.get_data,
- args=(queue, method.url),
- )
- th.daemon = True
- th.start()
- threads.append(th)
-
- for thread in threads:
- thread.join()
- result.update(queue.get())
-
- return result or None
-
- def add_index_to_charts(self, idx_name):
- for name in ('index_docs_count', 'index_store_size', 'index_replica', 'index_health'):
- chart = self.charts[name]
- dim = ['{0}_{1}'.format(idx_name, name), idx_name]
- chart.add_dimension(dim)
-
- @get_survive_any
- def _get_indices(self, queue, url):
- # [
- # {
- # "pri.store.size": "650b",
- # "health": "yellow",
- # "status": "open",
- # "index": "twitter",
- # "pri": "5",
- # "rep": "1",
- # "docs.count": "10",
- # "docs.deleted": "3",
- # "store.size": "650b"
- # },
- # {
- # "status":"open",
- # "index":".kibana_3",
- # "health":"red",
- # "uuid":"umAdNrq6QaOXrmZjAowTNw",
- # "store.size":null,
- # "pri.store.size":null,
- # "docs.count":null,
- # "rep":"0",
- # "pri":"1",
- # "docs.deleted":null
- # },
- # {
- # "health" : "green",
- # "status" : "close",
- # "index" : "siem-events-2021.09.12",
- # "uuid" : "mTQ-Yl5TS7S3lGoRORE-Pg",
- # "pri" : "4",
- # "rep" : "0",
- # "docs.count" : null,
- # "docs.deleted" : null,
- # "store.size" : null,
- # "pri.store.size" : null
- # }
- # ]
- raw_data = self._get_raw_data(url)
- if not raw_data:
- return queue.put(dict())
-
- indices = self.json_parse(raw_data)
- if not indices:
- return queue.put(dict())
-
- charts_initialized = len(self.charts) != 0
- data = dict()
- for idx in indices:
- try:
- name = idx['index']
- is_system_index = name.startswith('.')
- if is_system_index:
- continue
-
- v = {
- '{0}_index_replica'.format(name): idx['rep'],
- '{0}_index_health'.format(name): convert_index_health(idx['health']),
- }
- docs_count = convert_index_null_value(idx['docs.count'])
- if docs_count != -1:
-                    v['{0}_index_docs_count'.format(name)] = docs_count
- size = convert_index_store_size_to_bytes(idx['store.size'])
- if size != -1:
- v['{0}_index_store_size'.format(name)] = size
- except KeyError as error:
- self.debug("error on parsing index : {0}".format(repr(error)))
- continue
-
- data.update(v)
- if name not in self.collected_indices and charts_initialized:
- self.collected_indices.add(name)
- self.add_index_to_charts(name)
-
- return queue.put(data)
-
- @get_survive_any
- def _get_cluster_health(self, queue, url):
- raw = self._get_raw_data(url)
- if not raw:
- return queue.put(dict())
-
- parsed = self.json_parse(raw)
- if not parsed:
- return queue.put(dict())
-
- data = fetch_data(raw_data=parsed, metrics=HEALTH_STATS)
- dummy = {
- 'status_green': 0,
- 'status_red': 0,
- 'status_yellow': 0,
- }
- data.update(dummy)
- current_status = 'status_' + parsed['status']
- data[current_status] = 1
-
- return queue.put(data)
-
- @get_survive_any
- def _get_cluster_stats(self, queue, url):
- raw = self._get_raw_data(url)
- if not raw:
- return queue.put(dict())
-
- parsed = self.json_parse(raw)
- if not parsed:
- return queue.put(dict())
-
- data = fetch_data(raw_data=parsed, metrics=CLUSTER_STATS)
-
- return queue.put(data)
-
- @get_survive_any
- def _get_node_stats(self, queue, url):
- raw = self._get_raw_data(url)
- if not raw:
- return queue.put(dict())
-
- parsed = self.json_parse(raw)
- if not parsed:
- return queue.put(dict())
-
- node = list(parsed['nodes'].keys())[0]
- data = fetch_data(raw_data=parsed['nodes'][node], metrics=NODE_STATS)
-
- # Search, index, flush, fetch performance latency
- for key in LATENCY:
- try:
- data[key] = self.find_avg(
- total=data[LATENCY[key]['total']],
- spent_time=data[LATENCY[key]['spent_time']],
- key=key)
- except KeyError:
- continue
- if 'process_open_file_descriptors' in data and 'process_max_file_descriptors' in data:
- v = float(data['process_open_file_descriptors']) / data['process_max_file_descriptors'] * 1000
- data['file_descriptors_used'] = round(v)
-
- return queue.put(data)
-
- def json_parse(self, reply):
- try:
- return json.loads(reply)
- except ValueError as err:
- self.error(err)
- return None
-
- def find_avg(self, total, spent_time, key):
- if key not in self.latency:
- self.latency[key] = dict(total=total, spent_time=spent_time)
- return 0
-
- if self.latency[key]['total'] != total:
- spent_diff = spent_time - self.latency[key]['spent_time']
- total_diff = total - self.latency[key]['total']
- latency = float(spent_diff) / float(total_diff) * 1000
- self.latency[key]['total'] = total
- self.latency[key]['spent_time'] = spent_time
- return latency
-
- self.latency[key]['spent_time'] = spent_time
- return 0
-
-
-def fetch_data(raw_data, metrics):
- data = dict()
- for metric in metrics:
- value = raw_data
- metrics_list = metric.split('.')
- try:
- for m in metrics_list:
- value = value[m]
- except (KeyError, TypeError):
- continue
- data['_'.join(metrics_list)] = value
- return data
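The `fetch_data()` helper above is what turns Elasticsearch's nested JSON into the flat dimension keys the chart definitions reference. A self-contained illustration of that flattening (the payload is made up; `flatten` mirrors the helper rather than importing it):

```python
# Same logic as fetch_data() above, shown standalone: dotted metric paths
# become underscore-joined keys, and missing paths are skipped silently,
# so an absent metric never breaks a chart.
def flatten(raw_data, metrics):
    data = {}
    for metric in metrics:
        value = raw_data
        try:
            for part in metric.split('.'):
                value = value[part]
        except (KeyError, TypeError):
            continue
        data[metric.replace('.', '_')] = value
    return data


sample = {
    'jvm': {'mem': {'heap_used_percent': 41}},
    'indices': {'search': {'query_total': 1234}},
}
print(flatten(sample, ['jvm.mem.heap_used_percent',
                       'indices.search.query_total',
                       'no.such.path']))
# -> {'jvm_mem_heap_used_percent': 41, 'indices_search_query_total': 1234}
```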
diff --git a/collectors/python.d.plugin/elasticsearch/elasticsearch.conf b/collectors/python.d.plugin/elasticsearch/elasticsearch.conf
deleted file mode 100644
index 4058deba..00000000
--- a/collectors/python.d.plugin/elasticsearch/elasticsearch.conf
+++ /dev/null
@@ -1,83 +0,0 @@
-# netdata python.d.plugin configuration for elasticsearch stats
-#
-# This file is in YaML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 1
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed running at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# penalty: yes # the JOB's penalty
-# autodetection_retry: 0 # the JOB's re-check interval in seconds
-#
-# In addition to the above, the elasticsearch module also supports the following:
-#
-# host : 'ipaddress' # Elasticsearch server ip address or hostname.
-# port : 'port' # Port on which elasticsearch listens.
-# node_status : yes/no # Get metrics from "/_nodes/_local/stats". Enabled by default.
-# cluster_health : yes/no # Get metrics from "/_cluster/health". Enabled by default.
-# cluster_stats  : yes/no # Get metrics from "/_cluster/stats". Enabled by default.
-# indices_stats : yes/no # Get metrics from "/_cat/indices". Disabled by default.
-#
-#
-# if the URL is password protected, the following are supported:
-#
-# user: 'username'
-# pass: 'password'
-#
-# ----------------------------------------------------------------------
-# AUTO-DETECTION JOBS
-# only one of them will run (they have the same name)
-#
-local:
- host: '127.0.0.1'
- port: '9200'
diff --git a/collectors/python.d.plugin/energid/Makefile.inc b/collectors/python.d.plugin/energid/Makefile.inc
deleted file mode 100644
index 44a209d0..00000000
--- a/collectors/python.d.plugin/energid/Makefile.inc
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# install these files
-dist_python_DATA += energid/energid.chart.py
-dist_pythonconfig_DATA += energid/energid.conf
-
-# do not install these files, but include them in the distribution
-dist_noinst_DATA += energid/README.md energid/Makefile.inc
-
diff --git a/collectors/python.d.plugin/energid/README.md b/collectors/python.d.plugin/energid/README.md
deleted file mode 100644
index 73e39ae1..00000000
--- a/collectors/python.d.plugin/energid/README.md
+++ /dev/null
@@ -1,77 +0,0 @@
-<!--
-title: "Energi Core node monitoring with Netdata"
-custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/energid/README.md
-sidebar_label: "Energi Core"
--->
-
-# Energi Core node monitoring with Netdata
-
-Monitors blockchain, memory, network and unspent transaction output (UTXO) statistics.
-
-
-Because [Energi Core](https://github.com/energicryptocurrency/energi) Gen 1 & 2 are based on the original Bitcoin code and
-support a very similar JSON-RPC API, there is a good chance the module also works
-with many other forks, including bitcoind itself.
-
-Introduces several new charts:
-
-1. **Blockchain Index**
- - blocks
- - headers
-
-2. **Blockchain Difficulty**
- - diff
-
-3. **MemPool** in MiB
- - Max
- - Usage
- - TX Size
-
-4. **Secure Memory** in KiB
- - Total
- - Locked
- - Used
-
-5. **Network**
- - Connections
-
-6. **UTXO** (Unspent Transaction Output)
- - UTXO
- - Xfers (related transactions)
-
-In most secure deployments you will need to configure RPC credentials.
-However, the Energi, Bitcoin and Dash daemons are checked on startup by default.
-
-For production use you may want to increase the retry count, since daemon
-startup can take a long time.
-
-## Configuration
-
-Edit the `python.d/energid.conf` configuration file using `edit-config` from the Netdata [config
-directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
-
-```bash
-cd /etc/netdata # Replace this path with your Netdata config directory, if different
-sudo ./edit-config python.d/energid.conf
-```
-
-Sample:
-
-```yaml
-energi:
- host: '127.0.0.1'
- port: 9796
- user: energi
- pass: energi
-
-bitcoin:
- host: '127.0.0.1'
- port: 8332
- user: bitcoin
- pass: bitcoin
-```
-
----
-
-
diff --git a/collectors/python.d.plugin/energid/energid.chart.py b/collectors/python.d.plugin/energid/energid.chart.py
deleted file mode 100644
index 079c32dc..00000000
--- a/collectors/python.d.plugin/energid/energid.chart.py
+++ /dev/null
@@ -1,163 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: Energi Core / Bitcoin netdata python.d module
-# Author: Andrey Galkin <andrey@futoin.org> (andvgal)
-# SPDX-License-Identifier: GPL-3.0-or-later
-#
-# This module is designed for energid, but it should work with many other Bitcoin forks
-# which support more or less standard JSON-RPC.
-#
-
-import json
-
-from bases.FrameworkServices.UrlService import UrlService
-
-update_every = 5
-
-ORDER = [
- 'blockindex',
- 'difficulty',
- 'mempool',
- 'secmem',
- 'network',
- 'timeoffset',
- 'utxo',
- 'xfers',
-]
-
-CHARTS = {
- 'blockindex': {
- 'options': [None, 'Blockchain Index', 'count', 'blockchain', 'energi.blockindex', 'area'],
- 'lines': [
- ['blockchain_blocks', 'blocks', 'absolute'],
- ['blockchain_headers', 'headers', 'absolute'],
- ]
- },
- 'difficulty': {
- 'options': [None, 'Blockchain Difficulty', 'difficulty', 'blockchain', 'energi.difficulty', 'line'],
- 'lines': [
- ['blockchain_difficulty', 'Diff', 'absolute'],
- ],
- },
- 'mempool': {
- 'options': [None, 'MemPool', 'MiB', 'memory', 'energid.mempool', 'area'],
- 'lines': [
- ['mempool_max', 'Max', 'absolute', None, 1024 * 1024],
- ['mempool_current', 'Usage', 'absolute', None, 1024 * 1024],
- ['mempool_txsize', 'TX Size', 'absolute', None, 1024 * 1024],
- ],
- },
- 'secmem': {
- 'options': [None, 'Secure Memory', 'KiB', 'memory', 'energid.secmem', 'area'],
- 'lines': [
- ['secmem_total', 'Total', 'absolute', None, 1024],
- ['secmem_locked', 'Locked', 'absolute', None, 1024],
- ['secmem_used', 'Used', 'absolute', None, 1024],
- ],
- },
- 'network': {
- 'options': [None, 'Network', 'count', 'network', 'energid.network', 'line'],
- 'lines': [
- ['network_connections', 'Connections', 'absolute'],
- ],
- },
- 'timeoffset': {
- 'options': [None, 'Network', 'seconds', 'network', 'energid.timeoffset', 'line'],
- 'lines': [
-            ['network_timeoffset', 'offset', 'absolute'],
- ],
- },
- 'utxo': {
- 'options': [None, 'UTXO', 'count', 'UTXO', 'energid.utxo', 'line'],
- 'lines': [
- ['utxo_count', 'UTXO', 'absolute'],
- ],
- },
- 'xfers': {
- 'options': [None, 'UTXO', 'count', 'UTXO', 'energid.xfers', 'line'],
- 'lines': [
- ['utxo_xfers', 'Xfers', 'absolute'],
- ],
- },
-}
-
-METHODS = {
- 'getblockchaininfo': lambda r: {
- 'blockchain_blocks': r['blocks'],
- 'blockchain_headers': r['headers'],
- 'blockchain_difficulty': r['difficulty'],
- },
- 'getmempoolinfo': lambda r: {
- 'mempool_txcount': r['size'],
- 'mempool_txsize': r['bytes'],
- 'mempool_current': r['usage'],
- 'mempool_max': r['maxmempool'],
- },
- 'getmemoryinfo': lambda r: dict([
- ('secmem_' + k, v) for (k, v) in r['locked'].items()
- ]),
- 'getnetworkinfo': lambda r: {
- 'network_timeoffset': r['timeoffset'],
- 'network_connections': r['connections'],
- },
- 'gettxoutsetinfo': lambda r: {
- 'utxo_count': r['txouts'],
- 'utxo_xfers': r['transactions'],
- 'utxo_size': r['disk_size'],
- 'utxo_amount': r['total_amount'],
- },
-}
-
-JSON_RPC_VERSION = '1.1'
-
-
-class Service(UrlService):
- def __init__(self, configuration=None, name=None):
- UrlService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = CHARTS
- self.host = self.configuration.get('host', '127.0.0.1')
- self.port = self.configuration.get('port', 9796)
- self.url = '{scheme}://{host}:{port}'.format(
- scheme=self.configuration.get('scheme', 'http'),
- host=self.host,
- port=self.port,
- )
- self.method = 'POST'
- self.header = {
- 'Content-Type': 'application/json',
- }
-
- def _get_data(self):
- #
- # Bitcoin family speak JSON-RPC version 1.0 for maximum compatibility,
- # but uses JSON-RPC 1.1/2.0 standards for parts of the 1.0 standard that were
- # unspecified (HTTP errors and contents of 'error').
- #
- # 1.0 spec: https://www.jsonrpc.org/specification_v1
- # 2.0 spec: https://www.jsonrpc.org/specification
- #
- # The call documentation: https://github.com/energicryptocurrency/core-api-documentation
- #
- batch = []
-
- for i, method in enumerate(METHODS):
- batch.append({
- 'version': JSON_RPC_VERSION,
- 'id': i,
- 'method': method,
- 'params': [],
- })
-
- result = self._get_raw_data(body=json.dumps(batch))
-
- if not result:
- return None
-
- result = json.loads(result.decode('utf-8'))
- data = dict()
-
- for i, (_, handler) in enumerate(METHODS.items()):
- r = result[i]
- data.update(handler(r['result']))
-
- return data
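The module above sends all five RPC calls as a single positional JSON-RPC 1.1 batch and matches the replies back to `METHODS` by list index. A sketch of the request body it POSTs follows; the host, port and credentials mentioned in the comments are assumptions taken from the sample job:

```python
# Build the same batch _get_data() sends; order matters because responses
# are matched to METHODS handlers by position.
import json

methods = ['getblockchaininfo', 'getmempoolinfo', 'getmemoryinfo',
           'getnetworkinfo', 'gettxoutsetinfo']
batch = [{'version': '1.1', 'id': i, 'method': m, 'params': []}
         for i, m in enumerate(methods)]
print(json.dumps(batch, indent=2))
# POST this to e.g. http://127.0.0.1:9796 with HTTP basic auth and
# Content-Type: application/json; each reply element carries a 'result'
# object that the matching handler flattens into chart dimension keys.
```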
diff --git a/collectors/python.d.plugin/energid/energid.conf b/collectors/python.d.plugin/energid/energid.conf
deleted file mode 100644
index 3b13841f..00000000
--- a/collectors/python.d.plugin/energid/energid.conf
+++ /dev/null
@@ -1,90 +0,0 @@
-# netdata python.d.plugin configuration for energid
-#
-# This file is in YaML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 1
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed running at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# penalty: yes # the JOB's penalty
-# autodetection_retry: 0 # the JOB's re-check interval in seconds
-#
-# In addition to the above, energid also supports the following:
-#
-# host: 'IP or HOSTNAME' # type <str> the RPC host to connect to
-# port: PORT # type <int> the RPC port to connect to
-# user: 'RPC username' # type <str> the RPC username to use
-# pass: 'RPC password' # type <str> the RPC password to use
-#
-# ----------------------------------------------------------------------
-# AUTO-DETECTION JOBS
-# only one of them will run (they have the same name)
-#
-
-# Defaults:
-# host: '127.0.0.1'
-# user:
-# pass:
-#
-
-# Energi mainnet
-energi:
- port: 9796
-
-# Most likely supported Bitcoin mainnet
-bitcoin:
- port: 8332
-
-# Most likely supported Dash mainnet
-dash:
- port: 9998
diff --git a/collectors/python.d.plugin/freeradius/Makefile.inc b/collectors/python.d.plugin/freeradius/Makefile.inc
deleted file mode 100644
index 54aa6492..00000000
--- a/collectors/python.d.plugin/freeradius/Makefile.inc
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# install these files
-dist_python_DATA += freeradius/freeradius.chart.py
-dist_pythonconfig_DATA += freeradius/freeradius.conf
-
-# do not install these files, but include them in the distribution
-dist_noinst_DATA += freeradius/README.md freeradius/Makefile.inc
-
diff --git a/collectors/python.d.plugin/freeradius/README.md b/collectors/python.d.plugin/freeradius/README.md
deleted file mode 100644
index d5ec464b..00000000
--- a/collectors/python.d.plugin/freeradius/README.md
+++ /dev/null
@@ -1,90 +0,0 @@
-<!--
-title: "FreeRADIUS monitoring with Netdata"
-custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/freeradius/README.md
-sidebar_label: "FreeRADIUS"
--->
-
-# FreeRADIUS monitoring with Netdata
-
-Uses the `radclient` command to collect FreeRADIUS statistics. Running it every second is not recommended.
-
-It produces:
-
-1. **Authentication counters:**
-
- - access-accepts
- - access-rejects
- - auth-dropped-requests
- - auth-duplicate-requests
- - auth-invalid-requests
- - auth-malformed-requests
- - auth-unknown-types
-
-2. **Accounting counters:** [optional]
-
- - accounting-requests
- - accounting-responses
- - acct-dropped-requests
- - acct-duplicate-requests
- - acct-invalid-requests
- - acct-malformed-requests
- - acct-unknown-types
-
-3. **Proxy authentication counters:** [optional]
-
- - proxy-access-accepts
- - proxy-access-rejects
- - proxy-auth-dropped-requests
- - proxy-auth-duplicate-requests
- - proxy-auth-invalid-requests
- - proxy-auth-malformed-requests
- - proxy-auth-unknown-types
-
-4. **Proxy accounting counters:** [optional]
-
- - proxy-accounting-requests
- - proxy-accounting-responses
- - proxy-acct-dropped-requests
- - proxy-acct-duplicate-requests
- - proxy-acct-invalid-requests
- - proxy-acct-malformed-requests
-   - proxy-acct-unknown-types
-
-## Configuration
-
-Edit the `python.d/freeradius.conf` configuration file using `edit-config` from the Netdata [config
-directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
-
-```bash
-cd /etc/netdata # Replace this path with your Netdata config directory, if different
-sudo ./edit-config python.d/freeradius.conf
-```
-
-Sample:
-
-```yaml
-local:
- host : 'localhost'
- port : '18121'
- secret : 'adminsecret'
- acct : False # Freeradius accounting statistics.
- proxy_auth : False # Freeradius proxy authentication statistics.
- proxy_acct : False # Freeradius proxy accounting statistics.
-```
-
-**Freeradius server configuration:**
-
-The configuration for the status server is automatically created in the sites-available directory.
-By default, the server is enabled and can be queried from every client.
-FreeRADIUS only responds to status-server messages if the status-server virtual server has been enabled.
-
-To do this, create a link from the sites-enabled directory to the status file in the sites-available directory:
-
-- `cd sites-enabled`
-- `ln -s ../sites-available/status status`
-
-and then restart/reload your FreeRADIUS server.
-
----
-
-
diff --git a/collectors/python.d.plugin/freeradius/freeradius.chart.py b/collectors/python.d.plugin/freeradius/freeradius.chart.py
deleted file mode 100644
index 161d57e0..00000000
--- a/collectors/python.d.plugin/freeradius/freeradius.chart.py
+++ /dev/null
@@ -1,177 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: freeradius netdata python.d module
-# Author: ilyam8
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-import re
-from subprocess import Popen, PIPE
-
-from bases.FrameworkServices.SimpleService import SimpleService
-from bases.collection import find_binary
-
-update_every = 15
-
-PARSER = re.compile(r'((?<=-)[AP][a-zA-Z-]+) = (\d+)')
-
-RADIUS_MSG = 'Message-Authenticator = 0x00, FreeRADIUS-Statistics-Type = 15, Response-Packet-Type = Access-Accept'
-
-RADCLIENT_RETRIES = 1
-RADCLIENT_TIMEOUT = 1
-
-DEFAULT_HOST = 'localhost'
-DEFAULT_PORT = 18121
-DEFAULT_DO_ACCT = False
-DEFAULT_DO_PROXY_AUTH = False
-DEFAULT_DO_PROXY_ACCT = False
-
-ORDER = [
- 'authentication',
- 'accounting',
- 'proxy-auth',
- 'proxy-acct',
-]
-
-CHARTS = {
- 'authentication': {
- 'options': [None, 'Authentication', 'packets/s', 'authentication', 'freerad.auth', 'line'],
- 'lines': [
- ['access-accepts', None, 'incremental'],
- ['access-rejects', None, 'incremental'],
- ['auth-dropped-requests', 'dropped-requests', 'incremental'],
- ['auth-duplicate-requests', 'duplicate-requests', 'incremental'],
- ['auth-invalid-requests', 'invalid-requests', 'incremental'],
- ['auth-malformed-requests', 'malformed-requests', 'incremental'],
- ['auth-unknown-types', 'unknown-types', 'incremental']
- ]
- },
- 'accounting': {
- 'options': [None, 'Accounting', 'packets/s', 'accounting', 'freerad.acct', 'line'],
- 'lines': [
- ['accounting-requests', 'requests', 'incremental'],
- ['accounting-responses', 'responses', 'incremental'],
- ['acct-dropped-requests', 'dropped-requests', 'incremental'],
- ['acct-duplicate-requests', 'duplicate-requests', 'incremental'],
- ['acct-invalid-requests', 'invalid-requests', 'incremental'],
- ['acct-malformed-requests', 'malformed-requests', 'incremental'],
- ['acct-unknown-types', 'unknown-types', 'incremental']
- ]
- },
- 'proxy-auth': {
- 'options': [None, 'Proxy Authentication', 'packets/s', 'authentication', 'freerad.proxy.auth', 'line'],
- 'lines': [
- ['proxy-access-accepts', 'access-accepts', 'incremental'],
- ['proxy-access-rejects', 'access-rejects', 'incremental'],
- ['proxy-auth-dropped-requests', 'dropped-requests', 'incremental'],
- ['proxy-auth-duplicate-requests', 'duplicate-requests', 'incremental'],
- ['proxy-auth-invalid-requests', 'invalid-requests', 'incremental'],
- ['proxy-auth-malformed-requests', 'malformed-requests', 'incremental'],
- ['proxy-auth-unknown-types', 'unknown-types', 'incremental']
- ]
- },
- 'proxy-acct': {
- 'options': [None, 'Proxy Accounting', 'packets/s', 'accounting', 'freerad.proxy.acct', 'line'],
- 'lines': [
- ['proxy-accounting-requests', 'requests', 'incremental'],
- ['proxy-accounting-responses', 'responses', 'incremental'],
- ['proxy-acct-dropped-requests', 'dropped-requests', 'incremental'],
- ['proxy-acct-duplicate-requests', 'duplicate-requests', 'incremental'],
- ['proxy-acct-invalid-requests', 'invalid-requests', 'incremental'],
- ['proxy-acct-malformed-requests', 'malformed-requests', 'incremental'],
- ['proxy-acct-unknown-types', 'unknown-types', 'incremental']
- ]
- }
-}
-
-
-def radclient_status(radclient, retries, timeout, host, port, secret):
- # radclient -r 1 -t 1 -x 127.0.0.1:18121 status secret
-
- return '{radclient} -r {num_retries} -t {timeout} -x {host}:{port} status {secret}'.format(
- radclient=radclient,
- num_retries=retries,
- timeout=timeout,
- host=host,
- port=port,
- secret=secret,
- ).split()
-
-
-class Service(SimpleService):
- def __init__(self, configuration=None, name=None):
- SimpleService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = CHARTS
- self.host = self.configuration.get('host', DEFAULT_HOST)
- self.port = self.configuration.get('port', DEFAULT_PORT)
- self.secret = self.configuration.get('secret')
- self.do_acct = self.configuration.get('acct', DEFAULT_DO_ACCT)
- self.do_proxy_auth = self.configuration.get('proxy_auth', DEFAULT_DO_PROXY_AUTH)
- self.do_proxy_acct = self.configuration.get('proxy_acct', DEFAULT_DO_PROXY_ACCT)
- self.echo = find_binary('echo')
- self.radclient = find_binary('radclient')
- self.sub_echo = [self.echo, RADIUS_MSG]
- self.sub_radclient = radclient_status(
- self.radclient, RADCLIENT_RETRIES, RADCLIENT_TIMEOUT, self.host, self.port, self.secret,
- )
-
- def check(self):
- if not self.radclient:
- self.error("Can't locate 'radclient' binary or binary is not executable by netdata user")
- return False
-
- if not self.echo:
- self.error("Can't locate 'echo' binary or binary is not executable by netdata user")
-            return False
-
- if not self.secret:
- self.error("'secret' isn't set")
-            return False
-
- if not self.get_raw_data():
- self.error('Request returned no data. Is server alive?')
- return False
-
- if not self.do_acct:
- self.order.remove('accounting')
-
- if not self.do_proxy_auth:
- self.order.remove('proxy-auth')
-
- if not self.do_proxy_acct:
- self.order.remove('proxy-acct')
-
- return True
-
- def get_data(self):
- """
- Format data received from shell command
- :return: dict
- """
- result = self.get_raw_data()
-
- if not result:
- return None
-
- return dict(
- (key.lower(), value) for key, value in PARSER.findall(result)
- )
-
- def get_raw_data(self):
- """
- The following code is equivalent to
- 'echo "Message-Authenticator = 0x00, FreeRADIUS-Statistics-Type = 15, Response-Packet-Type = Access-Accept"
- | radclient -t 1 -r 1 host:port status secret'
- :return: str
- """
- try:
- process_echo = Popen(self.sub_echo, stdout=PIPE, stderr=PIPE, shell=False)
- process_rad = Popen(self.sub_radclient, stdin=process_echo.stdout, stdout=PIPE, stderr=PIPE, shell=False)
- process_echo.stdout.close()
- raw_result = process_rad.communicate()[0]
- except OSError:
- return None
-
-        if process_rad.returncode == 0:
- return raw_result.decode()
-
- return None
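The counters come out of a FreeRADIUS Status-Server reply, which `PARSER` reduces to attribute/value pairs keyed without the `FreeRADIUS-Total-` prefix. A small self-contained illustration against a made-up reply (not real `radclient` output):

```python
# Same regex as the module; keys are lower-cased to match the chart lines.
import re

PARSER = re.compile(r'((?<=-)[AP][a-zA-Z-]+) = (\d+)')
sample_reply = '''
    FreeRADIUS-Total-Access-Accepts = 42
    FreeRADIUS-Total-Access-Rejects = 3
    FreeRADIUS-Total-Auth-Invalid-Requests = 0
'''
print(dict((k.lower(), v) for k, v in PARSER.findall(sample_reply)))
# -> {'access-accepts': '42', 'access-rejects': '3', 'auth-invalid-requests': '0'}
```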
diff --git a/collectors/python.d.plugin/freeradius/freeradius.conf b/collectors/python.d.plugin/freeradius/freeradius.conf
deleted file mode 100644
index 74b27377..00000000
--- a/collectors/python.d.plugin/freeradius/freeradius.conf
+++ /dev/null
@@ -1,80 +0,0 @@
-# netdata python.d.plugin configuration for freeradius
-#
-# This file is in YaML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 1
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed running at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# penalty: yes # the JOB's penalty
-# autodetection_retry: 0 # the JOB's re-check interval in seconds
-#
-# Additionally to the above, freeradius also supports the following:
-#
-# host: 'host' # Default: 'localhost'. Server ip address or hostname.
-# port: 'port' # Default: '18121'. Port on which the freeradius status server listens (type = status).
-# secret: 'secret' # Default: 'adminsecret'.
-# acct: yes/no # Default: no. Freeradius accounting statistics.
-# proxy_auth: yes/no # Default: no. Freeradius proxy authentication statistics.
-# proxy_acct: yes/no # Default: no. Freeradius proxy accounting statistics.
-#
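-# A minimal example job (the values below are illustrative defaults; adjust host, port
-# and secret to match your FreeRADIUS status server):
-#
-# local:
-#  host: 'localhost'
-#  port: '18121'
-#  secret: 'adminsecret'
-#  acct: no
-#  proxy_auth: no
-#  proxy_acct: no
-#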
-# ------------------------------------------------------------------------------------------------------------------
-# Freeradius server configuration:
-# The configuration for the status server is automatically created in the sites-available directory.
-# By default, the server is enabled and can be queried from every client.
-# FreeRADIUS will only respond to status-server messages if the status-server virtual server has been enabled.
-# To do this, create a link from the sites-enabled directory to the status file in the sites-available directory:
-# cd sites-enabled
-# ln -s ../sites-available/status status
-# and restart/reload your FreeRADIUS server.
-# ------------------------------------------------------------------------------------------------------------------
diff --git a/collectors/python.d.plugin/httpcheck/Makefile.inc b/collectors/python.d.plugin/httpcheck/Makefile.inc
deleted file mode 100644
index 4a5bd856..00000000
--- a/collectors/python.d.plugin/httpcheck/Makefile.inc
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# install these files
-dist_python_DATA += httpcheck/httpcheck.chart.py
-dist_pythonconfig_DATA += httpcheck/httpcheck.conf
-
-# do not install these files, but include them in the distribution
-dist_noinst_DATA += httpcheck/README.md httpcheck/Makefile.inc
-
diff --git a/collectors/python.d.plugin/httpcheck/README.md b/collectors/python.d.plugin/httpcheck/README.md
deleted file mode 100644
index 101b96e3..00000000
--- a/collectors/python.d.plugin/httpcheck/README.md
+++ /dev/null
@@ -1,59 +0,0 @@
-<!--
-title: "HTTP endpoint monitoring with Netdata"
-custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/httpcheck/README.md
-sidebar_label: "HTTP endpoints"
--->
-
-# HTTP endpoint monitoring with Netdata
-
-Monitors a remote HTTP server for availability and response time.
-
-The following charts are drawn per job:
-
-1. **Response time** ms
-
- - Time, at 0.1 ms resolution, in which the server responds.
- If the connection fails, the value is missing.
-
-2. **Status** boolean
-
- - Connection successful
- - Unexpected content: No Regex match found in the response
- - Unexpected status code: Do we get 500 errors?
- - Connection failed: port not listening or blocked
- - Connection timed out: host or port unreachable
-
-## Configuration
-
-Edit the [`python.d/httpcheck.conf`](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/httpcheck/httpcheck.conf) configuration file using `edit-config` from the Netdata [config
-directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
-
-```bash
-cd /etc/netdata # Replace this path with your Netdata config directory, if different
-sudo ./edit-config python.d/httpcheck.conf
-```
-
-Sample configuration with its default values:
-
-```yaml
-server:
- url: 'http://host:port/path' # required
- status_accepted: # optional
- - 200
- timeout: 1 # optional, supports decimals (e.g. 0.2)
- update_every: 3 # optional
- regex: 'REGULAR_EXPRESSION' # optional, see https://docs.python.org/3/howto/regex.html
- redirect: yes # optional
-```
-
-### Notes
-
-- The status chart is primarily intended for alarms, badges, or access via the API (see the health template sketch below).
-- A system/service/firewall might block Netdata's access if a portscan or
- similar is detected.
-- This plugin is meant for simple use cases. Currently, the accuracy of the
- response time measurement is low, so use it as a reference only.
-
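-A minimal health template sketch based on the status chart (the template name and thresholds
-below are illustrative; adjust them to your setup and check the health configuration syntax of
-your Netdata version):
-
-```
-template: httpcheck_bad_connections
-on: httpcheck.status
-lookup: average -5m unaligned percentage of no_connection
-every: 10s
-warn: $this > 10
-crit: $this > 40
-info: ratio of failed connection attempts over the last 5 minutes
-```
-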
----
-
-
diff --git a/collectors/python.d.plugin/httpcheck/httpcheck.chart.py b/collectors/python.d.plugin/httpcheck/httpcheck.chart.py
deleted file mode 100644
index 75718bb6..00000000
--- a/collectors/python.d.plugin/httpcheck/httpcheck.chart.py
+++ /dev/null
@@ -1,125 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: http check netdata python.d module
-# Original Author: ccremer (github.com/ccremer)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-import re
-
-import urllib3
-
-try:
- from time import monotonic as time
-except ImportError:
- from time import time
-
-from bases.FrameworkServices.UrlService import UrlService
-
-# default module values (can be overridden per job in `config`)
-update_every = 3
-priority = 60000
-
-# Response
-HTTP_RESPONSE_TIME = 'time'
-HTTP_RESPONSE_LENGTH = 'length'
-
-# Status dimensions
-HTTP_SUCCESS = 'success'
-HTTP_BAD_CONTENT = 'bad_content'
-HTTP_BAD_STATUS = 'bad_status'
-HTTP_TIMEOUT = 'timeout'
-HTTP_NO_CONNECTION = 'no_connection'
-
-ORDER = [
- 'response_time',
- 'response_length',
- 'status',
-]
-
-CHARTS = {
- 'response_time': {
- 'options': [None, 'HTTP response time', 'milliseconds', 'response', 'httpcheck.responsetime', 'line'],
- 'lines': [
- [HTTP_RESPONSE_TIME, 'time', 'absolute', 100, 1000]
- ]
- },
- 'response_length': {
- 'options': [None, 'HTTP response body length', 'characters', 'response', 'httpcheck.responselength', 'line'],
- 'lines': [
- [HTTP_RESPONSE_LENGTH, 'length', 'absolute']
- ]
- },
- 'status': {
- 'options': [None, 'HTTP status', 'boolean', 'status', 'httpcheck.status', 'line'],
- 'lines': [
- [HTTP_SUCCESS, 'success', 'absolute'],
- [HTTP_BAD_CONTENT, 'bad content', 'absolute'],
- [HTTP_BAD_STATUS, 'bad status', 'absolute'],
- [HTTP_TIMEOUT, 'timeout', 'absolute'],
- [HTTP_NO_CONNECTION, 'no connection', 'absolute']
- ]
- }
-}
-
-
-class Service(UrlService):
- def __init__(self, configuration=None, name=None):
- UrlService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = CHARTS
- pattern = self.configuration.get('regex')
- self.regex = re.compile(pattern) if pattern else None
- self.status_codes_accepted = self.configuration.get('status_accepted', [200])
- self.follow_redirect = self.configuration.get('redirect', True)
-
- def _get_data(self):
- """
- Format data received from http request
- :return: dict
- """
- data = dict()
- data[HTTP_SUCCESS] = 0
- data[HTTP_BAD_CONTENT] = 0
- data[HTTP_BAD_STATUS] = 0
- data[HTTP_TIMEOUT] = 0
- data[HTTP_NO_CONNECTION] = 0
- url = self.url
- try:
- start = time()
- status, content = self._get_raw_data_with_status(retries=1 if self.follow_redirect else False,
- redirect=self.follow_redirect)
- diff = time() - start
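- # elapsed time is stored in units of 0.1 ms; the chart's multiplier/divisor (100/1000) renders it as milliseconds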
- data[HTTP_RESPONSE_TIME] = max(round(diff * 10000), 0)
- self.debug('Url: {url}. Host responded with status code {code} in {diff} s'.format(
- url=url, code=status, diff=diff
- ))
- self.process_response(content, data, status)
-
- except urllib3.exceptions.NewConnectionError as error:
- self.debug('Connection failed: {url}. Error: {error}'.format(url=url, error=error))
- data[HTTP_NO_CONNECTION] = 1
-
- except (urllib3.exceptions.TimeoutError, urllib3.exceptions.PoolError) as error:
- self.debug('Connection timed out: {url}. Error: {error}'.format(url=url, error=error))
- data[HTTP_TIMEOUT] = 1
-
- except urllib3.exceptions.HTTPError as error:
- self.debug('Connection failed: {url}. Error: {error}'.format(url=url, error=error))
- data[HTTP_NO_CONNECTION] = 1
-
- except (TypeError, AttributeError) as error:
- self.error('Url: {url}. Error: {error}'.format(url=url, error=error))
- return None
-
- return data
-
- def process_response(self, content, data, status):
- data[HTTP_RESPONSE_LENGTH] = len(content)
- self.debug('Content: \n\n{content}\n'.format(content=content))
- if status in self.status_codes_accepted:
- if self.regex and self.regex.search(content) is None:
- self.debug('No match for regex "{regex}" found'.format(regex=self.regex.pattern))
- data[HTTP_BAD_CONTENT] = 1
- else:
- data[HTTP_SUCCESS] = 1
- else:
- data[HTTP_BAD_STATUS] = 1
diff --git a/collectors/python.d.plugin/httpcheck/httpcheck.conf b/collectors/python.d.plugin/httpcheck/httpcheck.conf
deleted file mode 100644
index 3f33bf65..00000000
--- a/collectors/python.d.plugin/httpcheck/httpcheck.conf
+++ /dev/null
@@ -1,107 +0,0 @@
-# netdata python.d.plugin configuration for httpcheck
-#
-# This file is in YaML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the httpcheck default of 3 seconds is used.
-# update_every: 1
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# chart_cleanup sets the default chart cleanup interval in iterations.
-# A chart is marked as obsolete if it has not been updated
-# 'chart_cleanup' iterations in a row.
-# They will be hidden immediately (not offered to the dashboard viewer, not
-# streamed upstream and not archived to external databases) and deleted one
-# hour later (configurable from netdata.conf).
-# -- For this plugin, cleanup MUST be disabled, otherwise we lose response
-# time charts
-chart_cleanup: 0
-
-# Autodetection and retries do not work for this plugin
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed running at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# -------------------------------
-# ATTENTION: Any valid configuration will be accepted, even if initial connection fails!
-# -------------------------------
-#
-# There is intentionally no default config, e.g. for 'localhost'
-
-# job_name:
-# name: myname # [optional] the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 3 # [optional] the JOB's data collection frequency
-# priority: 60000 # [optional] the JOB's order on the dashboard
-# penalty: yes # the JOB's penalty
-# timeout: 1 # [optional] the timeout when connecting, supports decimals (e.g. 0.5)
-# url: 'http[s]://host-ip-or-dns[:port][path]'
-# # [required] the remote host url to connect to. If [:port] is missing, it defaults to 80
-# # for HTTP and 443 for HTTPS. [path] is optional too, defaults to /
-# header: {'Content-Type': 'application/json'}
-# # [optional] the HTTP header sent with the request.
-# method: GET # [optional] the HTTP request method (POST, PUT, DELETE, HEAD etc.)
-# redirect: yes # [optional] If the remote host returns 3xx status codes, the redirection url will be
-# # followed (default).
-# body: {'key': 'value'} # [optional] the body sent with the request (e.g. POST, PUT, PATCH).
-# status_accepted: # [optional] By default, 200 is accepted. Anything else will result in 'bad status' in the
-# # status chart, however: The response time will still be > 0, since the
-# # host responded with something.
-# # If redirect is enabled, the accepted status will be checked against the redirected page.
-# - 200 # Multiple status codes are possible. If you specify 'status_accepted', you would still
-# # need to add '200'. E.g. 'status_accepted: [301]' will trigger an error in 'bad status'
-# # if code is 200. Do specify numerical entries such as 200, not 'OK'.
-# regex: None # [optional] If the status code is accepted, the content of the response will be searched for this
-# # regex (if defined). Be aware that you may need to escape the regex string. If redirect is enabled,
-# # the regex will be matched to the redirected page, not the initial 3xx response.
-
-# Simple example:
-#
-# jira:
-# url: 'https://jira.localdomain/'
-
-
-# Complex example:
-#
-# cool_website:
-# url: 'http://cool.website:8080/home'
-# status_accepted:
-# - 200
-# - 204
-# regex: <title>My cool website!<\/title>
-# timeout: 2
-
-# This plugin is intended for simple cases. Currently, the accuracy of the response time is low; use it as a reference only.
-
diff --git a/collectors/python.d.plugin/isc_dhcpd/Makefile.inc b/collectors/python.d.plugin/isc_dhcpd/Makefile.inc
deleted file mode 100644
index 44343fc9..00000000
--- a/collectors/python.d.plugin/isc_dhcpd/Makefile.inc
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# install these files
-dist_python_DATA += isc_dhcpd/isc_dhcpd.chart.py
-dist_pythonconfig_DATA += isc_dhcpd/isc_dhcpd.conf
-
-# do not install these files, but include them in the distribution
-dist_noinst_DATA += isc_dhcpd/README.md isc_dhcpd/Makefile.inc
-
diff --git a/collectors/python.d.plugin/isc_dhcpd/README.md b/collectors/python.d.plugin/isc_dhcpd/README.md
deleted file mode 100644
index 712943d9..00000000
--- a/collectors/python.d.plugin/isc_dhcpd/README.md
+++ /dev/null
@@ -1,57 +0,0 @@
-<!--
-title: "ISC DHCP monitoring with Netdata"
-custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/isc_dhcpd/README.md
-sidebar_label: "ISC DHCP"
--->
-
-# ISC DHCP monitoring with Netdata
-
-Monitors the leases database to show all active leases for given pools.
-
-## Requirements
-
-- the dhcpd leases file MUST BE readable by Netdata
-- pools MUST BE in CIDR or IP range format
-- the `python-ipaddress` package is needed when running on Python 2
-
-It produces:
-
-1. **Pools utilization** Aggregate chart for all pools.
-
- - utilization in percent
-
-2. **Total leases**
-
- - leases (overall number of leases for all pools)
-
-3. **Active leases** for every pool
-
- - leases (number of active leases in pool)
-
-## Configuration
-
-Edit the `python.d/isc_dhcpd.conf` configuration file using `edit-config` from the Netdata [config
-directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
-
-```bash
-cd /etc/netdata # Replace this path with your Netdata config directory, if different
-sudo ./edit-config python.d/isc_dhcpd.conf
-```
-
-Sample:
-
-```yaml
-local:
- leases_path: '/var/lib/dhcp/dhcpd.leases'
- pools:
- office: '192.168.2.0/24' # name(dimension): pool in CIDR format
- wifi: '192.168.3.10-192.168.3.20' # name(dimension): pool in IP Range format
- 192.168.4.0/24: '192.168.4.0/24' # name(dimension): pool in CIDR format
- wifi-guest: '192.168.5.0/24 192.168.6.10-192.168.6.20' # name(dimension): pool in CIDR + IP Range format
-```
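-
-For example, with the configuration above the `office` pool (`192.168.2.0/24`) has 254 usable
-addresses, so 127 active leases would be shown as 50% on the pools utilization chart.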
-
-The module will not work if no configuration is given.
-
----
-
-
diff --git a/collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.chart.py b/collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.chart.py
deleted file mode 100644
index 099c7d4e..00000000
--- a/collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.chart.py
+++ /dev/null
@@ -1,269 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: isc dhcpd lease netdata python.d module
-# Author: ilyam8
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-import os
-import re
-import time
-
-try:
- import ipaddress
-
- HAVE_IP_ADDRESS = True
-except ImportError:
- HAVE_IP_ADDRESS = False
-
-from collections import defaultdict
-from copy import deepcopy
-
-from bases.FrameworkServices.SimpleService import SimpleService
-
-ORDER = [
- 'pools_utilization',
- 'pools_active_leases',
- 'leases_total',
-]
-
-CHARTS = {
- 'pools_utilization': {
- 'options': [None, 'Pools Utilization', 'percentage', 'utilization', 'isc_dhcpd.utilization', 'line'],
- 'lines': []
- },
- 'pools_active_leases': {
- 'options': [None, 'Active Leases Per Pool', 'leases', 'active leases', 'isc_dhcpd.active_leases', 'line'],
- 'lines': []
- },
- 'leases_total': {
- 'options': [None, 'All Active Leases', 'leases', 'active leases', 'isc_dhcpd.leases_total', 'line'],
- 'lines': [
- ['leases_total', 'leases', 'absolute']
- ],
- 'variables': [
- ['leases_size']
- ]
- }
-}
-
-POOL_CIDR = "CIDR"
-POOL_IP_RANGE = "IP_RANGE"
-POOL_UNKNOWN = "UNKNOWN"
-
-def detect_ip_type(ip):
- ip_type = ip.split("-")
- if len(ip_type) == 1:
- return POOL_CIDR
- elif len(ip_type) == 2:
- return POOL_IP_RANGE
- else:
- return POOL_UNKNOWN
-
-
-class DhcpdLeasesFile:
- def __init__(self, path):
- self.path = path
- self.mod_time = 0
- self.size = 0
-
- def is_valid(self):
- return os.path.isfile(self.path) and os.access(self.path, os.R_OK)
-
- def is_changed(self):
- mod_time = os.path.getmtime(self.path)
- if mod_time != self.mod_time:
- self.mod_time = mod_time
- self.size = int(os.path.getsize(self.path) / 1024)
- return True
- return False
-
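- # A typical dhcpd.leases entry parsed below looks roughly like this (illustrative):
- #
- #   lease 192.168.2.10 {
- #     starts 3 2022/06/08 10:00:00;
- #     ends 3 2022/06/08 22:00:00;
- #     binding state active;
- #     hardware ethernet 00:11:22:33:44:55;
- #   }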
- def get_data(self):
- try:
- with open(self.path) as leases:
- result = defaultdict(dict)
- for row in leases:
- row = row.strip()
- if row.startswith('lease'):
- address = row[6:-2]
- elif row.startswith('iaaddr'):
- address = row[7:-2]
- elif row.startswith('ends'):
- result[address]['ends'] = row[5:-1]
- elif row.startswith('binding state'):
- result[address]['state'] = row[14:-1]
- return dict((k, v) for k, v in result.items() if len(v) == 2)
- except (OSError, IOError):
- return None
-
-
-class Pool:
- def __init__(self, name, network):
- self.id = re.sub(r'[:/.-]+', '_', name)
- self.name = name
-
- self.networks = list()
- for network in network.split(" "):
- if not network:
- continue
-
- ip_type = detect_ip_type(ip=network)
- if ip_type == POOL_CIDR:
- self.networks.append(PoolCIDR(network=network))
- elif ip_type == POOL_IP_RANGE:
- self.networks.append(PoolIPRange(ip_range=network))
- else:
- raise ValueError('Network ({0}) incorrect syntax, expect CIDR or IPRange format.'.format(network))
-
- def num_hosts(self):
- return sum([network.num_hosts() for network in self.networks])
-
- def __contains__(self, item):
- for network in self.networks:
- if item in network:
- return True
- return False
-
-
-class PoolCIDR:
- def __init__(self, network):
- self.network = ipaddress.ip_network(address=u'%s' % network)
-
- def num_hosts(self):
- return self.network.num_addresses - 2
-
- def __contains__(self, item):
- return item.address in self.network
-
-
-class PoolIPRange:
- def __init__(self, ip_range):
- ip_range = ip_range.split("-")
- self.networks = list(self._summarize_address_range(ip_range[0], ip_range[1]))
-
- @staticmethod
- def ip_address(ip):
- return ipaddress.ip_address(u'%s' % ip)
-
- def _summarize_address_range(self, first, last):
- address_first = self.ip_address(first)
- address_last = self.ip_address(last)
- return ipaddress.summarize_address_range(address_first, address_last)
-
- def num_hosts(self):
- return sum([network.num_addresses for network in self.networks])
-
- def __contains__(self, item):
- for network in self.networks:
- if item.address in network:
- return True
- return False
-
-
-class Lease:
- def __init__(self, address, ends, state):
- self.address = ipaddress.ip_address(address=u'%s' % address)
- self.ends = ends
- self.state = state
-
- def is_active(self, current_time):
- # lease_end_time might be epoch
- if self.ends.startswith('epoch'):
- epoch = int(self.ends.split()[1].replace(';', ''))
- return epoch - current_time > 0
- # max. int for lease-time causes lease to expire in year 2038.
- # dhcpd puts 'never' in the ends section of active lease
- elif self.ends == 'never':
- return True
- return time.mktime(time.strptime(self.ends, '%w %Y/%m/%d %H:%M:%S')) - current_time > 0
-
- def is_valid(self):
- return self.state == 'active'
-
-
-class Service(SimpleService):
- def __init__(self, configuration=None, name=None):
- SimpleService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = deepcopy(CHARTS)
- lease_path = self.configuration.get('leases_path', '/var/lib/dhcp/dhcpd.leases')
- self.dhcpd_leases = DhcpdLeasesFile(path=lease_path)
- self.pools = list()
- self.data = dict()
-
- # Will work only with 'default' db-time-format (weekday year/month/day hour:minute:second)
- # TODO: update algorithm to parse correctly 'local' db-time-format
-
- def check(self):
- if not HAVE_IP_ADDRESS:
- self.error("'python-ipaddress' package is needed")
- return False
-
- if not self.dhcpd_leases.is_valid():
- self.error("Make sure '{path}' is exist and readable by netdata".format(path=self.dhcpd_leases.path))
- return False
-
- pools = self.configuration.get('pools')
- if not pools:
- self.error('Pools are not defined')
- return False
-
- for pool in pools:
- try:
- new_pool = Pool(name=pool, network=pools[pool])
- except ValueError as error:
- self.error("'{pool}' was removed, error: {error}".format(pool=pools[pool], error=error))
- else:
- self.pools.append(new_pool)
-
- self.create_charts()
- return bool(self.pools)
-
- def get_data(self):
- """
- :return: dict
- """
- if not self.dhcpd_leases.is_changed():
- return self.data
-
- raw_leases = self.dhcpd_leases.get_data()
- if not raw_leases:
- self.data = dict()
- return None
-
- active_leases = list()
- current_time = time.mktime(time.gmtime())
-
- for address in raw_leases:
- try:
- new_lease = Lease(address, **raw_leases[address])
- except ValueError:
- continue
- else:
- if new_lease.is_active(current_time) and new_lease.is_valid():
- active_leases.append(new_lease)
-
- for pool in self.pools:
- count = len([ip for ip in active_leases if ip in pool])
- self.data[pool.id + '_active_leases'] = count
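- # utilization is stored in hundredths of a percent; the chart dimension's divisor of 100 displays it as a percentage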
- self.data[pool.id + '_utilization'] = float(count) / pool.num_hosts() * 10000
-
- self.data['leases_size'] = self.dhcpd_leases.size
- self.data['leases_total'] = len(active_leases)
-
- return self.data
-
- def create_charts(self):
- for pool in self.pools:
- dim = [
- pool.id + '_utilization',
- pool.name,
- 'absolute',
- 1,
- 100,
- ]
- self.definitions['pools_utilization']['lines'].append(dim)
-
- dim = [
- pool.id + '_active_leases',
- pool.name,
- ]
- self.definitions['pools_active_leases']['lines'].append(dim)
diff --git a/collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.conf b/collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.conf
deleted file mode 100644
index c700947b..00000000
--- a/collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.conf
+++ /dev/null
@@ -1,80 +0,0 @@
-# netdata python.d.plugin configuration for isc dhcpd leases
-#
-# This file is in YaML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 1
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed running at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# penalty: yes # the JOB's penalty
-# autodetection_retry: 0 # the JOB's re-check interval in seconds
-#
-# Additionally to the above, isc_dhcpd supports the following:
-#
-# leases_path: 'PATH' # the path to dhcpd.leases file
-# pools:
-# office: '192.168.2.0/24' # name(dimension): pool in CIDR format
-# wifi: '192.168.3.10-192.168.3.20' # name(dimension): pool in IP Range format
-# 192.168.4.0/24: '192.168.4.0/24' # name(dimension): pool in CIDR format
-# wifi-guest: '192.168.5.0/24 192.168.6.10-192.168.6.20' # name(dimension): pool in CIDR + IP Range format
-#
-#-----------------------------------------------------------------------
-# IMPORTANT notes
-#
-# 1. Make sure leases file is readable by netdata.
-# 2. Current implementation works only with 'default' db-time-format
-# (weekday year/month/day hour:minute:second).
-# This is the default, so it will work in most cases.
-# 3. Pools MUST BE in CIDR or IP Range format.
-#
-# ----------------------------------------------------------------------
diff --git a/collectors/python.d.plugin/mysql/Makefile.inc b/collectors/python.d.plugin/mysql/Makefile.inc
deleted file mode 100644
index 03e8b65e..00000000
--- a/collectors/python.d.plugin/mysql/Makefile.inc
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# install these files
-dist_python_DATA += mysql/mysql.chart.py
-dist_pythonconfig_DATA += mysql/mysql.conf
-
-# do not install these files, but include them in the distribution
-dist_noinst_DATA += mysql/README.md mysql/Makefile.inc
-
diff --git a/collectors/python.d.plugin/mysql/README.md b/collectors/python.d.plugin/mysql/README.md
deleted file mode 100644
index 1ba794ad..00000000
--- a/collectors/python.d.plugin/mysql/README.md
+++ /dev/null
@@ -1,396 +0,0 @@
-<!--
-title: "MySQL monitoring with Netdata"
-custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/mysql/README.md
-sidebar_label: "MySQL"
--->
-
-# MySQL monitoring with Netdata
-
-Monitors one or more MySQL servers.
-
-## Requirements
-
-- python library [MySQLdb](https://github.com/PyMySQL/mysqlclient-python) (faster) or [PyMySQL](https://github.com/PyMySQL/PyMySQL) (slower)
-- a `netdata` local user with permission to connect to the MySQL server.
-
-To create the `netdata` user, execute the following in the MySQL shell:
-
-```sh
-create user 'netdata'@'localhost';
-grant usage, replication client on *.* to 'netdata'@'localhost';
-flush privileges;
-```
-The `netdata` user will have the ability to connect to the MySQL server on `localhost` without a password.
-It will only be able to gather MySQL statistics without being able to alter or affect MySQL operations in any way.
-
-This module will produce the following charts (if data is available):
-
-1. **Bandwidth** in kilobits/s
-
- - in
- - out
-
-2. **Queries** in queries/sec
-
- - queries
- - questions
- - slow queries
-
-3. **Queries By Type** in queries/s
-
- - select
- - delete
- - update
- - insert
- - cache hits
- - replace
-
-4. **Handlers** in handlers/s
-
- - commit
- - delete
- - prepare
- - read first
- - read key
- - read next
- - read prev
- - read rnd
- - read rnd next
- - rollback
- - savepoint
- - savepoint rollback
- - update
- - write
-
-5. **Table Locks** in locks/s
-
- - immediate
- - waited
-
-6. **Table Select Join Issues** in joins/s
-
- - full join
- - full range join
- - range
- - range check
- - scan
-
-7. **Table Sort Issues** in issues/s
-
- - merge passes
- - range
- - scan
-
-8. **Tmp Operations** in created/s
-
- - disk tables
- - files
- - tables
-
-9. **Connections** in connections/s
-
- - all
- - aborted
-
-10. **Connections Active** in connections/s
-
- - active
- - limit
- - max active
-
-11. **Binlog Cache** in transactions/s
-
- - disk
- - all
-
-12. **Threads** in threads
-
- - connected
- - cached
- - running
-
-13. **Threads Creation Rate** in threads/s
-
- - created
-
-14. **Threads Cache Misses** in misses
-
- - misses
-
-15. **InnoDB I/O Bandwidth** in KiB/s
-
- - read
- - write
-
-16. **InnoDB I/O Operations** in operations/s
-
- - reads
- - writes
- - fsyncs
-
-17. **InnoDB Pending I/O Operations** in operations
-
- - reads
- - writes
- - fsyncs
-
-18. **InnoDB Log Operations** in operations/s
-
- - waits
- - write requests
- - writes
-
-19. **InnoDB OS Log Pending Operations** in operations
-
- - fsyncs
- - writes
-
-20. **InnoDB OS Log Operations** in operations/s
-
- - fsyncs
-
-21. **InnoDB OS Log Bandwidth** in KiB/s
-
- - write
-
-22. **InnoDB Current Row Locks** in operations
-
- - current waits
-
-23. **InnoDB Row Operations** in operations/s
-
- - inserted
- - read
- - updated
- - deleted
-
-24. **InnoDB Buffer Pool Pages** in pages
-
- - data
- - dirty
- - free
- - misc
- - total
-
-25. **InnoDB Buffer Pool Flush Pages Requests** in requests/s
-
- - flush pages
-
-26. **InnoDB Buffer Pool Bytes** in MiB
-
- - data
- - dirty
-
-27. **InnoDB Buffer Pool Operations** in operations/s
-
- - disk reads
- - wait free
-
-28. **QCache Operations** in queries/s
-
- - hits
- - lowmem prunes
- - inserts
- - no caches
-
-29. **QCache Queries in Cache** in queries
-
- - queries
-
-30. **QCache Free Memory** in MiB
-
- - free
-
-31. **QCache Memory Blocks** in blocks
-
- - free
- - total
-
-32. **MyISAM Key Cache Blocks** in blocks
-
- - unused
- - used
- - not flushed
-
-33. **MyISAM Key Cache Requests** in requests/s
-
- - reads
- - writes
-
-34. **MyISAM Key Cache Disk Operations** in operations/s
-
- - reads
- - writes
-
-35. **Open Files** in files
-
- - files
-
-36. **Opened Files Rate** in files/s
-
- - files
-
-37. **Binlog Statement Cache** in statements/s
-
- - disk
- - all
-
-38. **Connection Errors** in errors/s
-
- - accept
- - internal
- - max
- - peer addr
- - select
- - tcpwrap
-
-39. **Slave Behind Seconds** in seconds
-
- - time
-
-40. **I/O / SQL Thread Running State** in boolean
-
- - sql
- - io
-
-41. **Galera Replicated Writesets** in writesets/s
-
- - rx
- - tx
-
-42. **Galera Replicated Bytes** in KiB/s
-
- - rx
- - tx
-
-43. **Galera Queue** in writesets
-
- - rx
- - tx
-
-44. **Galera Replication Conflicts** in transactions
-
- - bf aborts
- - cert fails
-
-45. **Galera Flow Control** in ms
-
- - paused
-
-46. **Galera Cluster Status** in status
-
- - status
-
-47. **Galera Cluster State** in state
-
- - state
-
-48. **Galera Number of Nodes in the Cluster** in num
-
- - nodes
-
-49. **Galera Total Weight of the Current Members in the Cluster** in weight
-
- - weight
-
-50. **Galera Whether the Node is Connected to the Cluster** in boolean
-
- - connected
-
-51. **Galera Whether the Node is Ready to Accept Queries** in boolean
-
- - ready
-
-52. **Galera Open Transactions** in num
-
- - open transactions
-
-53. **Galera Total Number of WSRep (applier/rollbacker) Threads** in num
-
- - threads
-
-54. **Users CPU time** in percentage
-
- - users
-
-**Per user statistics:**
-
-1. **Rows Operations** in operations/s
-
- - read
- - send
- - updated
- - inserted
- - deleted
-
-2. **Commands** in commands/s
-
- - select
- - update
- - other
-
-## Configuration
-
-Edit the `python.d/mysql.conf` configuration file using `edit-config` from the Netdata [config
-directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
-
-```bash
-cd /etc/netdata # Replace this path with your Netdata config directory, if different
-sudo ./edit-config python.d/mysql.conf
-```
-
-You can provide, per server, the following:
-
-1. username which has access to the database (defaults to 'root')
-2. password (defaults to none)
-3. mysql my.cnf configuration file
-4. mysql socket (optional)
-5. mysql host (ip or hostname)
-6. mysql port (defaults to 3306)
-7. ssl connection parameters (see the example after this list)
-
- - key: the path name of the client private key file.
- - cert: the path name of the client public key certificate file.
- - ca: the path name of the Certificate Authority (CA) certificate file. This option, if used, must specify the
- same certificate used by the server.
- - capath: the path name of the directory that contains trusted SSL CA certificate files.
- - cipher: the list of permitted ciphers for SSL encryption.
-
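-For example, a hypothetical job using an SSL connection could look like this (the host name and
-certificate paths are placeholders):
-
-```yaml
-remote_ssl:
- user : 'netdata'
- pass : 'password'
- host : 'db.example.com'
- port : 3306
- ssl:
-  key : '/etc/ssl/mysql/client-key.pem'
-  cert : '/etc/ssl/mysql/client-cert.pem'
-  ca : '/etc/ssl/mysql/ca.pem'
-```
-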
-Here is an example for 3 servers:
-
-```yaml
-update_every : 10
-priority : 90100
-
-local:
- 'my.cnf' : '/etc/mysql/my.cnf'
- priority : 90000
-
-local_2:
- user : 'root'
- pass : 'blablablabla'
- socket : '/var/run/mysqld/mysqld.sock'
- update_every : 1
-
-remote:
- user : 'admin'
- pass : 'bla'
- host : 'example.org'
- port : 9000
-```
-
-If no configuration is given, the module will attempt to connect to the MySQL server via a unix socket at
-`/var/run/mysqld/mysqld.sock` without a password, using the username `root` or `netdata` (permissions for the `netdata` user were granted in the Requirements section of this document).
-
-The `userstats` charts work only if you enable the plugin in the MariaDB server and set the proper MySQL privileges (SUPER or
-PROCESS). For more details, please check the [MariaDB User Statistics
-page](https://mariadb.com/kb/en/library/user-statistics/).
-
----
-
-
diff --git a/collectors/python.d.plugin/mysql/mysql.chart.py b/collectors/python.d.plugin/mysql/mysql.chart.py
deleted file mode 100644
index e8c03cb0..00000000
--- a/collectors/python.d.plugin/mysql/mysql.chart.py
+++ /dev/null
@@ -1,976 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: MySQL netdata python.d module
-# Author: Pawel Krupa (paulfantom)
-# Author: Ilya Mashchenko (ilyam8)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from bases.FrameworkServices.MySQLService import MySQLService
-
-# query executed on MySQL server
-QUERY_GLOBAL = 'SHOW GLOBAL STATUS;'
-QUERY_SLAVE = 'SHOW SLAVE STATUS;'
-QUERY_VARIABLES = 'SHOW GLOBAL VARIABLES LIKE \'max_connections\';'
-QUERY_USER_STATISTICS = 'SHOW USER_STATISTICS;'
-
-GLOBAL_STATS = [
- 'Bytes_received',
- 'Bytes_sent',
- 'Queries',
- 'Questions',
- 'Slow_queries',
- 'Handler_commit',
- 'Handler_delete',
- 'Handler_prepare',
- 'Handler_read_first',
- 'Handler_read_key',
- 'Handler_read_next',
- 'Handler_read_prev',
- 'Handler_read_rnd',
- 'Handler_read_rnd_next',
- 'Handler_rollback',
- 'Handler_savepoint',
- 'Handler_savepoint_rollback',
- 'Handler_update',
- 'Handler_write',
- 'Table_locks_immediate',
- 'Table_locks_waited',
- 'Select_full_join',
- 'Select_full_range_join',
- 'Select_range',
- 'Select_range_check',
- 'Select_scan',
- 'Sort_merge_passes',
- 'Sort_range',
- 'Sort_scan',
- 'Created_tmp_disk_tables',
- 'Created_tmp_files',
- 'Created_tmp_tables',
- 'Connections',
- 'Aborted_connects',
- 'Max_used_connections',
- 'Binlog_cache_disk_use',
- 'Binlog_cache_use',
- 'Threads_connected',
- 'Threads_created',
- 'Threads_cached',
- 'Threads_running',
- 'Thread_cache_misses',
- 'Innodb_data_read',
- 'Innodb_data_written',
- 'Innodb_data_reads',
- 'Innodb_data_writes',
- 'Innodb_data_fsyncs',
- 'Innodb_data_pending_reads',
- 'Innodb_data_pending_writes',
- 'Innodb_data_pending_fsyncs',
- 'Innodb_log_waits',
- 'Innodb_log_write_requests',
- 'Innodb_log_writes',
- 'Innodb_os_log_fsyncs',
- 'Innodb_os_log_pending_fsyncs',
- 'Innodb_os_log_pending_writes',
- 'Innodb_os_log_written',
- 'Innodb_row_lock_current_waits',
- 'Innodb_rows_inserted',
- 'Innodb_rows_read',
- 'Innodb_rows_updated',
- 'Innodb_rows_deleted',
- 'Innodb_buffer_pool_pages_data',
- 'Innodb_buffer_pool_pages_dirty',
- 'Innodb_buffer_pool_pages_free',
- 'Innodb_buffer_pool_pages_flushed',
- 'Innodb_buffer_pool_pages_misc',
- 'Innodb_buffer_pool_pages_total',
- 'Innodb_buffer_pool_bytes_data',
- 'Innodb_buffer_pool_bytes_dirty',
- 'Innodb_buffer_pool_read_ahead',
- 'Innodb_buffer_pool_read_ahead_evicted',
- 'Innodb_buffer_pool_read_ahead_rnd',
- 'Innodb_buffer_pool_read_requests',
- 'Innodb_buffer_pool_write_requests',
- 'Innodb_buffer_pool_reads',
- 'Innodb_buffer_pool_wait_free',
- 'Innodb_deadlocks',
- 'Qcache_hits',
- 'Qcache_lowmem_prunes',
- 'Qcache_inserts',
- 'Qcache_not_cached',
- 'Qcache_queries_in_cache',
- 'Qcache_free_memory',
- 'Qcache_free_blocks',
- 'Qcache_total_blocks',
- 'Key_blocks_unused',
- 'Key_blocks_used',
- 'Key_blocks_not_flushed',
- 'Key_read_requests',
- 'Key_write_requests',
- 'Key_reads',
- 'Key_writes',
- 'Open_files',
- 'Opened_files',
- 'Binlog_stmt_cache_disk_use',
- 'Binlog_stmt_cache_use',
- 'Connection_errors_accept',
- 'Connection_errors_internal',
- 'Connection_errors_max_connections',
- 'Connection_errors_peer_address',
- 'Connection_errors_select',
- 'Connection_errors_tcpwrap',
- 'Com_delete',
- 'Com_insert',
- 'Com_select',
- 'Com_update',
- 'Com_replace'
-]
-
-GALERA_STATS = [
- 'wsrep_local_recv_queue',
- 'wsrep_local_send_queue',
- 'wsrep_received',
- 'wsrep_replicated',
- 'wsrep_received_bytes',
- 'wsrep_replicated_bytes',
- 'wsrep_local_bf_aborts',
- 'wsrep_local_cert_failures',
- 'wsrep_flow_control_paused_ns',
- 'wsrep_cluster_weight',
- 'wsrep_cluster_size',
- 'wsrep_cluster_status',
- 'wsrep_local_state',
- 'wsrep_open_transactions',
- 'wsrep_connected',
- 'wsrep_ready',
- 'wsrep_thread_count'
-]
-
-
-def slave_seconds(value):
- try:
- return int(value)
- except (TypeError, ValueError):
- return -1
-
-
-def slave_running(value):
- return 1 if value == 'Yes' else -1
-
-
-SLAVE_STATS = [
- ('Seconds_Behind_Master', slave_seconds),
- ('Slave_SQL_Running', slave_running),
- ('Slave_IO_Running', slave_running)
-]
-
-USER_STATISTICS = [
- 'Select_commands',
- 'Update_commands',
- 'Other_commands',
- 'Cpu_time',
- 'Rows_read',
- 'Rows_sent',
- 'Rows_deleted',
- 'Rows_inserted',
- 'Rows_updated'
-]
-
-VARIABLES = [
- 'max_connections'
-]
-
-ORDER = [
- 'net',
- 'queries',
- 'queries_type',
- 'handlers',
- 'table_locks',
- 'join_issues',
- 'sort_issues',
- 'tmp',
- 'connections',
- 'connections_active',
- 'connection_errors',
- 'binlog_cache',
- 'binlog_stmt_cache',
- 'threads',
- 'threads_creation_rate',
- 'thread_cache_misses',
- 'innodb_io',
- 'innodb_io_ops',
- 'innodb_io_pending_ops',
- 'innodb_log',
- 'innodb_os_log',
- 'innodb_os_log_fsync_writes',
- 'innodb_os_log_io',
- 'innodb_cur_row_lock',
- 'innodb_deadlocks',
- 'innodb_rows',
- 'innodb_buffer_pool_pages',
- 'innodb_buffer_pool_flush_pages_requests',
- 'innodb_buffer_pool_bytes',
- 'innodb_buffer_pool_read_ahead',
- 'innodb_buffer_pool_reqs',
- 'innodb_buffer_pool_ops',
- 'qcache_ops',
- 'qcache',
- 'qcache_freemem',
- 'qcache_memblocks',
- 'key_blocks',
- 'key_requests',
- 'key_disk_ops',
- 'files',
- 'files_rate',
- 'slave_behind',
- 'slave_status',
- 'galera_writesets',
- 'galera_bytes',
- 'galera_queue',
- 'galera_conflicts',
- 'galera_flow_control',
- 'galera_cluster_status',
- 'galera_cluster_state',
- 'galera_cluster_size',
- 'galera_cluster_weight',
- 'galera_connected',
- 'galera_ready',
- 'galera_open_transactions',
- 'galera_thread_count',
- 'userstats_cpu',
-]
-
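-# Note: a negative dimension multiplier (e.g. -1 or -8) draws that dimension below the zero line,
-# which is how paired in/out and read/write values are mirrored on the charts below.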
-CHARTS = {
- 'net': {
- 'options': [None, 'Bandwidth', 'kilobits/s', 'bandwidth', 'mysql.net', 'area'],
- 'lines': [
- ['Bytes_received', 'in', 'incremental', 8, 1000],
- ['Bytes_sent', 'out', 'incremental', -8, 1000]
- ]
- },
- 'queries': {
- 'options': [None, 'Queries', 'queries/s', 'queries', 'mysql.queries', 'line'],
- 'lines': [
- ['Queries', 'queries', 'incremental'],
- ['Questions', 'questions', 'incremental'],
- ['Slow_queries', 'slow_queries', 'incremental']
- ]
- },
- 'queries_type': {
- 'options': [None, 'Query Type', 'queries/s', 'query_types', 'mysql.queries_type', 'stacked'],
- 'lines': [
- ['Com_select', 'select', 'incremental'],
- ['Com_delete', 'delete', 'incremental'],
- ['Com_update', 'update', 'incremental'],
- ['Com_insert', 'insert', 'incremental'],
- ['Qcache_hits', 'cache_hits', 'incremental'],
- ['Com_replace', 'replace', 'incremental']
- ]
- },
- 'handlers': {
- 'options': [None, 'Handlers', 'handlers/s', 'handlers', 'mysql.handlers', 'line'],
- 'lines': [
- ['Handler_commit', 'commit', 'incremental'],
- ['Handler_delete', 'delete', 'incremental'],
- ['Handler_prepare', 'prepare', 'incremental'],
- ['Handler_read_first', 'read_first', 'incremental'],
- ['Handler_read_key', 'read_key', 'incremental'],
- ['Handler_read_next', 'read_next', 'incremental'],
- ['Handler_read_prev', 'read_prev', 'incremental'],
- ['Handler_read_rnd', 'read_rnd', 'incremental'],
- ['Handler_read_rnd_next', 'read_rnd_next', 'incremental'],
- ['Handler_rollback', 'rollback', 'incremental'],
- ['Handler_savepoint', 'savepoint', 'incremental'],
- ['Handler_savepoint_rollback', 'savepoint_rollback', 'incremental'],
- ['Handler_update', 'update', 'incremental'],
- ['Handler_write', 'write', 'incremental']
- ]
- },
- 'table_locks': {
- 'options': [None, 'Tables Locks', 'locks/s', 'locks', 'mysql.table_locks', 'line'],
- 'lines': [
- ['Table_locks_immediate', 'immediate', 'incremental'],
- ['Table_locks_waited', 'waited', 'incremental', -1, 1]
- ]
- },
- 'join_issues': {
- 'options': [None, 'Select Join Issues', 'joins/s', 'issues', 'mysql.join_issues', 'line'],
- 'lines': [
- ['Select_full_join', 'full_join', 'incremental'],
- ['Select_full_range_join', 'full_range_join', 'incremental'],
- ['Select_range', 'range', 'incremental'],
- ['Select_range_check', 'range_check', 'incremental'],
- ['Select_scan', 'scan', 'incremental']
- ]
- },
- 'sort_issues': {
- 'options': [None, 'Sort Issues', 'issues/s', 'issues', 'mysql.sort_issues', 'line'],
- 'lines': [
- ['Sort_merge_passes', 'merge_passes', 'incremental'],
- ['Sort_range', 'range', 'incremental'],
- ['Sort_scan', 'scan', 'incremental']
- ]
- },
- 'tmp': {
- 'options': [None, 'Tmp Operations', 'counter', 'temporaries', 'mysql.tmp', 'line'],
- 'lines': [
- ['Created_tmp_disk_tables', 'disk_tables', 'incremental'],
- ['Created_tmp_files', 'files', 'incremental'],
- ['Created_tmp_tables', 'tables', 'incremental']
- ]
- },
- 'connections': {
- 'options': [None, 'Connections', 'connections/s', 'connections', 'mysql.connections', 'line'],
- 'lines': [
- ['Connections', 'all', 'incremental'],
- ['Aborted_connects', 'aborted', 'incremental']
- ]
- },
- 'connections_active': {
- 'options': [None, 'Connections Active', 'connections', 'connections', 'mysql.connections_active', 'line'],
- 'lines': [
- ['Threads_connected', 'active', 'absolute'],
- ['max_connections', 'limit', 'absolute'],
- ['Max_used_connections', 'max_active', 'absolute']
- ]
- },
- 'binlog_cache': {
- 'options': [None, 'Binlog Cache', 'transactions/s', 'binlog', 'mysql.binlog_cache', 'line'],
- 'lines': [
- ['Binlog_cache_disk_use', 'disk', 'incremental'],
- ['Binlog_cache_use', 'all', 'incremental']
- ]
- },
- 'threads': {
- 'options': [None, 'Threads', 'threads', 'threads', 'mysql.threads', 'line'],
- 'lines': [
- ['Threads_connected', 'connected', 'absolute'],
- ['Threads_cached', 'cached', 'absolute', -1, 1],
- ['Threads_running', 'running', 'absolute'],
- ]
- },
- 'threads_creation_rate': {
- 'options': [None, 'Threads Creation Rate', 'threads/s', 'threads', 'mysql.threads_creation_rate', 'line'],
- 'lines': [
- ['Threads_created', 'created', 'incremental'],
- ]
- },
- 'thread_cache_misses': {
- 'options': [None, 'mysql Threads Cache Misses', 'misses', 'threads', 'mysql.thread_cache_misses', 'area'],
- 'lines': [
- ['Thread_cache_misses', 'misses', 'absolute', 1, 100]
- ]
- },
- 'innodb_io': {
- 'options': [None, 'InnoDB I/O Bandwidth', 'KiB/s', 'innodb', 'mysql.innodb_io', 'area'],
- 'lines': [
- ['Innodb_data_read', 'read', 'incremental', 1, 1024],
- ['Innodb_data_written', 'write', 'incremental', -1, 1024]
- ]
- },
- 'innodb_io_ops': {
- 'options': [None, 'InnoDB I/O Operations', 'operations/s', 'innodb', 'mysql.innodb_io_ops', 'line'],
- 'lines': [
- ['Innodb_data_reads', 'reads', 'incremental'],
- ['Innodb_data_writes', 'writes', 'incremental', -1, 1],
- ['Innodb_data_fsyncs', 'fsyncs', 'incremental']
- ]
- },
- 'innodb_io_pending_ops': {
- 'options': [None, 'InnoDB Pending I/O Operations', 'operations', 'innodb',
- 'mysql.innodb_io_pending_ops', 'line'],
- 'lines': [
- ['Innodb_data_pending_reads', 'reads', 'absolute'],
- ['Innodb_data_pending_writes', 'writes', 'absolute', -1, 1],
- ['Innodb_data_pending_fsyncs', 'fsyncs', 'absolute']
- ]
- },
- 'innodb_log': {
- 'options': [None, 'InnoDB Log Operations', 'operations/s', 'innodb', 'mysql.innodb_log', 'line'],
- 'lines': [
- ['Innodb_log_waits', 'waits', 'incremental'],
- ['Innodb_log_write_requests', 'write_requests', 'incremental', -1, 1],
- ['Innodb_log_writes', 'writes', 'incremental', -1, 1],
- ]
- },
- 'innodb_os_log': {
- 'options': [None, 'InnoDB OS Log Pending Operations', 'operations', 'innodb', 'mysql.innodb_os_log', 'line'],
- 'lines': [
- ['Innodb_os_log_pending_fsyncs', 'fsyncs', 'absolute'],
- ['Innodb_os_log_pending_writes', 'writes', 'absolute', -1, 1],
- ]
- },
- 'innodb_os_log_fsync_writes': {
- 'options': [None, 'InnoDB OS Log Operations', 'operations/s', 'innodb', 'mysql.innodb_os_log_fsyncs', 'line'],
- 'lines': [
- ['Innodb_os_log_fsyncs', 'fsyncs', 'incremental'],
- ]
- },
- 'innodb_os_log_io': {
- 'options': [None, 'InnoDB OS Log Bandwidth', 'KiB/s', 'innodb', 'mysql.innodb_os_log_io', 'area'],
- 'lines': [
- ['Innodb_os_log_written', 'write', 'incremental', -1, 1024],
- ]
- },
- 'innodb_cur_row_lock': {
- 'options': [None, 'InnoDB Current Row Locks', 'operations', 'innodb',
- 'mysql.innodb_cur_row_lock', 'area'],
- 'lines': [
- ['Innodb_row_lock_current_waits', 'current_waits', 'absolute']
- ]
- },
- 'innodb_deadlocks': {
- 'options': [None, 'InnoDB Deadlocks', 'operations/s', 'innodb',
- 'mysql.innodb_deadlocks', 'area'],
- 'lines': [
- ['Innodb_deadlocks', 'deadlocks', 'incremental']
- ]
- },
- 'innodb_rows': {
- 'options': [None, 'InnoDB Row Operations', 'operations/s', 'innodb', 'mysql.innodb_rows', 'area'],
- 'lines': [
- ['Innodb_rows_inserted', 'inserted', 'incremental'],
- ['Innodb_rows_read', 'read', 'incremental', 1, 1],
- ['Innodb_rows_updated', 'updated', 'incremental', 1, 1],
- ['Innodb_rows_deleted', 'deleted', 'incremental', -1, 1],
- ]
- },
- 'innodb_buffer_pool_pages': {
- 'options': [None, 'InnoDB Buffer Pool Pages', 'pages', 'innodb',
- 'mysql.innodb_buffer_pool_pages', 'line'],
- 'lines': [
- ['Innodb_buffer_pool_pages_data', 'data', 'absolute'],
- ['Innodb_buffer_pool_pages_dirty', 'dirty', 'absolute', -1, 1],
- ['Innodb_buffer_pool_pages_free', 'free', 'absolute'],
- ['Innodb_buffer_pool_pages_misc', 'misc', 'absolute', -1, 1],
- ['Innodb_buffer_pool_pages_total', 'total', 'absolute']
- ]
- },
- 'innodb_buffer_pool_flush_pages_requests': {
- 'options': [None, 'InnoDB Buffer Pool Flush Pages Requests', 'requests/s', 'innodb',
- 'mysql.innodb_buffer_pool_pages_flushed', 'line'],
- 'lines': [
- ['Innodb_buffer_pool_pages_flushed', 'flush pages', 'incremental'],
- ]
- },
- 'innodb_buffer_pool_bytes': {
- 'options': [None, 'InnoDB Buffer Pool Bytes', 'MiB', 'innodb', 'mysql.innodb_buffer_pool_bytes', 'area'],
- 'lines': [
- ['Innodb_buffer_pool_bytes_data', 'data', 'absolute', 1, 1024 * 1024],
- ['Innodb_buffer_pool_bytes_dirty', 'dirty', 'absolute', -1, 1024 * 1024]
- ]
- },
- 'innodb_buffer_pool_read_ahead': {
- 'options': [None, 'mysql InnoDB Buffer Pool Read Ahead', 'operations/s', 'innodb',
- 'mysql.innodb_buffer_pool_read_ahead', 'area'],
- 'lines': [
- ['Innodb_buffer_pool_read_ahead', 'all', 'incremental'],
- ['Innodb_buffer_pool_read_ahead_evicted', 'evicted', 'incremental', -1, 1],
- ['Innodb_buffer_pool_read_ahead_rnd', 'random', 'incremental']
- ]
- },
- 'innodb_buffer_pool_reqs': {
- 'options': [None, 'InnoDB Buffer Pool Requests', 'requests/s', 'innodb',
- 'mysql.innodb_buffer_pool_reqs', 'area'],
- 'lines': [
- ['Innodb_buffer_pool_read_requests', 'reads', 'incremental'],
- ['Innodb_buffer_pool_write_requests', 'writes', 'incremental', -1, 1]
- ]
- },
- 'innodb_buffer_pool_ops': {
- 'options': [None, 'InnoDB Buffer Pool Operations', 'operations/s', 'innodb',
- 'mysql.innodb_buffer_pool_ops', 'area'],
- 'lines': [
- ['Innodb_buffer_pool_reads', 'disk reads', 'incremental'],
- ['Innodb_buffer_pool_wait_free', 'wait free', 'incremental', -1, 1]
- ]
- },
- 'qcache_ops': {
- 'options': [None, 'QCache Operations', 'queries/s', 'qcache', 'mysql.qcache_ops', 'line'],
- 'lines': [
- ['Qcache_hits', 'hits', 'incremental'],
- ['Qcache_lowmem_prunes', 'lowmem prunes', 'incremental', -1, 1],
- ['Qcache_inserts', 'inserts', 'incremental'],
- ['Qcache_not_cached', 'not cached', 'incremental', -1, 1]
- ]
- },
- 'qcache': {
- 'options': [None, 'QCache Queries in Cache', 'queries', 'qcache', 'mysql.qcache', 'line'],
- 'lines': [
- ['Qcache_queries_in_cache', 'queries', 'absolute']
- ]
- },
- 'qcache_freemem': {
- 'options': [None, 'QCache Free Memory', 'MiB', 'qcache', 'mysql.qcache_freemem', 'area'],
- 'lines': [
- ['Qcache_free_memory', 'free', 'absolute', 1, 1024 * 1024]
- ]
- },
- 'qcache_memblocks': {
- 'options': [None, 'QCache Memory Blocks', 'blocks', 'qcache', 'mysql.qcache_memblocks', 'line'],
- 'lines': [
- ['Qcache_free_blocks', 'free', 'absolute'],
- ['Qcache_total_blocks', 'total', 'absolute']
- ]
- },
- 'key_blocks': {
- 'options': [None, 'MyISAM Key Cache Blocks', 'blocks', 'myisam', 'mysql.key_blocks', 'line'],
- 'lines': [
- ['Key_blocks_unused', 'unused', 'absolute'],
- ['Key_blocks_used', 'used', 'absolute', -1, 1],
- ['Key_blocks_not_flushed', 'not flushed', 'absolute']
- ]
- },
- 'key_requests': {
- 'options': [None, 'MyISAM Key Cache Requests', 'requests/s', 'myisam', 'mysql.key_requests', 'area'],
- 'lines': [
- ['Key_read_requests', 'reads', 'incremental'],
- ['Key_write_requests', 'writes', 'incremental', -1, 1]
- ]
- },
- 'key_disk_ops': {
- 'options': [None, 'MyISAM Key Cache Disk Operations', 'operations/s',
- 'myisam', 'mysql.key_disk_ops', 'area'],
- 'lines': [
- ['Key_reads', 'reads', 'incremental'],
- ['Key_writes', 'writes', 'incremental', -1, 1]
- ]
- },
- 'files': {
- 'options': [None, 'Open Files', 'files', 'files', 'mysql.files', 'line'],
- 'lines': [
- ['Open_files', 'files', 'absolute']
- ]
- },
- 'files_rate': {
- 'options': [None, 'Opened Files Rate', 'files/s', 'files', 'mysql.files_rate', 'line'],
- 'lines': [
- ['Opened_files', 'files', 'incremental']
- ]
- },
- 'binlog_stmt_cache': {
- 'options': [None, 'Binlog Statement Cache', 'statements/s', 'binlog',
- 'mysql.binlog_stmt_cache', 'line'],
- 'lines': [
- ['Binlog_stmt_cache_disk_use', 'disk', 'incremental'],
- ['Binlog_stmt_cache_use', 'all', 'incremental']
- ]
- },
- 'connection_errors': {
- 'options': [None, 'Connection Errors', 'connections/s', 'connections',
- 'mysql.connection_errors', 'line'],
- 'lines': [
- ['Connection_errors_accept', 'accept', 'incremental'],
- ['Connection_errors_internal', 'internal', 'incremental'],
- ['Connection_errors_max_connections', 'max', 'incremental'],
- ['Connection_errors_peer_address', 'peer_addr', 'incremental'],
- ['Connection_errors_select', 'select', 'incremental'],
- ['Connection_errors_tcpwrap', 'tcpwrap', 'incremental']
- ]
- },
- 'slave_behind': {
- 'options': [None, 'Slave Behind Seconds', 'seconds', 'slave', 'mysql.slave_behind', 'line'],
- 'lines': [
- ['Seconds_Behind_Master', 'seconds', 'absolute']
- ]
- },
- 'slave_status': {
- 'options': [None, 'Slave Status', 'status', 'slave', 'mysql.slave_status', 'line'],
- 'lines': [
- ['Slave_SQL_Running', 'sql_running', 'absolute'],
- ['Slave_IO_Running', 'io_running', 'absolute']
- ]
- },
- 'galera_writesets': {
- 'options': [None, 'Replicated Writesets', 'writesets/s', 'galera', 'mysql.galera_writesets', 'line'],
- 'lines': [
- ['wsrep_received', 'rx', 'incremental'],
- ['wsrep_replicated', 'tx', 'incremental', -1, 1],
- ]
- },
- 'galera_bytes': {
- 'options': [None, 'Replicated Bytes', 'KiB/s', 'galera', 'mysql.galera_bytes', 'area'],
- 'lines': [
- ['wsrep_received_bytes', 'rx', 'incremental', 1, 1024],
- ['wsrep_replicated_bytes', 'tx', 'incremental', -1, 1024],
- ]
- },
- 'galera_queue': {
- 'options': [None, 'Galera Queue', 'writesets', 'galera', 'mysql.galera_queue', 'line'],
- 'lines': [
- ['wsrep_local_recv_queue', 'rx', 'absolute'],
- ['wsrep_local_send_queue', 'tx', 'absolute', -1, 1],
- ]
- },
- 'galera_conflicts': {
- 'options': [None, 'Replication Conflicts', 'transactions', 'galera', 'mysql.galera_conflicts', 'area'],
- 'lines': [
- ['wsrep_local_bf_aborts', 'bf_aborts', 'incremental'],
- ['wsrep_local_cert_failures', 'cert_fails', 'incremental', -1, 1],
- ]
- },
- 'galera_flow_control': {
- 'options': [None, 'Flow Control', 'millisec', 'galera', 'mysql.galera_flow_control', 'area'],
- 'lines': [
- ['wsrep_flow_control_paused_ns', 'paused', 'incremental', 1, 1000000],
- ]
- },
- 'galera_cluster_status': {
- 'options': [None, 'Cluster Component Status', 'status', 'galera', 'mysql.galera_cluster_status', 'line'],
- 'lines': [
- ['wsrep_cluster_status', 'status', 'absolute'],
- ]
- },
- 'galera_cluster_state': {
- 'options': [None, 'Cluster Component State', 'state', 'galera', 'mysql.galera_cluster_state', 'line'],
- 'lines': [
- ['wsrep_local_state', 'state', 'absolute'],
- ]
- },
- 'galera_cluster_size': {
- 'options': [None, 'Number of Nodes in the Cluster', 'num', 'galera', 'mysql.galera_cluster_size', 'line'],
- 'lines': [
- ['wsrep_cluster_size', 'nodes', 'absolute'],
- ]
- },
- 'galera_cluster_weight': {
- 'options': [None, 'The Total Weight of the Current Members in the Cluster', 'weight', 'galera',
- 'mysql.galera_cluster_weight', 'line'],
- 'lines': [
- ['wsrep_cluster_weight', 'weight', 'absolute'],
- ]
- },
- 'galera_connected': {
- 'options': [None, 'Whether the Node is Connected to the Cluster', 'boolean', 'galera',
- 'mysql.galera_connected', 'line'],
- 'lines': [
- ['wsrep_connected', 'connected', 'absolute'],
- ]
- },
- 'galera_ready': {
- 'options': [None, 'Whether the Node is Ready to Accept Queries', 'boolean', 'galera',
- 'mysql.galera_ready', 'line'],
- 'lines': [
- ['wsrep_ready', 'ready', 'absolute'],
- ]
- },
- 'galera_open_transactions': {
- 'options': [None, 'Open Transactions', 'num', 'galera', 'mysql.galera_open_transactions', 'line'],
- 'lines': [
- ['wsrep_open_transactions', 'open transactions', 'absolute'],
- ]
- },
- 'galera_thread_count': {
- 'options': [None, 'Total Number of WSRep (applier/rollbacker) Threads', 'num', 'galera',
- 'mysql.galera_thread_count', 'line'],
- 'lines': [
- ['wsrep_thread_count', 'threads', 'absolute'],
- ]
- },
- 'userstats_cpu': {
- 'options': [None, 'Users CPU time', 'percentage', 'userstats', 'mysql.userstats_cpu', 'stacked'],
- 'lines': []
- }
-}
-
-
-def slave_status_chart_template(channel_name):
- order = [
- 'slave_behind_{0}'.format(channel_name),
- 'slave_status_{0}'.format(channel_name)
- ]
-
- charts = {
- order[0]: {
- 'options': [None, 'Slave Behind Seconds Channel {0}'.format(channel_name),
- 'seconds', 'slave', 'mysql.slave_behind', 'line'],
- 'lines': [
- ['Seconds_Behind_Master_{0}'.format(channel_name), 'seconds', 'absolute']
- ]
- },
- order[1]: {
- 'options': [None, 'Slave Status Channel {0}'.format(channel_name),
- 'status', 'slave', 'mysql.slave_status', 'line'],
- 'lines': [
- ['Slave_SQL_Running_{0}'.format(channel_name), 'sql_running', 'absolute'],
- ['Slave_IO_Running_{0}'.format(channel_name), 'io_running', 'absolute']
- ]
- },
- }
-
- return order, charts
-
-
-def userstats_chart_template(name):
- order = [
- 'userstats_rows_{0}'.format(name),
- 'userstats_commands_{0}'.format(name)
- ]
- family = 'userstats {0}'.format(name)
-
- charts = {
- order[0]: {
- 'options': [None, 'Rows Operations', 'operations/s', family, 'mysql.userstats_rows', 'stacked'],
- 'lines': [
- ['userstats_{0}_Rows_read'.format(name), 'read', 'incremental'],
- ['userstats_{0}_Rows_send'.format(name), 'send', 'incremental'],
- ['userstats_{0}_Rows_updated'.format(name), 'updated', 'incremental'],
- ['userstats_{0}_Rows_inserted'.format(name), 'inserted', 'incremental'],
- ['userstats_{0}_Rows_deleted'.format(name), 'deleted', 'incremental']
- ]
- },
- order[1]: {
- 'options': [None, 'Commands', 'commands/s', family, 'mysql.userstats_commands', 'stacked'],
- 'lines': [
- ['userstats_{0}_Select_commands'.format(name), 'select', 'incremental'],
- ['userstats_{0}_Update_commands'.format(name), 'update', 'incremental'],
- ['userstats_{0}_Other_commands'.format(name), 'other', 'incremental']
- ]
- }
- }
-
- return order, charts
-
-
-# https://dev.mysql.com/doc/refman/8.0/en/replication-channels.html
-DEFAULT_REPL_CHANNEL = ''
-
-
-# Write Set REPlication
-# https://galeracluster.com/library/documentation/galera-status-variables.html
-# https://www.percona.com/doc/percona-xtradb-cluster/LATEST/wsrep-status-index.html
-class WSRepDataConverter:
- unknown_value = -1
-
- def convert(self, key, value):
- if key == 'wsrep_connected':
- return self.convert_connected(value)
- elif key == 'wsrep_ready':
- return self.convert_ready(value)
- elif key == 'wsrep_cluster_status':
- return self.convert_cluster_status(value)
- return value
-
- def convert_connected(self, value):
- # https://www.percona.com/doc/percona-xtradb-cluster/LATEST/wsrep-status-index.html#wsrep_connected
- if value == 'OFF':
- return 0
- if value == 'ON':
- return 1
- return self.unknown_value
-
- def convert_ready(self, value):
- # https://www.percona.com/doc/percona-xtradb-cluster/LATEST/wsrep-status-index.html#wsrep_ready
- if value == 'OFF':
- return 0
- if value == 'ON':
- return 1
- return self.unknown_value
-
- def convert_cluster_status(self, value):
- # https://www.percona.com/doc/percona-xtradb-cluster/LATEST/wsrep-status-index.html#wsrep_cluster_status
- # https://github.com/codership/wsrep-API/blob/eab2d5d5a31672c0b7d116ef1629ff18392fd7d0/wsrep_api.h
- # typedef enum wsrep_view_status {
- # WSREP_VIEW_PRIMARY, //!< primary group configuration (quorum present)
- # WSREP_VIEW_NON_PRIMARY, //!< non-primary group configuration (quorum lost)
- # WSREP_VIEW_DISCONNECTED, //!< not connected to group, retrying.
- # WSREP_VIEW_MAX
- # } wsrep_view_status_t;
- value = value.lower()
- if value == 'primary':
- return 0
- elif value == 'non-primary':
- return 1
- elif value == 'disconnected':
- return 2
- return self.unknown_value
-
-
-wsrep_converter = WSRepDataConverter()
-
-
-class Service(MySQLService):
- def __init__(self, configuration=None, name=None):
- MySQLService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = CHARTS
- self.queries = dict(
- global_status=QUERY_GLOBAL,
- slave_status=QUERY_SLAVE,
- variables=QUERY_VARIABLES,
- user_statistics=QUERY_USER_STATISTICS,
- )
- self.repl_channels = [DEFAULT_REPL_CHANNEL]
-
- def _get_data(self):
-
- raw_data = self._get_raw_data(description=True)
-
- if not raw_data:
- return None
-
- data = dict()
-
- if 'global_status' in raw_data:
- global_status = self.get_global_status(raw_data['global_status'])
- if global_status:
- data.update(global_status)
-
- if 'slave_status' in raw_data:
- status = self.get_slave_status(raw_data['slave_status'])
- if status:
- data.update(status)
-
- if 'user_statistics' in raw_data:
- if raw_data['user_statistics'][0]:
- data.update(self.get_userstats(raw_data))
- else:
- self.queries.pop('user_statistics')
-
- if 'variables' in raw_data:
- variables = dict(raw_data['variables'][0])
- for key in VARIABLES:
- if key in variables:
- data[key] = variables[key]
-
- return data or None
-
- @staticmethod
- def convert_wsrep(key, value):
- return wsrep_converter.convert(key, value)
-
- def get_global_status(self, raw_global_status):
- # (
- # (
- # ('Aborted_clients', '18'),
- # ('Aborted_connects', '33'),
- # ('Access_denied_errors', '80'),
- # ('Acl_column_grants', '0'),
- # ('Acl_database_grants', '0'),
- # ('Acl_function_grants', '0'),
- # ('wsrep_ready', 'OFF'),
- # ('wsrep_rollbacker_thread_count', '0'),
- # ('wsrep_thread_count', '0')
- # ),
- # (
- # ('Variable_name', 253, 60, 64, 64, 0, 0),
- # ('Value', 253, 48, 2048, 2048, 0, 0),
- # )
- # )
- rows = raw_global_status[0]
- if not rows:
- return
-
- global_status = dict(rows)
- data = dict()
-
- for key in GLOBAL_STATS:
- if key not in global_status:
- continue
- value = global_status[key]
- data[key] = value
-
- for key in GALERA_STATS:
- if key not in global_status:
- continue
- value = global_status[key]
- value = self.convert_wsrep(key, value)
- data[key] = value
-
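-        # Derived metric: Threads_created / Connections is the share of connections that
-        # could not reuse a cached thread. Multiplying by 10000 stores it at 100x scale,
-        # presumably so the chart definition can scale it back down to a percentage.
-        # Worked example (illustrative values only): Threads_created=20, Connections=4000
-        # gives round(20 / 4000 * 10000) = 50, i.e. 0.50% on the dashboard.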
- if 'Threads_created' in data and 'Connections' in data:
- data['Thread_cache_misses'] = round(int(data['Threads_created']) / float(data['Connections']) * 10000)
- return data
-
- def get_slave_status(self, slave_status_data):
- rows, description = slave_status_data[0], slave_status_data[1]
- description_keys = [v[0] for v in description]
- if not rows:
- return
-
- data = dict()
- for row in rows:
- slave_data = dict(zip(description_keys, row))
- channel_name = slave_data.get('Channel_Name', DEFAULT_REPL_CHANNEL)
-
- if channel_name not in self.repl_channels and len(self.charts) > 0:
- self.add_repl_channel_charts(channel_name)
- self.repl_channels.append(channel_name)
-
- for key, func in SLAVE_STATS:
- if key not in slave_data:
- continue
-
- value = slave_data[key]
- if channel_name:
- key = '{0}_{1}'.format(key, channel_name)
- data[key] = func(value)
-
- return data
-
- def add_repl_channel_charts(self, name):
- self.add_new_charts(slave_status_chart_template, name)
-
- def get_userstats(self, raw_data):
- # (
- # (
- # ('netdata', 1L, 0L, 60L, 0.15842499999999984, 0.15767439999999996, 5206L, 963957L, 0L, 0L,
- # 61L, 0L, 0L, 0L, 0L, 0L, 62L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L),
- # ),
- # (
- # ('User', 253, 7, 128, 128, 0, 0),
- # ('Total_connections', 3, 2, 11, 11, 0, 0),
- # ('Concurrent_connections', 3, 1, 11, 11, 0, 0),
- # ('Connected_time', 3, 2, 11, 11, 0, 0),
- # ('Busy_time', 5, 20, 21, 21, 31, 0),
- # ('Cpu_time', 5, 20, 21, 21, 31, 0),
- # ('Bytes_received', 8, 4, 21, 21, 0, 0),
- # ('Bytes_sent', 8, 6, 21, 21, 0, 0),
- # ('Binlog_bytes_written', 8, 1, 21, 21, 0, 0),
- # ('Rows_read', 8, 1, 21, 21, 0, 0),
- # ('Rows_sent', 8, 2, 21, 21, 0, 0),
- # ('Rows_deleted', 8, 1, 21, 21, 0, 0),
- # ('Rows_inserted', 8, 1, 21, 21, 0, 0),
- # ('Rows_updated', 8, 1, 21, 21, 0, 0),
- # ('Select_commands', 8, 2, 21, 21, 0, 0),
- # ('Update_commands', 8, 1, 21, 21, 0, 0),
- # ('Other_commands', 8, 2, 21, 21, 0, 0),
- # ('Commit_transactions', 8, 1, 21, 21, 0, 0),
- # ('Rollback_transactions', 8, 1, 21, 21, 0, 0),
- # ('Denied_connections', 8, 1, 21, 21, 0, 0),
- # ('Lost_connections', 8, 1, 21, 21, 0, 0),
- # ('Access_denied', 8, 1, 21, 21, 0, 0),
- # ('Empty_queries', 8, 2, 21, 21, 0, 0),
- # ('Total_ssl_connections', 8, 1, 21, 21, 0, 0),
- # ('Max_statement_time_exceeded', 8, 1, 21, 21, 0, 0)
- # )
- # )
- data = dict()
- userstats_vars = [e[0] for e in raw_data['user_statistics'][1]]
- for i, _ in enumerate(raw_data['user_statistics'][0]):
- user_name = raw_data['user_statistics'][0][i][0]
- userstats = dict(zip(userstats_vars, raw_data['user_statistics'][0][i]))
-
- if len(self.charts) > 0:
- if ('userstats_{0}_Cpu_time'.format(user_name)) not in self.charts['userstats_cpu']:
- self.add_userstats_dimensions(user_name)
- self.create_new_userstats_charts(user_name)
-
- for key in USER_STATISTICS:
- if key in userstats:
- data['userstats_{0}_{1}'.format(user_name, key)] = userstats[key]
-
- return data
-
- def add_userstats_dimensions(self, name):
- self.charts['userstats_cpu'].add_dimension(['userstats_{0}_Cpu_time'.format(name), name, 'incremental', 100, 1])
-
- def create_new_userstats_charts(self, tube):
- self.add_new_charts(userstats_chart_template, tube)
-
- def add_new_charts(self, template, *params):
- order, charts = template(*params)
-
- for chart_name in order:
- params = [chart_name] + charts[chart_name]['options']
- dimensions = charts[chart_name]['lines']
-
- new_chart = self.charts.add_chart(params)
- for dimension in dimensions:
- new_chart.add_dimension(dimension)
diff --git a/collectors/python.d.plugin/mysql/mysql.conf b/collectors/python.d.plugin/mysql/mysql.conf
deleted file mode 100644
index 31bfe9c0..00000000
--- a/collectors/python.d.plugin/mysql/mysql.conf
+++ /dev/null
@@ -1,293 +0,0 @@
-# netdata python.d.plugin configuration for mysql
-#
-# This file is in YAML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 1
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed running at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# penalty: yes # the JOB's penalty
-# autodetection_retry: 0 # the JOB's re-check interval in seconds
-#
-# In addition to the above, mysql also supports the following:
-#
-# socket: 'path/to/mysql.sock'
-#
-# or
-# host: 'IP or HOSTNAME' # the host to connect to
-# port: PORT # the port to connect to
-#
-# in all cases, the following can also be set:
-#
-# user: 'username' # the mysql username to use
-# pass: 'password' # the mysql password to use
-#
-# ssl connection parameters
-#
-# ssl:
-# key: 'key' # the path name of the client private key file.
-# cert: 'cert' # the path name of the client public key certificate file.
-# ca: 'ca' # the path name of the Certificate Authority (CA) certificate file. This option, if used, must specify the same certificate used by the server.
-# capath: 'capath' # the path name of the directory that contains trusted SSL CA certificate files.
-# cipher: [ciphers] # the list of permitted ciphers for SSL encryption.
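-#
-# A complete job using TCP plus SSL might look like the sketch below; every value
-# (host, credentials, certificate paths) is a placeholder to adapt to your setup:
-#
-# remote_ssl:
-#   name : 'remote'
-#   host : '203.0.113.10'
-#   port : '3306'
-#   user : 'netdata'
-#   pass : 'some_password'
-#   ssl:
-#     ca  : '/etc/ssl/mysql/ca.pem'
-#     cert: '/etc/ssl/mysql/client-cert.pem'
-#     key : '/etc/ssl/mysql/client-key.pem'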
-
-# ----------------------------------------------------------------------
-# MySQL CONFIGURATION
-#
-# netdata does not need any privilege - only the ability to connect
-# to the mysql server (netdata will not be able to see any data).
-#
-# Execute these commands to give the local user 'netdata' the ability
-# to connect to the mysql server on localhost, without a password:
-#
-# > create user 'netdata'@'localhost';
-# > grant usage on *.* to 'netdata'@'localhost';
-# > flush privileges;
-#
-# with the above statements, netdata will be able to gather mysql
-# statistics, without the ability to see or alter any data or affect
-# mysql operation in any way. No change is required below.
-#
-# If you need to monitor mysql replication too, use this instead:
-#
-# > create user 'netdata'@'localhost';
-# > grant replication client on *.* to 'netdata'@'localhost';
-# > flush privileges;
-#
-
-# ----------------------------------------------------------------------
-# AUTO-DETECTION JOBS
-# only one of them will run (they have the same name)
-
-mycnf1:
- name : 'local'
- 'my.cnf' : '/etc/my.cnf'
-
-mycnf2:
- name : 'local'
- 'my.cnf' : '/etc/mysql/my.cnf'
-
-debiancnf:
- name : 'local'
- 'my.cnf' : '/etc/mysql/debian.cnf'
-
-socket1:
- name : 'local'
- # user : ''
- # pass : ''
- socket : '/var/run/mysqld/mysqld.sock'
-
-socket2:
- name : 'local'
- # user : ''
- # pass : ''
- socket : '/var/run/mysqld/mysql.sock'
-
-socket3:
- name : 'local'
- # user : ''
- # pass : ''
- socket : '/var/lib/mysql/mysql.sock'
-
-socket4:
- name : 'local'
- # user : ''
- # pass : ''
- socket : '/tmp/mysql.sock'
-
-tcp:
- name : 'local'
- # user : ''
- # pass : ''
- host : 'localhost'
- port : '3306'
- # keep in mind port might be ignored by mysql, if host = 'localhost'
- # http://serverfault.com/questions/337818/how-to-force-mysql-to-connect-by-tcp-instead-of-a-unix-socket/337844#337844
-
-tcpipv4:
- name : 'local'
- # user : ''
- # pass : ''
- host : '127.0.0.1'
- port : '3306'
-
-tcpipv6:
- name : 'local'
- # user : ''
- # pass : ''
- host : '::1'
- port : '3306'
-
-
-# Now we try the same as above with user: root
-# A few systems configure mysql to accept passwordless
-# root access.
-
-mycnf1_root:
- name : 'local'
- user : 'root'
- 'my.cnf' : '/etc/my.cnf'
-
-mycnf2_root:
- name : 'local'
- user : 'root'
- 'my.cnf' : '/etc/mysql/my.cnf'
-
-socket1_root:
- name : 'local'
- user : 'root'
- # pass : ''
- socket : '/var/run/mysqld/mysqld.sock'
-
-socket2_root:
- name : 'local'
- user : 'root'
- # pass : ''
- socket : '/var/run/mysqld/mysql.sock'
-
-socket3_root:
- name : 'local'
- user : 'root'
- # pass : ''
- socket : '/var/lib/mysql/mysql.sock'
-
-socket4_root:
- name : 'local'
- user : 'root'
- # pass : ''
- socket : '/tmp/mysql.sock'
-
-tcp_root:
- name : 'local'
- user : 'root'
- # pass : ''
- host : 'localhost'
- port : '3306'
- # keep in mind port might be ignored by mysql, if host = 'localhost'
- # http://serverfault.com/questions/337818/how-to-force-mysql-to-connect-by-tcp-instead-of-a-unix-socket/337844#337844
-
-tcpipv4_root:
- name : 'local'
- user : 'root'
- # pass : ''
- host : '127.0.0.1'
- port : '3306'
-
-tcpipv6_root:
- name : 'local'
- user : 'root'
- # pass : ''
- host : '::1'
- port : '3306'
-
-
-# Now we try the same as above with user: netdata
-
-mycnf1_netdata:
- name : 'local'
- user : 'netdata'
- 'my.cnf' : '/etc/my.cnf'
-
-mycnf2_netdata:
- name : 'local'
- user : 'netdata'
- 'my.cnf' : '/etc/mysql/my.cnf'
-
-socket1_netdata:
- name : 'local'
- user : 'netdata'
- # pass : ''
- socket : '/var/run/mysqld/mysqld.sock'
-
-socket2_netdata:
- name : 'local'
- user : 'netdata'
- # pass : ''
- socket : '/var/run/mysqld/mysql.sock'
-
-socket3_netdata:
- name : 'local'
- user : 'netdata'
- # pass : ''
- socket : '/var/lib/mysql/mysql.sock'
-
-socket4_netdata:
- name : 'local'
- user : 'netdata'
- # pass : ''
- socket : '/tmp/mysql.sock'
-
-tcp_netdata:
- name : 'local'
- user : 'netdata'
- # pass : ''
- host : 'localhost'
- port : '3306'
- # keep in mind port might be ignored by mysql, if host = 'localhost'
- # http://serverfault.com/questions/337818/how-to-force-mysql-to-connect-by-tcp-instead-of-a-unix-socket/337844#337844
-
-tcpipv4_netdata:
- name : 'local'
- user : 'netdata'
- # pass : ''
- host : '127.0.0.1'
- port : '3306'
-
-tcpipv6_netdata:
- name : 'local'
- user : 'netdata'
- # pass : ''
- host : '::1'
- port : '3306'
-
diff --git a/collectors/python.d.plugin/nginx/Makefile.inc b/collectors/python.d.plugin/nginx/Makefile.inc
deleted file mode 100644
index 4636aa83..00000000
--- a/collectors/python.d.plugin/nginx/Makefile.inc
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# install these files
-dist_python_DATA += nginx/nginx.chart.py
-dist_pythonconfig_DATA += nginx/nginx.conf
-
-# do not install these files, but include them in the distribution
-dist_noinst_DATA += nginx/README.md nginx/Makefile.inc
-
diff --git a/collectors/python.d.plugin/nginx/README.md b/collectors/python.d.plugin/nginx/README.md
deleted file mode 100644
index 34f63cc5..00000000
--- a/collectors/python.d.plugin/nginx/README.md
+++ /dev/null
@@ -1,65 +0,0 @@
-<!--
-title: "NGINX monitoring with Netdata"
-custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/nginx/README.md
-sidebar_label: "NGINX"
--->
-
-# NGINX monitoring with Netdata
-
-Monitors one or more NGINX servers depending on configuration. Servers can be either local or remote.
-
-## Requirements
-
-- nginx with `ngx_http_stub_status_module` enabled
-- a `location /stub_status` block that exposes the status page
-
-An example nginx configuration can be found in `python.d/nginx.conf`.
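-
-A minimal `stub_status` location might look like this (an illustrative sketch; adjust the
-`allow`/`deny` rules to your own network):
-
-```conf
-location /stub_status {
-    stub_status;
-    allow 127.0.0.1;
-    deny all;
-}
-```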
-
-It produces the following charts:
-
-1. **Active Connections**
-
- - active
-
-2. **Requests** in requests/s
-
- - requests
-
-3. **Active Connections by Status**
-
- - reading
- - writing
- - waiting
-
-4. **Connections Rate** in connections/s
-
- - accepts
- - handled
-
-## Configuration
-
-Edit the `python.d/nginx.conf` configuration file using `edit-config` from the Netdata [config
-directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
-
-```bash
-cd /etc/netdata # Replace this path with your Netdata config directory, if different
-sudo ./edit-config python.d/nginx.conf
-```
-
-It needs only the `url` of the server's `stub_status` page.
-
-Here is an example for a local server:
-
-```yaml
-update_every : 10
-priority : 90100
-
-local:
- url : 'http://localhost/stub_status'
-```
-
-Without configuration, the module attempts to connect to `http://localhost/stub_status`.
-
----
-
-
diff --git a/collectors/python.d.plugin/nginx/nginx.chart.py b/collectors/python.d.plugin/nginx/nginx.chart.py
deleted file mode 100644
index 7548d6a4..00000000
--- a/collectors/python.d.plugin/nginx/nginx.chart.py
+++ /dev/null
@@ -1,71 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: nginx netdata python.d module
-# Author: Pawel Krupa (paulfantom)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from bases.FrameworkServices.UrlService import UrlService
-
-ORDER = [
- 'connections',
- 'requests',
- 'connection_status',
- 'connect_rate',
-]
-
-CHARTS = {
- 'connections': {
- 'options': [None, 'Active Connections', 'connections', 'active connections',
- 'nginx.connections', 'line'],
- 'lines': [
- ['active']
- ]
- },
- 'requests': {
- 'options': [None, 'Requests', 'requests/s', 'requests', 'nginx.requests', 'line'],
- 'lines': [
- ['requests', None, 'incremental']
- ]
- },
- 'connection_status': {
- 'options': [None, 'Active Connections by Status', 'connections', 'status',
- 'nginx.connection_status', 'line'],
- 'lines': [
- ['reading'],
- ['writing'],
- ['waiting', 'idle']
- ]
- },
- 'connect_rate': {
- 'options': [None, 'Connections Rate', 'connections/s', 'connections rate',
- 'nginx.connect_rate', 'line'],
- 'lines': [
- ['accepts', 'accepted', 'incremental'],
- ['handled', None, 'incremental']
- ]
- }
-}
-
-
-class Service(UrlService):
- def __init__(self, configuration=None, name=None):
- UrlService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = CHARTS
- self.url = self.configuration.get('url', 'http://localhost/stub_status')
-
- def _get_data(self):
- """
- Format data received from http request
- :return: dict
- """
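-        # A typical stub_status response looks like (values are illustrative):
-        #   Active connections: 291
-        #   server accepts handled requests
-        #    16630948 16630948 31070465
-        #   Reading: 6 Writing: 179 Waiting: 106
-        # After split(" ") the values sit at fixed offsets: 2=active, 7=accepts,
-        # 8=handled, 9=requests, 11=reading, 13=writing, 15=waiting.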
- try:
- raw = self._get_raw_data().split(" ")
- return {'active': int(raw[2]),
- 'requests': int(raw[9]),
- 'reading': int(raw[11]),
- 'writing': int(raw[13]),
- 'waiting': int(raw[15]),
- 'accepts': int(raw[7]),
- 'handled': int(raw[8])}
- except (ValueError, AttributeError):
- return None
diff --git a/collectors/python.d.plugin/nginx/nginx.conf b/collectors/python.d.plugin/nginx/nginx.conf
deleted file mode 100644
index 4001b4bb..00000000
--- a/collectors/python.d.plugin/nginx/nginx.conf
+++ /dev/null
@@ -1,107 +0,0 @@
-# netdata python.d.plugin configuration for nginx
-#
-# You must have ngx_http_stub_status_module configured on your nginx server for this
-# plugin to work. The following is an example config.
-# It must be located inside a server { } block.
-#
-# location /stub_status {
-# stub_status;
-# # Security: Only allow access from the IP below.
-# allow 192.168.1.200;
-# # Deny anyone else
-# deny all;
-# }
-#
-# This file is in YAML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 1
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed running at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# penalty: yes # the JOB's penalty
-# autodetection_retry: 0 # the JOB's re-check interval in seconds
-#
-# In addition to the above, this plugin also supports the following:
-#
-# url: 'URL' # the URL to fetch nginx's status stats
-#
-# if the URL is password protected, the following are supported:
-#
-# user: 'username'
-# pass: 'password'
-#
-# Example
-#
-# RemoteNginx:
-# name : 'Reverse_Proxy'
-# url : 'http://yourdomain.com/stub_status'
-#
-# "RemoteNginx" will show up in Netdata logs. "Reverse Proxy" will show up in the menu
-# in the nginx section.
-
-# ----------------------------------------------------------------------
-# AUTO-DETECTION JOBS
-# only one of them will run (they have the same name)
-
-localhost:
- name : 'local'
- url : 'http://localhost/stub_status'
-
-localipv4:
- name : 'local'
- url : 'http://127.0.0.1/stub_status'
-
-localipv6:
- name : 'local'
- url : 'http://[::1]/stub_status'
-
diff --git a/collectors/python.d.plugin/phpfpm/Makefile.inc b/collectors/python.d.plugin/phpfpm/Makefile.inc
deleted file mode 100644
index ff312fe1..00000000
--- a/collectors/python.d.plugin/phpfpm/Makefile.inc
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# install these files
-dist_python_DATA += phpfpm/phpfpm.chart.py
-dist_pythonconfig_DATA += phpfpm/phpfpm.conf
-
-# do not install these files, but include them in the distribution
-dist_noinst_DATA += phpfpm/README.md phpfpm/Makefile.inc
-
diff --git a/collectors/python.d.plugin/phpfpm/README.md b/collectors/python.d.plugin/phpfpm/README.md
deleted file mode 100644
index fe81971b..00000000
--- a/collectors/python.d.plugin/phpfpm/README.md
+++ /dev/null
@@ -1,51 +0,0 @@
-<!--
-title: "PHP-FPM monitoring with Netdata"
-custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/phpfpm/README.md
-sidebar_label: "PHP-FPM"
--->
-
-# PHP-FPM monitoring with Netdata
-
-Monitors one or more PHP-FPM instances depending on configuration.
-
-## Requirements
-
-- `PHP-FPM` with [enabled `status` page](https://easyengine.io/tutorials/php/fpm-status-page/)
-- access to `status` page via web server
-
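-To enable the status page, set `pm.status_path` in the pool configuration and make sure your web
-server forwards that path to the pool. A minimal sketch (the pool file name and location vary by
-distribution, so treat the path below as an example):
-
-```ini
-; e.g. /etc/php/8.1/fpm/pool.d/www.conf
-pm.status_path = /status
-```
-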
-## Charts
-
-It produces the following charts:
-
-- Active Connections in `connections`
-- Requests in `requests/s`
-- Performance in `status`
-- Requests Duration Among All Idle Processes in `milliseconds`
-- Last Request CPU Usage Among All Idle Processes in `percentage`
-- Last Request Memory Usage Among All Idle Processes in `KB`
-
-## Configuration
-
-Edit the `python.d/phpfpm.conf` configuration file using `edit-config` from the Netdata [config
-directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
-
-```bash
-cd /etc/netdata # Replace this path with your Netdata config directory, if different
-sudo ./edit-config python.d/phpfpm.conf
-```
-
-It needs only the `url` of the server's `status` page. Here is an example for local and remote instances:
-
-```yaml
-local:
- url : 'http://localhost/status?full&json'
-
-remote:
- url : 'http://203.0.113.10/status?full&json'
-```
-
-Without configuration, the module attempts to connect to `http://localhost/status`.
-
----
-
-
diff --git a/collectors/python.d.plugin/phpfpm/phpfpm.chart.py b/collectors/python.d.plugin/phpfpm/phpfpm.chart.py
deleted file mode 100644
index 226df99c..00000000
--- a/collectors/python.d.plugin/phpfpm/phpfpm.chart.py
+++ /dev/null
@@ -1,174 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: PHP-FPM netdata python.d module
-# Author: Pawel Krupa (paulfantom)
-# Author: Ilya Mashchenko (ilyam8)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-import json
-import re
-
-from bases.FrameworkServices.UrlService import UrlService
-
-REGEX = re.compile(r'([a-z][a-z ]+): ([\d.]+)')
-
-POOL_INFO = [
- ('active processes', 'active'),
- ('max active processes', 'maxActive'),
- ('idle processes', 'idle'),
- ('accepted conn', 'requests'),
- ('max children reached', 'reached'),
- ('slow requests', 'slow')
-]
-
-PER_PROCESS_INFO = [
- ('request duration', 'ReqDur'),
- ('last request cpu', 'ReqCpu'),
- ('last request memory', 'ReqMem')
-]
-
-
-def average(collection):
- return sum(collection, 0.0) / max(len(collection), 1)
-
-
-CALC = [
- ('min', min),
- ('max', max),
- ('avg', average)
-]
-
-ORDER = [
- 'connections',
- 'requests',
- 'performance',
- 'request_duration',
- 'request_cpu',
- 'request_mem',
-]
-
-CHARTS = {
- 'connections': {
- 'options': [None, 'PHP-FPM Active Connections', 'connections', 'active connections', 'phpfpm.connections',
- 'line'],
- 'lines': [
- ['active'],
- ['maxActive', 'max active'],
- ['idle']
- ]
- },
- 'requests': {
- 'options': [None, 'PHP-FPM Requests', 'requests/s', 'requests', 'phpfpm.requests', 'line'],
- 'lines': [
- ['requests', None, 'incremental']
- ]
- },
- 'performance': {
- 'options': [None, 'PHP-FPM Performance', 'status', 'performance', 'phpfpm.performance', 'line'],
- 'lines': [
- ['reached', 'max children reached'],
- ['slow', 'slow requests']
- ]
- },
- 'request_duration': {
- 'options': [None, 'PHP-FPM Requests Duration Among All Idle Processes', 'milliseconds', 'request duration',
- 'phpfpm.request_duration',
- 'line'],
- 'lines': [
- ['minReqDur', 'min', 'absolute', 1, 1000],
- ['maxReqDur', 'max', 'absolute', 1, 1000],
- ['avgReqDur', 'avg', 'absolute', 1, 1000]
- ]
- },
- 'request_cpu': {
- 'options': [None, 'PHP-FPM Last Request CPU Usage Among All Idle Processes', 'percentage', 'request CPU',
- 'phpfpm.request_cpu', 'line'],
- 'lines': [
- ['minReqCpu', 'min'],
- ['maxReqCpu', 'max'],
- ['avgReqCpu', 'avg']
- ]
- },
- 'request_mem': {
- 'options': [None, 'PHP-FPM Last Request Memory Usage Among All Idle Processes', 'KB', 'request memory',
- 'phpfpm.request_mem', 'line'],
- 'lines': [
- ['minReqMem', 'min', 'absolute', 1, 1024],
- ['maxReqMem', 'max', 'absolute', 1, 1024],
- ['avgReqMem', 'avg', 'absolute', 1, 1024]
- ]
- }
-}
-
-
-class Service(UrlService):
- def __init__(self, configuration=None, name=None):
- UrlService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = CHARTS
- self.url = self.configuration.get('url', 'http://localhost/status?full&json')
- self.json = '&json' in self.url or '?json' in self.url
- self.json_full = self.url.endswith(('?full&json', '?json&full'))
- self.if_all_processes_running = dict(
- [(c_name + p_name, 0) for c_name, func in CALC for metric, p_name in PER_PROCESS_INFO]
- )
-
- def _get_data(self):
- """
- Format data received from http request
- :return: dict
- """
- raw = self._get_raw_data()
- if not raw:
- return None
-
- raw_json = parse_raw_data_(is_json=self.json, raw_data=raw)
-
- # Per Pool info: active connections, requests and performance charts
- to_netdata = fetch_data_(raw_data=raw_json, metrics_list=POOL_INFO)
-
- # Per Process Info: duration, cpu and memory charts (min, max, avg)
- if self.json_full:
- p_info = dict()
- to_netdata.update(self.if_all_processes_running) # If all processes are in running state
- # Metrics are always 0 if the process is not in Idle state because calculation is done
- # when the request processing has terminated
- for process in [p for p in raw_json['processes'] if p['state'] == 'Idle']:
- p_info.update(fetch_data_(raw_data=process, metrics_list=PER_PROCESS_INFO, pid=str(process['pid'])))
-
- if p_info:
- for new_name in PER_PROCESS_INFO:
- for name, func in CALC:
- to_netdata[name + new_name[1]] = func([p_info[k] for k in p_info if new_name[1] in k])
-
- return to_netdata or None
-
-
-def fetch_data_(raw_data, metrics_list, pid=''):
- """
- :param raw_data: dict
- :param metrics_list: list
- :param pid: str
- :return: dict
- """
- result = dict()
- for metric, new_name in metrics_list:
- if metric in raw_data:
- result[new_name + pid] = float(raw_data[metric])
- return result
-
-
-def parse_raw_data_(is_json, raw_data):
- """
- :param is_json: bool
-    :param raw_data: str
- :return: dict
- """
- if is_json:
- try:
- return json.loads(raw_data)
- except ValueError:
- return dict()
- else:
- raw_data = ' '.join(raw_data.split())
- return dict(REGEX.findall(raw_data))
diff --git a/collectors/python.d.plugin/phpfpm/phpfpm.conf b/collectors/python.d.plugin/phpfpm/phpfpm.conf
deleted file mode 100644
index d3185390..00000000
--- a/collectors/python.d.plugin/phpfpm/phpfpm.conf
+++ /dev/null
@@ -1,88 +0,0 @@
-# netdata python.d.plugin configuration for PHP-FPM
-#
-# This file is in YAML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 1
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed running at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# penalty: yes # the JOB's penalty
-# autodetection_retry: 0 # the JOB's re-check interval in seconds
-#
-# In addition to the above, PHP-FPM also supports the following:
-#
-# url: 'URL' # the URL to fetch PHP-FPM's status page
-# # Be sure to include ?full&json at the end of the url
-#
-# if the URL is password protected, the following are supported:
-#
-# user: 'username'
-# pass: 'password'
-#
-
-# ----------------------------------------------------------------------
-# AUTO-DETECTION JOBS
-# only one of them will run (they have the same name)
-
-localhost:
- name : 'local'
- url : "http://localhost/status?full&json"
-
-localipv4:
- name : 'local'
- url : "http://127.0.0.1/status?full&json"
-
-localipv6:
- name : 'local'
- url : "http://[::1]/status?full&json"
-
diff --git a/collectors/python.d.plugin/portcheck/Makefile.inc b/collectors/python.d.plugin/portcheck/Makefile.inc
deleted file mode 100644
index 76763f02..00000000
--- a/collectors/python.d.plugin/portcheck/Makefile.inc
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# install these files
-dist_python_DATA += portcheck/portcheck.chart.py
-dist_pythonconfig_DATA += portcheck/portcheck.conf
-
-# do not install these files, but include them in the distribution
-dist_noinst_DATA += portcheck/README.md portcheck/Makefile.inc
-
diff --git a/collectors/python.d.plugin/portcheck/README.md b/collectors/python.d.plugin/portcheck/README.md
deleted file mode 100644
index 845fa5b9..00000000
--- a/collectors/python.d.plugin/portcheck/README.md
+++ /dev/null
@@ -1,52 +0,0 @@
-<!--
-title: "TCP endpoint monitoring with Netdata"
-custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/portcheck/README.md
-sidebar_label: "TCP endpoints"
--->
-
-# TCP endpoint monitoring with Netdata
-
-Monitors TCP endpoint availability and response time.
-
-The following charts are drawn per host:
-
-1. **Latency** ms
-
- - Time required to connect to a TCP port.
- Displays latency in 0.1 ms resolution. If the connection failed, the value is missing.
-
-2. **Status** boolean
-
- - Connection successful
- - Could not create socket: possible DNS problems
- - Connection refused: port not listening or blocked
- - Connection timed out: host or port unreachable
-
-## Configuration
-
-Edit the `python.d/portcheck.conf` configuration file using `edit-config` from the Netdata [config
-directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
-
-```bash
-cd /etc/netdata # Replace this path with your Netdata config directory, if different
-sudo ./edit-config python.d/portcheck.conf
-```
-
-```yaml
-server:
- host: 'dns or ip' # required
- port: 22 # required
- timeout: 1 # optional
- update_every: 1 # optional
-```
-
-### Notes
-
-- The error chart is intended for alarms, badges or for access via API; an example health template is sketched after this list.
-- A system/service/firewall might block Netdata's access if a portscan or
- similar is detected.
-- Currently, the accuracy of the latency is low and should be used as reference only.
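-
-An illustrative health template for the status chart (the field names follow Netdata's health
-configuration format; the template name, lookup window and threshold are placeholders to adapt):
-
-```conf
- template: portcheck_unreachable
-       on: portcheck.status
-   lookup: average -30s unaligned absolute of no_connection
-    every: 10s
-     crit: $this > 0
-     info: the TCP port has been unreachable for the last 30 seconds
-```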
-
----
-
-
diff --git a/collectors/python.d.plugin/portcheck/portcheck.chart.py b/collectors/python.d.plugin/portcheck/portcheck.chart.py
deleted file mode 100644
index 818ac765..00000000
--- a/collectors/python.d.plugin/portcheck/portcheck.chart.py
+++ /dev/null
@@ -1,157 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: simple port check netdata python.d module
-# Original Author: ccremer (github.com/ccremer)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-import socket
-
-try:
- from time import monotonic as time
-except ImportError:
- from time import time
-
-from bases.FrameworkServices.SimpleService import SimpleService
-
-PORT_LATENCY = 'connect'
-
-PORT_SUCCESS = 'success'
-PORT_TIMEOUT = 'timeout'
-PORT_FAILED = 'no_connection'
-
-ORDER = ['latency', 'status']
-
-CHARTS = {
- 'latency': {
- 'options': [None, 'TCP connect latency', 'milliseconds', 'latency', 'portcheck.latency', 'line'],
- 'lines': [
- [PORT_LATENCY, 'connect', 'absolute', 100, 1000]
- ]
- },
- 'status': {
- 'options': [None, 'Portcheck status', 'boolean', 'status', 'portcheck.status', 'line'],
- 'lines': [
- [PORT_SUCCESS, 'success', 'absolute'],
- [PORT_TIMEOUT, 'timeout', 'absolute'],
- [PORT_FAILED, 'no connection', 'absolute']
- ]
- }
-}
-
-
-# Not deriving from SocketService, too much is different
-class Service(SimpleService):
- def __init__(self, configuration=None, name=None):
- SimpleService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = CHARTS
- self.host = self.configuration.get('host')
- self.port = self.configuration.get('port')
- self.timeout = self.configuration.get('timeout', 1)
-
- def check(self):
- """
- Parse configuration, check if configuration is available, and dynamically create chart lines data
- :return: boolean
- """
- if self.host is None or self.port is None:
- self.error('Host or port missing')
- return False
- if not isinstance(self.port, int):
- self.error('"port" is not an integer. Specify a numerical value, not service name.')
- return False
-
- self.debug('Enabled portcheck: {host}:{port}, update every {update}s, timeout: {timeout}s'.format(
- host=self.host, port=self.port, update=self.update_every, timeout=self.timeout
- ))
- # We will accept any (valid-ish) configuration, even if initial connection fails (a service might be down from
- # the beginning)
- return True
-
- def _get_data(self):
- """
- Get data from socket
- :return: dict
- """
- data = dict()
- data[PORT_SUCCESS] = 0
- data[PORT_TIMEOUT] = 0
- data[PORT_FAILED] = 0
-
- success = False
- try:
- for socket_config in socket.getaddrinfo(self.host, self.port, socket.AF_UNSPEC, socket.SOCK_STREAM):
- # use first working socket
- sock = self._create_socket(socket_config)
- if sock is not None:
- self._connect2socket(data, socket_config, sock)
- self._disconnect(sock)
- success = True
- break
- except socket.gaierror as error:
- self.debug('Failed to connect to "{host}:{port}", error: {error}'.format(
- host=self.host, port=self.port, error=error
- ))
-
- # We could not connect
- if not success:
- data[PORT_FAILED] = 1
-
- return data
-
- def _create_socket(self, socket_config):
- af, sock_type, proto, _, sa = socket_config
- try:
- self.debug('Creating socket to "{address}", port {port}'.format(address=sa[0], port=sa[1]))
- sock = socket.socket(af, sock_type, proto)
- sock.settimeout(self.timeout)
- return sock
- except socket.error as error:
- self.debug('Failed to create socket "{address}", port {port}, error: {error}'.format(
- address=sa[0], port=sa[1], error=error
- ))
- return None
-
- def _connect2socket(self, data, socket_config, sock):
- """
- Connect to a socket, passing the result of getaddrinfo()
- :return: dict
- """
-
- _, _, _, _, sa = socket_config
- port = str(sa[1])
- try:
- self.debug('Connecting socket to "{address}", port {port}'.format(address=sa[0], port=port))
- start = time()
- sock.connect(sa)
- diff = time() - start
- self.debug('Connected to "{address}", port {port}, latency {latency}'.format(
- address=sa[0], port=port, latency=diff
- ))
- # we will set it at least 0.1 ms. 0.0 would mean failed connection (handy for 3rd-party-APIs)
- data[PORT_LATENCY] = max(round(diff * 10000), 0)
- data[PORT_SUCCESS] = 1
-
- except socket.timeout as error:
- self.debug('Socket timed out on "{address}", port {port}, error: {error}'.format(
- address=sa[0], port=port, error=error
- ))
- data[PORT_TIMEOUT] = 1
-
- except socket.error as error:
- self.debug('Failed to connect to "{address}", port {port}, error: {error}'.format(
- address=sa[0], port=port, error=error
- ))
- data[PORT_FAILED] = 1
-
- def _disconnect(self, sock):
- """
- Close socket connection
- :return:
- """
- if sock is not None:
- try:
- self.debug('Closing socket')
- sock.shutdown(2) # 0 - read, 1 - write, 2 - all
- sock.close()
- except socket.error:
- pass
diff --git a/collectors/python.d.plugin/portcheck/portcheck.conf b/collectors/python.d.plugin/portcheck/portcheck.conf
deleted file mode 100644
index 2b32c003..00000000
--- a/collectors/python.d.plugin/portcheck/portcheck.conf
+++ /dev/null
@@ -1,74 +0,0 @@
-# netdata python.d.plugin configuration for portcheck
-#
-# This file is in YAML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 1
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# chart_cleanup sets the default chart cleanup interval in iterations.
-# A chart is marked as obsolete if it has not been updated
-# 'chart_cleanup' iterations in a row.
-# They will be hidden immediately (not offered to dashboard viewer,
-# streamed upstream and archived to external databases) and deleted one hour
-# later (configurable from netdata.conf).
-# -- For this plugin, cleanup MUST be disabled, otherwise we lose latency chart
-chart_cleanup: 0
-
-# Autodetection and retries do not work for this plugin
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed running at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# -------------------------------
-# ATTENTION: Any valid configuration will be accepted, even if initial connection fails!
-# -------------------------------
-#
-# There is intentionally no default config for 'localhost'
-
-# job_name:
-# name: myname # [optional] the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # [optional] the JOB's data collection frequency
-# priority: 60000 # [optional] the JOB's order on the dashboard
-# penalty: yes # the JOB's penalty
-# timeout: 1 # [optional] the socket timeout when connecting
-# host: 'dns or ip' # [required] the remote host address in either IPv4, IPv6 or as DNS name.
-# port: 22 # [required] the port number to check. Specify an integer, not service name.
-
-# You have just been warned about possible portscan blocking. The portcheck plugin is meant for simple use cases.
-# Currently, the accuracy of the latency is low and should be used as reference only.
-
diff --git a/collectors/python.d.plugin/powerdns/Makefile.inc b/collectors/python.d.plugin/powerdns/Makefile.inc
deleted file mode 100644
index 256d32a4..00000000
--- a/collectors/python.d.plugin/powerdns/Makefile.inc
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# install these files
-dist_python_DATA += powerdns/powerdns.chart.py
-dist_pythonconfig_DATA += powerdns/powerdns.conf
-
-# do not install these files, but include them in the distribution
-dist_noinst_DATA += powerdns/README.md powerdns/Makefile.inc
-
diff --git a/collectors/python.d.plugin/powerdns/README.md b/collectors/python.d.plugin/powerdns/README.md
deleted file mode 100644
index 02449e68..00000000
--- a/collectors/python.d.plugin/powerdns/README.md
+++ /dev/null
@@ -1,104 +0,0 @@
-<!--
-title: "PowerDNS monitoring with Netdata"
-custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/powerdns/README.md
-sidebar_label: "PowerDNS"
--->
-
-# PowerDNS monitoring with Netdata
-
-Monitors authoritative server and recursor statistics.
-
-PowerDNS Authoritative Server charts:
-
-1. **Queries and Answers**
-
- - udp-queries
- - udp-answers
- - tcp-queries
- - tcp-answers
-
-2. **Cache Usage**
-
- - query-cache-hit
- - query-cache-miss
- - packetcache-hit
- - packetcache-miss
-
-3. **Cache Size**
-
- - query-cache-size
- - packetcache-size
- - key-cache-size
- - meta-cache-size
-
-4. **Latency**
-
- - latency
-
-PowerDNS Recursor charts:
-
-1. **Questions In**
-
- - questions
- - ipv6-questions
- - tcp-queries
-
-2. **Questions Out**
-
- - all-outqueries
- - ipv6-outqueries
- - tcp-outqueries
- - throttled-outqueries
-
-3. **Answer Times**
-
- - answers-slow
- - answers0-1
- - answers1-10
- - answers10-100
- - answers100-1000
-
-4. **Timeouts**
-
- - outgoing-timeouts
- - outgoing4-timeouts
- - outgoing6-timeouts
-
-5. **Drops**
-
- - over-capacity-drops
-
-6. **Cache Usage**
-
- - cache-hits
- - cache-misses
- - packetcache-hits
- - packetcache-misses
-
-7. **Cache Size**
-
- - cache-entries
- - packetcache-entries
- - negcache-entries
-
-## Configuration
-
-Edit the `python.d/powerdns.conf` configuration file using `edit-config` from the Netdata [config
-directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
-
-```bash
-cd /etc/netdata # Replace this path with your Netdata config directory, if different
-sudo ./edit-config python.d/powerdns.conf
-```
-
-```yaml
-local:
- name : 'local'
- url : 'http://127.0.0.1:8081/api/v1/servers/localhost/statistics'
- header :
- X-API-Key: 'change_me'
-```
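-
-The statistics API must be enabled on the PowerDNS side. For the Authoritative Server this is
-done in `pdns.conf`, roughly as follows (an illustrative sketch; option names may differ between
-PowerDNS versions, so check the documentation for yours):
-
-```conf
-api=yes
-api-key=change_me
-webserver=yes
-webserver-address=127.0.0.1
-webserver-port=8081
-webserver-allow-from=127.0.0.1,::1
-```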
-
----
-
-
diff --git a/collectors/python.d.plugin/powerdns/powerdns.chart.py b/collectors/python.d.plugin/powerdns/powerdns.chart.py
deleted file mode 100644
index b951e0c1..00000000
--- a/collectors/python.d.plugin/powerdns/powerdns.chart.py
+++ /dev/null
@@ -1,153 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: powerdns netdata python.d module
-# Author: Ilya Mashchenko (ilyam8)
-# Author: Luke Whitworth
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from json import loads
-
-from bases.FrameworkServices.UrlService import UrlService
-
-ORDER = [
- 'questions',
- 'cache_usage',
- 'cache_size',
- 'latency',
-]
-
-CHARTS = {
- 'questions': {
- 'options': [None, 'PowerDNS Queries and Answers', 'count', 'questions', 'powerdns.questions', 'line'],
- 'lines': [
- ['udp-queries', None, 'incremental'],
- ['udp-answers', None, 'incremental'],
- ['tcp-queries', None, 'incremental'],
- ['tcp-answers', None, 'incremental']
- ]
- },
- 'cache_usage': {
- 'options': [None, 'PowerDNS Cache Usage', 'count', 'cache', 'powerdns.cache_usage', 'line'],
- 'lines': [
- ['query-cache-hit', None, 'incremental'],
- ['query-cache-miss', None, 'incremental'],
- ['packetcache-hit', 'packet-cache-hit', 'incremental'],
- ['packetcache-miss', 'packet-cache-miss', 'incremental']
- ]
- },
- 'cache_size': {
- 'options': [None, 'PowerDNS Cache Size', 'count', 'cache', 'powerdns.cache_size', 'line'],
- 'lines': [
- ['query-cache-size', None, 'absolute'],
- ['packetcache-size', 'packet-cache-size', 'absolute'],
- ['key-cache-size', None, 'absolute'],
- ['meta-cache-size', None, 'absolute']
- ]
- },
- 'latency': {
- 'options': [None, 'PowerDNS Latency', 'microseconds', 'latency', 'powerdns.latency', 'line'],
- 'lines': [
- ['latency', None, 'absolute']
- ]
- }
-}
-
-RECURSOR_ORDER = ['questions-in', 'questions-out', 'answer-times', 'timeouts', 'drops', 'cache_usage', 'cache_size']
-
-RECURSOR_CHARTS = {
- 'questions-in': {
- 'options': [None, 'PowerDNS Recursor Questions In', 'count', 'questions', 'powerdns_recursor.questions-in',
- 'line'],
- 'lines': [
- ['questions', None, 'incremental'],
- ['ipv6-questions', None, 'incremental'],
- ['tcp-questions', None, 'incremental']
- ]
- },
- 'questions-out': {
- 'options': [None, 'PowerDNS Recursor Questions Out', 'count', 'questions', 'powerdns_recursor.questions-out',
- 'line'],
- 'lines': [
- ['all-outqueries', None, 'incremental'],
- ['ipv6-outqueries', None, 'incremental'],
- ['tcp-outqueries', None, 'incremental'],
- ['throttled-outqueries', None, 'incremental']
- ]
- },
- 'answer-times': {
- 'options': [None, 'PowerDNS Recursor Answer Times', 'count', 'performance', 'powerdns_recursor.answer-times',
- 'line'],
- 'lines': [
- ['answers-slow', None, 'incremental'],
- ['answers0-1', None, 'incremental'],
- ['answers1-10', None, 'incremental'],
- ['answers10-100', None, 'incremental'],
- ['answers100-1000', None, 'incremental']
- ]
- },
- 'timeouts': {
- 'options': [None, 'PowerDNS Recursor Questions Time', 'count', 'performance', 'powerdns_recursor.timeouts',
- 'line'],
- 'lines': [
- ['outgoing-timeouts', None, 'incremental'],
- ['outgoing4-timeouts', None, 'incremental'],
- ['outgoing6-timeouts', None, 'incremental']
- ]
- },
- 'drops': {
- 'options': [None, 'PowerDNS Recursor Drops', 'count', 'performance', 'powerdns_recursor.drops', 'line'],
- 'lines': [
- ['over-capacity-drops', None, 'incremental']
- ]
- },
- 'cache_usage': {
- 'options': [None, 'PowerDNS Recursor Cache Usage', 'count', 'cache', 'powerdns_recursor.cache_usage', 'line'],
- 'lines': [
- ['cache-hits', None, 'incremental'],
- ['cache-misses', None, 'incremental'],
- ['packetcache-hits', 'packet-cache-hit', 'incremental'],
- ['packetcache-misses', 'packet-cache-miss', 'incremental']
- ]
- },
- 'cache_size': {
- 'options': [None, 'PowerDNS Recursor Cache Size', 'count', 'cache', 'powerdns_recursor.cache_size', 'line'],
- 'lines': [
- ['cache-entries', None, 'absolute'],
- ['packetcache-entries', None, 'absolute'],
- ['negcache-entries', None, 'absolute']
- ]
- }
-}
-
-
-class Service(UrlService):
- def __init__(self, configuration=None, name=None):
- UrlService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = CHARTS
- self.url = configuration.get('url', 'http://127.0.0.1:8081/api/v1/servers/localhost/statistics')
-
- def check(self):
- self._manager = self._build_manager()
- if not self._manager:
- return None
-
- d = self._get_data()
- if not d:
- return False
-
- if is_recursor(d):
- self.order = RECURSOR_ORDER
- self.definitions = RECURSOR_CHARTS
- self.module_name = 'powerdns_recursor'
-
- return True
-
- def _get_data(self):
- data = self._get_raw_data()
- if not data:
- return None
- return dict((d['name'], d['value']) for d in loads(data))
-
-
-def is_recursor(d):
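-    # 'over-capacity-drops' and 'tcp-questions' are statistics exported by the PowerDNS Recursor
-    # but not by the Authoritative Server, so their presence is used here as a heuristic to tell
-    # the two products apart.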
- return 'over-capacity-drops' in d and 'tcp-questions' in d
diff --git a/collectors/python.d.plugin/powerdns/powerdns.conf b/collectors/python.d.plugin/powerdns/powerdns.conf
deleted file mode 100644
index 559bf175..00000000
--- a/collectors/python.d.plugin/powerdns/powerdns.conf
+++ /dev/null
@@ -1,76 +0,0 @@
-# netdata python.d.plugin configuration for powerdns
-#
-# This file is in YAML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 1
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed running at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# penalty: yes # the JOB's penalty
-# autodetection_retry: 0 # the JOB's re-check interval in seconds
-#
-# In addition to the above, powerdns also supports the following:
-#
-# url: 'URL' # the URL to fetch powerdns performance statistics
-# header:
-# X-API-Key: 'Key' # API key
-#
-# ----------------------------------------------------------------------
-# AUTO-DETECTION JOBS
-# only one of them will run (they have the same name)
-
-# localhost:
-# name : 'local'
-# url : 'http://127.0.0.1:8081/api/v1/servers/localhost/statistics'
-# header:
-# X-API-Key: 'change_me'
diff --git a/collectors/python.d.plugin/redis/Makefile.inc b/collectors/python.d.plugin/redis/Makefile.inc
deleted file mode 100644
index 6aab0897..00000000
--- a/collectors/python.d.plugin/redis/Makefile.inc
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# install these files
-dist_python_DATA += redis/redis.chart.py
-dist_pythonconfig_DATA += redis/redis.conf
-
-# do not install these files, but include them in the distribution
-dist_noinst_DATA += redis/README.md redis/Makefile.inc
-
diff --git a/collectors/python.d.plugin/redis/README.md b/collectors/python.d.plugin/redis/README.md
deleted file mode 100644
index 31982710..00000000
--- a/collectors/python.d.plugin/redis/README.md
+++ /dev/null
@@ -1,64 +0,0 @@
-<!--
-title: "Redis monitoring with Netdata"
-custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/redis/README.md
-sidebar_label: "Redis"
--->
-
-# Redis monitoring with Netdata
-
-Monitors Redis database status. It reads the server's response to the `INFO` command.
-
-The following charts are drawn:
-
-1. **Operations** per second
-
- - operations
-
-2. **Hit rate** in percent
-
- - rate
-
-3. **Memory utilization** in kilobytes
-
- - total
- - lua
-
-4. **Database keys**
-
- lines are created dynamically based on how many databases there are
-
-5. **Clients**
-
- - connected
- - blocked
-
-6. **Slaves**
-
- - connected
-
-## Configuration
-
-Edit the `python.d/redis.conf` configuration file using `edit-config` from the Netdata [config
-directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
-
-```bash
-cd /etc/netdata # Replace this path with your Netdata config directory, if different
-sudo ./edit-config python.d/redis.conf
-```
-
-```yaml
-socket:
- name : 'local'
- socket : '/var/lib/redis/redis.sock'
-
-localhost:
- name : 'local'
- host : 'localhost'
- port : 6379
-```
-
-When no configuration file is found, the module tries to connect to the TCP/IP socket `localhost:6379`.
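-
-To get a feel for what the module collects, you can talk to Redis directly. The sketch below is only an illustration (it is not the collector's code): it sends `INFO` as an inline command over a plain TCP socket to the default `localhost:6379` and picks a couple of fields out of the reply; host, port and field names are just examples.
-
-```python
-# Illustrative sketch: query INFO over a raw TCP socket and parse a few "key:value" lines.
-# Assumes a local, unauthenticated Redis on the default port; a single recv() is used for
-# brevity, so a very large INFO reply may come back truncated.
-import re
-import socket
-
-conn = socket.create_connection(('localhost', 6379), timeout=1)
-conn.sendall(b'INFO\r\n')
-reply = conn.recv(65536).decode('utf-8', errors='ignore')
-conn.close()
-
-fields = dict(re.findall(r'\n([a-z_0-9]+):([^\r\n]+)', reply))
-print(fields.get('connected_clients'), fields.get('used_memory'))
-```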
-
----
-
-
diff --git a/collectors/python.d.plugin/redis/redis.chart.py b/collectors/python.d.plugin/redis/redis.chart.py
deleted file mode 100644
index e09916d8..00000000
--- a/collectors/python.d.plugin/redis/redis.chart.py
+++ /dev/null
@@ -1,268 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: redis netdata python.d module
-# Author: Pawel Krupa (paulfantom)
-# Author: Ilya Mashchenko (ilyam8)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-import re
-from copy import deepcopy
-
-from bases.FrameworkServices.SocketService import SocketService
-
-REDIS_ORDER = [
- 'operations',
- 'hit_rate',
- 'memory',
- 'keys_redis',
- 'eviction',
- 'net',
- 'connections',
- 'clients',
- 'slaves',
- 'persistence',
- 'bgsave_now',
- 'bgsave_health',
- 'uptime',
-]
-
-PIKA_ORDER = [
- 'operations',
- 'hit_rate',
- 'memory',
- 'keys_pika',
- 'connections',
- 'clients',
- 'slaves',
- 'uptime',
-]
-
-CHARTS = {
- 'operations': {
- 'options': [None, 'Operations', 'operations/s', 'operations', 'redis.operations', 'line'],
- 'lines': [
- ['total_commands_processed', 'commands', 'incremental'],
- ['instantaneous_ops_per_sec', 'operations', 'absolute']
- ]
- },
- 'hit_rate': {
- 'options': [None, 'Hit rate', 'percentage', 'hits', 'redis.hit_rate', 'line'],
- 'lines': [
- ['hit_rate', 'rate', 'absolute']
- ]
- },
- 'memory': {
- 'options': [None, 'Memory utilization', 'KiB', 'memory', 'redis.memory', 'area'],
- 'lines': [
- ['maxmemory', 'max', 'absolute', 1, 1024],
- ['used_memory', 'total', 'absolute', 1, 1024],
- ['used_memory_lua', 'lua', 'absolute', 1, 1024]
- ]
- },
- 'net': {
- 'options': [None, 'Bandwidth', 'kilobits/s', 'network', 'redis.net', 'area'],
- 'lines': [
- ['total_net_input_bytes', 'in', 'incremental', 8, 1000],
- ['total_net_output_bytes', 'out', 'incremental', -8, 1000]
- ]
- },
- 'keys_redis': {
- 'options': [None, 'Keys per Database', 'keys', 'keys', 'redis.keys', 'line'],
- 'lines': []
- },
- 'keys_pika': {
- 'options': [None, 'Keys', 'keys', 'keys', 'redis.keys', 'line'],
- 'lines': [
- ['kv_keys', 'kv', 'absolute'],
- ['hash_keys', 'hash', 'absolute'],
- ['list_keys', 'list', 'absolute'],
- ['zset_keys', 'zset', 'absolute'],
- ['set_keys', 'set', 'absolute']
- ]
- },
- 'eviction': {
- 'options': [None, 'Evicted Keys', 'keys', 'keys', 'redis.eviction', 'line'],
- 'lines': [
- ['evicted_keys', 'evicted', 'absolute']
- ]
- },
- 'connections': {
- 'options': [None, 'Connections', 'connections/s', 'connections', 'redis.connections', 'line'],
- 'lines': [
- ['total_connections_received', 'received', 'incremental', 1],
- ['rejected_connections', 'rejected', 'incremental', -1]
- ]
- },
- 'clients': {
- 'options': [None, 'Clients', 'clients', 'connections', 'redis.clients', 'line'],
- 'lines': [
- ['connected_clients', 'connected', 'absolute', 1],
- ['blocked_clients', 'blocked', 'absolute', -1]
- ]
- },
- 'slaves': {
- 'options': [None, 'Slaves', 'slaves', 'replication', 'redis.slaves', 'line'],
- 'lines': [
- ['connected_slaves', 'connected', 'absolute']
- ]
- },
- 'persistence': {
- 'options': [None, 'Persistence Changes Since Last Save', 'changes', 'persistence',
- 'redis.rdb_changes', 'line'],
- 'lines': [
- ['rdb_changes_since_last_save', 'changes', 'absolute']
- ]
- },
- 'bgsave_now': {
- 'options': [None, 'Duration of the RDB Save Operation', 'seconds', 'persistence',
- 'redis.bgsave_now', 'absolute'],
- 'lines': [
- ['rdb_bgsave_in_progress', 'rdb save', 'absolute']
- ]
- },
- 'bgsave_health': {
- 'options': [None, 'Status of the Last RDB Save Operation', 'status', 'persistence',
- 'redis.bgsave_health', 'line'],
- 'lines': [
- ['rdb_last_bgsave_status', 'rdb save', 'absolute']
- ]
- },
- 'uptime': {
- 'options': [None, 'Uptime', 'seconds', 'uptime', 'redis.uptime', 'line'],
- 'lines': [
- ['uptime_in_seconds', 'uptime', 'absolute']
- ]
- }
-}
-
-
-def copy_chart(name):
- return {name: deepcopy(CHARTS[name])}
-
-
-RE = re.compile(r'\n([a-z_0-9 ]+):(?:keys=)?([^,\r]+)')
-
-
-class Service(SocketService):
- def __init__(self, configuration=None, name=None):
- SocketService.__init__(self, configuration=configuration, name=name)
- self.order = list()
- self.definitions = dict()
- self._keep_alive = True
- self.host = self.configuration.get('host', 'localhost')
- self.port = self.configuration.get('port', 6379)
- self.unix_socket = self.configuration.get('socket')
- p = self.configuration.get('pass')
- self.auth_request = 'AUTH {0} \r\n'.format(p).encode() if p else None
- self.request = 'INFO\r\n'.encode()
- self.bgsave_time = 0
- self.keyspace_dbs = set()
-
- def do_auth(self):
- resp = self._get_raw_data(request=self.auth_request)
- if not resp:
- return False
- if resp.strip() != '+OK':
- self.error('invalid password')
- return False
- return True
-
- def get_raw_and_parse(self):
- if self.auth_request and not self.do_auth():
- return None
-
- resp = self._get_raw_data()
-
- if not resp:
- return None
-
- parsed = RE.findall(resp)
-
- if not parsed:
- self.error('response is invalid/empty')
- return None
-
- return dict((k.replace(' ', '_'), v) for k, v in parsed)
-
- def get_data(self):
- """
- Get data from socket
- :return: dict
- """
- data = self.get_raw_and_parse()
- if not data:
- return None
-
- self.calc_hit_rate(data)
- self.calc_redis_keys(data)
- self.calc_redis_rdb_save_operations(data)
- return data
-
- @staticmethod
- def calc_hit_rate(data):
- try:
- hits = int(data['keyspace_hits'])
- misses = int(data['keyspace_misses'])
- data['hit_rate'] = hits * 100 / (hits + misses)
- except (KeyError, ZeroDivisionError):
- data['hit_rate'] = 0
-
- def calc_redis_keys(self, data):
- if not data.get('redis_version'):
- return
- # db0:keys=2,expires=0,avg_ttl=0
- new_keyspace_dbs = [k for k in data if k.startswith('db') and k not in self.keyspace_dbs]
- for db in new_keyspace_dbs:
- self.keyspace_dbs.add(db)
- self.charts['keys_redis'].add_dimension([db, None, 'absolute'])
- for db in self.keyspace_dbs:
- if db not in data:
- data[db] = 0
-
- def calc_redis_rdb_save_operations(self, data):
- if not (data.get('redis_version') and data.get('rdb_bgsave_in_progress')):
- return
- if data['rdb_bgsave_in_progress'] != '0':
- self.bgsave_time += self.update_every
- else:
- self.bgsave_time = 0
-
- data['rdb_last_bgsave_status'] = 0 if data['rdb_last_bgsave_status'] == 'ok' else 1
- data['rdb_bgsave_in_progress'] = self.bgsave_time
-
- def check(self):
- """
- Parse configuration, check if redis is available, and dynamically create chart lines data
- :return: boolean
- """
- data = self.get_raw_and_parse()
-
- if not data:
- return False
-
- self.order = PIKA_ORDER if data.get('pika_version') else REDIS_ORDER
-
- for n in self.order:
- self.definitions.update(copy_chart(n))
-
- return True
-
- def _check_raw_data(self, data):
- """
- Check if all data has been gathered from socket.
- Parse first line containing message length and check against received message
- :param data: str
- :return: boolean
- """
- length = len(data)
- supposed = data.split('\n')[0][1:-1]
- offset = len(supposed) + 4 # 1 dollar sign, 1 newline character + 1 ending sequence '\r\n'
- if not supposed.isdigit():
- return True
- supposed = int(supposed)
-
- if length - offset >= supposed:
- self.debug('received full response from redis')
- return True
-
- self.debug('waiting more data from redis')
- return False
diff --git a/collectors/python.d.plugin/redis/redis.conf b/collectors/python.d.plugin/redis/redis.conf
deleted file mode 100644
index b456d75d..00000000
--- a/collectors/python.d.plugin/redis/redis.conf
+++ /dev/null
@@ -1,110 +0,0 @@
-# netdata python.d.plugin configuration for redis
-#
-# This file is in YaML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 1
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed running at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# penalty: yes # the JOB's penalty
-# autodetection_retry: 0 # the JOB's re-check interval in seconds
-#
-# In addition to the above, redis also supports the following:
-#
-# socket: 'path/to/redis.sock'
-#
-# or
-# host: 'IP or HOSTNAME' # the host to connect to
-# port: PORT # the port to connect to
-#
-# and
-# pass: 'password' # the redis password to use for AUTH command
-#
-
-# ----------------------------------------------------------------------
-# AUTO-DETECTION JOBS
-# only one of them will run (they have the same name)
-
-socket1:
- name : 'local'
- socket : '/tmp/redis.sock'
- # pass : ''
-
-socket2:
- name : 'local'
- socket : '/var/run/redis/redis.sock'
- # pass : ''
-
-socket3:
- name : 'local'
- socket : '/var/lib/redis/redis.sock'
- # pass : ''
-
-localhost:
- name : 'local'
- host : 'localhost'
- port : 6379
- # pass : ''
-
-localipv4:
- name : 'local'
- host : '127.0.0.1'
- port : 6379
- # pass : ''
-
-localipv6:
- name : 'local'
- host : '::1'
- port : 6379
- # pass : ''
-
diff --git a/collectors/python.d.plugin/web_log/Makefile.inc b/collectors/python.d.plugin/web_log/Makefile.inc
deleted file mode 100644
index 89311599..00000000
--- a/collectors/python.d.plugin/web_log/Makefile.inc
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# install these files
-dist_python_DATA += web_log/web_log.chart.py
-dist_pythonconfig_DATA += web_log/web_log.conf
-
-# do not install these files, but include them in the distribution
-dist_noinst_DATA += web_log/README.md web_log/Makefile.inc
-
diff --git a/collectors/python.d.plugin/web_log/README.md b/collectors/python.d.plugin/web_log/README.md
deleted file mode 100644
index 552d56e9..00000000
--- a/collectors/python.d.plugin/web_log/README.md
+++ /dev/null
@@ -1,219 +0,0 @@
-<!--
-title: "Web server log (Apache, NGINX, Squid) monitoring with Netdata"
-custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/web_log/README.md
-sidebar_label: "Web server logs (Apache, NGINX, Squid)"
--->
-
-# Web server log (Apache, NGINX, Squid) monitoring with Netdata
-
-Tails the access log file and collects web server / caching proxy metrics.
-
-## Motivation
-
-Web server log files have existed for more than 20 years. All web servers of all kinds, from all vendors, [since the time NCSA httpd was powering the web](https://en.wikipedia.org/wiki/NCSA_HTTPd), produce log files, saving in real-time all accesses to web sites and APIs.
-
-Yet, after the appearance of Google Analytics and similar services, and the recent rise of APM (Application Performance Monitoring) with sophisticated time-series databases that collect and analyze metrics at the application level, all these web server log files are mostly just filling our disks, rotated every night without any use whatsoever.
-
-Netdata turns this "useless" log file into a powerful performance and health monitoring tool, capable of detecting, **in real-time**, most common web server problems, such as:
-
-- too many redirects (i.e. **oops!** *this should not redirect clients to itself*)
-- too many bad requests (i.e. **oops!** *a few files were not uploaded*)
-- too many internal server errors (i.e. **oops!** *this release crashes too much*)
-- unreasonably many requests (i.e. **oops!** *we are under attack*)
-- unreasonably few requests (i.e. **oops!** *call the network guys*)
-- unreasonably slow responses (i.e. **oops!** *the database is slow again*)
-- too few successful responses (i.e. **oops!** *help us God!*)
-
-## Usage
-
-If Netdata is installed on a system running a web server, it will detect it and automatically present a series of charts, with information obtained from the web server API, like these (*these do not come from the web server log file*):
-
-![image](https://cloud.githubusercontent.com/assets/2662304/22900686/e283f636-f237-11e6-93d2-cbdf63de150c.png)
-*[**netdata**](https://my-netdata.io/) charts based on metrics collected by querying the `nginx` API (i.e. `/stub_status`).*
-
-> [**netdata**](https://my-netdata.io/) supports `apache`, `nginx`, `lighttpd` and `tomcat`. To obtain real-time information from a web server API, the web server needs to expose it. For directions on configuring your web server, check the config files for each web server. There is a directory with a config file for each web server under `/etc/netdata/python.d/`.
-
-## Configuration
-
-Edit the `python.d/web_log.conf` configuration file using `edit-config` from the Netdata [config
-directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
-
-```bash
-cd /etc/netdata # Replace this path with your Netdata config directory, if different
-sudo ./edit-config python.d/web_log.conf
-```
-
-[**netdata**](https://my-netdata.io/) has a powerful `web_log` plugin, capable of incrementally parsing any number of web server log files. This plugin is automatically started with [**netdata**](https://my-netdata.io/) and comes pre-configured to find web server log files on popular distributions. Its configuration is at `/etc/netdata/python.d/web_log.conf`, like this:
-
-```yaml
-nginx_log:
- name : 'nginx_log'
- path : '/var/log/nginx/access.log'
-
-apache_log:
- name : 'apache_log'
- path : '/var/log/apache/other_vhosts_access.log'
- categories:
- cacti : 'cacti.*'
- observium : 'observium'
-```
-
-The module has preconfigured jobs for nginx, apache and gunicorn on various distros.
-You can add one such section for each of your web server log files.
-
-> **Important**<br/>Keep in mind [**netdata**](https://my-netdata.io/) runs as user `netdata`. So, make sure user `netdata` has access to the logs directory and can read the log file.
-
-## Charts
-
-Once you have all log files configured and [**netdata**](https://my-netdata.io/) restarted, **for each log file** you will get a section on the [**netdata**](https://my-netdata.io/) dashboard, with the following charts.
-
-### Responses by status
-
-In this chart we tried to provide a meaningful status for all responses (a small sketch of this grouping follows the chart below). So:
-
-- `success` counts all the valid responses (i.e. `1xx` informational, `2xx` successful and `304` not modified).
-- `error` counts the `5xx` internal server errors. These are very bad; they mean your web site or API is facing difficulties.
-- `redirect` counts the `3xx` responses, except `304`. All `3xx` are redirects, but `304` means "not modified" - it tells the browser the content it already has is still valid and can be used as-is. So, we decided to count it as a successful response.
-- `bad` counts the `4xx` bad requests that cannot be served.
-- `other` counts all the other, non-standard, types of responses.
-
-![image](https://cloud.githubusercontent.com/assets/2662304/22902194/ea0affc6-f23c-11e6-85f1-a4951dd4bb40.png)
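-
-A small sketch of this grouping (for illustration only; the module's own implementation lives in `web_log.chart.py` and may differ in a few edge cases):
-
-```python
-# Group an HTTP status code the way the chart above describes it.
-def status_group(code):
-    family = code[0]
-    if family in ('1', '2') or code == '304':
-        return 'success'
-    if family == '3':
-        return 'redirect'
-    if family == '4':
-        return 'bad'
-    if family == '5':
-        return 'error'
-    return 'other'
-
-print([status_group(c) for c in ('200', '304', '302', '404', '500', '999')])
-# ['success', 'success', 'redirect', 'bad', 'error', 'other']
-```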
-
-### Responses by type
-
-Then, we group all responses by code family, without interpreting their meaning.
-**Response by type** requests/s
-
-- success (1xx, 2xx, 304)
-- error (5xx)
-- redirect (3xx except 304)
-- bad (4xx)
-- other (all other responses)
-
-![image](https://cloud.githubusercontent.com/assets/2662304/22901883/dea7d33a-f23b-11e6-960d-00a913b58936.png)
-
-### Responses by code family
-
-Here we show all the response codes in detail.
-
-**Response by code family** requests/s
-
-- 1xx (informational)
-- 2xx (successful)
-- 3xx (redirect)
-- 4xx (bad)
-- 5xx (internal server errors)
-- other (non-standard responses)
-- unmatched (the lines in the log file that are not matched)
-
-![image](https://cloud.githubusercontent.com/assets/2662304/22901965/1a5d84ba-f23c-11e6-9d38-3deebcc8b879.png)
-
-> **Important**<br/>If your application is using hundreds of non-standard response codes, your browser may become slow while viewing this chart, so we have added a configuration [option to disable this chart](https://github.com/netdata/netdata/blob/419cd0a237275e5eeef3f92dcded84e735ee6c58/conf.d/python.d/web_log.conf#L63).
-
-### Detailed Response Codes
-
-Number of responses for each response code individually (requests/s)
-
-### Bandwidth
-
-This is a nice view of the traffic the web server is receiving and sending.
-
-What is important to know for this chart is that the bandwidth used for each request and response is accounted at the time the log is written. Since [**netdata**](https://my-netdata.io/) refreshes this chart every single second, you may see unrealistic spikes if the size of the requests or responses is too big. The reason is simple: a response may have needed 1 minute to be completed, but all the bandwidth used during that minute for the specific response will be accounted at the second the log line is written.
-
-As the legend on the chart suggests, you can use FireQOS to set up QoS on the web server ports and IPs to accurately measure the bandwidth the web server is using. Actually, [there may be a few more reasons to install QoS on your servers](/collectors/tc.plugin/README.md#tcplugin)...
-
-**Bandwidth** KB/s
-
-- received (bandwidth of requests)
-- sent (bandwidth of responses)
-
-![image](https://cloud.githubusercontent.com/assets/2662304/22902266/245141d6-f23d-11e6-90f9-98729733e0da.png)
-
-> **Important**<br/>Most web servers do not log the request size by default.<br/>So, [unless you have configured your web server to log the size of requests](https://github.com/netdata/netdata/blob/419cd0a237275e5eeef3f92dcded84e735ee6c58/conf.d/python.d/web_log.conf#L76-L89), the `received` dimension will always be zero.
-
-### Timings
-
-[**netdata**](https://my-netdata.io/) will also render the `minimum`, `average` and `maximum` time the web server needed to respond to requests.
-
-Keep in mind that most web servers' timings start at the reception of the full request and end at the dispatch of the last byte of the response. So, they include network latencies of responses, but they do not include network latencies of requests.
-
-**Timings** ms (request processing time)
-
-- min (the minimum response time)
-- max (the maximum response time)
-- average (the average response time)
-
-![image](https://cloud.githubusercontent.com/assets/2662304/22902283/369e3f92-f23d-11e6-9359-53e5d4ecb18e.png)
-
-> **Important**<br/>Most web servers do not log timing information by default.<br/>So, [unless you have configured your web server to also log timings](https://github.com/netdata/netdata/blob/419cd0a237275e5eeef3f92dcded84e735ee6c58/conf.d/python.d/web_log.conf#L76-L89), this chart will not exist.
-
-### URL patterns
-
-This is a very interesting chart. It is configured entirely by you.
-
-[**netdata**](https://my-netdata.io/) can map the URLs found in the log file into categories. You can define these categories by providing names and regular expressions in `web_log.conf`.
-
-So, this configuration:
-
-```yaml
-nginx_netdata: # name the charts
- path: '/var/log/nginx/access.log' # web server log file
- categories:
- badges : '^/api/v1/badge\.svg'
- charts : '^/api/v1/(data|chart|charts)'
- registry : '^/api/v1/registry'
- alarms : '^/api/v1/alarm'
- allmetrics : '^/api/v1/allmetrics'
- api_other : '^/api/'
- netdata_conf: '^/netdata.conf'
- api_old : '^/(data|datasource|graph|list|all\.json)'
-```
-
-Produces the following chart. The `categories` section is matched in the order given. So, pay attention to the order you give your patterns.
-
-![image](https://cloud.githubusercontent.com/assets/2662304/22902302/4d25bf06-f23d-11e6-844d-18c0876bdc3d.png)
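-
-The sketch below shows why the order matters: categories are tried top to bottom and the first regular expression that matches wins. It is an illustration only, reusing a few names and patterns from the example configuration above, not the module's actual code.
-
-```python
-import re
-
-# Categories are checked in the order they are defined; the first match wins.
-CATEGORIES = [
-    ('badges', re.compile(r'^/api/v1/badge\.svg')),
-    ('charts', re.compile(r'^/api/v1/(data|chart|charts)')),
-    ('api_other', re.compile(r'^/api/')),
-]
-
-def categorize(url):
-    for name, pattern in CATEGORIES:
-        if pattern.search(url):
-            return name
-    return 'other'
-
-print(categorize('/api/v1/badge.svg'))              # badges
-print(categorize('/api/v1/data?chart=system.cpu'))  # charts
-print(categorize('/api/v1/registry'))               # api_other, since 'registry' is not listed here
-```
-
-If `api_other` were listed first, every `/api/...` request would fall into it, which is why narrower patterns must come before broader ones.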
-
-### HTTP methods
-
-This chart breaks down requests by HTTP method used.
-
-![image](https://cloud.githubusercontent.com/assets/2662304/22902323/5ee376d4-f23d-11e6-8457-157d3f438843.png)
-
-### IP versions
-
-This one provides requests per IP version used by the clients (`IPv4`, `IPv6`).
-
-![image](https://cloud.githubusercontent.com/assets/2662304/22902370/7091a770-f23d-11e6-8cd2-74e9a67b1397.png)
-
-### Unique clients
-
-The last charts are about the unique IPs accessing your web server.
-
-**Current Poll Unique Client IPs** unique ips/s. This one counts the unique IPs for each data collection iteration (i.e. **unique clients per second**).
-
-![image](https://cloud.githubusercontent.com/assets/2662304/22902384/835aa168-f23d-11e6-914f-cfc3f06eaff8.png)
-
-**All Time Unique Client IPs** unique ips. Counts the unique IPs seen since the last [**netdata**](https://my-netdata.io/) restart.
-
-![image](https://cloud.githubusercontent.com/assets/2662304/22902407/92dd27e6-f23d-11e6-900d-eede7bc08e64.png)
-
-> **Important**<br/>To provide this information `web_log` plugin keeps in memory all the IPs seen by the web server. Although this does not require much memory, if you have a web server with several million unique client IPs, we suggest [disabling this chart](https://github.com/netdata/netdata/blob/419cd0a237275e5eeef3f92dcded84e735ee6c58/conf.d/python.d/web_log.conf#L64).
-
-## Alarms
-
-The magic of [**netdata**](https://my-netdata.io/) is that all metrics are collected per second, and all metrics can be used or correlated to provide real-time alarms. Out of the box, [**netdata**](https://my-netdata.io/) automatically attaches the following alarms to all `web_log` charts (i.e. to all log files configured, individually):
-
-| alarm|description|minimum<br/>requests|warning|critical|
-|:----|-----------|:------------------:|:-----:|:------:|
-| `1m_redirects`|The ratio of HTTP redirects (3xx except 304) over all the requests, during the last minute.<br/> <br/>*Detects if the site or the web API is suffering from too many or circular redirects.*<br/> <br/>(i.e. **oops!** *this should not redirect clients to itself*)|120/min|> 20%|> 30%|
-| `1m_bad_requests`|The ratio of HTTP bad requests (4xx) over all the requests, during the last minute.<br/> <br/>*Detects if the site or the web API is receiving too many bad requests, including `404`, not found.*<br/> <br/>(i.e. **oops!** *a few files were not uploaded*)|120/min|> 30%|> 50%|
-| `1m_internal_errors`|The ratio of HTTP internal server errors (5xx), over all the requests, during the last minute.<br/> <br/>*Detects if the site is facing difficulties to serve requests.*<br/> <br/>(i.e. **oops!** *this release crashes too much*)|120/min|> 2%|> 5%|
-| `5m_requests_ratio`|The percentage of successful web requests of the last 5 minutes, compared with the previous 5 minutes.<br/> <br/>*Detects if the site or the web API is suddenly getting too many or too few requests.*<br/> <br/>(i.e. too many = **oops!** *we are under attack*)<br/>(i.e. too few = **oops!** *call the network guys*)|120/5min|> double or \< half|> 4x or \< 1/4x|
-| `web_slow`|The average time to respond to requests, over the last 1 minute, compared to the average of last 10 minutes.<br/> <br/>*Detects if the site or the web API is suddenly a lot slower.*<br/> <br/>(i.e. **oops!** *the database is slow again*)|120/min|> 2x|> 4x|
-| `1m_successful`|The ratio of successful HTTP responses (1xx, 2xx, 304) over all the requests, during the last minute.<br/> <br/>*Detects if the site or the web API is performing within limits.*<br/> <br/>(i.e. **oops!** *help us God!*)|120/min|\< 85%|\< 75%|
-
-The column `minimum requests` states the minimum number of requests required for the alarm to be evaluated. We found that when the site is receiving requests above this rate, these alarms are pretty accurate (i.e. no false-positives).
-
-Netdata alarms are user-configurable. Sample config files can be found in the `health/health.d` directory of the [Netdata GitHub repository](https://github.com/netdata/netdata/).
-
-
diff --git a/collectors/python.d.plugin/web_log/web_log.chart.py b/collectors/python.d.plugin/web_log/web_log.chart.py
deleted file mode 100644
index 04ecadec..00000000
--- a/collectors/python.d.plugin/web_log/web_log.chart.py
+++ /dev/null
@@ -1,1194 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: web log netdata python.d module
-# Author: ilyam8
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-import bisect
-import os
-import re
-from collections import namedtuple, defaultdict
-from copy import deepcopy
-
-try:
- from itertools import filterfalse
-except ImportError:
- from itertools import ifilter as filter
- from itertools import ifilterfalse as filterfalse
-
-try:
- from sys import maxint
-except ImportError:
- from sys import maxsize as maxint
-
-from bases.collection import read_last_line
-from bases.FrameworkServices.LogService import LogService
-
-ORDER_APACHE_CACHE = [
- 'apache_cache',
-]
-
-ORDER_WEB = [
- 'response_statuses',
- 'response_codes',
- 'bandwidth',
- 'response_time',
- 'response_time_hist',
- 'response_time_upstream',
- 'response_time_upstream_hist',
- 'requests_per_url',
- 'requests_per_user_defined',
- 'http_method',
- 'vhost',
- 'port',
- 'http_version',
- 'requests_per_ipproto',
- 'clients',
- 'clients_all'
-]
-
-ORDER_SQUID = [
- 'squid_response_statuses',
- 'squid_response_codes',
- 'squid_detailed_response_codes',
- 'squid_method',
- 'squid_mime_type',
- 'squid_hier_code',
- 'squid_transport_methods',
- 'squid_transport_errors',
- 'squid_code',
- 'squid_handling_opts',
- 'squid_object_types',
- 'squid_cache_events',
- 'squid_bytes',
- 'squid_duration',
- 'squid_clients',
- 'squid_clients_all'
-]
-
-CHARTS_WEB = {
- 'response_codes': {
- 'options': [None, 'Response Codes', 'requests/s', 'responses', 'web_log.response_codes', 'stacked'],
- 'lines': [
- ['2xx', None, 'incremental'],
- ['5xx', None, 'incremental'],
- ['3xx', None, 'incremental'],
- ['4xx', None, 'incremental'],
- ['1xx', None, 'incremental'],
- ['0xx', 'other', 'incremental'],
- ['unmatched', None, 'incremental']
- ]
- },
- 'bandwidth': {
- 'options': [None, 'Bandwidth', 'kilobits/s', 'bandwidth', 'web_log.bandwidth', 'area'],
- 'lines': [
- ['resp_length', 'received', 'incremental', 8, 1000],
- ['bytes_sent', 'sent', 'incremental', -8, 1000]
- ]
- },
- 'response_time': {
- 'options': [None, 'Processing Time', 'milliseconds', 'timings', 'web_log.response_time', 'area'],
- 'lines': [
- ['resp_time_min', 'min', 'incremental', 1, 1000],
- ['resp_time_max', 'max', 'incremental', 1, 1000],
- ['resp_time_avg', 'avg', 'incremental', 1, 1000]
- ]
- },
- 'response_time_hist': {
- 'options': [None, 'Processing Time Histogram', 'requests/s', 'timings', 'web_log.response_time_hist', 'line'],
- 'lines': []
- },
- 'response_time_upstream': {
- 'options': [None, 'Processing Time Upstream', 'milliseconds', 'timings',
- 'web_log.response_time_upstream', 'area'],
- 'lines': [
- ['resp_time_upstream_min', 'min', 'incremental', 1, 1000],
- ['resp_time_upstream_max', 'max', 'incremental', 1, 1000],
- ['resp_time_upstream_avg', 'avg', 'incremental', 1, 1000]
- ]
- },
- 'response_time_upstream_hist': {
- 'options': [None, 'Processing Time Histogram', 'requests/s', 'timings',
- 'web_log.response_time_upstream_hist', 'line'],
- 'lines': []
- },
- 'clients': {
- 'options': [None, 'Current Poll Unique Client IPs', 'unique ips', 'clients', 'web_log.clients', 'stacked'],
- 'lines': [
- ['unique_cur_ipv4', 'ipv4', 'incremental', 1, 1],
- ['unique_cur_ipv6', 'ipv6', 'incremental', 1, 1]
- ]
- },
- 'clients_all': {
- 'options': [None, 'All Time Unique Client IPs', 'unique ips', 'clients', 'web_log.clients_all', 'stacked'],
- 'lines': [
- ['unique_tot_ipv4', 'ipv4', 'absolute', 1, 1],
- ['unique_tot_ipv6', 'ipv6', 'absolute', 1, 1]
- ]
- },
- 'http_method': {
- 'options': [None, 'Requests Per HTTP Method', 'requests/s', 'http methods', 'web_log.http_method', 'stacked'],
- 'lines': [
- ['GET', 'GET', 'incremental', 1, 1]
- ]
- },
- 'http_version': {
- 'options': [None, 'Requests Per HTTP Version', 'requests/s', 'http versions',
- 'web_log.http_version', 'stacked'],
- 'lines': []
- },
- 'requests_per_ipproto': {
- 'options': [None, 'Requests Per IP Protocol', 'requests/s', 'ip protocols', 'web_log.requests_per_ipproto',
- 'stacked'],
- 'lines': [
- ['req_ipv4', 'ipv4', 'incremental', 1, 1],
- ['req_ipv6', 'ipv6', 'incremental', 1, 1]
- ]
- },
- 'response_statuses': {
- 'options': [None, 'Response Statuses', 'requests/s', 'responses', 'web_log.response_statuses', 'stacked'],
- 'lines': [
- ['successful_requests', 'success', 'incremental', 1, 1],
- ['server_errors', 'error', 'incremental', 1, 1],
- ['redirects', 'redirect', 'incremental', 1, 1],
- ['bad_requests', 'bad', 'incremental', 1, 1],
- ['other_requests', 'other', 'incremental', 1, 1]
- ]
- },
- 'requests_per_url': {
- 'options': [None, 'Requests Per Url', 'requests/s', 'urls', 'web_log.requests_per_url', 'stacked'],
- 'lines': [
- ['url_pattern_other', 'other', 'incremental', 1, 1]
- ]
- },
- 'requests_per_user_defined': {
- 'options': [None, 'Requests Per User Defined Pattern', 'requests/s', 'user defined',
- 'web_log.requests_per_user_defined', 'stacked'],
- 'lines': [
- ['user_pattern_other', 'other', 'incremental', 1, 1]
- ]
- },
- 'port': {
- 'options': [None, 'Requests Per Port', 'requests/s', 'port', 'web_log.port', 'stacked'],
- 'lines': [
- ['port_80', 'http', 'incremental', 1, 1],
- ['port_443', 'https', 'incremental', 1, 1]
- ]
- },
- 'vhost': {
- 'options': [None, 'Requests Per Vhost', 'requests/s', 'vhost', 'web_log.vhost', 'stacked'],
- 'lines': []
- }
-}
-
-CHARTS_APACHE_CACHE = {
- 'apache_cache': {
- 'options': [None, 'Apache Cached Responses', 'percentage', 'cached', 'web_log.apache_cache_cache',
- 'stacked'],
- 'lines': [
- ['hit', 'cache', 'percentage-of-absolute-row'],
- ['miss', None, 'percentage-of-absolute-row'],
- ['other', None, 'percentage-of-absolute-row']
- ]
- }
-}
-
-CHARTS_SQUID = {
- 'squid_duration': {
- 'options': [None, 'Elapsed Time The Transaction Busied The Cache',
- 'milliseconds', 'squid_timings', 'web_log.squid_duration', 'area'],
- 'lines': [
- ['duration_min', 'min', 'incremental', 1, 1000],
- ['duration_max', 'max', 'incremental', 1, 1000],
- ['duration_avg', 'avg', 'incremental', 1, 1000]
- ]
- },
- 'squid_bytes': {
- 'options': [None, 'Amount Of Data Delivered To The Clients',
- 'kilobits/s', 'squid_bandwidth', 'web_log.squid_bytes', 'area'],
- 'lines': [
- ['bytes', 'sent', 'incremental', 8, 1000]
- ]
- },
- 'squid_response_statuses': {
- 'options': [None, 'Response Statuses', 'responses/s', 'squid_responses', 'web_log.squid_response_statuses',
- 'stacked'],
- 'lines': [
- ['successful_requests', 'success', 'incremental', 1, 1],
- ['server_errors', 'error', 'incremental', 1, 1],
- ['redirects', 'redirect', 'incremental', 1, 1],
- ['bad_requests', 'bad', 'incremental', 1, 1],
- ['other_requests', 'other', 'incremental', 1, 1]
- ]
- },
- 'squid_response_codes': {
- 'options': [None, 'Response Codes', 'responses/s', 'squid_responses',
- 'web_log.squid_response_codes', 'stacked'],
- 'lines': [
- ['2xx', None, 'incremental'],
- ['5xx', None, 'incremental'],
- ['3xx', None, 'incremental'],
- ['4xx', None, 'incremental'],
- ['1xx', None, 'incremental'],
- ['0xx', None, 'incremental'],
- ['other', None, 'incremental'],
- ['unmatched', None, 'incremental']
- ]
- },
- 'squid_code': {
- 'options': [None, 'Responses Per Cache Result Of The Request',
- 'requests/s', 'squid_squid_cache', 'web_log.squid_code', 'stacked'],
- 'lines': []
- },
- 'squid_detailed_response_codes': {
- 'options': [None, 'Detailed Response Codes',
- 'responses/s', 'squid_responses', 'web_log.squid_detailed_response_codes', 'stacked'],
- 'lines': []
- },
- 'squid_hier_code': {
- 'options': [None, 'Responses Per Hierarchy Code',
- 'requests/s', 'squid_hierarchy', 'web_log.squid_hier_code', 'stacked'],
- 'lines': []
- },
- 'squid_method': {
- 'options': [None, 'Requests Per Method',
- 'requests/s', 'squid_requests', 'web_log.squid_method', 'stacked'],
- 'lines': []
- },
- 'squid_mime_type': {
- 'options': [None, 'Requests Per MIME Type',
- 'requests/s', 'squid_requests', 'web_log.squid_mime_type', 'stacked'],
- 'lines': []
- },
- 'squid_clients': {
- 'options': [None, 'Current Poll Unique Client IPs', 'unique ips', 'squid_clients',
- 'web_log.squid_clients', 'stacked'],
- 'lines': [
- ['unique_ipv4', 'ipv4', 'incremental'],
- ['unique_ipv6', 'ipv6', 'incremental']
- ]
- },
- 'squid_clients_all': {
- 'options': [None, 'All Time Unique Client IPs', 'unique ips', 'squid_clients',
- 'web_log.squid_clients_all', 'stacked'],
- 'lines': [
- ['unique_tot_ipv4', 'ipv4', 'absolute'],
- ['unique_tot_ipv6', 'ipv6', 'absolute']
- ]
- },
- 'squid_transport_methods': {
- 'options': [None, 'Transport Methods', 'requests/s', 'squid_squid_transport',
- 'web_log.squid_transport_methods', 'stacked'],
- 'lines': []
- },
- 'squid_transport_errors': {
- 'options': [None, 'Transport Errors', 'requests/s', 'squid_squid_transport',
- 'web_log.squid_transport_errors', 'stacked'],
- 'lines': []
- },
- 'squid_handling_opts': {
- 'options': [None, 'Handling Opts', 'requests/s', 'squid_squid_cache',
- 'web_log.squid_handling_opts', 'stacked'],
- 'lines': []
- },
- 'squid_object_types': {
- 'options': [None, 'Object Types', 'objects/s', 'squid_squid_cache',
- 'web_log.squid_object_types', 'stacked'],
- 'lines': []
- },
- 'squid_cache_events': {
- 'options': [None, 'Cache Events', 'events/s', 'squid_squid_cache',
- 'web_log.squid_cache_events', 'stacked'],
- 'lines': []
- }
-}
-
-NAMED_PATTERN = namedtuple('PATTERN', ['description', 'func'])
-
-DET_RESP_AGGR = ['', '_1xx', '_2xx', '_3xx', '_4xx', '_5xx', '_Other']
-
-SQUID_CODES = {
- 'TCP': 'squid_transport_methods',
- 'UDP': 'squid_transport_methods',
- 'NONE': 'squid_transport_methods',
- 'CLIENT': 'squid_handling_opts',
- 'IMS': 'squid_handling_opts',
- 'ASYNC': 'squid_handling_opts',
- 'SWAPFAIL': 'squid_handling_opts',
- 'REFRESH': 'squid_handling_opts',
- 'SHARED': 'squid_handling_opts',
- 'REPLY': 'squid_handling_opts',
- 'NEGATIVE': 'squid_object_types',
- 'STALE': 'squid_object_types',
- 'OFFLINE': 'squid_object_types',
- 'INVALID': 'squid_object_types',
- 'FAIL': 'squid_object_types',
- 'MODIFIED': 'squid_object_types',
- 'UNMODIFIED': 'squid_object_types',
- 'REDIRECT': 'squid_object_types',
- 'HIT': 'squid_cache_events',
- 'MEM': 'squid_cache_events',
- 'MISS': 'squid_cache_events',
- 'DENIED': 'squid_cache_events',
- 'NOFETCH': 'squid_cache_events',
- 'TUNNEL': 'squid_cache_events',
- 'ABORTED': 'squid_transport_errors',
- 'TIMEOUT': 'squid_transport_errors'
-}
-
-REQUEST_REGEX = re.compile(r'(?P<method>[A-Z]+) (?P<url>[^ ]+) [A-Z]+/(?P<http_version>\d(?:.\d)?)')
-
-MIME_TYPES = ['application', 'audio', 'example', 'font', 'image', 'message', 'model', 'multipart', 'text', 'video']
-
-
-class Service(LogService):
- def __init__(self, configuration=None, name=None):
- """
- :param configuration:
- :param name:
- """
- LogService.__init__(self, configuration=configuration, name=name)
- self.configuration = configuration
- self.log_path = self.configuration.get('path')
- self.job = None
-
- def check(self):
- """
- :return: bool
-
- 1. "log_path" is specified in the module configuration file
- 2. "log_path" must be readable by netdata user and must exist
- 3. "log_path' must not be empty. We need at least 1 line to find appropriate pattern to parse
- 4. other checks depends on log "type"
- """
-
- log_type = self.configuration.get('type', 'web')
- log_types = dict(web=Web, apache_cache=ApacheCache, squid=Squid)
-
- if log_type not in log_types:
- self.error('bad log type {log_type}. Supported types: {types}'.format(log_type=log_type,
- types=log_types.keys()))
- return False
-
- if not self.log_path:
- self.error('log path is not specified')
- return False
-
- if not (self._find_recent_log_file() and os.access(self.log_path, os.R_OK)):
- self.error('{log_file} is not readable or does not exist'.format(log_file=self.log_path))
- return False
-
- if not os.path.getsize(self.log_path):
- self.error('{log_file} is empty'.format(log_file=self.log_path))
- return False
-
- self.job = log_types[log_type](self)
- if self.job.check():
- self.order = self.job.order
- self.definitions = self.job.definitions
- return True
- return False
-
- def _get_data(self):
- return self.job.get_data(self._get_raw_data())
-
-
-class Web:
- def __init__(self, service):
- self.service = service
- self.order = ORDER_WEB[:]
- self.definitions = deepcopy(CHARTS_WEB)
- self.pre_filter = check_patterns('filter', self.configuration.get('filter'))
- self.storage = dict()
- self.data = {
- 'bytes_sent': 0,
- 'resp_length': 0,
- 'resp_time_min': 0,
- 'resp_time_max': 0,
- 'resp_time_avg': 0,
- 'resp_time_upstream_min': 0,
- 'resp_time_upstream_max': 0,
- 'resp_time_upstream_avg': 0,
- 'unique_cur_ipv4': 0,
- 'unique_cur_ipv6': 0,
- '2xx': 0,
- '5xx': 0,
- '3xx': 0,
- '4xx': 0,
- '1xx': 0,
- '0xx': 0,
- 'unmatched': 0,
- 'req_ipv4': 0,
- 'req_ipv6': 0,
- 'unique_tot_ipv4': 0,
- 'unique_tot_ipv6': 0,
- 'successful_requests': 0,
- 'redirects': 0,
- 'bad_requests': 0,
- 'server_errors': 0,
- 'other_requests': 0,
- 'GET': 0
- }
-
- def __getattr__(self, item):
- return getattr(self.service, item)
-
- def check(self):
- last_line = read_last_line(self.log_path)
- if not last_line:
- return False
- # Custom_log_format or predefined log format.
- if self.configuration.get('custom_log_format'):
- match_dict, error = self.find_regex_custom(last_line)
- else:
- match_dict, error = self.find_regex(last_line)
-
- # "match_dict" is None if there are any problems
- if match_dict is None:
- self.error(error)
- return False
-
- self.storage['unique_all_time'] = list()
- self.storage['url_pattern'] = check_patterns('url_pattern', self.configuration.get('categories'))
- self.storage['user_pattern'] = check_patterns('user_pattern', self.configuration.get('user_defined'))
-
- self.create_web_charts(match_dict) # Create charts
- self.info('Collected data: %s' % list(match_dict.keys()))
- return True
-
- def create_web_charts(self, match_dict):
- """
- :param match_dict: dict: regex.search.groupdict(). Ex. {'address': '127.0.0.1', 'code': '200', 'method': 'GET'}
- :return:
- Create/remove additional charts depending on the 'match_dict' keys and configuration file options
- """
- if 'resp_time' not in match_dict:
- self.order.remove('response_time')
- self.order.remove('response_time_hist')
- if 'resp_time_upstream' not in match_dict:
- self.order.remove('response_time_upstream')
- self.order.remove('response_time_upstream_hist')
-
- # Add 'response_time_hist' and 'response_time_upstream_hist' charts if a histogram is specified in the configuration
- histogram = self.configuration.get('histogram', None)
- if isinstance(histogram, list):
- self.storage['bucket_index'] = histogram[:]
- self.storage['bucket_index'].append(maxint)
- self.storage['buckets'] = [0] * (len(histogram) + 1)
- self.storage['upstream_buckets'] = [0] * (len(histogram) + 1)
- hist_lines = self.definitions['response_time_hist']['lines']
- upstream_hist_lines = self.definitions['response_time_upstream_hist']['lines']
- for i, le in enumerate(histogram):
- hist_key = 'response_time_hist_%d' % i
- upstream_hist_key = 'response_time_upstream_hist_%d' % i
- hist_lines.append([hist_key, str(le), 'incremental', 1, 1])
- upstream_hist_lines.append([upstream_hist_key, str(le), 'incremental', 1, 1])
-
- hist_lines.append(['response_time_hist_%d' % len(histogram), '+Inf', 'incremental', 1, 1])
- upstream_hist_lines.append(['response_time_upstream_hist_%d' % len(histogram), '+Inf', 'incremental', 1, 1])
- elif histogram is not None:
- self.error('expected a histogram list, but got {0}'.format(type(histogram)))
-
- if not self.configuration.get('all_time', True):
- self.order.remove('clients_all')
-
- # Add 'detailed_response_codes' chart if specified in the configuration
- if self.configuration.get('detailed_response_codes', True):
- if self.configuration.get('detailed_response_aggregate', True):
- codes = DET_RESP_AGGR[:1]
- else:
- codes = DET_RESP_AGGR[1:]
-
- for code in codes:
- self.order.append('detailed_response_codes%s' % code)
- self.definitions['detailed_response_codes%s' % code] = {
- 'options': [None, 'Detailed Response Codes %s' % code[1:], 'requests/s', 'responses',
- 'web_log.detailed_response_codes%s' % code, 'stacked'],
- 'lines': []
- }
-
- # Add 'requests_per_url' chart if specified in the configuration
- if self.storage['url_pattern']:
- for elem in self.storage['url_pattern']:
- dim = [elem.description, elem.description[12:], 'incremental']
- self.definitions['requests_per_url']['lines'].append(dim)
- self.data[elem.description] = 0
- self.data['url_pattern_other'] = 0
- else:
- self.order.remove('requests_per_url')
-
- # Add 'requests_per_user_defined' chart if specified in the configuration
- if self.storage['user_pattern'] and 'user_defined' in match_dict:
- for elem in self.storage['user_pattern']:
- dim = [elem.description, elem.description[13:], 'incremental']
- self.definitions['requests_per_user_defined']['lines'].append(dim)
- self.data[elem.description] = 0
- self.data['user_pattern_other'] = 0
- else:
- self.order.remove('requests_per_user_defined')
-
- def get_data(self, raw_data=None):
- """
- Parses new log lines
- :return: dict OR None
- None if _get_raw_data method fails.
- In all other cases - dict.
- """
- if not raw_data:
- return None if raw_data is None else self.data
-
- filtered_data = filter_data(raw_data=raw_data, pre_filter=self.pre_filter)
-
- unique_current = set()
- timings = defaultdict(lambda: dict(minimum=None, maximum=0, summary=0, count=0))
-
- for line in filtered_data:
- match = self.storage['regex'].search(line)
- if match:
- match_dict = match.groupdict()
- try:
- code = match_dict['code'][0] + 'xx'
- self.data[code] += 1
- except KeyError:
- self.data['0xx'] += 1
- # detailed response code
- if self.configuration.get('detailed_response_codes', True):
- self.get_data_per_response_codes_detailed(code=match_dict['code'])
- # response statuses
- self.get_data_per_statuses(code=match_dict['code'])
- # requests per user defined pattern
- if self.storage['user_pattern'] and 'user_defined' in match_dict:
- self.get_data_per_pattern(row=match_dict['user_defined'],
- other='user_pattern_other',
- pattern=self.storage['user_pattern'])
- # method, url, http version
- self.get_data_from_request_field(match_dict=match_dict)
- # bandwidth sent
- bytes_sent = match_dict['bytes_sent'] if '-' not in match_dict['bytes_sent'] else 0
- self.data['bytes_sent'] += int(bytes_sent)
- # request processing time and bandwidth received
- if 'resp_length' in match_dict:
- resp_length = match_dict['resp_length'] if '-' not in match_dict['resp_length'] else 0
- self.data['resp_length'] += int(resp_length)
- if 'resp_time' in match_dict:
- resp_time = self.storage['func_resp_time'](float(match_dict['resp_time']))
- get_timings(timings=timings['resp_time'], time=resp_time)
- if 'bucket_index' in self.storage:
- get_hist(self.storage['bucket_index'], self.storage['buckets'], resp_time / 1000)
- if 'resp_time_upstream' in match_dict and match_dict['resp_time_upstream'] != '-':
- resp_time_upstream = self.storage['func_resp_time'](float(match_dict['resp_time_upstream']))
- get_timings(timings=timings['resp_time_upstream'], time=resp_time_upstream)
- if 'bucket_index' in self.storage:
- get_hist(self.storage['bucket_index'], self.storage['upstream_buckets'], resp_time / 1000)
- # requests per ip proto
- proto = 'ipv6' if ':' in match_dict['address'] else 'ipv4'
- self.data['req_' + proto] += 1
- # unique clients ips
- if self.configuration.get('all_time', True):
- if address_not_in_pool(pool=self.storage['unique_all_time'],
- address=match_dict['address'],
- pool_size=self.data['unique_tot_ipv4'] + self.data['unique_tot_ipv6']):
- self.data['unique_tot_' + proto] += 1
- if match_dict['address'] not in unique_current:
- self.data['unique_cur_' + proto] += 1
- unique_current.add(match_dict['address'])
- else:
- self.data['unmatched'] += 1
-
- # timings
- for elem in timings:
- self.data[elem + '_min'] += timings[elem]['minimum']
- self.data[elem + '_avg'] += timings[elem]['summary'] / timings[elem]['count']
- self.data[elem + '_max'] += timings[elem]['maximum']
-
- # histogram
- if 'bucket_index' in self.storage:
- buckets = self.storage['buckets']
- upstream_buckets = self.storage['upstream_buckets']
- for i in range(0, len(self.storage['bucket_index'])):
- hist_key = 'response_time_hist_%d' % i
- upstream_hist_key = 'response_time_upstream_hist_%d' % i
- self.data[hist_key] = buckets[i]
- self.data[upstream_hist_key] = upstream_buckets[i]
-
- return self.data
-
- def find_regex(self, last_line):
- """
- :param last_line: str: literally last line from log file
- :return: tuple where:
- [0]: dict or None: match_dict or None
- [1]: str: error description
- We need to find an appropriate pattern for the current log file.
- The logic is to do a regex search through the string for all predefined patterns
- until we find a match or fail.
- """
- # REGEX: 1.IPv4 address 2.HTTP method 3. URL 4. Response code
- # 5. Bytes sent 6. Response length 7. Response process time
- default = re.compile(r'(?P<address>[\da-f.:]+|localhost)'
- r' -.*?"(?P<request>[^"]*)"'
- r' (?P<code>[1-9]\d{2})'
- r' (?P<bytes_sent>\d+|-)')
-
- apache_ext_insert = re.compile(r'(?P<address>[\da-f.:]+|localhost)'
- r' -.*?"(?P<request>[^"]*)"'
- r' (?P<code>[1-9]\d{2})'
- r' (?P<bytes_sent>\d+|-)'
- r' (?P<resp_length>\d+|-)'
- r' (?P<resp_time>\d+) ')
-
- apache_ext_append = re.compile(r'(?P<address>[\da-f.:]+|localhost)'
- r' -.*?"(?P<request>[^"]*)"'
- r' (?P<code>[1-9]\d{2})'
- r' (?P<bytes_sent>\d+|-)'
- r' .*?'
- r' (?P<resp_length>\d+|-)'
- r' (?P<resp_time>\d+)'
- r'(?: |$)')
-
- nginx_ext_insert = re.compile(r'(?P<address>[\da-f.:]+)'
- r' -.*?"(?P<request>[^"]*)"'
- r' (?P<code>[1-9]\d{2})'
- r' (?P<bytes_sent>\d+)'
- r' (?P<resp_length>\d+)'
- r' (?P<resp_time>\d+\.\d+) ')
-
- nginx_ext2_insert = re.compile(r'(?P<address>[\da-f.:]+)'
- r' -.*?"(?P<request>[^"]*)"'
- r' (?P<code>[1-9]\d{2})'
- r' (?P<bytes_sent>\d+)'
- r' (?P<resp_length>\d+)'
- r' (?P<resp_time>\d+\.\d+)'
- r' (?P<resp_time_upstream>[\d.-]+)')
-
- nginx_ext_append = re.compile(r'(?P<address>[\da-f.:]+)'
- r' -.*?"(?P<request>[^"]*)"'
- r' (?P<code>[1-9]\d{2})'
- r' (?P<bytes_sent>\d+)'
- r' .*?'
- r' (?P<resp_length>\d+)'
- r' (?P<resp_time>\d+\.\d+)')
-
- def func_usec(time):
- return time
-
- def func_sec(time):
- return time * 1000000
-
- r_regex = [apache_ext_insert, apache_ext_append,
- nginx_ext2_insert, nginx_ext_insert, nginx_ext_append,
- default]
- r_function = [func_usec, func_usec, func_sec, func_sec, func_sec, func_usec]
- regex_function = zip(r_regex, r_function)
-
- match_dict = dict()
- for regex, func in regex_function:
- match = regex.search(last_line)
- if match:
- self.storage['regex'] = regex
- self.storage['func_resp_time'] = func
- match_dict = match.groupdict()
- break
-
- return find_regex_return(match_dict=match_dict or None,
- msg='Unknown log format. You need to use "custom_log_format" feature.')
-
- def find_regex_custom(self, last_line):
- """
- :param last_line: str: literally last line from log file
- :return: tuple where:
- [0]: dict or None: match_dict or None
- [1]: str: error description
-
- We are here only if "custom_log_format" is present in the job configuration. We need to make sure:
- 1. "custom_log_format" is a dict
- 2. "pattern" in "custom_log_format" and pattern is <str> instance
- 3. if "time_multiplier" is in "custom_log_format" it must be <int> or <float> instance
-
- If all parameters are ok we need to make sure:
- 1. Pattern search is success
- 2. Pattern search contains named subgroups (?P<subgroup_name>) (= "match_dict")
-
- If pattern search is success we need to make sure:
- 1. All mandatory keys ['address', 'code', 'bytes_sent'] are in "match_dict"
-
- If this is True we need to make sure:
- 1. All mandatory key values from "match_dict" have the correct format
- ("code" is integer, "method" is uppercase word, etc)
-
- If non-mandatory keys are in "match_dict" we need to make sure:
- 1. All non mandatory key values from match_dict ['resp_length', 'resp_time'] have the correct format
- ("resp_length" is integer or "-", "resp_time" is integer or float)
-
- """
- if not hasattr(self.configuration.get('custom_log_format'), 'keys'):
- return find_regex_return(msg='Custom log: "custom_log_format" is not a <dict>')
-
- pattern = self.configuration.get('custom_log_format', dict()).get('pattern')
- if not (pattern and isinstance(pattern, str)):
- return find_regex_return(msg='Custom log: "pattern" option is not specified or type is not <str>')
-
- resp_time_func = self.configuration.get('custom_log_format', dict()).get('time_multiplier') or 0
-
- if not isinstance(resp_time_func, (int, float)):
- return find_regex_return(msg='Custom log: "time_multiplier" is not an integer or a float')
-
- try:
- regex = re.compile(pattern)
- except re.error as error:
- return find_regex_return(msg='Pattern compile error: %s' % str(error))
-
- match = regex.search(last_line)
- if not match:
- return find_regex_return(msg='Custom log: pattern search FAILED')
-
- match_dict = match.groupdict() or None
- if match_dict is None:
- return find_regex_return(msg='Custom log: search OK but contains no named subgroups'
- ' (you need to use ?P<subgroup_name>)')
- mandatory_dict = {'address': r'[\w.:-]+',
- 'code': r'[1-9]\d{2}',
- 'bytes_sent': r'\d+|-'}
- optional_dict = {'resp_length': r'\d+|-',
- 'resp_time': r'[\d.]+',
- 'resp_time_upstream': r'[\d.-]+',
- 'method': r'[A-Z]+',
- 'http_version': r'\d(?:.\d)?'}
-
- mandatory_values = set(mandatory_dict) - set(match_dict)
- if mandatory_values:
- return find_regex_return(msg='Custom log: search OK but some mandatory keys (%s) are missing'
- % list(mandatory_values))
- for key in mandatory_dict:
- if not re.search(mandatory_dict[key], match_dict[key]):
- return find_regex_return(msg='Custom log: can\'t parse "%s": %s'
- % (key, match_dict[key]))
-
- optional_values = set(optional_dict) & set(match_dict)
- for key in optional_values:
- if not re.search(optional_dict[key], match_dict[key]):
- return find_regex_return(msg='Custom log: can\'t parse "%s": %s'
- % (key, match_dict[key]))
-
- dot_in_time = '.' in match_dict.get('resp_time', '')
- if dot_in_time:
- self.storage['func_resp_time'] = lambda time: time * (resp_time_func or 1000000)
- else:
- self.storage['func_resp_time'] = lambda time: time * (resp_time_func or 1)
-
- self.storage['regex'] = regex
- return find_regex_return(match_dict=match_dict)
-
- def get_data_from_request_field(self, match_dict):
- if match_dict.get('request'):
- match_dict = REQUEST_REGEX.search(match_dict['request'])
- if match_dict:
- match_dict = match_dict.groupdict()
- else:
- return
- # requests per url
- if match_dict.get('url') and self.storage['url_pattern']:
- self.get_data_per_pattern(row=match_dict['url'],
- other='url_pattern_other',
- pattern=self.storage['url_pattern'])
- # requests per http method
- if match_dict.get('method'):
- if match_dict['method'] not in self.data:
- self.charts['http_method'].add_dimension([match_dict['method'],
- match_dict['method'],
- 'incremental'])
- self.data[match_dict['method']] = 0
- self.data[match_dict['method']] += 1
- # requests per http version
- if match_dict.get('http_version'):
- dim_id = match_dict['http_version'].replace('.', '_')
- if dim_id not in self.data:
- self.charts['http_version'].add_dimension([dim_id,
- match_dict['http_version'],
- 'incremental'])
- self.data[dim_id] = 0
- self.data[dim_id] += 1
- # requests per port number
- if match_dict.get('port'):
- if match_dict['port'] not in self.data:
- self.charts['port'].add_dimension([match_dict['port'],
- match_dict['port'],
- 'incremental'])
- self.data[match_dict['port']] = 0
- self.data[match_dict['port']] += 1
- # requests per vhost
- if match_dict.get('vhost'):
- dim_id = match_dict['vhost'].replace('.', '_')
- if dim_id not in self.data:
- self.charts['vhost'].add_dimension([dim_id,
- match_dict['vhost'],
- 'incremental'])
- self.data[dim_id] = 0
- self.data[dim_id] += 1
-
- def get_data_per_response_codes_detailed(self, code):
- """
- :param code: str: CODE from parsed line. Ex.: '202', '499'
- :return:
- Calls the chart's add_dimension method if the value is found for the first time
- """
- if code not in self.data:
- if self.configuration.get('detailed_response_aggregate', True):
- self.charts['detailed_response_codes'].add_dimension([code, code, 'incremental'])
- self.data[code] = 0
- else:
- code_index = int(code[0]) if int(code[0]) < 6 else 6
- chart_key = 'detailed_response_codes' + DET_RESP_AGGR[code_index]
- self.charts[chart_key].add_dimension([code, code, 'incremental'])
- self.data[code] = 0
- self.data[code] += 1
-
- def get_data_per_pattern(self, row, other, pattern):
- """
- :param row: str:
- :param other: str:
- :param pattern: named tuple: (['pattern_description', 'regular expression'])
- :return:
- Scan through the string looking for the first user-defined pattern that produces a match
- """
- match = None
- for elem in pattern:
- if elem.func(row):
- self.data[elem.description] += 1
- match = True
- break
- if not match:
- self.data[other] += 1
-
- def get_data_per_statuses(self, code):
- """
- :param code: str: response status code. Ex.: '202', '499'
- :return:
- """
- code_class = code[0]
- if code_class == '2' or code == '304' or code_class == '1' or code == '401':
- self.data['successful_requests'] += 1
- elif code_class == '3':
- self.data['redirects'] += 1
- elif code_class == '4':
- self.data['bad_requests'] += 1
- elif code_class == '5':
- self.data['server_errors'] += 1
- else:
- self.data['other_requests'] += 1
-
-
-class ApacheCache:
- def __init__(self, service):
- self.service = service
- self.order = ORDER_APACHE_CACHE
- self.definitions = CHARTS_APACHE_CACHE
-
- @staticmethod
- def check():
- return True
-
- @staticmethod
- def get_data(raw_data=None):
- data = dict(hit=0, miss=0, other=0)
- if not raw_data:
- return None if raw_data is None else data
-
- for line in raw_data:
- if 'cache hit' in line:
- data['hit'] += 1
- elif 'cache miss' in line:
- data['miss'] += 1
- else:
- data['other'] += 1
- return data
-
-
-class Squid:
- def __init__(self, service):
- self.service = service
- self.order = ORDER_SQUID
- self.definitions = CHARTS_SQUID
- self.pre_filter = check_patterns('filter', self.configuration.get('filter'))
- self.storage = dict()
- self.data = {
- 'duration_max': 0,
- 'duration_avg': 0,
- 'duration_min': 0,
- 'bytes': 0,
- '0xx': 0,
- '1xx': 0,
- '2xx': 0,
- '3xx': 0,
- '4xx': 0,
- '5xx': 0,
- 'other': 0,
- 'unmatched': 0,
- 'unique_ipv4': 0,
- 'unique_ipv6': 0,
- 'unique_tot_ipv4': 0,
- 'unique_tot_ipv6': 0,
- 'successful_requests': 0,
- 'redirects': 0,
- 'bad_requests': 0,
- 'server_errors': 0,
- 'other_requests': 0
- }
-
- def __getattr__(self, item):
- return getattr(self.service, item)
-
- def check(self):
- last_line = read_last_line(self.log_path)
- if not last_line:
- return False
- self.storage['unique_all_time'] = list()
- self.storage['regex'] = re.compile(r'[0-9.]+\s+(?P<duration>[0-9]+)'
- r' (?P<client_address>[\da-f.:]+)'
- r' (?P<squid_code>[A-Z_]+)/'
- r'(?P<http_code>[0-9]+)'
- r' (?P<bytes>[0-9]+)'
- r' (?P<method>[A-Z_]+)'
- r' (?P<url>[^ ]+)'
- r' (?P<user>[^ ]+)'
- r' (?P<hier_code>[A-Z_]+)/[\da-z.:-]+'
- r' (?P<mime_type>[A-Za-z-]*)')
-
- match = self.storage['regex'].search(last_line)
- if not match:
- self.error('Regex does not match (%s)' % self.storage['regex'].pattern)
- return False
- self.storage['dynamic'] = {
- 'http_code': {
- 'chart': 'squid_detailed_response_codes',
- 'func_dim_id': None,
- 'func_dim': None
- },
- 'hier_code': {
- 'chart': 'squid_hier_code',
- 'func_dim_id': None,
- 'func_dim': lambda v: v.replace('HIER_', '')
- },
- 'method': {
- 'chart': 'squid_method',
- 'func_dim_id': None,
- 'func_dim': None
- },
- 'mime_type': {
- 'chart': 'squid_mime_type',
- 'func_dim_id': lambda v: str.lower(v) if str.lower(v) in MIME_TYPES else 'unknown',
- 'func_dim': None
- }
- }
- if not self.configuration.get('all_time', True):
- self.order.remove('squid_clients_all')
- return True
-
- def get_data(self, raw_data=None):
- if not raw_data:
- return None if raw_data is None else self.data
-
- filtered_data = filter_data(raw_data=raw_data, pre_filter=self.pre_filter)
-
- unique_ip = set()
- timings = defaultdict(lambda: dict(minimum=None, maximum=0, summary=0, count=0))
-
- for row in filtered_data:
- match = self.storage['regex'].search(row)
- if match:
- match = match.groupdict()
- if match['duration'] != '0':
- get_timings(timings=timings['duration'], time=float(match['duration']) * 1000)
- try:
- self.data[match['http_code'][0] + 'xx'] += 1
- except KeyError:
- self.data['other'] += 1
-
- self.get_data_per_statuses(match['http_code'])
-
- self.get_data_per_squid_code(match['squid_code'])
-
- self.data['bytes'] += int(match['bytes'])
-
- proto = 'ipv4' if '.' in match['client_address'] else 'ipv6'
- # unique clients ips
- if self.configuration.get('all_time', True):
- if address_not_in_pool(pool=self.storage['unique_all_time'],
- address=match['client_address'],
- pool_size=self.data['unique_tot_ipv4'] + self.data['unique_tot_ipv6']):
- self.data['unique_tot_' + proto] += 1
-
- if match['client_address'] not in unique_ip:
- self.data['unique_' + proto] += 1
- unique_ip.add(match['client_address'])
-
- for key, values in self.storage['dynamic'].items():
- if match[key] == '-':
- continue
- dimension_id = values['func_dim_id'](match[key]) if values['func_dim_id'] else match[key]
- if dimension_id not in self.data:
- dimension = values['func_dim'](match[key]) if values['func_dim'] else dimension_id
- self.charts[values['chart']].add_dimension([dimension_id,
- dimension,
- 'incremental'])
- self.data[dimension_id] = 0
- self.data[dimension_id] += 1
- else:
- self.data['unmatched'] += 1
-
- for elem in timings:
- self.data[elem + '_min'] += timings[elem]['minimum']
- self.data[elem + '_avg'] += timings[elem]['summary'] / timings[elem]['count']
- self.data[elem + '_max'] += timings[elem]['maximum']
- return self.data
-
- def get_data_per_statuses(self, code):
- """
- :param code: str: response status code. Ex.: '202', '499'
- :return:
- """
- code_class = code[0]
- if code_class == '2' or code == '304' or code_class == '1' or code == '000':
- self.data['successful_requests'] += 1
- elif code_class == '3':
- self.data['redirects'] += 1
- elif code_class == '4':
- self.data['bad_requests'] += 1
- elif code_class == '5' or code_class == '6':
- self.data['server_errors'] += 1
- else:
- self.data['other_requests'] += 1
-
- def get_data_per_squid_code(self, code):
- """
- :param code: str: squid response code. Ex.: 'TCP_MISS', 'TCP_MISS_ABORTED'
- :return:
- """
- if code not in self.data:
- self.charts['squid_code'].add_dimension([code, code, 'incremental'])
- self.data[code] = 0
- self.data[code] += 1
-
- for tag in code.split('_'):
- try:
- chart_key = SQUID_CODES[tag]
- except KeyError:
- continue
- dimension_id = '_'.join(['code_detailed', tag])
- if dimension_id not in self.data:
- self.charts[chart_key].add_dimension([dimension_id, tag, 'incremental'])
- self.data[dimension_id] = 0
- self.data[dimension_id] += 1
-
-
-def get_timings(timings, time):
- """
- :param timings: dict: accumulator with 'minimum', 'maximum', 'summary' and 'count' keys
- :param time: float: time value to add to the accumulator
- :return: None
- """
- if timings['minimum'] is None:
- timings['minimum'] = time
- if time > timings['maximum']:
- timings['maximum'] = time
- elif time < timings['minimum']:
- timings['minimum'] = time
- timings['summary'] += time
- timings['count'] += 1
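-
-# Usage sketch (hypothetical values, mirroring how get_data feeds this helper):
-#   t = defaultdict(lambda: dict(minimum=None, maximum=0, summary=0, count=0))
-#   get_timings(timings=t['resp_time'], time=12.0)
-#   get_timings(timings=t['resp_time'], time=3.0)
-#   # t['resp_time'] -> {'minimum': 3.0, 'maximum': 12.0, 'summary': 15.0, 'count': 2}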
-
-
-def get_hist(index, buckets, time):
- """
- :param index: list of bucket upper bounds (Ex. [10, 50, 100, 150, ...])
- :param buckets: list of bucket counters, updated in place
- :param time: observed time value
- :return: None
- """
- for i in range(len(index) - 1, -1, -1):
- if time <= index[i]:
- buckets[i] += 1
- else:
- break
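-
-# Worked example (illustrative values): with index = [10, 50, 100] and buckets = [0, 0, 0],
-# get_hist(index, buckets, 40) walks the bounds from the largest down and increments every
-# bucket whose bound is >= 40, leaving the cumulative counts [0, 1, 1].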
-
-
-def address_not_in_pool(pool, address, pool_size):
- """
- :param pool: list of ip addresses
- :param address: ip address
- :param pool_size: current pool size
- :return: True if address not in pool. False otherwise.
- """
- index = bisect.bisect_left(pool, address)
- if index < pool_size:
- if pool[index] == address:
- return False
- bisect.insort_left(pool, address)
- return True
- bisect.insort_left(pool, address)
- return True
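-
-# Example (illustrative addresses): with pool = ['10.0.0.1', '10.0.0.3'] and pool_size = 2,
-# address_not_in_pool(pool, '10.0.0.1', 2) returns False (already present), while
-# address_not_in_pool(pool, '10.0.0.2', 2) returns True and keeps the pool sorted:
-# ['10.0.0.1', '10.0.0.2', '10.0.0.3'].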
-
-
-def find_regex_return(match_dict=None, msg='Generic error message'):
- """
- :param match_dict: dict: re.search.groupdict() or None
- :param msg: str: error description
- :return: tuple:
- """
- return match_dict, msg
-
-
-def check_patterns(string, dimension_regex_dict):
- """
- :param string: str:
- :param dimension_regex_dict: dict: ex. {'dim1': '<pattern1>', 'dim2': '<pattern2>'}
- :return: list of named tuples or None:
- We need to make sure all patterns are valid regular expressions
- """
- if not hasattr(dimension_regex_dict, 'keys'):
- return None
-
- result = list()
-
- def valid_pattern(pattern):
- """
- :param pattern: str
- :return: re.compile(pattern) or False
- """
- if not isinstance(pattern, str):
- return False
- try:
- return re.compile(pattern)
- except re.error:
- return False
-
- def func_search(pattern):
- def closure(v):
- return pattern.search(v)
-
- return closure
-
- for dimension, regex in dimension_regex_dict.items():
- valid = valid_pattern(regex)
- if isinstance(dimension, str) and valid:
- func = func_search(valid)
- result.append(NAMED_PATTERN(description='_'.join([string, dimension]),
- func=func))
- return result or None
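-
-# For example (hypothetical input, as used e.g. for the 'categories' option):
-#   check_patterns('url_pattern', {'cacti': 'cacti.*', 'stub_status': 'stub_status'})
-# returns named tuples with descriptions 'url_pattern_cacti' and 'url_pattern_stub_status',
-# whose func(row) runs the compiled regex search against a log line.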
-
-
-def filter_data(raw_data, pre_filter):
- """
- :param raw_data: list of log lines
- :param pre_filter: list of named tuples produced by check_patterns('filter', ...)
- :return: iterable with the rows that pass the include/exclude filters
- """
-
- if not pre_filter:
- return raw_data
- filtered = raw_data
- for elem in pre_filter:
- if elem.description == 'filter_include':
- filtered = filter(elem.func, filtered)
- elif elem.description == 'filter_exclude':
- filtered = filterfalse(elem.func, filtered)
- return filtered
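-
-# A minimal sketch of how the pre-filter is wired up (hypothetical log lines, mirroring
-# the check_patterns('filter', ...) call in the Squid constructor above):
-#   pre_filter = check_patterns('filter', {'include': 'GET', 'exclude': r'\.css'})
-#   kept = filter_data(raw_data=['GET /index.html', 'GET /style.css', 'POST /api'],
-#                      pre_filter=pre_filter)
-#   # list(kept) -> ['GET /index.html']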
diff --git a/collectors/python.d.plugin/web_log/web_log.conf b/collectors/python.d.plugin/web_log/web_log.conf
deleted file mode 100644
index 220b7c28..00000000
--- a/collectors/python.d.plugin/web_log/web_log.conf
+++ /dev/null
@@ -1,219 +0,0 @@
-# netdata python.d.plugin configuration for web log
-#
-# This file is in YAML format. Generally, the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 1
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply a penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed to run at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-
-# ----------------------------------------------------------------------
-# PLUGIN CONFIGURATION
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default it is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# penalty: yes # the JOB's penalty
-# autodetection_retry: 0 # the JOB's re-check interval in seconds
-#
-# In addition to the above, web_log also supports the following
-# (a combined example job is sketched just below):
-#
-# path: 'PATH' # the path to web server log file
-# path: 'PATH[0-9]*[0-9]' # log files with date suffix are also supported
-# detailed_response_codes: yes/no # default: yes. Additional chart where response codes are not grouped
-# detailed_response_aggregate: yes/no # default: yes. Aggregate detailed response codes into a single chart (one chart per code class when disabled)
-# all_time: yes/no # default: yes. All-time unique client IPs chart (50000 addresses ~ 400KB)
-# filter: # filter with regex
-# include: 'REGEX' # only those rows that match the regex
-# exclude: 'REGEX' # all rows except those that match the regex
-# categories: # requests per url chart configuration
-# cacti: 'cacti.*' # name(dimension): REGEX to match
-# observium: 'observium.*' # name(dimension): REGEX to match
-# stub_status: 'stub_status' # name(dimension): REGEX to match
-# user_defined: # requests per pattern in <user_defined> field (custom_log_format)
-# cacti: 'cacti.*' # name(dimension): REGEX to match
-# observium: 'observium.*' # name(dimension): REGEX to match
-# stub_status: 'stub_status' # name(dimension): REGEX to match
-# custom_log_format: # define a custom log format
-# pattern: '(?P<address>[\da-f.:]+) -.*?"(?P<method>[A-Z]+) (?P<url>.*?)" (?P<code>[1-9]\d{2}) (?P<bytes_sent>\d+) (?P<resp_length>\d+) (?P<resp_time>\d+\.\d+) '
-# time_multiplier: 1000000 # type <int>/<float> - convert time to microseconds
-# histogram: [1, 3, 10, 30, 100, ...] # type list of int - Cumulative histogram of response time in milliseconds
-
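-# ----------------------------------------------------------------------
-# EXAMPLE JOB
-#
-# A hypothetical job sketch combining several of the options above; the path,
-# regexes and histogram values are placeholders only, adjust them to your setup:
-#
-# apache_custom:
-#   name: 'apache_custom'
-#   path: '/var/log/apache2/access.log'
-#   detailed_response_codes: yes
-#   all_time: no
-#   filter:
-#     include: 'GET .*'
-#     exclude: '.*\.(css|js|png)$'
-#   categories:
-#     cacti: 'cacti.*'
-#     observium: 'observium.*'
-#   histogram: [1, 3, 10, 30, 100, 300, 1000]
-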
-# ----------------------------------------------------------------------
-# WEB SERVER CONFIGURATION
-#
-# Make sure the web server log directory and the web server log files
-# can be read by user 'netdata'.
-#
-# To enable the timings chart and the requests size dimension, the
-# web server needs to log them. This is how to add them:
-#
-# nginx:
-# log_format netdata '$remote_addr - $remote_user [$time_local] '
-# '"$request" $status $body_bytes_sent '
-# '$request_length $request_time $upstream_response_time '
-# '"$http_referer" "$http_user_agent"';
-# access_log /var/log/nginx/access.log netdata;
-#
-# apache (you need mod_logio enabled):
-# LogFormat "%h %l %u %t \"%r\" %>s %O %I %D \"%{Referer}i\" \"%{User-Agent}i\"" vhost_netdata
-# LogFormat "%h %l %u %t \"%r\" %>s %O %I %D \"%{Referer}i\" \"%{User-Agent}i\"" netdata
-# CustomLog "/var/log/apache2/access.log" netdata
-
-# ----------------------------------------------------------------------
-# VHOST AND PORT
-# if you want to graph requests/sec per virtual host and per port (e.g. to compare the number of HTTP vs HTTPS requests)
-
-# in apache : (%v gives the hostname, %p the port number)
-# LogFormat "%v %p %h %t \"%r\" %>s %O %I %D \"%{Referer}i\" \"%{User-Agent}i\"" vhost_netdata
-#
-# and in this file, in the apache_vhosts_log section, add:
-# custom_log_format:
-# pattern: '(?P<vhost>[a-zA-Z\d.-_]+) (?P<port>\d+) (?P<address>[\da-f.:]+) \[.*\] "(?P<method>[A-Z]+)[^"]*" (?P<code>[1-9]\d{2}) (?P<bytes_sent>\d+) (?P<resp_length>\d+) (?P<resp_time>\d+)'
-
-# in nginx: ($host or $http_host give the hostname, $server_port the port number)
-# log_format netdatavhost '$host $server_port $remote_addr - $remote_user [$time_local] '
-# '"$request" $status $body_bytes_sent '
-# '$request_length $request_time $upstream_response_time '
-# '"$http_referer" "$http_user_agent"';
-#
-# access_log /var/log/nginx/access.log netdatavhost;
-#
-# be aware that the access_log directive in a server{} block overrides the one in http{}. If your vhosts have individual log
-# files, you have to specify the general netdata log in each vhost as a second access_log statement.
-#
-# and in this file, in the nginx_log section, add:
-# custom_log_format:
-# pattern: '(?P<vhost>[a-zA-Z\d.-_\[\]]+) (?P<port>\d+) (?P<address>[\da-f.:]+) .* "(?P<method>[A-Z]+)[^"]*" (?P<code>[1-9]\d{2}) (?P<bytes_sent>\d+) (?P<resp_length>\d+) (?P<resp_time>\d+)'
-
-# ----------------------------------------------------------------------
-# AUTO-DETECTION JOBS
-# only one of them per web server will run (when they have the same name)
-
-
-# -------------------------------------------
-# nginx log on various distros
-
-# debian, arch
-nginx_log:
- name: 'nginx'
- path: '/var/log/nginx/access.log'
-
-# gentoo
-nginx_log2:
- name: 'nginx'
- path: '/var/log/nginx/localhost.access_log'
-
-
-# -------------------------------------------
-# apache log on various distros
-
-# debian
-apache_log:
- name: 'apache'
- path: '/var/log/apache2/access.log'
-
-# gentoo
-apache_log2:
- name: 'apache'
- path: '/var/log/apache2/access_log'
-
-# arch
-apache_log3:
- name: 'apache'
- path: '/var/log/httpd/access_log'
-
-# debian
-apache_vhosts_log:
- name: 'apache_vhosts'
- path: '/var/log/apache2/other_vhosts_access.log'
-
-
-# -------------------------------------------
-# gunicorn log on various distros
-
-gunicorn_log:
- name: 'gunicorn'
- path: '/var/log/gunicorn/access.log'
-
-gunicorn_log2:
- name: 'gunicorn'
- path: '/var/log/gunicorn/gunicorn-access.log'
-
-# -------------------------------------------
-# Apache Cache
-apache_cache:
- name: 'apache_cache'
- type: 'apache_cache'
- path: '/var/log/apache/cache.log'
-
-apache2_cache:
- name: 'apache_cache'
- type: 'apache_cache'
- path: '/var/log/apache2/cache.log'
-
-httpd_cache:
- name: 'apache_cache'
- type: 'apache_cache'
- path: '/var/log/httpd/cache.log'
-
-# -------------------------------------------
-# Squid
-
-# debian/ubuntu
-squid_log1:
- name: 'squid'
- type: 'squid'
- path: '/var/log/squid3/access.log'
-
-# gentoo
-squid_log2:
- name: 'squid'
- type: 'squid'
- path: '/var/log/squid/access.log'