author     Daniel Baumann <daniel.baumann@progress-linux.org>  2021-02-07 11:49:00 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2021-02-07 12:42:05 +0000
commit     2e85f9325a797977eea9dfea0a925775ddd211d9 (patch)
tree       452c7f30d62fca5755f659b99e4e53c7b03afc21 /collectors/python.d.plugin
parent     Releasing debian version 1.19.0-4. (diff)
download   netdata-2e85f9325a797977eea9dfea0a925775ddd211d9.tar.xz
           netdata-2e85f9325a797977eea9dfea0a925775ddd211d9.zip
Merging upstream version 1.29.0.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'collectors/python.d.plugin')
-rw-r--r--  collectors/python.d.plugin/.keep | 0
-rw-r--r--  collectors/python.d.plugin/Makefile.am | 6
-rw-r--r--  collectors/python.d.plugin/Makefile.in | 2092
-rw-r--r--  collectors/python.d.plugin/README.md | 36
-rw-r--r--  collectors/python.d.plugin/adaptec_raid/README.md | 48
-rw-r--r--  collectors/python.d.plugin/adaptec_raid/adaptec_raid.chart.py | 2
-rw-r--r--  collectors/python.d.plugin/alarms/Makefile.inc (renamed from collectors/python.d.plugin/unbound/Makefile.inc) | 6
-rw-r--r--  collectors/python.d.plugin/alarms/README.md | 58
-rw-r--r--  collectors/python.d.plugin/alarms/alarms.chart.py | 71
-rw-r--r--  collectors/python.d.plugin/alarms/alarms.conf | 50
-rw-r--r--  collectors/python.d.plugin/am2320/README.md | 31
-rw-r--r--  collectors/python.d.plugin/am2320/am2320.chart.py | 9
-rw-r--r--  collectors/python.d.plugin/am2320/am2320.conf | 2
-rw-r--r--  collectors/python.d.plugin/anomalies/Makefile.inc | 13
-rw-r--r--  collectors/python.d.plugin/anomalies/README.md | 231
-rw-r--r--  collectors/python.d.plugin/anomalies/anomalies.chart.py | 349
-rw-r--r--  collectors/python.d.plugin/anomalies/anomalies.conf | 181
-rw-r--r--  collectors/python.d.plugin/apache/README.md | 24
-rw-r--r--  collectors/python.d.plugin/apache/apache.chart.py | 3
-rw-r--r--  collectors/python.d.plugin/beanstalk/README.md | 22
-rw-r--r--  collectors/python.d.plugin/beanstalk/beanstalk.chart.py | 4
-rw-r--r--  collectors/python.d.plugin/bind_rndc/README.md | 22
-rw-r--r--  collectors/python.d.plugin/bind_rndc/bind_rndc.chart.py | 4
-rw-r--r--  collectors/python.d.plugin/boinc/README.md | 25
-rw-r--r--  collectors/python.d.plugin/boinc/boinc.chart.py | 2
-rw-r--r--  collectors/python.d.plugin/ceph/README.md | 20
-rw-r--r--  collectors/python.d.plugin/ceph/ceph.chart.py | 56
-rw-r--r--  collectors/python.d.plugin/ceph/ceph.conf | 4
-rw-r--r--  collectors/python.d.plugin/chrony/README.md | 37
-rw-r--r--  collectors/python.d.plugin/couchdb/README.md | 18
-rw-r--r--  collectors/python.d.plugin/couchdb/couchdb.chart.py | 24
-rw-r--r--  collectors/python.d.plugin/dns_query_time/README.md | 20
-rw-r--r--  collectors/python.d.plugin/dns_query_time/dns_query_time.chart.py | 2
-rw-r--r--  collectors/python.d.plugin/dnsdist/README.md | 18
-rw-r--r--  collectors/python.d.plugin/dnsdist/dnsdist.chart.py | 4
-rw-r--r--  collectors/python.d.plugin/dockerd/README.md | 20
-rw-r--r--  collectors/python.d.plugin/dockerd/dockerd.chart.py | 5
-rw-r--r--  collectors/python.d.plugin/dovecot/README.md | 24
-rw-r--r--  collectors/python.d.plugin/dovecot/dovecot.chart.py | 5
-rw-r--r--  collectors/python.d.plugin/elasticsearch/README.md | 22
-rw-r--r--  collectors/python.d.plugin/elasticsearch/elasticsearch.chart.py | 8
-rw-r--r--  collectors/python.d.plugin/energid/README.md | 22
-rw-r--r--  collectors/python.d.plugin/energid/energid.chart.py | 19
-rw-r--r--  collectors/python.d.plugin/example/README.md | 7
-rw-r--r--  collectors/python.d.plugin/example/example.chart.py | 1
-rw-r--r--  collectors/python.d.plugin/exim/README.md | 27
-rw-r--r--  collectors/python.d.plugin/exim/exim.chart.py | 1
-rw-r--r--  collectors/python.d.plugin/fail2ban/README.md | 22
-rw-r--r--  collectors/python.d.plugin/fail2ban/fail2ban.chart.py | 20
-rw-r--r--  collectors/python.d.plugin/freeradius/README.md | 18
-rw-r--r--  collectors/python.d.plugin/freeradius/freeradius.chart.py | 2
-rw-r--r--  collectors/python.d.plugin/gearman/README.md | 24
-rw-r--r--  collectors/python.d.plugin/gearman/gearman.chart.py | 26
-rw-r--r--  collectors/python.d.plugin/go_expvar/README.md | 58
-rw-r--r--  collectors/python.d.plugin/go_expvar/go_expvar.chart.py | 3
-rw-r--r--  collectors/python.d.plugin/haproxy/README.md | 20
-rw-r--r--  collectors/python.d.plugin/haproxy/haproxy.chart.py | 7
-rw-r--r--  collectors/python.d.plugin/hddtemp/README.md | 20
-rw-r--r--  collectors/python.d.plugin/hddtemp/hddtemp.chart.py | 4
-rw-r--r--  collectors/python.d.plugin/hpssa/Makefile.inc | 13
-rw-r--r--  collectors/python.d.plugin/hpssa/README.md | 61
-rw-r--r--  collectors/python.d.plugin/hpssa/hpssa.chart.py | 395
-rw-r--r--  collectors/python.d.plugin/hpssa/hpssa.conf (renamed from collectors/python.d.plugin/unbound/unbound.conf) | 42
-rw-r--r--  collectors/python.d.plugin/httpcheck/README.md | 22
-rw-r--r--  collectors/python.d.plugin/httpcheck/httpcheck.chart.py | 3
-rw-r--r--  collectors/python.d.plugin/httpcheck/httpcheck.conf | 3
-rw-r--r--  collectors/python.d.plugin/icecast/README.md | 22
-rw-r--r--  collectors/python.d.plugin/icecast/icecast.chart.py | 1
-rw-r--r--  collectors/python.d.plugin/ipfs/README.md | 47
-rw-r--r--  collectors/python.d.plugin/ipfs/ipfs.chart.py | 37
-rw-r--r--  collectors/python.d.plugin/ipfs/ipfs.conf | 11
-rw-r--r--  collectors/python.d.plugin/isc_dhcpd/README.md | 32
-rw-r--r--  collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.chart.py | 66
-rw-r--r--  collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.conf | 7
-rw-r--r--  collectors/python.d.plugin/litespeed/README.md | 20
-rw-r--r--  collectors/python.d.plugin/litespeed/litespeed.chart.py | 24
-rw-r--r--  collectors/python.d.plugin/logind/README.md | 37
-rw-r--r--  collectors/python.d.plugin/megacli/README.md | 47
-rw-r--r--  collectors/python.d.plugin/megacli/megacli.chart.py | 5
-rw-r--r--  collectors/python.d.plugin/memcached/README.md | 21
-rw-r--r--  collectors/python.d.plugin/memcached/memcached.chart.py | 1
-rw-r--r--  collectors/python.d.plugin/mongodb/README.md | 24
-rw-r--r--  collectors/python.d.plugin/mongodb/mongodb.chart.py | 2
-rw-r--r--  collectors/python.d.plugin/monit/README.md | 18
-rw-r--r--  collectors/python.d.plugin/monit/monit.chart.py | 28
-rw-r--r--  collectors/python.d.plugin/mysql/README.md | 32
-rw-r--r--  collectors/python.d.plugin/mysql/mysql.chart.py | 3
-rw-r--r--  collectors/python.d.plugin/nginx/README.md | 24
-rw-r--r--  collectors/python.d.plugin/nginx/nginx.chart.py | 1
-rw-r--r--  collectors/python.d.plugin/nginx_plus/README.md | 25
-rw-r--r--  collectors/python.d.plugin/nginx_plus/nginx_plus.chart.py | 1
-rw-r--r--  collectors/python.d.plugin/nsd/README.md | 12
-rw-r--r--  collectors/python.d.plugin/nsd/nsd.chart.py | 1
-rw-r--r--  collectors/python.d.plugin/ntpd/README.md | 22
-rw-r--r--  collectors/python.d.plugin/ntpd/ntpd.chart.py | 3
-rw-r--r--  collectors/python.d.plugin/nvidia_smi/README.md | 58
-rw-r--r--  collectors/python.d.plugin/nvidia_smi/nvidia_smi.chart.py | 175
-rw-r--r--  collectors/python.d.plugin/nvidia_smi/nvidia_smi.conf | 4
-rw-r--r--  collectors/python.d.plugin/openldap/README.md | 20
-rw-r--r--  collectors/python.d.plugin/openldap/openldap.chart.py | 25
-rw-r--r--  collectors/python.d.plugin/openldap/openldap.conf | 15
-rw-r--r--  collectors/python.d.plugin/oracledb/README.md | 31
-rw-r--r--  collectors/python.d.plugin/oracledb/oracledb.chart.py | 121
-rw-r--r--  collectors/python.d.plugin/ovpn_status_log/README.md | 22
-rw-r--r--  collectors/python.d.plugin/ovpn_status_log/ovpn_status_log.chart.py | 3
-rw-r--r--  collectors/python.d.plugin/phpfpm/README.md | 54
-rw-r--r--  collectors/python.d.plugin/phpfpm/phpfpm.chart.py | 10
-rw-r--r--  collectors/python.d.plugin/portcheck/README.md | 20
-rw-r--r--  collectors/python.d.plugin/portcheck/portcheck.chart.py | 1
-rw-r--r--  collectors/python.d.plugin/postfix/README.md | 12
-rw-r--r--  collectors/python.d.plugin/postgres/README.md | 49
-rw-r--r--  collectors/python.d.plugin/postgres/postgres.chart.py | 105
-rw-r--r--  collectors/python.d.plugin/postgres/postgres.conf | 9
-rw-r--r--  collectors/python.d.plugin/powerdns/README.md | 20
-rw-r--r--  collectors/python.d.plugin/powerdns/powerdns.chart.py | 1
-rw-r--r--  collectors/python.d.plugin/proxysql/README.md | 33
-rw-r--r--  collectors/python.d.plugin/proxysql/proxysql.chart.py | 5
-rw-r--r--  collectors/python.d.plugin/puppet/README.md | 18
-rw-r--r--  collectors/python.d.plugin/puppet/puppet.chart.py | 8
-rw-r--r--  collectors/python.d.plugin/python.d.conf | 3
-rw-r--r--  collectors/python.d.plugin/python.d.plugin | 784
-rw-r--r--  collectors/python.d.plugin/python.d.plugin.in | 86
-rw-r--r--  collectors/python.d.plugin/python_modules/bases/FrameworkServices/ExecutableService.py | 8
-rw-r--r--  collectors/python.d.plugin/python_modules/bases/FrameworkServices/MySQLService.py | 6
-rw-r--r--  collectors/python.d.plugin/python_modules/bases/FrameworkServices/SimpleService.py | 9
-rw-r--r--  collectors/python.d.plugin/python_modules/bases/FrameworkServices/SocketService.py | 2
-rw-r--r--  collectors/python.d.plugin/python_modules/bases/FrameworkServices/UrlService.py | 29
-rw-r--r--  collectors/python.d.plugin/python_modules/bases/charts.py | 17
-rw-r--r--  collectors/python.d.plugin/python_modules/bases/collection.py | 37
-rw-r--r--  collectors/python.d.plugin/python_modules/bases/loggers.py | 14
-rw-r--r--  collectors/python.d.plugin/python_modules/third_party/filelock.py | 451
-rw-r--r--  collectors/python.d.plugin/python_modules/third_party/monotonic.py | 42
-rw-r--r--  collectors/python.d.plugin/rabbitmq/README.md | 46
-rw-r--r--  collectors/python.d.plugin/rabbitmq/rabbitmq.chart.py | 152
-rw-r--r--  collectors/python.d.plugin/rabbitmq/rabbitmq.conf | 6
-rw-r--r--  collectors/python.d.plugin/redis/README.md | 20
-rw-r--r--  collectors/python.d.plugin/redis/redis.chart.py | 46
-rw-r--r--  collectors/python.d.plugin/rethinkdbs/README.md | 20
-rw-r--r--  collectors/python.d.plugin/rethinkdbs/rethinkdbs.chart.py | 1
-rw-r--r--  collectors/python.d.plugin/retroshare/README.md | 46
-rw-r--r--  collectors/python.d.plugin/retroshare/retroshare.chart.py | 1
-rw-r--r--  collectors/python.d.plugin/riakkv/README.md | 24
-rw-r--r--  collectors/python.d.plugin/riakkv/riakkv.chart.py | 55
-rw-r--r--  collectors/python.d.plugin/samba/README.md | 26
-rw-r--r--  collectors/python.d.plugin/samba/samba.chart.py | 33
-rw-r--r--  collectors/python.d.plugin/sensors/README.md | 20
-rw-r--r--  collectors/python.d.plugin/sensors/sensors.chart.py | 6
-rw-r--r--  collectors/python.d.plugin/smartd_log/README.md | 22
-rw-r--r--  collectors/python.d.plugin/smartd_log/smartd_log.chart.py | 19
-rw-r--r--  collectors/python.d.plugin/spigotmc/README.md | 20
-rw-r--r--  collectors/python.d.plugin/spigotmc/spigotmc.chart.py | 17
-rw-r--r--  collectors/python.d.plugin/springboot/README.md | 24
-rw-r--r--  collectors/python.d.plugin/springboot/springboot.chart.py | 6
-rw-r--r--  collectors/python.d.plugin/springboot/springboot.conf | 8
-rw-r--r--  collectors/python.d.plugin/squid/README.md | 20
-rw-r--r--  collectors/python.d.plugin/squid/squid.chart.py | 1
-rw-r--r--  collectors/python.d.plugin/tomcat/README.md | 20
-rw-r--r--  collectors/python.d.plugin/tomcat/tomcat.chart.py | 2
-rw-r--r--  collectors/python.d.plugin/tor/README.md | 24
-rw-r--r--  collectors/python.d.plugin/tor/tor.chart.py | 3
-rw-r--r--  collectors/python.d.plugin/traefik/README.md | 20
-rw-r--r--  collectors/python.d.plugin/traefik/traefik.chart.py | 2
-rw-r--r--  collectors/python.d.plugin/unbound/README.md | 114
-rw-r--r--  collectors/python.d.plugin/unbound/unbound.chart.py | 318
-rw-r--r--  collectors/python.d.plugin/uwsgi/README.md | 21
-rw-r--r--  collectors/python.d.plugin/uwsgi/uwsgi.chart.py | 2
-rw-r--r--  collectors/python.d.plugin/varnish/README.md | 102
-rw-r--r--  collectors/python.d.plugin/varnish/varnish.chart.py | 145
-rw-r--r--  collectors/python.d.plugin/w1sensor/README.md | 21
-rw-r--r--  collectors/python.d.plugin/w1sensor/w1sensor.chart.py | 2
-rw-r--r--  collectors/python.d.plugin/web_log/README.md | 36
-rw-r--r--  collectors/python.d.plugin/web_log/web_log.chart.py | 13
172 files changed, 4624 insertions, 4159 deletions
diff --git a/collectors/python.d.plugin/.keep b/collectors/python.d.plugin/.keep
deleted file mode 100644
index e69de29bb..000000000
--- a/collectors/python.d.plugin/.keep
+++ /dev/null
diff --git a/collectors/python.d.plugin/Makefile.am b/collectors/python.d.plugin/Makefile.am
index cb14e3500..1de2d1d54 100644
--- a/collectors/python.d.plugin/Makefile.am
+++ b/collectors/python.d.plugin/Makefile.am
@@ -29,7 +29,6 @@ dist_python_DATA = \
userpythonconfigdir=$(configdir)/python.d
dist_userpythonconfig_DATA = \
- .keep \
$(NULL)
# Explicitly install directories to avoid permission issues due to umask
@@ -41,7 +40,9 @@ dist_pythonconfig_DATA = \
$(NULL)
include adaptec_raid/Makefile.inc
+include alarms/Makefile.inc
include am2320/Makefile.inc
+include anomalies/Makefile.inc
include apache/Makefile.inc
include beanstalk/Makefile.inc
include bind_rndc/Makefile.inc
@@ -64,6 +65,7 @@ include go_expvar/Makefile.inc
include haproxy/Makefile.inc
include hddtemp/Makefile.inc
include httpcheck/Makefile.inc
+include hpssa/Makefile.inc
include icecast/Makefile.inc
include ipfs/Makefile.inc
include isc_dhcpd/Makefile.inc
@@ -103,7 +105,6 @@ include squid/Makefile.inc
include tomcat/Makefile.inc
include tor/Makefile.inc
include traefik/Makefile.inc
-include unbound/Makefile.inc
include uwsgi/Makefile.inc
include varnish/Makefile.inc
include w1sensor/Makefile.inc
@@ -142,6 +143,7 @@ dist_third_party_DATA = \
python_modules/third_party/mcrcon.py \
python_modules/third_party/boinc_client.py \
python_modules/third_party/monotonic.py \
+ python_modules/third_party/filelock.py \
$(NULL)
pythonyaml2dir=$(pythonmodulesdir)/pyyaml2
diff --git a/collectors/python.d.plugin/Makefile.in b/collectors/python.d.plugin/Makefile.in
deleted file mode 100644
index 36a4f0ddb..000000000
--- a/collectors/python.d.plugin/Makefile.in
+++ /dev/null
@@ -1,2092 +0,0 @@
-# Makefile.in generated by automake 1.15.1 from Makefile.am.
-# @configure_input@
-
-# Copyright (C) 1994-2017 Free Software Foundation, Inc.
-
-# This Makefile.in is free software; the Free Software Foundation
-# gives unlimited permission to copy and/or distribute it,
-# with or without modifications, as long as this notice is preserved.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
-# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
-# PARTICULAR PURPOSE.
-
-@SET_MAKE@
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-
-VPATH = @srcdir@
-am__is_gnu_make = { \
- if test -z '$(MAKELEVEL)'; then \
- false; \
- elif test -n '$(MAKE_HOST)'; then \
- true; \
- elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
- true; \
- else \
- false; \
- fi; \
-}
-am__make_running_with_option = \
- case $${target_option-} in \
- ?) ;; \
- *) echo "am__make_running_with_option: internal error: invalid" \
- "target option '$${target_option-}' specified" >&2; \
- exit 1;; \
- esac; \
- has_opt=no; \
- sane_makeflags=$$MAKEFLAGS; \
- if $(am__is_gnu_make); then \
- sane_makeflags=$$MFLAGS; \
- else \
- case $$MAKEFLAGS in \
- *\\[\ \ ]*) \
- bs=\\; \
- sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
- | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
- esac; \
- fi; \
- skip_next=no; \
- strip_trailopt () \
- { \
- flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
- }; \
- for flg in $$sane_makeflags; do \
- test $$skip_next = yes && { skip_next=no; continue; }; \
- case $$flg in \
- *=*|--*) continue;; \
- -*I) strip_trailopt 'I'; skip_next=yes;; \
- -*I?*) strip_trailopt 'I';; \
- -*O) strip_trailopt 'O'; skip_next=yes;; \
- -*O?*) strip_trailopt 'O';; \
- -*l) strip_trailopt 'l'; skip_next=yes;; \
- -*l?*) strip_trailopt 'l';; \
- -[dEDm]) skip_next=yes;; \
- -[JT]) skip_next=yes;; \
- esac; \
- case $$flg in \
- *$$target_option*) has_opt=yes; break;; \
- esac; \
- done; \
- test $$has_opt = yes
-am__make_dryrun = (target_option=n; $(am__make_running_with_option))
-am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
-pkgdatadir = $(datadir)/@PACKAGE@
-pkgincludedir = $(includedir)/@PACKAGE@
-pkglibdir = $(libdir)/@PACKAGE@
-pkglibexecdir = $(libexecdir)/@PACKAGE@
-am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
-install_sh_DATA = $(install_sh) -c -m 644
-install_sh_PROGRAM = $(install_sh) -c
-install_sh_SCRIPT = $(install_sh) -c
-INSTALL_HEADER = $(INSTALL_DATA)
-transform = $(program_transform_name)
-NORMAL_INSTALL = :
-PRE_INSTALL = :
-POST_INSTALL = :
-NORMAL_UNINSTALL = :
-PRE_UNINSTALL = :
-POST_UNINSTALL = :
-build_triplet = @build@
-host_triplet = @host@
-subdir = collectors/python.d.plugin
-ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
-am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
- $(top_srcdir)/build/m4/ax_c__generic.m4 \
- $(top_srcdir)/build/m4/ax_c_lto.m4 \
- $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
- $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
- $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
- $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
- $(top_srcdir)/build/m4/ax_pthread.m4 \
- $(top_srcdir)/build/m4/jemalloc.m4 \
- $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
-am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
- $(ACLOCAL_M4)
-DIST_COMMON = $(srcdir)/Makefile.am $(dist_plugins_SCRIPTS) \
- $(dist_python_SCRIPTS) $(dist_bases_DATA) \
- $(dist_bases_framework_services_DATA) $(dist_libconfig_DATA) \
- $(dist_noinst_DATA) $(dist_python_DATA) \
- $(dist_python_urllib3_DATA) \
- $(dist_python_urllib3_backports_DATA) \
- $(dist_python_urllib3_contrib_DATA) \
- $(dist_python_urllib3_packages_DATA) \
- $(dist_python_urllib3_securetransport_DATA) \
- $(dist_python_urllib3_ssl_match_hostname_DATA) \
- $(dist_python_urllib3_util_DATA) $(dist_pythonconfig_DATA) \
- $(dist_pythonmodules_DATA) $(dist_pythonyaml2_DATA) \
- $(dist_pythonyaml3_DATA) $(dist_third_party_DATA) \
- $(dist_userpythonconfig_DATA) $(am__DIST_COMMON)
-mkinstalldirs = $(install_sh) -d
-CONFIG_HEADER = $(top_builddir)/config.h
-CONFIG_CLEAN_FILES =
-CONFIG_CLEAN_VPATH_FILES =
-am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
-am__vpath_adj = case $$p in \
- $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
- *) f=$$p;; \
- esac;
-am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
-am__install_max = 40
-am__nobase_strip_setup = \
- srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
-am__nobase_strip = \
- for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
-am__nobase_list = $(am__nobase_strip_setup); \
- for p in $$list; do echo "$$p $$p"; done | \
- sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
- $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
- if (++n[$$2] == $(am__install_max)) \
- { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
- END { for (dir in files) print dir, files[dir] }'
-am__base_list = \
- sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
- sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
-am__uninstall_files_from_dir = { \
- test -z "$$files" \
- || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \
- || { echo " ( cd '$$dir' && rm -f" $$files ")"; \
- $(am__cd) "$$dir" && rm -f $$files; }; \
- }
-am__installdirs = "$(DESTDIR)$(pluginsdir)" "$(DESTDIR)$(pythondir)" \
- "$(DESTDIR)$(basesdir)" \
- "$(DESTDIR)$(bases_framework_servicesdir)" \
- "$(DESTDIR)$(libconfigdir)" "$(DESTDIR)$(pythondir)" \
- "$(DESTDIR)$(python_urllib3dir)" \
- "$(DESTDIR)$(python_urllib3_backportsdir)" \
- "$(DESTDIR)$(python_urllib3_contribdir)" \
- "$(DESTDIR)$(python_urllib3_packagesdir)" \
- "$(DESTDIR)$(python_urllib3_securetransportdir)" \
- "$(DESTDIR)$(python_urllib3_ssl_match_hostnamedir)" \
- "$(DESTDIR)$(python_urllib3_utildir)" \
- "$(DESTDIR)$(pythonconfigdir)" "$(DESTDIR)$(pythonmodulesdir)" \
- "$(DESTDIR)$(pythonyaml2dir)" "$(DESTDIR)$(pythonyaml3dir)" \
- "$(DESTDIR)$(third_partydir)" \
- "$(DESTDIR)$(userpythonconfigdir)"
-SCRIPTS = $(dist_plugins_SCRIPTS) $(dist_python_SCRIPTS)
-AM_V_P = $(am__v_P_@AM_V@)
-am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
-am__v_P_0 = false
-am__v_P_1 = :
-AM_V_GEN = $(am__v_GEN_@AM_V@)
-am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
-am__v_GEN_0 = @echo " GEN " $@;
-am__v_GEN_1 =
-AM_V_at = $(am__v_at_@AM_V@)
-am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
-am__v_at_0 = @
-am__v_at_1 =
-SOURCES =
-DIST_SOURCES =
-am__can_run_installinfo = \
- case $$AM_UPDATE_INFO_DIR in \
- n|no|NO) false;; \
- *) (install-info --version) >/dev/null 2>&1;; \
- esac
-DATA = $(dist_bases_DATA) $(dist_bases_framework_services_DATA) \
- $(dist_libconfig_DATA) $(dist_noinst_DATA) $(dist_python_DATA) \
- $(dist_python_urllib3_DATA) \
- $(dist_python_urllib3_backports_DATA) \
- $(dist_python_urllib3_contrib_DATA) \
- $(dist_python_urllib3_packages_DATA) \
- $(dist_python_urllib3_securetransport_DATA) \
- $(dist_python_urllib3_ssl_match_hostname_DATA) \
- $(dist_python_urllib3_util_DATA) $(dist_pythonconfig_DATA) \
- $(dist_pythonmodules_DATA) $(dist_pythonyaml2_DATA) \
- $(dist_pythonyaml3_DATA) $(dist_third_party_DATA) \
- $(dist_userpythonconfig_DATA)
-am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
-am__DIST_COMMON = $(srcdir)/Makefile.in \
- $(srcdir)/adaptec_raid/Makefile.inc \
- $(srcdir)/am2320/Makefile.inc $(srcdir)/apache/Makefile.inc \
- $(srcdir)/beanstalk/Makefile.inc \
- $(srcdir)/bind_rndc/Makefile.inc $(srcdir)/boinc/Makefile.inc \
- $(srcdir)/ceph/Makefile.inc $(srcdir)/chrony/Makefile.inc \
- $(srcdir)/couchdb/Makefile.inc \
- $(srcdir)/dns_query_time/Makefile.inc \
- $(srcdir)/dnsdist/Makefile.inc $(srcdir)/dockerd/Makefile.inc \
- $(srcdir)/dovecot/Makefile.inc \
- $(srcdir)/elasticsearch/Makefile.inc \
- $(srcdir)/energid/Makefile.inc $(srcdir)/example/Makefile.inc \
- $(srcdir)/exim/Makefile.inc $(srcdir)/fail2ban/Makefile.inc \
- $(srcdir)/freeradius/Makefile.inc \
- $(srcdir)/gearman/Makefile.inc \
- $(srcdir)/go_expvar/Makefile.inc \
- $(srcdir)/haproxy/Makefile.inc $(srcdir)/hddtemp/Makefile.inc \
- $(srcdir)/httpcheck/Makefile.inc \
- $(srcdir)/icecast/Makefile.inc $(srcdir)/ipfs/Makefile.inc \
- $(srcdir)/isc_dhcpd/Makefile.inc \
- $(srcdir)/litespeed/Makefile.inc $(srcdir)/logind/Makefile.inc \
- $(srcdir)/megacli/Makefile.inc \
- $(srcdir)/memcached/Makefile.inc \
- $(srcdir)/mongodb/Makefile.inc $(srcdir)/monit/Makefile.inc \
- $(srcdir)/mysql/Makefile.inc $(srcdir)/nginx/Makefile.inc \
- $(srcdir)/nginx_plus/Makefile.inc $(srcdir)/nsd/Makefile.inc \
- $(srcdir)/ntpd/Makefile.inc $(srcdir)/nvidia_smi/Makefile.inc \
- $(srcdir)/openldap/Makefile.inc \
- $(srcdir)/oracledb/Makefile.inc \
- $(srcdir)/ovpn_status_log/Makefile.inc \
- $(srcdir)/phpfpm/Makefile.inc $(srcdir)/portcheck/Makefile.inc \
- $(srcdir)/postfix/Makefile.inc $(srcdir)/postgres/Makefile.inc \
- $(srcdir)/powerdns/Makefile.inc \
- $(srcdir)/proxysql/Makefile.inc $(srcdir)/puppet/Makefile.inc \
- $(srcdir)/rabbitmq/Makefile.inc $(srcdir)/redis/Makefile.inc \
- $(srcdir)/rethinkdbs/Makefile.inc \
- $(srcdir)/retroshare/Makefile.inc \
- $(srcdir)/riakkv/Makefile.inc $(srcdir)/samba/Makefile.inc \
- $(srcdir)/sensors/Makefile.inc \
- $(srcdir)/smartd_log/Makefile.inc \
- $(srcdir)/spigotmc/Makefile.inc \
- $(srcdir)/springboot/Makefile.inc $(srcdir)/squid/Makefile.inc \
- $(srcdir)/tomcat/Makefile.inc $(srcdir)/tor/Makefile.inc \
- $(srcdir)/traefik/Makefile.inc $(srcdir)/unbound/Makefile.inc \
- $(srcdir)/uwsgi/Makefile.inc $(srcdir)/varnish/Makefile.inc \
- $(srcdir)/w1sensor/Makefile.inc $(srcdir)/web_log/Makefile.inc \
- $(top_srcdir)/build/subst.inc
-DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
-ACLOCAL = @ACLOCAL@
-AMTAR = @AMTAR@
-AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
-AUTOCONF = @AUTOCONF@
-AUTOHEADER = @AUTOHEADER@
-AUTOMAKE = @AUTOMAKE@
-AWK = @AWK@
-CC = @CC@
-CCDEPMODE = @CCDEPMODE@
-CFLAGS = @CFLAGS@
-CMOCKA_CFLAGS = @CMOCKA_CFLAGS@
-CMOCKA_LIBS = @CMOCKA_LIBS@
-CPP = @CPP@
-CPPFLAGS = @CPPFLAGS@
-CUPSCONFIG = @CUPSCONFIG@
-CXX = @CXX@
-CXXDEPMODE = @CXXDEPMODE@
-CXXFLAGS = @CXXFLAGS@
-CXX_BINARY = @CXX_BINARY@
-CYGPATH_W = @CYGPATH_W@
-DEFS = @DEFS@
-DEPDIR = @DEPDIR@
-ECHO_C = @ECHO_C@
-ECHO_N = @ECHO_N@
-ECHO_T = @ECHO_T@
-EGREP = @EGREP@
-ENABLE_UNITTESTS = @ENABLE_UNITTESTS@
-EXEEXT = @EXEEXT@
-GREP = @GREP@
-INSTALL = @INSTALL@
-INSTALL_DATA = @INSTALL_DATA@
-INSTALL_PROGRAM = @INSTALL_PROGRAM@
-INSTALL_SCRIPT = @INSTALL_SCRIPT@
-INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
-IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
-IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
-JSON_CFLAGS = @JSON_CFLAGS@
-JSON_LIBS = @JSON_LIBS@
-LDFLAGS = @LDFLAGS@
-LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
-LIBCAP_LIBS = @LIBCAP_LIBS@
-LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
-LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
-LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
-LIBCURL_LIBS = @LIBCURL_LIBS@
-LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
-LIBMNL_LIBS = @LIBMNL_LIBS@
-LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
-LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
-LIBOBJS = @LIBOBJS@
-LIBS = @LIBS@
-LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
-LIBSSL_LIBS = @LIBSSL_LIBS@
-LTLIBOBJS = @LTLIBOBJS@
-MAINT = @MAINT@
-MAKEINFO = @MAKEINFO@
-MATH_CFLAGS = @MATH_CFLAGS@
-MATH_LIBS = @MATH_LIBS@
-MKDIR_P = @MKDIR_P@
-NFACCT_CFLAGS = @NFACCT_CFLAGS@
-NFACCT_LIBS = @NFACCT_LIBS@
-OBJEXT = @OBJEXT@
-OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
-OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
-OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
-OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
-OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
-OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
-OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
-OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
-OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
-OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
-OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
-OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
-OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
-OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
-OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
-OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
-OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
-OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
-OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
-OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
-OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
-OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
-OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
-OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
-OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
-OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
-OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
-PACKAGE = @PACKAGE@
-PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
-PACKAGE_NAME = @PACKAGE_NAME@
-PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
-PACKAGE_STRING = @PACKAGE_STRING@
-PACKAGE_TARNAME = @PACKAGE_TARNAME@
-PACKAGE_URL = @PACKAGE_URL@
-PACKAGE_VERSION = @PACKAGE_VERSION@
-PATH_SEPARATOR = @PATH_SEPARATOR@
-PKG_CONFIG = @PKG_CONFIG@
-PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
-PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
-PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
-PROTOBUF_LIBS = @PROTOBUF_LIBS@
-PROTOC = @PROTOC@
-PTHREAD_CC = @PTHREAD_CC@
-PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
-PTHREAD_LIBS = @PTHREAD_LIBS@
-SET_MAKE = @SET_MAKE@
-SHELL = @SHELL@
-SSE_CANDIDATE = @SSE_CANDIDATE@
-STRIP = @STRIP@
-TEST_CFLAGS = @TEST_CFLAGS@
-TEST_LIBS = @TEST_LIBS@
-UUID_CFLAGS = @UUID_CFLAGS@
-UUID_LIBS = @UUID_LIBS@
-VERSION = @VERSION@
-XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
-XENLIGHT_LIBS = @XENLIGHT_LIBS@
-YAJL_CFLAGS = @YAJL_CFLAGS@
-YAJL_LIBS = @YAJL_LIBS@
-ZLIB_CFLAGS = @ZLIB_CFLAGS@
-ZLIB_LIBS = @ZLIB_LIBS@
-abs_builddir = @abs_builddir@
-abs_srcdir = @abs_srcdir@
-abs_top_builddir = @abs_top_builddir@
-abs_top_srcdir = @abs_top_srcdir@
-ac_ct_CC = @ac_ct_CC@
-ac_ct_CXX = @ac_ct_CXX@
-am__include = @am__include@
-am__leading_dot = @am__leading_dot@
-am__quote = @am__quote@
-am__tar = @am__tar@
-am__untar = @am__untar@
-ax_pthread_config = @ax_pthread_config@
-bindir = @bindir@
-build = @build@
-build_alias = @build_alias@
-build_cpu = @build_cpu@
-build_os = @build_os@
-build_target = @build_target@
-build_vendor = @build_vendor@
-builddir = @builddir@
-cachedir = @cachedir@
-chartsdir = @chartsdir@
-configdir = @configdir@
-datadir = @datadir@
-datarootdir = @datarootdir@
-docdir = @docdir@
-dvidir = @dvidir@
-exec_prefix = @exec_prefix@
-has_jemalloc = @has_jemalloc@
-has_tcmalloc = @has_tcmalloc@
-host = @host@
-host_alias = @host_alias@
-host_cpu = @host_cpu@
-host_os = @host_os@
-host_vendor = @host_vendor@
-htmldir = @htmldir@
-includedir = @includedir@
-infodir = @infodir@
-install_sh = @install_sh@
-libconfigdir = @libconfigdir@
-libdir = @libdir@
-libexecdir = @libexecdir@
-localedir = @localedir@
-localstatedir = @localstatedir@
-logdir = @logdir@
-mandir = @mandir@
-mkdir_p = @mkdir_p@
-nodedir = @nodedir@
-oldincludedir = @oldincludedir@
-pdfdir = @pdfdir@
-pluginsdir = @pluginsdir@
-prefix = @prefix@
-program_transform_name = @program_transform_name@
-psdir = @psdir@
-pythondir = @pythondir@
-registrydir = @registrydir@
-runstatedir = @runstatedir@
-sbindir = @sbindir@
-sharedstatedir = @sharedstatedir@
-srcdir = @srcdir@
-sysconfdir = @sysconfdir@
-target_alias = @target_alias@
-top_build_prefix = @top_build_prefix@
-top_builddir = @top_builddir@
-top_srcdir = @top_srcdir@
-varlibdir = @varlibdir@
-webdir = @webdir@
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-CLEANFILES = \
- python.d.plugin \
- $(NULL)
-
-SUFFIXES = .in
-dist_libconfig_DATA = \
- python.d.conf \
- $(NULL)
-
-dist_plugins_SCRIPTS = \
- python.d.plugin \
- $(NULL)
-
-
-# do not install these files, but include them in the distribution
-
-# do not install these files, but include them in the distribution
-
-# do not install these files, but include them in the distribution
-
-# do not install these files, but include them in the distribution
-
-# do not install these files, but include them in the distribution
-
-# do not install these files, but include them in the distribution
-
-# do not install these files, but include them in the distribution
-
-# do not install these files, but include them in the distribution
-
-# do not install these files, but include them in the distribution
-
-# do not install these files, but include them in the distribution
-
-# do not install these files, but include them in the distribution
-
-# do not install these files, but include them in the distribution
-
-# do not install these files, but include them in the distribution
-
-# do not install these files, but include them in the distribution
-
-# do not install these files, but include them in the distribution
-
-# do not install these files, but include them in the distribution
-
-# do not install these files, but include them in the distribution
-
-# do not install these files, but include them in the distribution
-
-# do not install these files, but include them in the distribution
-
-# do not install these files, but include them in the distribution
-
-# do not install these files, but include them in the distribution
-
-# do not install these files, but include them in the distribution
-
-# do not install these files, but include them in the distribution
-
-# do not install these files, but include them in the distribution
-
-# do not install these files, but include them in the distribution
-
-# do not install these files, but include them in the distribution
-
-# do not install these files, but include them in the distribution
-
-# do not install these files, but include them in the distribution
-
-# do not install these files, but include them in the distribution
-
-# do not install these files, but include them in the distribution
-
-# do not install these files, but include them in the distribution
-
-# do not install these files, but include them in the distribution
-
-# do not install these files, but include them in the distribution
-
-# do not install these files, but include them in the distribution
-
-# do not install these files, but include them in the distribution
-
-# do not install these files, but include them in the distribution
-
-# do not install these files, but include them in the distribution
-
-# do not install these files, but include them in the distribution
-
-# do not install these files, but include them in the distribution
-
-# do not install these files, but include them in the distribution
-
-# do not install these files, but include them in the distribution
-
-# do not install these files, but include them in the distribution
-
-# do not install these files, but include them in the distribution
-
-# do not install these files, but include them in the distribution
-
-# do not install these files, but include them in the distribution
-
-# do not install these files, but include them in the distribution
-
-# do not install these files, but include them in the distribution
-
-# do not install these files, but include them in the distribution
-
-# do not install these files, but include them in the distribution
-
-# do not install these files, but include them in the distribution
-
-# do not install these files, but include them in the distribution
-
-# do not install these files, but include them in the distribution
-
-# do not install these files, but include them in the distribution
-
-# do not install these files, but include them in the distribution
-
-# do not install these files, but include them in the distribution
-
-# do not install these files, but include them in the distribution
-
-# do not install these files, but include them in the distribution
-
-# do not install these files, but include them in the distribution
-
-# do not install these files, but include them in the distribution
-
-# do not install these files, but include them in the distribution
-
-# do not install these files, but include them in the distribution
-
-# do not install these files, but include them in the distribution
-
-# do not install these files, but include them in the distribution
-
-# do not install these files, but include them in the distribution
-
-# do not install these files, but include them in the distribution
-
-# do not install these files, but include them in the distribution
-
-# do not install these files, but include them in the distribution
-
-# do not install these files, but include them in the distribution
-dist_noinst_DATA = python.d.plugin.in README.md $(NULL) \
- adaptec_raid/README.md adaptec_raid/Makefile.inc \
- am2320/README.md am2320/Makefile.inc apache/README.md \
- apache/Makefile.inc beanstalk/README.md beanstalk/Makefile.inc \
- bind_rndc/README.md bind_rndc/Makefile.inc boinc/README.md \
- boinc/Makefile.inc ceph/README.md ceph/Makefile.inc \
- chrony/README.md chrony/Makefile.inc couchdb/README.md \
- couchdb/Makefile.inc dnsdist/README.md dnsdist/Makefile.inc \
- dns_query_time/README.md dns_query_time/Makefile.inc \
- dockerd/README.md dockerd/Makefile.inc dovecot/README.md \
- dovecot/Makefile.inc elasticsearch/README.md \
- elasticsearch/Makefile.inc energid/README.md \
- energid/Makefile.inc example/README.md example/Makefile.inc \
- exim/README.md exim/Makefile.inc fail2ban/README.md \
- fail2ban/Makefile.inc freeradius/README.md \
- freeradius/Makefile.inc gearman/README.md gearman/Makefile.inc \
- go_expvar/README.md go_expvar/Makefile.inc haproxy/README.md \
- haproxy/Makefile.inc hddtemp/README.md hddtemp/Makefile.inc \
- httpcheck/README.md httpcheck/Makefile.inc icecast/README.md \
- icecast/Makefile.inc ipfs/README.md ipfs/Makefile.inc \
- isc_dhcpd/README.md isc_dhcpd/Makefile.inc litespeed/README.md \
- litespeed/Makefile.inc logind/README.md logind/Makefile.inc \
- megacli/README.md megacli/Makefile.inc memcached/README.md \
- memcached/Makefile.inc mongodb/README.md mongodb/Makefile.inc \
- monit/README.md monit/Makefile.inc mysql/README.md \
- mysql/Makefile.inc nginx/README.md nginx/Makefile.inc \
- nginx_plus/README.md nginx_plus/Makefile.inc \
- nvidia_smi/README.md nvidia_smi/Makefile.inc nsd/README.md \
- nsd/Makefile.inc ntpd/README.md ntpd/Makefile.inc \
- ovpn_status_log/README.md ovpn_status_log/Makefile.inc \
- openldap/README.md openldap/Makefile.inc oracledb/README.md \
- oracledb/Makefile.inc phpfpm/README.md phpfpm/Makefile.inc \
- portcheck/README.md portcheck/Makefile.inc postfix/README.md \
- postfix/Makefile.inc postgres/README.md postgres/Makefile.inc \
- powerdns/README.md powerdns/Makefile.inc proxysql/README.md \
- proxysql/Makefile.inc puppet/README.md puppet/Makefile.inc \
- rabbitmq/README.md rabbitmq/Makefile.inc redis/README.md \
- redis/Makefile.inc rethinkdbs/README.md \
- rethinkdbs/Makefile.inc retroshare/README.md \
- retroshare/Makefile.inc riakkv/README.md riakkv/Makefile.inc \
- samba/README.md samba/Makefile.inc sensors/README.md \
- sensors/Makefile.inc smartd_log/README.md \
- smartd_log/Makefile.inc spigotmc/README.md \
- spigotmc/Makefile.inc springboot/README.md \
- springboot/Makefile.inc squid/README.md squid/Makefile.inc \
- tomcat/README.md tomcat/Makefile.inc tor/README.md \
- tor/Makefile.inc traefik/README.md traefik/Makefile.inc \
- unbound/README.md unbound/Makefile.inc uwsgi/README.md \
- uwsgi/Makefile.inc varnish/README.md varnish/Makefile.inc \
- w1sensor/README.md w1sensor/Makefile.inc web_log/README.md \
- web_log/Makefile.inc
-dist_python_SCRIPTS = \
- $(NULL)
-
-
-# install these files
-
-# install these files
-
-# install these files
-
-# install these files
-
-# install these files
-
-# install these files
-
-# install these files
-
-# install these files
-
-# install these files
-
-# install these files
-
-# install these files
-
-# install these files
-
-# install these files
-
-# install these files
-
-# install these files
-
-# install these files
-
-# install these files
-
-# install these files
-
-# install these files
-
-# install these files
-
-# install these files
-
-# install these files
-
-# install these files
-
-# install these files
-
-# install these files
-
-# install these files
-
-# install these files
-
-# install these files
-
-# install these files
-
-# install these files
-
-# install these files
-
-# install these files
-
-# install these files
-
-# install these files
-
-# install these files
-
-# install these files
-
-# install these files
-
-# install these files
-
-# install these files
-
-# install these files
-
-# install these files
-
-# install these files
-
-# install these files
-
-# install these files
-
-# install these files
-
-# install these files
-
-# install these files
-
-# install these files
-
-# install these files
-
-# install these files
-
-# install these files
-
-# install these files
-
-# install these files
-
-# install these files
-
-# install these files
-
-# install these files
-
-# install these files
-
-# install these files
-
-# install these files
-
-# install these files
-
-# install these files
-
-# install these files
-
-# install these files
-
-# install these files
-
-# install these files
-
-# install these files
-
-# install these files
-
-# install these files
-dist_python_DATA = $(NULL) adaptec_raid/adaptec_raid.chart.py \
- am2320/am2320.chart.py apache/apache.chart.py \
- beanstalk/beanstalk.chart.py bind_rndc/bind_rndc.chart.py \
- boinc/boinc.chart.py ceph/ceph.chart.py chrony/chrony.chart.py \
- couchdb/couchdb.chart.py dnsdist/dnsdist.chart.py \
- dns_query_time/dns_query_time.chart.py \
- dockerd/dockerd.chart.py dovecot/dovecot.chart.py \
- elasticsearch/elasticsearch.chart.py energid/energid.chart.py \
- example/example.chart.py exim/exim.chart.py \
- fail2ban/fail2ban.chart.py freeradius/freeradius.chart.py \
- gearman/gearman.chart.py go_expvar/go_expvar.chart.py \
- haproxy/haproxy.chart.py hddtemp/hddtemp.chart.py \
- httpcheck/httpcheck.chart.py icecast/icecast.chart.py \
- ipfs/ipfs.chart.py isc_dhcpd/isc_dhcpd.chart.py \
- litespeed/litespeed.chart.py logind/logind.chart.py \
- megacli/megacli.chart.py memcached/memcached.chart.py \
- mongodb/mongodb.chart.py monit/monit.chart.py \
- mysql/mysql.chart.py nginx/nginx.chart.py \
- nginx_plus/nginx_plus.chart.py nvidia_smi/nvidia_smi.chart.py \
- nsd/nsd.chart.py ntpd/ntpd.chart.py \
- ovpn_status_log/ovpn_status_log.chart.py \
- openldap/openldap.chart.py oracledb/oracledb.chart.py \
- phpfpm/phpfpm.chart.py portcheck/portcheck.chart.py \
- postfix/postfix.chart.py postgres/postgres.chart.py \
- powerdns/powerdns.chart.py proxysql/proxysql.chart.py \
- puppet/puppet.chart.py rabbitmq/rabbitmq.chart.py \
- redis/redis.chart.py rethinkdbs/rethinkdbs.chart.py \
- retroshare/retroshare.chart.py riakkv/riakkv.chart.py \
- samba/samba.chart.py sensors/sensors.chart.py \
- smartd_log/smartd_log.chart.py spigotmc/spigotmc.chart.py \
- springboot/springboot.chart.py squid/squid.chart.py \
- tomcat/tomcat.chart.py tor/tor.chart.py \
- traefik/traefik.chart.py unbound/unbound.chart.py \
- uwsgi/uwsgi.chart.py varnish/varnish.chart.py \
- w1sensor/w1sensor.chart.py web_log/web_log.chart.py
-userpythonconfigdir = $(configdir)/python.d
-dist_userpythonconfig_DATA = \
- .keep \
- $(NULL)
-
-pythonconfigdir = $(libconfigdir)/python.d
-dist_pythonconfig_DATA = $(NULL) adaptec_raid/adaptec_raid.conf \
- am2320/am2320.conf apache/apache.conf beanstalk/beanstalk.conf \
- bind_rndc/bind_rndc.conf boinc/boinc.conf ceph/ceph.conf \
- chrony/chrony.conf couchdb/couchdb.conf dnsdist/dnsdist.conf \
- dns_query_time/dns_query_time.conf dockerd/dockerd.conf \
- dovecot/dovecot.conf elasticsearch/elasticsearch.conf \
- energid/energid.conf example/example.conf exim/exim.conf \
- fail2ban/fail2ban.conf freeradius/freeradius.conf \
- gearman/gearman.conf go_expvar/go_expvar.conf \
- haproxy/haproxy.conf hddtemp/hddtemp.conf \
- httpcheck/httpcheck.conf icecast/icecast.conf ipfs/ipfs.conf \
- isc_dhcpd/isc_dhcpd.conf litespeed/litespeed.conf \
- logind/logind.conf megacli/megacli.conf \
- memcached/memcached.conf mongodb/mongodb.conf monit/monit.conf \
- mysql/mysql.conf nginx/nginx.conf nginx_plus/nginx_plus.conf \
- nvidia_smi/nvidia_smi.conf nsd/nsd.conf ntpd/ntpd.conf \
- ovpn_status_log/ovpn_status_log.conf openldap/openldap.conf \
- oracledb/oracledb.conf phpfpm/phpfpm.conf \
- portcheck/portcheck.conf postfix/postfix.conf \
- postgres/postgres.conf powerdns/powerdns.conf \
- proxysql/proxysql.conf puppet/puppet.conf \
- rabbitmq/rabbitmq.conf redis/redis.conf \
- rethinkdbs/rethinkdbs.conf retroshare/retroshare.conf \
- riakkv/riakkv.conf samba/samba.conf sensors/sensors.conf \
- smartd_log/smartd_log.conf spigotmc/spigotmc.conf \
- springboot/springboot.conf squid/squid.conf tomcat/tomcat.conf \
- tor/tor.conf traefik/traefik.conf unbound/unbound.conf \
- uwsgi/uwsgi.conf varnish/varnish.conf w1sensor/w1sensor.conf \
- web_log/web_log.conf
-pythonmodulesdir = $(pythondir)/python_modules
-dist_pythonmodules_DATA = \
- python_modules/__init__.py \
- $(NULL)
-
-basesdir = $(pythonmodulesdir)/bases
-dist_bases_DATA = \
- python_modules/bases/__init__.py \
- python_modules/bases/charts.py \
- python_modules/bases/collection.py \
- python_modules/bases/loaders.py \
- python_modules/bases/loggers.py \
- $(NULL)
-
-bases_framework_servicesdir = $(basesdir)/FrameworkServices
-dist_bases_framework_services_DATA = \
- python_modules/bases/FrameworkServices/__init__.py \
- python_modules/bases/FrameworkServices/ExecutableService.py \
- python_modules/bases/FrameworkServices/LogService.py \
- python_modules/bases/FrameworkServices/MySQLService.py \
- python_modules/bases/FrameworkServices/SimpleService.py \
- python_modules/bases/FrameworkServices/SocketService.py \
- python_modules/bases/FrameworkServices/UrlService.py \
- $(NULL)
-
-third_partydir = $(pythonmodulesdir)/third_party
-dist_third_party_DATA = \
- python_modules/third_party/__init__.py \
- python_modules/third_party/ordereddict.py \
- python_modules/third_party/lm_sensors.py \
- python_modules/third_party/mcrcon.py \
- python_modules/third_party/boinc_client.py \
- python_modules/third_party/monotonic.py \
- $(NULL)
-
-pythonyaml2dir = $(pythonmodulesdir)/pyyaml2
-dist_pythonyaml2_DATA = \
- python_modules/pyyaml2/__init__.py \
- python_modules/pyyaml2/composer.py \
- python_modules/pyyaml2/constructor.py \
- python_modules/pyyaml2/cyaml.py \
- python_modules/pyyaml2/dumper.py \
- python_modules/pyyaml2/emitter.py \
- python_modules/pyyaml2/error.py \
- python_modules/pyyaml2/events.py \
- python_modules/pyyaml2/loader.py \
- python_modules/pyyaml2/nodes.py \
- python_modules/pyyaml2/parser.py \
- python_modules/pyyaml2/reader.py \
- python_modules/pyyaml2/representer.py \
- python_modules/pyyaml2/resolver.py \
- python_modules/pyyaml2/scanner.py \
- python_modules/pyyaml2/serializer.py \
- python_modules/pyyaml2/tokens.py \
- $(NULL)
-
-pythonyaml3dir = $(pythonmodulesdir)/pyyaml3
-dist_pythonyaml3_DATA = \
- python_modules/pyyaml3/__init__.py \
- python_modules/pyyaml3/composer.py \
- python_modules/pyyaml3/constructor.py \
- python_modules/pyyaml3/cyaml.py \
- python_modules/pyyaml3/dumper.py \
- python_modules/pyyaml3/emitter.py \
- python_modules/pyyaml3/error.py \
- python_modules/pyyaml3/events.py \
- python_modules/pyyaml3/loader.py \
- python_modules/pyyaml3/nodes.py \
- python_modules/pyyaml3/parser.py \
- python_modules/pyyaml3/reader.py \
- python_modules/pyyaml3/representer.py \
- python_modules/pyyaml3/resolver.py \
- python_modules/pyyaml3/scanner.py \
- python_modules/pyyaml3/serializer.py \
- python_modules/pyyaml3/tokens.py \
- $(NULL)
-
-python_urllib3dir = $(pythonmodulesdir)/urllib3
-dist_python_urllib3_DATA = \
- python_modules/urllib3/__init__.py \
- python_modules/urllib3/_collections.py \
- python_modules/urllib3/connection.py \
- python_modules/urllib3/connectionpool.py \
- python_modules/urllib3/exceptions.py \
- python_modules/urllib3/fields.py \
- python_modules/urllib3/filepost.py \
- python_modules/urllib3/response.py \
- python_modules/urllib3/poolmanager.py \
- python_modules/urllib3/request.py \
- $(NULL)
-
-python_urllib3_utildir = $(python_urllib3dir)/util
-dist_python_urllib3_util_DATA = \
- python_modules/urllib3/util/__init__.py \
- python_modules/urllib3/util/connection.py \
- python_modules/urllib3/util/request.py \
- python_modules/urllib3/util/response.py \
- python_modules/urllib3/util/retry.py \
- python_modules/urllib3/util/selectors.py \
- python_modules/urllib3/util/ssl_.py \
- python_modules/urllib3/util/timeout.py \
- python_modules/urllib3/util/url.py \
- python_modules/urllib3/util/wait.py \
- $(NULL)
-
-python_urllib3_packagesdir = $(python_urllib3dir)/packages
-dist_python_urllib3_packages_DATA = \
- python_modules/urllib3/packages/__init__.py \
- python_modules/urllib3/packages/ordered_dict.py \
- python_modules/urllib3/packages/six.py \
- $(NULL)
-
-python_urllib3_backportsdir = $(python_urllib3_packagesdir)/backports
-dist_python_urllib3_backports_DATA = \
- python_modules/urllib3/packages/backports/__init__.py \
- python_modules/urllib3/packages/backports/makefile.py \
- $(NULL)
-
-python_urllib3_ssl_match_hostnamedir = $(python_urllib3_packagesdir)/ssl_match_hostname
-dist_python_urllib3_ssl_match_hostname_DATA = \
- python_modules/urllib3/packages/ssl_match_hostname/__init__.py \
- python_modules/urllib3/packages/ssl_match_hostname/_implementation.py \
- $(NULL)
-
-python_urllib3_contribdir = $(python_urllib3dir)/contrib
-dist_python_urllib3_contrib_DATA = \
- python_modules/urllib3/contrib/__init__.py \
- python_modules/urllib3/contrib/appengine.py \
- python_modules/urllib3/contrib/ntlmpool.py \
- python_modules/urllib3/contrib/pyopenssl.py \
- python_modules/urllib3/contrib/securetransport.py \
- python_modules/urllib3/contrib/socks.py \
- $(NULL)
-
-python_urllib3_securetransportdir = $(python_urllib3_contribdir)/_securetransport
-dist_python_urllib3_securetransport_DATA = \
- python_modules/urllib3/contrib/_securetransport/__init__.py \
- python_modules/urllib3/contrib/_securetransport/bindings.py \
- python_modules/urllib3/contrib/_securetransport/low_level.py \
- $(NULL)
-
-all: all-am
-
-.SUFFIXES:
-.SUFFIXES: .in
-$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(top_srcdir)/build/subst.inc $(srcdir)/adaptec_raid/Makefile.inc $(srcdir)/am2320/Makefile.inc $(srcdir)/apache/Makefile.inc $(srcdir)/beanstalk/Makefile.inc $(srcdir)/bind_rndc/Makefile.inc $(srcdir)/boinc/Makefile.inc $(srcdir)/ceph/Makefile.inc $(srcdir)/chrony/Makefile.inc $(srcdir)/couchdb/Makefile.inc $(srcdir)/dnsdist/Makefile.inc $(srcdir)/dns_query_time/Makefile.inc $(srcdir)/dockerd/Makefile.inc $(srcdir)/dovecot/Makefile.inc $(srcdir)/elasticsearch/Makefile.inc $(srcdir)/energid/Makefile.inc $(srcdir)/example/Makefile.inc $(srcdir)/exim/Makefile.inc $(srcdir)/fail2ban/Makefile.inc $(srcdir)/freeradius/Makefile.inc $(srcdir)/gearman/Makefile.inc $(srcdir)/go_expvar/Makefile.inc $(srcdir)/haproxy/Makefile.inc $(srcdir)/hddtemp/Makefile.inc $(srcdir)/httpcheck/Makefile.inc $(srcdir)/icecast/Makefile.inc $(srcdir)/ipfs/Makefile.inc $(srcdir)/isc_dhcpd/Makefile.inc $(srcdir)/litespeed/Makefile.inc $(srcdir)/logind/Makefile.inc $(srcdir)/megacli/Makefile.inc $(srcdir)/memcached/Makefile.inc $(srcdir)/mongodb/Makefile.inc $(srcdir)/monit/Makefile.inc $(srcdir)/mysql/Makefile.inc $(srcdir)/nginx/Makefile.inc $(srcdir)/nginx_plus/Makefile.inc $(srcdir)/nvidia_smi/Makefile.inc $(srcdir)/nsd/Makefile.inc $(srcdir)/ntpd/Makefile.inc $(srcdir)/ovpn_status_log/Makefile.inc $(srcdir)/openldap/Makefile.inc $(srcdir)/oracledb/Makefile.inc $(srcdir)/phpfpm/Makefile.inc $(srcdir)/portcheck/Makefile.inc $(srcdir)/postfix/Makefile.inc $(srcdir)/postgres/Makefile.inc $(srcdir)/powerdns/Makefile.inc $(srcdir)/proxysql/Makefile.inc $(srcdir)/puppet/Makefile.inc $(srcdir)/rabbitmq/Makefile.inc $(srcdir)/redis/Makefile.inc $(srcdir)/rethinkdbs/Makefile.inc $(srcdir)/retroshare/Makefile.inc $(srcdir)/riakkv/Makefile.inc $(srcdir)/samba/Makefile.inc $(srcdir)/sensors/Makefile.inc $(srcdir)/smartd_log/Makefile.inc $(srcdir)/spigotmc/Makefile.inc $(srcdir)/springboot/Makefile.inc $(srcdir)/squid/Makefile.inc $(srcdir)/tomcat/Makefile.inc $(srcdir)/tor/Makefile.inc $(srcdir)/traefik/Makefile.inc $(srcdir)/unbound/Makefile.inc $(srcdir)/uwsgi/Makefile.inc $(srcdir)/varnish/Makefile.inc $(srcdir)/w1sensor/Makefile.inc $(srcdir)/web_log/Makefile.inc $(am__configure_deps)
- @for dep in $?; do \
- case '$(am__configure_deps)' in \
- *$$dep*) \
- ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
- && { if test -f $@; then exit 0; else break; fi; }; \
- exit 1;; \
- esac; \
- done; \
- echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/python.d.plugin/Makefile'; \
- $(am__cd) $(top_srcdir) && \
- $(AUTOMAKE) --gnu collectors/python.d.plugin/Makefile
-Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
- @case '$?' in \
- *config.status*) \
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
- *) \
- echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
- cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
- esac;
-$(top_srcdir)/build/subst.inc $(srcdir)/adaptec_raid/Makefile.inc $(srcdir)/am2320/Makefile.inc $(srcdir)/apache/Makefile.inc $(srcdir)/beanstalk/Makefile.inc $(srcdir)/bind_rndc/Makefile.inc $(srcdir)/boinc/Makefile.inc $(srcdir)/ceph/Makefile.inc $(srcdir)/chrony/Makefile.inc $(srcdir)/couchdb/Makefile.inc $(srcdir)/dnsdist/Makefile.inc $(srcdir)/dns_query_time/Makefile.inc $(srcdir)/dockerd/Makefile.inc $(srcdir)/dovecot/Makefile.inc $(srcdir)/elasticsearch/Makefile.inc $(srcdir)/energid/Makefile.inc $(srcdir)/example/Makefile.inc $(srcdir)/exim/Makefile.inc $(srcdir)/fail2ban/Makefile.inc $(srcdir)/freeradius/Makefile.inc $(srcdir)/gearman/Makefile.inc $(srcdir)/go_expvar/Makefile.inc $(srcdir)/haproxy/Makefile.inc $(srcdir)/hddtemp/Makefile.inc $(srcdir)/httpcheck/Makefile.inc $(srcdir)/icecast/Makefile.inc $(srcdir)/ipfs/Makefile.inc $(srcdir)/isc_dhcpd/Makefile.inc $(srcdir)/litespeed/Makefile.inc $(srcdir)/logind/Makefile.inc $(srcdir)/megacli/Makefile.inc $(srcdir)/memcached/Makefile.inc $(srcdir)/mongodb/Makefile.inc $(srcdir)/monit/Makefile.inc $(srcdir)/mysql/Makefile.inc $(srcdir)/nginx/Makefile.inc $(srcdir)/nginx_plus/Makefile.inc $(srcdir)/nvidia_smi/Makefile.inc $(srcdir)/nsd/Makefile.inc $(srcdir)/ntpd/Makefile.inc $(srcdir)/ovpn_status_log/Makefile.inc $(srcdir)/openldap/Makefile.inc $(srcdir)/oracledb/Makefile.inc $(srcdir)/phpfpm/Makefile.inc $(srcdir)/portcheck/Makefile.inc $(srcdir)/postfix/Makefile.inc $(srcdir)/postgres/Makefile.inc $(srcdir)/powerdns/Makefile.inc $(srcdir)/proxysql/Makefile.inc $(srcdir)/puppet/Makefile.inc $(srcdir)/rabbitmq/Makefile.inc $(srcdir)/redis/Makefile.inc $(srcdir)/rethinkdbs/Makefile.inc $(srcdir)/retroshare/Makefile.inc $(srcdir)/riakkv/Makefile.inc $(srcdir)/samba/Makefile.inc $(srcdir)/sensors/Makefile.inc $(srcdir)/smartd_log/Makefile.inc $(srcdir)/spigotmc/Makefile.inc $(srcdir)/springboot/Makefile.inc $(srcdir)/squid/Makefile.inc $(srcdir)/tomcat/Makefile.inc $(srcdir)/tor/Makefile.inc $(srcdir)/traefik/Makefile.inc $(srcdir)/unbound/Makefile.inc $(srcdir)/uwsgi/Makefile.inc $(srcdir)/varnish/Makefile.inc $(srcdir)/w1sensor/Makefile.inc $(srcdir)/web_log/Makefile.inc $(am__empty):
-
-$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-
-$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(am__aclocal_m4_deps):
-install-dist_pluginsSCRIPTS: $(dist_plugins_SCRIPTS)
- @$(NORMAL_INSTALL)
- @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(pluginsdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(pluginsdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \
- done | \
- sed -e 'p;s,.*/,,;n' \
- -e 'h;s|.*|.|' \
- -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \
- $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \
- { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \
- if ($$2 == $$4) { files[d] = files[d] " " $$1; \
- if (++n[d] == $(am__install_max)) { \
- print "f", d, files[d]; n[d] = 0; files[d] = "" } } \
- else { print "f", d "/" $$4, $$1 } } \
- END { for (d in files) print "f", d, files[d] }' | \
- while read type dir files; do \
- if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \
- test -z "$$files" || { \
- echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pluginsdir)$$dir'"; \
- $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pluginsdir)$$dir" || exit $$?; \
- } \
- ; done
-
-uninstall-dist_pluginsSCRIPTS:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || exit 0; \
- files=`for p in $$list; do echo "$$p"; done | \
- sed -e 's,.*/,,;$(transform)'`; \
- dir='$(DESTDIR)$(pluginsdir)'; $(am__uninstall_files_from_dir)
-install-dist_pythonSCRIPTS: $(dist_python_SCRIPTS)
- @$(NORMAL_INSTALL)
- @list='$(dist_python_SCRIPTS)'; test -n "$(pythondir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(pythondir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(pythondir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \
- done | \
- sed -e 'p;s,.*/,,;n' \
- -e 'h;s|.*|.|' \
- -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \
- $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \
- { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \
- if ($$2 == $$4) { files[d] = files[d] " " $$1; \
- if (++n[d] == $(am__install_max)) { \
- print "f", d, files[d]; n[d] = 0; files[d] = "" } } \
- else { print "f", d "/" $$4, $$1 } } \
- END { for (d in files) print "f", d, files[d] }' | \
- while read type dir files; do \
- if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \
- test -z "$$files" || { \
- echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pythondir)$$dir'"; \
- $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pythondir)$$dir" || exit $$?; \
- } \
- ; done
-
-uninstall-dist_pythonSCRIPTS:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_python_SCRIPTS)'; test -n "$(pythondir)" || exit 0; \
- files=`for p in $$list; do echo "$$p"; done | \
- sed -e 's,.*/,,;$(transform)'`; \
- dir='$(DESTDIR)$(pythondir)'; $(am__uninstall_files_from_dir)
-install-dist_basesDATA: $(dist_bases_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_bases_DATA)'; test -n "$(basesdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(basesdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(basesdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(basesdir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(basesdir)" || exit $$?; \
- done
-
-uninstall-dist_basesDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_bases_DATA)'; test -n "$(basesdir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(basesdir)'; $(am__uninstall_files_from_dir)
-install-dist_bases_framework_servicesDATA: $(dist_bases_framework_services_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_bases_framework_services_DATA)'; test -n "$(bases_framework_servicesdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(bases_framework_servicesdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(bases_framework_servicesdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(bases_framework_servicesdir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(bases_framework_servicesdir)" || exit $$?; \
- done
-
-uninstall-dist_bases_framework_servicesDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_bases_framework_services_DATA)'; test -n "$(bases_framework_servicesdir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(bases_framework_servicesdir)'; $(am__uninstall_files_from_dir)
-install-dist_libconfigDATA: $(dist_libconfig_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_libconfig_DATA)'; test -n "$(libconfigdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(libconfigdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(libconfigdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(libconfigdir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(libconfigdir)" || exit $$?; \
- done
-
-uninstall-dist_libconfigDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_libconfig_DATA)'; test -n "$(libconfigdir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(libconfigdir)'; $(am__uninstall_files_from_dir)
-install-dist_pythonDATA: $(dist_python_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_python_DATA)'; test -n "$(pythondir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(pythondir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(pythondir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pythondir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(pythondir)" || exit $$?; \
- done
-
-uninstall-dist_pythonDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_python_DATA)'; test -n "$(pythondir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(pythondir)'; $(am__uninstall_files_from_dir)
-install-dist_python_urllib3DATA: $(dist_python_urllib3_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_python_urllib3_DATA)'; test -n "$(python_urllib3dir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(python_urllib3dir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(python_urllib3dir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(python_urllib3dir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(python_urllib3dir)" || exit $$?; \
- done
-
-uninstall-dist_python_urllib3DATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_python_urllib3_DATA)'; test -n "$(python_urllib3dir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(python_urllib3dir)'; $(am__uninstall_files_from_dir)
-install-dist_python_urllib3_backportsDATA: $(dist_python_urllib3_backports_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_python_urllib3_backports_DATA)'; test -n "$(python_urllib3_backportsdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(python_urllib3_backportsdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(python_urllib3_backportsdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(python_urllib3_backportsdir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(python_urllib3_backportsdir)" || exit $$?; \
- done
-
-uninstall-dist_python_urllib3_backportsDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_python_urllib3_backports_DATA)'; test -n "$(python_urllib3_backportsdir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(python_urllib3_backportsdir)'; $(am__uninstall_files_from_dir)
-install-dist_python_urllib3_contribDATA: $(dist_python_urllib3_contrib_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_python_urllib3_contrib_DATA)'; test -n "$(python_urllib3_contribdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(python_urllib3_contribdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(python_urllib3_contribdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(python_urllib3_contribdir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(python_urllib3_contribdir)" || exit $$?; \
- done
-
-uninstall-dist_python_urllib3_contribDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_python_urllib3_contrib_DATA)'; test -n "$(python_urllib3_contribdir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(python_urllib3_contribdir)'; $(am__uninstall_files_from_dir)
-install-dist_python_urllib3_packagesDATA: $(dist_python_urllib3_packages_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_python_urllib3_packages_DATA)'; test -n "$(python_urllib3_packagesdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(python_urllib3_packagesdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(python_urllib3_packagesdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(python_urllib3_packagesdir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(python_urllib3_packagesdir)" || exit $$?; \
- done
-
-uninstall-dist_python_urllib3_packagesDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_python_urllib3_packages_DATA)'; test -n "$(python_urllib3_packagesdir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(python_urllib3_packagesdir)'; $(am__uninstall_files_from_dir)
-install-dist_python_urllib3_securetransportDATA: $(dist_python_urllib3_securetransport_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_python_urllib3_securetransport_DATA)'; test -n "$(python_urllib3_securetransportdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(python_urllib3_securetransportdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(python_urllib3_securetransportdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(python_urllib3_securetransportdir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(python_urllib3_securetransportdir)" || exit $$?; \
- done
-
-uninstall-dist_python_urllib3_securetransportDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_python_urllib3_securetransport_DATA)'; test -n "$(python_urllib3_securetransportdir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(python_urllib3_securetransportdir)'; $(am__uninstall_files_from_dir)
-install-dist_python_urllib3_ssl_match_hostnameDATA: $(dist_python_urllib3_ssl_match_hostname_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_python_urllib3_ssl_match_hostname_DATA)'; test -n "$(python_urllib3_ssl_match_hostnamedir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(python_urllib3_ssl_match_hostnamedir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(python_urllib3_ssl_match_hostnamedir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(python_urllib3_ssl_match_hostnamedir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(python_urllib3_ssl_match_hostnamedir)" || exit $$?; \
- done
-
-uninstall-dist_python_urllib3_ssl_match_hostnameDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_python_urllib3_ssl_match_hostname_DATA)'; test -n "$(python_urllib3_ssl_match_hostnamedir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(python_urllib3_ssl_match_hostnamedir)'; $(am__uninstall_files_from_dir)
-install-dist_python_urllib3_utilDATA: $(dist_python_urllib3_util_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_python_urllib3_util_DATA)'; test -n "$(python_urllib3_utildir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(python_urllib3_utildir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(python_urllib3_utildir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(python_urllib3_utildir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(python_urllib3_utildir)" || exit $$?; \
- done
-
-uninstall-dist_python_urllib3_utilDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_python_urllib3_util_DATA)'; test -n "$(python_urllib3_utildir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(python_urllib3_utildir)'; $(am__uninstall_files_from_dir)
-install-dist_pythonconfigDATA: $(dist_pythonconfig_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_pythonconfig_DATA)'; test -n "$(pythonconfigdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(pythonconfigdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(pythonconfigdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pythonconfigdir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(pythonconfigdir)" || exit $$?; \
- done
-
-uninstall-dist_pythonconfigDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_pythonconfig_DATA)'; test -n "$(pythonconfigdir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(pythonconfigdir)'; $(am__uninstall_files_from_dir)
-install-dist_pythonmodulesDATA: $(dist_pythonmodules_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_pythonmodules_DATA)'; test -n "$(pythonmodulesdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(pythonmodulesdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(pythonmodulesdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pythonmodulesdir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(pythonmodulesdir)" || exit $$?; \
- done
-
-uninstall-dist_pythonmodulesDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_pythonmodules_DATA)'; test -n "$(pythonmodulesdir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(pythonmodulesdir)'; $(am__uninstall_files_from_dir)
-install-dist_pythonyaml2DATA: $(dist_pythonyaml2_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_pythonyaml2_DATA)'; test -n "$(pythonyaml2dir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(pythonyaml2dir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(pythonyaml2dir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pythonyaml2dir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(pythonyaml2dir)" || exit $$?; \
- done
-
-uninstall-dist_pythonyaml2DATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_pythonyaml2_DATA)'; test -n "$(pythonyaml2dir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(pythonyaml2dir)'; $(am__uninstall_files_from_dir)
-install-dist_pythonyaml3DATA: $(dist_pythonyaml3_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_pythonyaml3_DATA)'; test -n "$(pythonyaml3dir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(pythonyaml3dir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(pythonyaml3dir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pythonyaml3dir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(pythonyaml3dir)" || exit $$?; \
- done
-
-uninstall-dist_pythonyaml3DATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_pythonyaml3_DATA)'; test -n "$(pythonyaml3dir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(pythonyaml3dir)'; $(am__uninstall_files_from_dir)
-install-dist_third_partyDATA: $(dist_third_party_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_third_party_DATA)'; test -n "$(third_partydir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(third_partydir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(third_partydir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(third_partydir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(third_partydir)" || exit $$?; \
- done
-
-uninstall-dist_third_partyDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_third_party_DATA)'; test -n "$(third_partydir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(third_partydir)'; $(am__uninstall_files_from_dir)
-install-dist_userpythonconfigDATA: $(dist_userpythonconfig_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_userpythonconfig_DATA)'; test -n "$(userpythonconfigdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(userpythonconfigdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(userpythonconfigdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(userpythonconfigdir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(userpythonconfigdir)" || exit $$?; \
- done
-
-uninstall-dist_userpythonconfigDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_userpythonconfig_DATA)'; test -n "$(userpythonconfigdir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(userpythonconfigdir)'; $(am__uninstall_files_from_dir)
-tags TAGS:
-
-ctags CTAGS:
-
-cscope cscopelist:
-
-
-distdir: $(DISTFILES)
- @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- list='$(DISTFILES)'; \
- dist_files=`for file in $$list; do echo $$file; done | \
- sed -e "s|^$$srcdirstrip/||;t" \
- -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
- case $$dist_files in \
- */*) $(MKDIR_P) `echo "$$dist_files" | \
- sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
- sort -u` ;; \
- esac; \
- for file in $$dist_files; do \
- if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
- if test -d $$d/$$file; then \
- dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
- if test -d "$(distdir)/$$file"; then \
- find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
- fi; \
- if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
- cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
- find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
- fi; \
- cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
- else \
- test -f "$(distdir)/$$file" \
- || cp -p $$d/$$file "$(distdir)/$$file" \
- || exit 1; \
- fi; \
- done
-check-am: all-am
-check: check-am
-all-am: Makefile $(SCRIPTS) $(DATA)
-installdirs:
- for dir in "$(DESTDIR)$(pluginsdir)" "$(DESTDIR)$(pythondir)" "$(DESTDIR)$(basesdir)" "$(DESTDIR)$(bases_framework_servicesdir)" "$(DESTDIR)$(libconfigdir)" "$(DESTDIR)$(pythondir)" "$(DESTDIR)$(python_urllib3dir)" "$(DESTDIR)$(python_urllib3_backportsdir)" "$(DESTDIR)$(python_urllib3_contribdir)" "$(DESTDIR)$(python_urllib3_packagesdir)" "$(DESTDIR)$(python_urllib3_securetransportdir)" "$(DESTDIR)$(python_urllib3_ssl_match_hostnamedir)" "$(DESTDIR)$(python_urllib3_utildir)" "$(DESTDIR)$(pythonconfigdir)" "$(DESTDIR)$(pythonmodulesdir)" "$(DESTDIR)$(pythonyaml2dir)" "$(DESTDIR)$(pythonyaml3dir)" "$(DESTDIR)$(third_partydir)" "$(DESTDIR)$(userpythonconfigdir)"; do \
- test -z "$$dir" || $(MKDIR_P) "$$dir"; \
- done
-install: install-am
-install-exec: install-exec-am
-install-data: install-data-am
-uninstall: uninstall-am
-
-install-am: all-am
- @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
-
-installcheck: installcheck-am
-install-strip:
- if test -z '$(STRIP)'; then \
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- install; \
- else \
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
- fi
-mostlyclean-generic:
-
-clean-generic:
- -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES)
-
-distclean-generic:
- -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
- -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
-
-maintainer-clean-generic:
- @echo "This command is intended for maintainers to use"
- @echo "it deletes files that may require special tools to rebuild."
- -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
-clean: clean-am
-
-clean-am: clean-generic mostlyclean-am
-
-distclean: distclean-am
- -rm -f Makefile
-distclean-am: clean-am distclean-generic
-
-dvi: dvi-am
-
-dvi-am:
-
-html: html-am
-
-html-am:
-
-info: info-am
-
-info-am:
-
-install-data-am: install-dist_basesDATA \
- install-dist_bases_framework_servicesDATA \
- install-dist_libconfigDATA install-dist_pluginsSCRIPTS \
- install-dist_pythonDATA install-dist_pythonSCRIPTS \
- install-dist_python_urllib3DATA \
- install-dist_python_urllib3_backportsDATA \
- install-dist_python_urllib3_contribDATA \
- install-dist_python_urllib3_packagesDATA \
- install-dist_python_urllib3_securetransportDATA \
- install-dist_python_urllib3_ssl_match_hostnameDATA \
- install-dist_python_urllib3_utilDATA \
- install-dist_pythonconfigDATA install-dist_pythonmodulesDATA \
- install-dist_pythonyaml2DATA install-dist_pythonyaml3DATA \
- install-dist_third_partyDATA install-dist_userpythonconfigDATA
-
-install-dvi: install-dvi-am
-
-install-dvi-am:
-
-install-exec-am: install-exec-local
-
-install-html: install-html-am
-
-install-html-am:
-
-install-info: install-info-am
-
-install-info-am:
-
-install-man:
-
-install-pdf: install-pdf-am
-
-install-pdf-am:
-
-install-ps: install-ps-am
-
-install-ps-am:
-
-installcheck-am:
-
-maintainer-clean: maintainer-clean-am
- -rm -f Makefile
-maintainer-clean-am: distclean-am maintainer-clean-generic
-
-mostlyclean: mostlyclean-am
-
-mostlyclean-am: mostlyclean-generic
-
-pdf: pdf-am
-
-pdf-am:
-
-ps: ps-am
-
-ps-am:
-
-uninstall-am: uninstall-dist_basesDATA \
- uninstall-dist_bases_framework_servicesDATA \
- uninstall-dist_libconfigDATA uninstall-dist_pluginsSCRIPTS \
- uninstall-dist_pythonDATA uninstall-dist_pythonSCRIPTS \
- uninstall-dist_python_urllib3DATA \
- uninstall-dist_python_urllib3_backportsDATA \
- uninstall-dist_python_urllib3_contribDATA \
- uninstall-dist_python_urllib3_packagesDATA \
- uninstall-dist_python_urllib3_securetransportDATA \
- uninstall-dist_python_urllib3_ssl_match_hostnameDATA \
- uninstall-dist_python_urllib3_utilDATA \
- uninstall-dist_pythonconfigDATA \
- uninstall-dist_pythonmodulesDATA \
- uninstall-dist_pythonyaml2DATA uninstall-dist_pythonyaml3DATA \
- uninstall-dist_third_partyDATA \
- uninstall-dist_userpythonconfigDATA
-
-.MAKE: install-am install-strip
-
-.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
- ctags-am distclean distclean-generic distdir dvi dvi-am html \
- html-am info info-am install install-am install-data \
- install-data-am install-dist_basesDATA \
- install-dist_bases_framework_servicesDATA \
- install-dist_libconfigDATA install-dist_pluginsSCRIPTS \
- install-dist_pythonDATA install-dist_pythonSCRIPTS \
- install-dist_python_urllib3DATA \
- install-dist_python_urllib3_backportsDATA \
- install-dist_python_urllib3_contribDATA \
- install-dist_python_urllib3_packagesDATA \
- install-dist_python_urllib3_securetransportDATA \
- install-dist_python_urllib3_ssl_match_hostnameDATA \
- install-dist_python_urllib3_utilDATA \
- install-dist_pythonconfigDATA install-dist_pythonmodulesDATA \
- install-dist_pythonyaml2DATA install-dist_pythonyaml3DATA \
- install-dist_third_partyDATA install-dist_userpythonconfigDATA \
- install-dvi install-dvi-am install-exec install-exec-am \
- install-exec-local install-html install-html-am install-info \
- install-info-am install-man install-pdf install-pdf-am \
- install-ps install-ps-am install-strip installcheck \
- installcheck-am installdirs maintainer-clean \
- maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
- pdf-am ps ps-am tags-am uninstall uninstall-am \
- uninstall-dist_basesDATA \
- uninstall-dist_bases_framework_servicesDATA \
- uninstall-dist_libconfigDATA uninstall-dist_pluginsSCRIPTS \
- uninstall-dist_pythonDATA uninstall-dist_pythonSCRIPTS \
- uninstall-dist_python_urllib3DATA \
- uninstall-dist_python_urllib3_backportsDATA \
- uninstall-dist_python_urllib3_contribDATA \
- uninstall-dist_python_urllib3_packagesDATA \
- uninstall-dist_python_urllib3_securetransportDATA \
- uninstall-dist_python_urllib3_ssl_match_hostnameDATA \
- uninstall-dist_python_urllib3_utilDATA \
- uninstall-dist_pythonconfigDATA \
- uninstall-dist_pythonmodulesDATA \
- uninstall-dist_pythonyaml2DATA uninstall-dist_pythonyaml3DATA \
- uninstall-dist_third_partyDATA \
- uninstall-dist_userpythonconfigDATA
-
-.PRECIOUS: Makefile
-
-.in:
- if sed \
- -e 's#[@]localstatedir_POST@#$(localstatedir)#g' \
- -e 's#[@]sbindir_POST@#$(sbindir)#g' \
- -e 's#[@]pluginsdir_POST@#$(pluginsdir)#g' \
- -e 's#[@]configdir_POST@#$(configdir)#g' \
- -e 's#[@]libconfigdir_POST@#$(libconfigdir)#g' \
- -e 's#[@]cachedir_POST@#$(cachedir)#g' \
- -e 's#[@]registrydir_POST@#$(registrydir)#g' \
- -e 's#[@]varlibdir_POST@#$(varlibdir)#g' \
- $< > $@.tmp; then \
- mv "$@.tmp" "$@"; \
- else \
- rm -f "$@.tmp"; \
- false; \
- fi
-
-# Explicitly install directories to avoid permission issues due to umask
-install-exec-local:
- $(INSTALL) -d $(DESTDIR)$(userpythonconfigdir)
-
-# Tell versions [3.59,3.63) of GNU make to not export all variables.
-# Otherwise a system limit (for SysV at least) may be exceeded.
-.NOEXPORT:
diff --git a/collectors/python.d.plugin/README.md b/collectors/python.d.plugin/README.md
index f38ab6783..a05bc81dd 100644
--- a/collectors/python.d.plugin/README.md
+++ b/collectors/python.d.plugin/README.md
@@ -1,3 +1,8 @@
+<!--
+title: "python.d.plugin"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/README.md
+-->
+
# python.d.plugin
`python.d.plugin` is a Netdata external plugin. It is an **orchestrator** for data collection modules written in `python`.
@@ -62,6 +67,8 @@ Depending on where Netdata was installed, execute one of the following commands
Where `[module]` is the directory name under <https://github.com/netdata/netdata/tree/master/collectors/python.d.plugin>
+**Note**: If you would like to execute a collector in debug mode while it is still being run by Netdata, you can pass the `nolock` CLI option to the above commands.
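+
+For example, assuming a typical plugin path of `/usr/libexec/netdata` (adjust this for your install), running the `example` collector in debug mode without stopping Netdata might look like:
+
+```bash
+sudo su -s /bin/bash netdata
+/usr/libexec/netdata/plugins.d/python.d.plugin example debug trace nolock
+```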
+
## How to write a new module
Writing new python module is simple. You just need to remember to include 5 major things:
@@ -74,7 +81,27 @@ Writing new python module is simple. You just need to remember to include 5 majo
If you plan to submit the module in a PR, make sure and go through the [PR checklist for new modules](#pull-request-checklist-for-python-plugins) beforehand to make sure you have updated all the files you need to.
-For a quick start, you can look at the [example plugin](example/example.chart.py).
+For a quick start, you can look at the [example
+plugin](https://raw.githubusercontent.com/netdata/netdata/master/collectors/python.d.plugin/example/example.chart.py).
+
+**Note**: If you are working 'locally' on a new collector and would like to run it in an already installed and running Netdata (as opposed to having to reinstall Netdata from source with your new changes), you can copy the relevant file to where Netdata expects it and then either run `sudo service netdata restart` so the change is picked up and used by Netdata, or run the updated collector in debug mode by following a process like the one below (this assumes you have [installed Netdata from a GitHub fork](https://learn.netdata.cloud/docs/agent/packaging/installer/methods/manual) that you are using for your development).
+
+```bash
+# clone your fork (done once at the start but shown here for clarity)
+#git clone --branch my-example-collector https://github.com/mygithubusername/netdata.git --depth=100
+# go into your netdata source folder
+cd netdata
+# git pull your latest changes (assuming you built from a fork you are using to develop on)
+git pull
+# instead of running the installer we can just copy over the updated collector files
+#sudo ./netdata-installer.sh --dont-wait
+# copy over the file you have updated locally (pretending we are working on the 'example' collector)
+sudo cp collectors/python.d.plugin/example/example.chart.py /usr/libexec/netdata/python.d/
+# become user netdata
+sudo su -s /bin/bash netdata
+# run your updated collector in debug mode to see if it works without having to reinstall netdata
+/usr/libexec/netdata/plugins.d/python.d.plugin example debug trace nolock
+```
### Global variables `ORDER` and `CHART`
@@ -197,10 +224,16 @@ For additional security it uses python `subprocess.Popen` (without `shell=True`
_Examples: `apache`, `nginx`, `tomcat`_
+_Multiple Endpoints (URLs) Examples: [`rabbitmq`](/collectors/python.d.plugin/rabbitmq/README.md) (simpler),
+[`elasticsearch`](/collectors/python.d.plugin/elasticsearch/README.md) (threaded)_
+
+
_Variables from config file_: `url`, `user`, `pass`.
If data is grabbed by accessing service via HTTP protocol, this class can be used. It can handle HTTP Basic Auth when specified with `user` and `pass` credentials.
+Please note that the config file can use different variables according to the specification of each module.
+
`_get_raw_data` returns list of utf-8 decoded strings (lines).
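+
+As a rough sketch (the job name, URL, and credentials below are made up for illustration), a job configuration for a `UrlService`-based module could look like this:
+
+```yaml
+myjob:
+  url: 'http://127.0.0.1:8080/status'
+  user: 'someuser'
+  pass: 'somepassword'
+```
+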
### SocketService
@@ -230,5 +263,6 @@ At minimum, to be buildable and testable, the PR needs to include:
- A makefile for the plugin at `collectors/python.d.plugin/<module_dir>/Makefile.inc`. Check an existing plugin for what this should look like.
- A line in `collectors/python.d.plugin/Makefile.am` including the above-mentioned makefile. Place it with the other plugin includes (please keep the includes sorted alphabetically).
- Optionally, chart information in `web/gui/dashboard_info.js`. This generally involves specifying a name and icon for the section, and may include descriptions for the section or individual charts.
+- Optionally, some default alarm configurations for your collector in `health/health.d/<module_name>.conf` and a line adding `<module_name>.conf` in `health/Makefile.am`.
[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/adaptec_raid/README.md b/collectors/python.d.plugin/adaptec_raid/README.md
index 127d595b2..d35ccecbc 100644
--- a/collectors/python.d.plugin/adaptec_raid/README.md
+++ b/collectors/python.d.plugin/adaptec_raid/README.md
@@ -1,12 +1,24 @@
-# adaptec raid
+<!--
+title: "Adaptec RAID controller monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/adaptec_raid/README.md
+sidebar_label: "Adaptec RAID"
+-->
-Module collects logical and physical devices health metrics.
+# Adaptec RAID controller monitoring with Netdata
-**Requirements:**
+Collects logical and physical device metrics.
-- `arcconf` program
-- `sudo` program
-- `netdata` user needs to be able to sudo the `arcconf` program without password
+## Requirements
+
+The module uses `arcconf`, which can only be executed by root. It uses
+`sudo` and assumes that it is configured such that the `netdata` user can
+execute `arcconf` as root without a password.
+
+Add to `sudoers`:
+
+```
+netdata ALL=(root) NOPASSWD: /path/to/arcconf
+```
To grab stats it executes:
@@ -23,27 +35,23 @@ It produces:
4. **Physical Device Temperature**
-## prerequisite
+## Configuration
-This module uses `arcconf` which can only be executed by root. It uses
-`sudo` and assumes that it is configured such that the `netdata` user can
-execute `arcconf` as root without password.
+**adaptec_raid** is disabled by default. It should be explicitly enabled in `python.d.conf`.
-Add to `sudoers`:
-
-```
-netdata ALL=(root) NOPASSWD: /path/to/arcconf
+```yaml
+adaptec_raid: yes
```
-## configuration
-
- **adaptec_raid** is disabled by default. Should be explicitly enabled in `python.d.conf`.
+Edit the `python.d/adaptec_raid.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
-```yaml
-adaptec_raid: yes
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/adaptec_raid.conf
```
-### Screenshot:
+
![image](https://user-images.githubusercontent.com/22274335/47278133-6d306680-d601-11e8-87c2-cc9c0f42d686.png)
diff --git a/collectors/python.d.plugin/adaptec_raid/adaptec_raid.chart.py b/collectors/python.d.plugin/adaptec_raid/adaptec_raid.chart.py
index 3fcb5fda8..564c2ce87 100644
--- a/collectors/python.d.plugin/adaptec_raid/adaptec_raid.chart.py
+++ b/collectors/python.d.plugin/adaptec_raid/adaptec_raid.chart.py
@@ -5,13 +5,11 @@
import re
-
from copy import deepcopy
from bases.FrameworkServices.ExecutableService import ExecutableService
from bases.collection import find_binary
-
disabled_by_default = True
update_every = 5
diff --git a/collectors/python.d.plugin/unbound/Makefile.inc b/collectors/python.d.plugin/alarms/Makefile.inc
index 59c306aed..c2de11724 100644
--- a/collectors/python.d.plugin/unbound/Makefile.inc
+++ b/collectors/python.d.plugin/alarms/Makefile.inc
@@ -5,9 +5,9 @@
# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
# install these files
-dist_python_DATA += unbound/unbound.chart.py
-dist_pythonconfig_DATA += unbound/unbound.conf
+dist_python_DATA += alarms/alarms.chart.py
+dist_pythonconfig_DATA += alarms/alarms.conf
# do not install these files, but include them in the distribution
-dist_noinst_DATA += unbound/README.md unbound/Makefile.inc
+dist_noinst_DATA += alarms/README.md alarms/Makefile.inc
diff --git a/collectors/python.d.plugin/alarms/README.md b/collectors/python.d.plugin/alarms/README.md
new file mode 100644
index 000000000..ea96061cc
--- /dev/null
+++ b/collectors/python.d.plugin/alarms/README.md
@@ -0,0 +1,58 @@
+<!--
+title: "Alarms"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/alarms/README.md
+-->
+
+# Alarms - graphing Netdata alarm states over time
+
+This collector creates an 'Alarms' menu with one line plot showing alarm states over time. Alarm states are mapped to integer values according to the default mapping below. Any alarm status types not in this mapping will be ignored (Note: This mapping can be changed by editing the `status_map` in the `alarms.conf` file). If you would like to learn more about the different alarm statuses, check out the docs [here](https://learn.netdata.cloud/docs/agent/health/reference#alarm-statuses).
+
+```
+{
+ 'CLEAR': 0,
+ 'WARNING': 1,
+ 'CRITICAL': 2
+}
+```
+
+## Charts
+
+Below is an example of the chart produced when running `stress-ng --all 2` for a few minutes. You can see the various warning and critical alarms raised.
+
+![alarms collector](https://user-images.githubusercontent.com/1153921/101641493-0b086a80-39ef-11eb-9f55-0713e5dfb19f.png)
+
+## Configuration
+
+Enable the collector and restart Netdata.
+
+```bash
+cd /etc/netdata/
+sudo ./edit-config python.d.conf
+# Set `alarms: no` to `alarms: yes`
+sudo systemctl restart netdata
+```
+
+If needed, edit the `python.d/alarms.conf` configuration file using `edit-config` from your agent's [config
+directory](/docs/configure/nodes.md), which is usually at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/alarms.conf
+```
+
+The `alarms` specific part of the `alarms.conf` file should look like this:
+
+```yaml
+# what url to pull data from
+local:
+ url: 'http://127.0.0.1:19999/api/v1/alarms?all'
+ # define how to map alarm status to numbers for the chart
+ status_map:
+ CLEAR: 0
+ WARNING: 1
+ CRITICAL: 2
+```
+
+By default, it pulls all alarms at each time step from the Netdata REST API at `http://127.0.0.1:19999/api/v1/alarms?all`.
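+
+To see the raw data this collector works from, you can query that endpoint directly (adjust the host and port if your agent listens elsewhere):
+
+```bash
+curl -s 'http://127.0.0.1:19999/api/v1/alarms?all'
+```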
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Falarms%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/alarms/alarms.chart.py b/collectors/python.d.plugin/alarms/alarms.chart.py
new file mode 100644
index 000000000..973a1f382
--- /dev/null
+++ b/collectors/python.d.plugin/alarms/alarms.chart.py
@@ -0,0 +1,71 @@
+# -*- coding: utf-8 -*-
+# Description: alarms netdata python.d module
+# Author: andrewm4894
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from json import loads
+
+from bases.FrameworkServices.UrlService import UrlService
+
+update_every = 10
+disabled_by_default = True
+
+
+def charts_template(sm):
+ order = [
+ 'alarms',
+ ]
+
+ mappings = ', '.join(['{0}={1}'.format(k, v) for k, v in sm.items()])
+ charts = {
+ 'alarms': {
+ 'options': [None, 'Alarms ({0})'.format(mappings), 'status', 'alarms', 'alarms.status', 'line'],
+ 'lines': [],
+ 'variables': [
+ ['alarms_num'],
+ ]
+ }
+ }
+ return order, charts
+
+
+DEFAULT_STATUS_MAP = {'CLEAR': 0, 'WARNING': 1, 'CRITICAL': 2}
+
+DEFAULT_URL = 'http://127.0.0.1:19999/api/v1/alarms?all'
+
+
+class Service(UrlService):
+ def __init__(self, configuration=None, name=None):
+ UrlService.__init__(self, configuration=configuration, name=name)
+ self.sm = self.configuration.get('status_map', DEFAULT_STATUS_MAP)
+ self.order, self.definitions = charts_template(self.sm)
+ self.url = self.configuration.get('url', DEFAULT_URL)
+ self.collected_alarms = set()
+
+ def _get_data(self):
+ raw_data = self._get_raw_data()
+ if raw_data is None:
+ return None
+
+ raw_data = loads(raw_data)
+ alarms = raw_data.get('alarms', {})
+
+ data = {a: self.sm[alarms[a]['status']] for a in alarms if alarms[a]['status'] in self.sm}
+ self.update_charts(alarms, data)
+ data['alarms_num'] = len(data)
+
+ return data
+
+ def update_charts(self, alarms, data):
+ if not self.charts:
+ return
+
+ for a in data:
+ if a not in self.collected_alarms:
+ self.collected_alarms.add(a)
+ self.charts['alarms'].add_dimension([a, a, 'absolute', '1', '1'])
+
+ for a in list(self.collected_alarms):
+ if a not in alarms:
+ self.collected_alarms.remove(a)
+ self.charts['alarms'].del_dimension(a, hide=False)
diff --git a/collectors/python.d.plugin/alarms/alarms.conf b/collectors/python.d.plugin/alarms/alarms.conf
new file mode 100644
index 000000000..fd7780c59
--- /dev/null
+++ b/collectors/python.d.plugin/alarms/alarms.conf
@@ -0,0 +1,50 @@
+# netdata python.d.plugin configuration for alarms
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 10
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+
+# what url to pull data from
+local:
+ url: 'http://127.0.0.1:19999/api/v1/alarms?all'
+ # define how to map alarm status to numbers for the chart
+ status_map:
+ CLEAR: 0
+ WARNING: 1
+ CRITICAL: 2
diff --git a/collectors/python.d.plugin/am2320/README.md b/collectors/python.d.plugin/am2320/README.md
index 709575221..14ddaa735 100644
--- a/collectors/python.d.plugin/am2320/README.md
+++ b/collectors/python.d.plugin/am2320/README.md
@@ -1,7 +1,14 @@
-# AM2320
-This module will display a graph of the temperture and humity from a AM2320 sensor.
+<!--
+title: "AM2320 sensor monitoring with netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/am2320/README.md
+sidebar_label: "AM2320"
+-->
-**Requirements:**
+# AM2320 sensor monitoring with Netdata
+
+Displays a graph of the temperature and humidity from an AM2320 sensor.
+
+## Requirements
- Adafruit Circuit Python AM2320 library
- Adafruit AM2320 I2C sensor
- Python 3 (Adafruit libraries are not Python 2.x compatible)
@@ -11,12 +18,20 @@ It produces the following charts:
1. **Temperature**
2. **Humidity**
-## configuration
+## Configuration
+
+Edit the `python.d/am2320.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
-Raspbery Pi Instructions:
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/am2320.conf
+```
+
+Raspberry Pi Instructions:
Hardware install:
-Connect the am2320 to the Raspbery Pi I2C pins
+Connect the am2320 to the Raspberry Pi I2C pins
Raspberry Pi 3B/4 Pins:
@@ -25,7 +40,7 @@ Raspberry Pi 3B/4 Pins:
- Board GND (pin 6) to sensor GND (pin 3)
- Board SCL (pin 5) to sensor SCL (pin 4)
-You may also need to add two I2C pullup resistors if your board does not already have them. The Raspberry Pi does have internal pullup resistors but it doesnt hurt to add them anyway. You can use 2.2K - 10K but we will just use 10K. The resistors go from VDD to SCL and SDA each.
+You may also need to add two I2C pullup resistors if your board does not already have them. The Raspberry Pi does have internal pullup resistors but it doesn't hurt to add them anyway. You can use 2.2K - 10K but we will just use 10K. The resistors go from VDD to SCL and SDA each.
Software install:
- `sudo pip3 install adafruit-circuitpython-am2320`
@@ -35,3 +50,5 @@ Software install:
- save the file.
- restart the netdata service.
- check the dashboard.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fam2320%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/am2320/am2320.chart.py b/collectors/python.d.plugin/am2320/am2320.chart.py
index c15e16eee..8e66544bd 100644
--- a/collectors/python.d.plugin/am2320/am2320.chart.py
+++ b/collectors/python.d.plugin/am2320/am2320.chart.py
@@ -7,14 +7,13 @@ try:
import board
import busio
import adafruit_am2320
+
HAS_AM2320 = True
except ImportError:
HAS_AM2320 = False
-
from bases.FrameworkServices.SimpleService import SimpleService
-
ORDER = [
'temperature',
'humidity',
@@ -60,9 +59,9 @@ class Service(SimpleService):
def get_data(self):
try:
return {
- 'temperature': self.am.temperature,
- 'humidity': self.am.relative_humidity,
- }
+ 'temperature': self.am.temperature,
+ 'humidity': self.am.relative_humidity,
+ }
except (OSError, RuntimeError) as error:
self.error(error)
diff --git a/collectors/python.d.plugin/am2320/am2320.conf b/collectors/python.d.plugin/am2320/am2320.conf
index 982f5cd0a..c6b9885fc 100644
--- a/collectors/python.d.plugin/am2320/am2320.conf
+++ b/collectors/python.d.plugin/am2320/am2320.conf
@@ -1,4 +1,4 @@
-# netdata python.d.plugin configuration for am2320 temperture/humity sensor
+# netdata python.d.plugin configuration for am2320 temperature/humidity sensor
#
# This file is in YaML format. Generally the format is:
#
diff --git a/collectors/python.d.plugin/anomalies/Makefile.inc b/collectors/python.d.plugin/anomalies/Makefile.inc
new file mode 100644
index 000000000..94937b36a
--- /dev/null
+++ b/collectors/python.d.plugin/anomalies/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += anomalies/anomalies.chart.py
+dist_pythonconfig_DATA += anomalies/anomalies.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += anomalies/README.md anomalies/Makefile.inc
+
diff --git a/collectors/python.d.plugin/anomalies/README.md b/collectors/python.d.plugin/anomalies/README.md
new file mode 100644
index 000000000..862f4f345
--- /dev/null
+++ b/collectors/python.d.plugin/anomalies/README.md
@@ -0,0 +1,231 @@
+<!--
+title: "Anomaly detection with Netdata"
+description: "Use ML-driven anomaly detection to narrow your focus to only affected metrics and services/processes on your node to shorten root cause analysis."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/anomalies/README.md
+sidebar_label: "Anomalies"
+-->
+
+# Anomaly detection with Netdata
+
+This collector uses the Python [PyOD](https://pyod.readthedocs.io/en/latest/index.html) library to perform unsupervised [anomaly detection](https://en.wikipedia.org/wiki/Anomaly_detection) on your Netdata charts and/or dimensions.
+
+Rather than just _collecting_ data, this collector also does some computation on the data it collects to return an anomaly probability and anomaly flag for each chart or custom model you define. A **train** function runs every `train_every_n` iterations and trains the ML models on the last `train_n_secs` of data, so they learn what 'normal' typically looks like on your node. At each iteration a **predict** function then uses the latest trained models and the most recent metrics to produce an anomaly probability and anomaly flag for each chart or custom model you define.
+
+> As this is a somewhat unique collector and involves often subjective concepts like anomalies and anomaly probabilities, we would love to hear any feedback on it from the community. Please let us know on the [community forum](https://community.netdata.cloud/t/anomalies-collector-feedback-megathread/767) or drop us a note at [analytics-ml-team@netdata.cloud](mailto:analytics-ml-team@netdata.cloud) for any and all feedback, both positive and negative. This sort of feedback is priceless to help us make complex features more useful.
+
+## Charts
+
+Two charts are produced:
+
+- **Anomaly Probability** (`anomalies.probability`): This chart shows the probability that the latest observed data is anomalous based on the trained model for that chart (using the [`predict_proba()`](https://pyod.readthedocs.io/en/latest/api_cc.html#pyod.models.base.BaseDetector.predict_proba) method of the trained PyOD model).
+- **Anomaly** (`anomalies.anomaly`): This chart shows `1` or `0` predictions of whether the latest observed data is considered anomalous or not, based on the trained model (using the [`predict()`](https://pyod.readthedocs.io/en/latest/api_cc.html#pyod.models.base.BaseDetector.predict) method of the trained PyOD model).
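+
+If it helps to see what those two methods look like in isolation, here is a minimal, illustrative sketch (not the collector's actual code) of a PyOD model producing both values; the random data and the choice of the `PCA` model here are just assumptions for the example:
+
+```python
+import numpy as np
+from pyod.models.pca import PCA
+
+np.random.seed(42)
+X_train = np.random.normal(size=(1000, 4))      # data representing 'normal' behaviour
+X_latest = np.random.normal(size=(1, 4)) + 5.0  # an obviously unusual observation
+
+model = PCA(contamination=0.001)
+model.fit(X_train)
+
+# probability the latest observation is anomalous -> feeds anomalies.probability
+probability = model.predict_proba(X_latest)[-1][1]
+# hard 0/1 anomaly flag -> feeds anomalies.anomaly
+flag = model.predict(X_latest)[-1]
+print(probability, flag)
+```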
+
+Below is an example of the charts produced by this collector and how they might look when things are 'normal' on the node. The anomaly probabilities tend to bounce randomly around a typically low probability range; every now and then one or two might jump or drift outside of this range and show up as anomalies on the anomaly chart.
+
+![netdata-anomalies-collector-normal](https://user-images.githubusercontent.com/2178292/100663699-99755000-334e-11eb-922f-0c41a0176484.jpg)
+
+If we then log onto the system and run a command like `stress-ng --all 2` to create some [stress](https://wiki.ubuntu.com/Kernel/Reference/stress-ng), we see some charts begin to show anomaly probabilities that jump outside the typical range. When the anomaly probabilities change enough, anomalies start being flagged on the `anomalies.anomaly` chart. The idea is that these charts are the most anomalous right now, so they could be a good place to start your troubleshooting.
+
+![netdata-anomalies-collector-abnormal](https://user-images.githubusercontent.com/2178292/100663710-9bd7aa00-334e-11eb-9d14-76fda73bc309.jpg)
+
+Then, as the issue passes, the anomaly probabilities should settle back down into their 'normal' range again.
+
+![netdata-anomalies-collector-normal-again](https://user-images.githubusercontent.com/2178292/100666681-481a9000-3351-11eb-9979-64728ee2dfb6.jpg)
+
+## Requirements
+
+- This collector will only work with Python 3 and requires the packages below to be installed.
+
+```bash
+# become netdata user
+sudo su -s /bin/bash netdata
+# install required packages for the netdata user
+pip3 install --user netdata-pandas==0.0.32 numba==0.50.1 scikit-learn==0.23.2 pyod==0.8.3
+```
+
+## Configuration
+
+Install the Python requirements above, enable the collector and restart Netdata.
+
+```bash
+cd /etc/netdata/
+sudo ./edit-config python.d.conf
+# Set `anomalies: no` to `anomalies: yes`
+sudo systemctl restart netdata
+```
+
+The configuration for the anomalies collector defines how it will behave on your system and might take some experimentation over time to set optimally for your node. Out of the box, the config comes with some [sane defaults](https://www.netdata.cloud/blog/redefining-monitoring-netdata/) to get you started; they try to balance the flexibility and power of the ML models against the goal of being as cheap as possible in terms of cost on the node's resources.
+
+_**Note**: If you are unsure about any of the below configuration options then it's best to just ignore all this and leave the `anomalies.conf` file alone to begin with. Then you can return to it later if you would like to tune things a bit more once the collector is running for a while and you have a feeling for its performance on your node._
+
+Edit the `python.d/anomalies.conf` configuration file using `edit-config` from your agent's [config
+directory](/docs/configure/nodes.md), which is usually at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/anomalies.conf
+```
+
+The default configuration should look something like this. Each parameter is shown with its sane default and a short comment explaining what it does.
+
+```yaml
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+
+# Pull data from local Netdata node.
+local:
+ name: 'local'
+
+ # Host to pull data from.
+ host: '127.0.0.1:19999'
+
+ # Username and Password for Netdata if using basic auth.
+ # username: '???'
+ # password: '???'
+
+ # Use http or https to pull data
+ protocol: 'http'
+
+ # What charts to pull data for - A regex like 'system\..*|' or 'system\..*|apps.cpu|apps.mem' etc.
+ charts_regex: 'system\..*'
+
+ # Charts to exclude, useful if you would like to exclude some specific charts.
+ # Note: should be a ',' separated string like 'chart.name,chart.name'.
+ charts_to_exclude: 'system.uptime,system.entropy'
+
+ # What model to use - can be one of 'pca', 'hbos', 'iforest', 'cblof', 'loda', 'copod' or 'feature_bagging'.
+ # More details here: https://pyod.readthedocs.io/en/latest/pyod.models.html.
+ model: 'pca'
+
+ # Max number of observations to train on, to help cap compute cost of training model if you set a very large train_n_secs.
+ train_max_n: 100000
+
+ # How often to re-train the model (assuming update_every=1 then train_every_n=1800 represents (re)training every 30 minutes).
+ # Note: If you want to turn off re-training set train_every_n=0 and after initial training the models will not be retrained.
+ train_every_n: 1800
+
+ # The length of the window of data to train on (14400 = last 4 hours).
+ train_n_secs: 14400
+
+ # How many prediction steps after a train event to just use previous prediction value for.
+ # Used to reduce possibility of the training step itself appearing as an anomaly on the charts.
+ train_no_prediction_n: 10
+
+ # If you would like to train the model for the first time on a specific window then you can define it using the below two variables.
+ # Start of training data for initial model.
+ # initial_train_data_after: 1604578857
+
+ # End of training data for initial model.
+ # initial_train_data_before: 1604593257
+
+ # If you would like to ignore recent data in training then you can offset it by offset_n_secs.
+ offset_n_secs: 0
+
+ # How many lagged values of each dimension to include in the 'feature vector' each model is trained on.
+ lags_n: 5
+
+ # How much smoothing to apply to each dimension in the 'feature vector' each model is trained on.
+ smooth_n: 3
+
+ # How many differences to take in preprocessing your data.
+ # More info on differencing here: https://en.wikipedia.org/wiki/Autoregressive_integrated_moving_average#Differencing
+ # diffs_n=0 would mean training models on the raw values of each dimension.
+ # diffs_n=1 means everything is done in terms of differences.
+ diffs_n: 1
+
+ # What is the typical proportion of anomalies in your data on average?
+ # This parameter can control the sensitivity of your models to anomalies.
+ # Some discussion here: https://github.com/yzhao062/pyod/issues/144
+ contamination: 0.001
+
+ # Set to true to include an "average_prob" dimension on anomalies probability chart which is
+ # just the average of all anomaly probabilities at each time step
+ include_average_prob: true
+
+ # Define any custom models you would like to create anomaly probabilities for; some examples below show how.
+ # For example, the commented config below creates two custom models, one to run anomaly detection on user and system cpu for our demo servers
+ # and one on the cpu and mem apps metrics for the python.d.plugin.
+ # custom_models:
+ # - name: 'demos_cpu'
+ # dimensions: 'london.my-netdata.io::system.cpu|user,london.my-netdata.io::system.cpu|system,newyork.my-netdata.io::system.cpu|user,newyork.my-netdata.io::system.cpu|system'
+ # - name: 'apps_python_d_plugin'
+ # dimensions: 'apps.cpu|python.d.plugin,apps.mem|python.d.plugin'
+
+ # Set to true to normalize, using min-max standardization, features used for the custom models.
+ # Useful if your custom models contain dimensions on very different scales and the model you use does
+ # not internally do its own normalization. Usually best to leave as false.
+ # custom_models_normalize: false
+```
+
+## Custom models
+
+In the `anomalies.conf` file you can also define some "custom models", which you can use to group one or more metrics into a single model, much as is done by default for the charts you specify. This is useful if you have a handful of metrics that exist in different charts but relate to the same underlying thing you would like to perform anomaly detection on, for example a specific app or user.
+
+To define a custom model you would include configuration like below in `anomalies.conf`. By default there should already be some commented out examples in there.
+
+`name` is a name you give your custom model; it is what will appear alongside any other specified charts in the `anomalies.probability` and `anomalies.anomaly` charts. `dimensions` is a string of metrics you want to include in your custom model. By default, the [netdata-pandas](https://github.com/netdata/netdata-pandas) library used to pull the data from Netdata uses a "chart.a|dim.1" naming convention in the pandas columns it returns, hence the `dimensions` string should look like "chart.name|dimension.name,chart.name|dimension.name". The examples below, and the short sketch after them, hopefully make this clear.
+
+```yaml
+custom_models:
+ # a model for anomaly detection on the netdata user in terms of cpu, mem, threads, processes and sockets.
+ - name: 'user_netdata'
+ dimensions: 'users.cpu|netdata,users.mem|netdata,users.threads|netdata,users.processes|netdata,users.sockets|netdata'
+ # a model for anomaly detection on the netdata python.d.plugin app in terms of cpu, mem, threads, processes and sockets.
+ - name: 'apps_python_d_plugin'
+ dimensions: 'apps.cpu|python.d.plugin,apps.mem|python.d.plugin,apps.threads|python.d.plugin,apps.processes|python.d.plugin,apps.sockets|python.d.plugin'
+
+custom_models_normalize: false
+```
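+
+If you want to check what these column names look like on your own node, a quick sketch like the one below (not part of the collector) can help. It uses `get_data()` with keyword arguments taken from the collector's own calls; the host address, the `system.cpu` chart and `host_prefix=False` (so the plain "chart|dimension" names are shown) are assumptions for this example:
+
+```python
+import time
+
+from netdata_pandas.data import get_data
+
+now = int(time.time())
+# pull the last 60 seconds of system.cpu from a local agent
+df = get_data(
+    host_charts_dict={'127.0.0.1:19999': ['system.cpu']},
+    host_prefix=False, host_sep='::',
+    after=now - 60, before=now,
+)
+# columns follow the 'chart.name|dimension.name' convention, e.g.
+# ['system.cpu|user', 'system.cpu|system', 'system.cpu|idle', ...]
+print(df.columns.tolist())
+```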
+
+## Troubleshooting
+
+To see any relevant log messages, you can use a command like the one below.
+
+```bash
+grep 'anomalies' /var/log/netdata/error.log
+```
+
+If you would like to see more detail, you can log in as the `netdata` user and run the collector in debug mode:
+
+```bash
+# become netdata user
+sudo su -s /bin/bash netdata
+# run collector in debug using `nolock` option if netdata is already running the collector itself.
+/usr/libexec/netdata/plugins.d/python.d.plugin anomalies debug trace nolock
+```
+
+## Deepdive tutorial
+
+If you would like to go deeper on what exactly the anomalies collector is doing under the hood, check out this [deepdive tutorial](https://github.com/netdata/community/blob/main/netdata-agent-api/netdata-pandas/anomalies_collector_deepdive.ipynb) in our community repo, where you can play around with some data from our demo servers (or your own if it's accessible to you) and work through the calculations step by step.
+
+(Note: as it's a Jupyter Notebook, it might render a little prettier on [nbviewer](https://nbviewer.jupyter.org/github/netdata/community/blob/main/netdata-agent-api/netdata-pandas/anomalies_collector_deepdive.ipynb).)
+
+## Notes
+
+- Python 3 is required as the [`netdata-pandas`](https://github.com/netdata/netdata-pandas) package uses Python async libraries ([asks](https://pypi.org/project/asks/) and [trio](https://pypi.org/project/trio/)) to make asynchronous calls to the [Netdata REST API](https://learn.netdata.cloud/docs/agent/web/api) to get the required data for each chart.
+- Python 3 is also required for the underlying ML libraries of [numba](https://pypi.org/project/numba/), [scikit-learn](https://pypi.org/project/scikit-learn/), and [PyOD](https://pypi.org/project/pyod/).
+- It may take a few hours or so (depending on your choice of `train_n_secs`) for the collector to 'settle' into its typical behaviour in terms of the trained models and probabilities you will see in the normal running of your node.
+- As this collector does most of the work in Python itself, with [PyOD](https://pyod.readthedocs.io/en/latest/) leveraging [numba](https://numba.pydata.org/) under the hood, you may want to try it out first on a test or development system to get a sense of its performance characteristics on a node similar to where you would like to use it.
+- `lags_n`, `smooth_n`, and `diffs_n` together define the preprocessing done to the raw data before models are trained and before each prediction. This essentially creates a [feature vector](https://en.wikipedia.org/wiki/Feature_(machine_learning)#:~:text=In%20pattern%20recognition%20and%20machine,features%20that%20represent%20some%20object.&text=Feature%20vectors%20are%20often%20combined,score%20for%20making%20a%20prediction.) for each chart model (or each custom model). The default settings for these parameters aim to create a rolling matrix of recent smoothed [differenced](https://en.wikipedia.org/wiki/Autoregressive_integrated_moving_average#Differencing) values for each chart. The aim of the model then is to score how unusual this 'matrix' of features is for each chart based on what it has learned as 'normal' from the training data. So as opposed to just looking at the single most recent value of a dimension and considering how strange it is, this approach looks at a recent smoothed window of all dimensions for a chart (or dimensions in a custom model) and asks how unusual the data as a whole looks. This should be more flexible in capturing a wider range of [anomaly types](https://andrewm4894.com/2020/10/19/different-types-of-time-series-anomalies/) and be somewhat more robust to temporary 'spikes' in the data that tend to always be happening somewhere in your metrics but often are not the most important type of anomaly (this is all covered in a lot more detail in the [deepdive tutorial](https://nbviewer.jupyter.org/github/netdata/community/blob/main/netdata-agent-api/netdata-pandas/anomalies_collector_deepdive.ipynb)). A rough, illustrative sketch of this preprocessing is included after this list.
+- You can see how long model training is taking by looking in the collector logs with `grep 'anomalies' /var/log/netdata/error.log | grep 'training'`; you should see lines like `2020-12-01 22:02:14: python.d INFO: anomalies[local] : training complete in 2.81 seconds (runs_counter=2700, model=pca, train_n_secs=14400, models=26, n_fit_success=26, n_fit_fails=0, after=1606845731, before=1606860131).`.
+ - This also gives counts of the number of models, if any, that failed to fit and so had to default back to the DefaultModel (which is currently [HBOS](https://pyod.readthedocs.io/en/latest/_modules/pyod/models/hbos.html)).
+ - `after` and `before` here refer to the start and end of the training data used to train the models.
+- On a development n1-standard-2 (2 vCPUs, 7.5 GB memory) VM running Ubuntu 18.04 LTS and not doing any other work, some of the typical performance characteristics we saw from running this collector (with defaults) were:
+  - A runtime (`netdata.runtime_anomalies`) of ~80ms when doing scoring and ~3 seconds when training or retraining the models.
+  - Typically ~3%-3.5% additional CPU usage from scoring, jumping to ~60% for a couple of seconds during model training.
+  - About ~150 MB of RAM (`apps.mem`) being continually used by the `python.d.plugin`.
+- If you activate this collector on a fresh node, it might take a little while to build up enough data to calculate a realistic and useful model.
+- Some models like `iforest` can be comparatively expensive (on the same n1-standard-2 system above: ~2s runtime during predict, ~40s training time, ~50% CPU on both train and predict), so if you would like to use one of them you may want to set a relatively high `update_every`, maybe 10, 15 or 30, in `anomalies.conf`.
+- Setting a higher `train_every_n` and `update_every` is an easy way to devote fewer node resources to anomaly detection. Specifying fewer charts and a lower `train_n_secs` will also help reduce resource usage, at the expense of covering fewer charts and possibly a noisier model if you set `train_n_secs` too small for how your node tends to behave.
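+
+To make the `lags_n`, `smooth_n` and `diffs_n` preprocessing described above a little more concrete, below is a rough, self-contained sketch (not the collector's exact implementation) of what differencing, smoothing and lagging do to a small, made-up array of raw values:
+
+```python
+import numpy as np
+
+# rows = time steps, columns = dimensions of one chart (values made up for illustration)
+raw = np.array([[1.0, 10.0],
+                [2.0, 12.0],
+                [2.0, 11.0],
+                [3.0, 13.0],
+                [5.0, 18.0],
+                [6.0, 20.0]])
+
+diffs_n, smooth_n, lags_n = 1, 3, 2
+
+# diffs_n=1: work with step-to-step changes rather than raw values
+arr = np.diff(raw, diffs_n, axis=0)
+
+# smooth_n=3: rolling mean over the last 3 differenced values
+arr = np.cumsum(arr, axis=0, dtype=float)
+arr[smooth_n:] = arr[smooth_n:] - arr[:-smooth_n]
+arr = arr[smooth_n - 1:] / smooth_n
+
+# lags_n=2: append the previous 2 feature rows as extra columns, so each row
+# describes a small recent window of all dimensions rather than a single step
+arr_orig = np.copy(arr)
+for lag_n in range(1, lags_n + 1):
+    lagged = np.roll(arr_orig, lag_n, axis=0)
+    lagged[:lag_n] = np.nan
+    arr = np.concatenate((arr, lagged), axis=1)
+arr = arr[~np.isnan(arr).any(axis=1)]
+
+print(arr)  # the 'feature vector' rows a model is trained on and scored against
+```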
+
+## Useful links and further reading
+
+- [PyOD documentation](https://pyod.readthedocs.io/en/latest/), [PyOD Github](https://github.com/yzhao062/pyod).
+- [Anomaly Detection](https://en.wikipedia.org/wiki/Anomaly_detection) wikipedia page.
+- [Anomaly Detection YouTube playlist](https://www.youtube.com/playlist?list=PL6Zhl9mK2r0KxA6rB87oi4kWzoqGd5vp0) maintained by [andrewm4894](https://github.com/andrewm4894/) from Netdata.
+- [awesome-TS-anomaly-detection](https://github.com/rob-med/awesome-TS-anomaly-detection) Github list of useful tools, libraries and resources.
+- [Mendeley public group](https://www.mendeley.com/community/interesting-anomaly-detection-papers/) with some interesting anomaly detection papers we have been reading.
+- Good [blog post](https://www.anodot.com/blog/what-is-anomaly-detection/) from Anodot on time series anomaly detection. Anodot also have some great whitepapers in this space too that some may find useful.
+- Novelty and outlier detection in the [scikit-learn documentation](https://scikit-learn.org/stable/modules/outlier_detection.html).
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fanomalies%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/anomalies/anomalies.chart.py b/collectors/python.d.plugin/anomalies/anomalies.chart.py
new file mode 100644
index 000000000..97dbb1d1e
--- /dev/null
+++ b/collectors/python.d.plugin/anomalies/anomalies.chart.py
@@ -0,0 +1,349 @@
+# -*- coding: utf-8 -*-
+# Description: anomalies netdata python.d module
+# Author: andrewm4894
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import time
+from datetime import datetime
+import re
+import warnings
+
+import requests
+import numpy as np
+import pandas as pd
+from netdata_pandas.data import get_data, get_allmetrics_async
+from pyod.models.hbos import HBOS
+from pyod.models.pca import PCA
+from pyod.models.loda import LODA
+from pyod.models.iforest import IForest
+from pyod.models.cblof import CBLOF
+from pyod.models.feature_bagging import FeatureBagging
+from pyod.models.copod import COPOD
+from sklearn.preprocessing import MinMaxScaler
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+# ignore some sklearn/numpy warnings that are ok
+warnings.filterwarnings('ignore', r'All-NaN slice encountered')
+warnings.filterwarnings('ignore', r'invalid value encountered in true_divide')
+warnings.filterwarnings('ignore', r'divide by zero encountered in true_divide')
+warnings.filterwarnings('ignore', r'invalid value encountered in subtract')
+
+disabled_by_default = True
+
+ORDER = ['probability', 'anomaly']
+
+CHARTS = {
+ 'probability': {
+ 'options': ['probability', 'Anomaly Probability', 'probability', 'anomalies', 'anomalies.probability', 'line'],
+ 'lines': []
+ },
+ 'anomaly': {
+ 'options': ['anomaly', 'Anomaly', 'count', 'anomalies', 'anomalies.anomaly', 'stacked'],
+ 'lines': []
+ },
+}
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.basic_init()
+ self.charts_init()
+ self.custom_models_init()
+ self.model_params_init()
+ self.models_init()
+
+ def check(self):
+ _ = get_allmetrics_async(
+ host_charts_dict=self.host_charts_dict, host_prefix=True, host_sep='::', wide=True, sort_cols=True,
+ protocol=self.protocol, numeric_only=True, float_size='float32', user=self.username, pwd=self.password
+ )
+ return True
+
+ def basic_init(self):
+ """Perform some basic initialization.
+ """
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.protocol = self.configuration.get('protocol', 'http')
+ self.host = self.configuration.get('host', '127.0.0.1:19999')
+ self.username = self.configuration.get('username', None)
+ self.password = self.configuration.get('password', None)
+ self.fitted_at = {}
+ self.df_allmetrics = pd.DataFrame()
+ self.data_latest = {}
+ self.last_train_at = 0
+ self.include_average_prob = bool(self.configuration.get('include_average_prob', True))
+
+ def charts_init(self):
+ """Do some initialisation of charts in scope related variables.
+ """
+ self.charts_regex = re.compile(self.configuration.get('charts_regex','None'))
+ self.charts_available = [c for c in list(requests.get(f'{self.protocol}://{self.host}/api/v1/charts').json().get('charts', {}).keys())]
+ self.charts_in_scope = list(filter(self.charts_regex.match, self.charts_available))
+ self.charts_to_exclude = self.configuration.get('charts_to_exclude', '').split(',')
+ if len(self.charts_to_exclude) > 0:
+ self.charts_in_scope = [c for c in self.charts_in_scope if c not in self.charts_to_exclude]
+
+ def custom_models_init(self):
+ """Perform initialization steps related to custom models.
+ """
+ self.custom_models = self.configuration.get('custom_models', None)
+ self.custom_models_normalize = bool(self.configuration.get('custom_models_normalize', False))
+ if self.custom_models:
+ self.custom_models_names = [model['name'] for model in self.custom_models]
+ self.custom_models_dims = [i for s in [model['dimensions'].split(',') for model in self.custom_models] for i in s]
+ self.custom_models_dims = [dim if '::' in dim else f'{self.host}::{dim}' for dim in self.custom_models_dims]
+ self.custom_models_charts = list(set([dim.split('|')[0].split('::')[1] for dim in self.custom_models_dims]))
+ self.custom_models_hosts = list(set([dim.split('::')[0] for dim in self.custom_models_dims]))
+ self.custom_models_host_charts_dict = {}
+ for host in self.custom_models_hosts:
+ self.custom_models_host_charts_dict[host] = list(set([dim.split('::')[1].split('|')[0] for dim in self.custom_models_dims if dim.startswith(host)]))
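+            # prefix each custom model dimension with the model name so that its columns can later be
+            # selected with a '<model name>|' startswith filter (see try_fit() and try_predict())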
+ self.custom_models_dims_renamed = [f"{model['name']}|{dim}" for model in self.custom_models for dim in model['dimensions'].split(',')]
+ self.models_in_scope = list(set([f'{self.host}::{c}' for c in self.charts_in_scope] + self.custom_models_names))
+ self.charts_in_scope = list(set(self.charts_in_scope + self.custom_models_charts))
+ self.host_charts_dict = {self.host: self.charts_in_scope}
+ for host in self.custom_models_host_charts_dict:
+ if host not in self.host_charts_dict:
+ self.host_charts_dict[host] = self.custom_models_host_charts_dict[host]
+ else:
+ for chart in self.custom_models_host_charts_dict[host]:
+                        if chart not in self.host_charts_dict[host]:
+                            self.host_charts_dict[host].append(chart)
+ else:
+ self.models_in_scope = [f'{self.host}::{c}' for c in self.charts_in_scope]
+ self.host_charts_dict = {self.host: self.charts_in_scope}
+ self.model_display_names = {model: model.split('::')[1] if '::' in model else model for model in self.models_in_scope}
+
+ def model_params_init(self):
+ """Model parameters initialisation.
+ """
+ self.train_max_n = self.configuration.get('train_max_n', 100000)
+ self.train_n_secs = self.configuration.get('train_n_secs', 14400)
+ self.offset_n_secs = self.configuration.get('offset_n_secs', 0)
+ self.train_every_n = self.configuration.get('train_every_n', 1800)
+ self.train_no_prediction_n = self.configuration.get('train_no_prediction_n', 10)
+ self.initial_train_data_after = self.configuration.get('initial_train_data_after', 0)
+ self.initial_train_data_before = self.configuration.get('initial_train_data_before', 0)
+ self.contamination = self.configuration.get('contamination', 0.001)
+ self.lags_n = {model: self.configuration.get('lags_n', 5) for model in self.models_in_scope}
+ self.smooth_n = {model: self.configuration.get('smooth_n', 5) for model in self.models_in_scope}
+ self.diffs_n = {model: self.configuration.get('diffs_n', 5) for model in self.models_in_scope}
+
+ def models_init(self):
+ """Models initialisation.
+ """
+ self.model = self.configuration.get('model', 'pca')
+ if self.model == 'pca':
+ self.models = {model: PCA(contamination=self.contamination) for model in self.models_in_scope}
+ elif self.model == 'loda':
+ self.models = {model: LODA(contamination=self.contamination) for model in self.models_in_scope}
+ elif self.model == 'iforest':
+ self.models = {model: IForest(n_estimators=50, bootstrap=True, behaviour='new', contamination=self.contamination) for model in self.models_in_scope}
+ elif self.model == 'cblof':
+ self.models = {model: CBLOF(n_clusters=3, contamination=self.contamination) for model in self.models_in_scope}
+ elif self.model == 'feature_bagging':
+ self.models = {model: FeatureBagging(base_estimator=PCA(contamination=self.contamination), contamination=self.contamination) for model in self.models_in_scope}
+ elif self.model == 'copod':
+ self.models = {model: COPOD(contamination=self.contamination) for model in self.models_in_scope}
+ elif self.model == 'hbos':
+ self.models = {model: HBOS(contamination=self.contamination) for model in self.models_in_scope}
+ else:
+ self.models = {model: HBOS(contamination=self.contamination) for model in self.models_in_scope}
+ self.custom_model_scalers = {model: MinMaxScaler() for model in self.models_in_scope}
+
+ def validate_charts(self, name, data, algorithm='absolute', multiplier=1, divisor=1):
+ """If dimension not in chart then add it.
+ """
+ for dim in data:
+ if dim not in self.charts[name]:
+ self.charts[name].add_dimension([dim, dim, algorithm, multiplier, divisor])
+
+ def add_custom_models_dims(self, df):
+ """Given a df, select columns used by custom models, add custom model name as prefix, and append to df.
+
+ :param df <pd.DataFrame>: dataframe to append new renamed columns to.
+ :return: <pd.DataFrame> dataframe with additional columns added relating to the specified custom models.
+ """
+ df_custom = df[self.custom_models_dims].copy()
+ df_custom.columns = self.custom_models_dims_renamed
+ df = df.join(df_custom)
+
+ return df
+
+ def make_features(self, arr, train=False, model=None):
+ """Take in numpy array and preprocess accordingly by taking diffs, smoothing and adding lags.
+
+ :param arr <np.ndarray>: numpy array we want to make features from.
+ :param train <bool>: True if making features for training, in which case need to fit_transform scaler and maybe sample train_max_n.
+ :param model <str>: model to make features for.
+ :return: <np.ndarray> transformed numpy array.
+ """
+
+ def lag(arr, n):
+ res = np.empty_like(arr)
+ res[:n] = np.nan
+ res[n:] = arr[:-n]
+
+ return res
+
+ arr = np.nan_to_num(arr)
+
+ diffs_n = self.diffs_n[model]
+ smooth_n = self.smooth_n[model]
+ lags_n = self.lags_n[model]
+
+ if self.custom_models_normalize and model in self.custom_models_names:
+ if train:
+ arr = self.custom_model_scalers[model].fit_transform(arr)
+ else:
+ arr = self.custom_model_scalers[model].transform(arr)
+
+ if diffs_n > 0:
+ arr = np.diff(arr, diffs_n, axis=0)
+ arr = arr[~np.isnan(arr).any(axis=1)]
+
+ if smooth_n > 1:
+ arr = np.cumsum(arr, axis=0, dtype=float)
+ arr[smooth_n:] = arr[smooth_n:] - arr[:-smooth_n]
+ arr = arr[smooth_n - 1:] / smooth_n
+ arr = arr[~np.isnan(arr).any(axis=1)]
+
+ if lags_n > 0:
+ arr_orig = np.copy(arr)
+ for lag_n in range(1, lags_n + 1):
+ arr = np.concatenate((arr, lag(arr_orig, lag_n)), axis=1)
+ arr = arr[~np.isnan(arr).any(axis=1)]
+
+ if train:
+ if len(arr) > self.train_max_n:
+ arr = arr[np.random.randint(arr.shape[0], size=self.train_max_n), :]
+
+ arr = np.nan_to_num(arr)
+
+ return arr
+
+ def train(self, models_to_train=None, train_data_after=0, train_data_before=0):
+ """Pull required training data and train a model for each specified model.
+
+ :param models_to_train <list>: list of models to train on.
+ :param train_data_after <int>: integer timestamp for start of train data.
+ :param train_data_before <int>: integer timestamp for end of train data.
+ """
+ now = datetime.now().timestamp()
+ if train_data_after > 0 and train_data_before > 0:
+ before = train_data_before
+ after = train_data_after
+ else:
+ before = int(now) - self.offset_n_secs
+ after = before - self.train_n_secs
+
+ # get training data
+ df_train = get_data(
+ host_charts_dict=self.host_charts_dict, host_prefix=True, host_sep='::', after=after, before=before,
+ sort_cols=True, numeric_only=True, protocol=self.protocol, float_size='float32', user=self.username, pwd=self.password
+ ).ffill()
+ if self.custom_models:
+ df_train = self.add_custom_models_dims(df_train)
+
+ # train model
+ self.try_fit(df_train, models_to_train=models_to_train)
+ self.info(f'training complete in {round(time.time() - now, 2)} seconds (runs_counter={self.runs_counter}, model={self.model}, train_n_secs={self.train_n_secs}, models={len(self.fitted_at)}, n_fit_success={self.n_fit_success}, n_fit_fails={self.n_fit_fail}, after={after}, before={before}).')
+ self.last_train_at = self.runs_counter
+
+ def try_fit(self, df_train, models_to_train=None):
+ """Try fit each model and try to fallback to a default model if fit fails for any reason.
+
+ :param df_train <pd.DataFrame>: data to train on.
+ :param models_to_train <list>: list of models to train.
+ """
+ if models_to_train is None:
+ models_to_train = list(self.models.keys())
+ self.n_fit_fail, self.n_fit_success = 0, 0
+ for model in models_to_train:
+ X_train = self.make_features(
+ df_train[df_train.columns[df_train.columns.str.startswith(f'{model}|')]].values,
+ train=True, model=model)
+ try:
+ self.models[model].fit(X_train)
+ self.n_fit_success += 1
+ except Exception as e:
+ self.n_fit_fail += 1
+ self.info(e)
+ self.info(f'training failed for {model} at run_counter {self.runs_counter}, defaulting to hbos model.')
+ self.models[model] = HBOS(contamination=self.contamination)
+ self.models[model].fit(X_train)
+ self.fitted_at[model] = self.runs_counter
+
+ def predict(self):
+ """Get latest data, make it into a feature vector, and get predictions for each available model.
+
+ :return: (<dict>,<dict>) tuple of dictionaries, one for probability scores and the other for anomaly predictions.
+ """
+ # get recent data to predict on
+ df_allmetrics = get_allmetrics_async(
+ host_charts_dict=self.host_charts_dict, host_prefix=True, host_sep='::', wide=True, sort_cols=True,
+ protocol=self.protocol, numeric_only=True, float_size='float32', user=self.username, pwd=self.password
+ )
+ if self.custom_models:
+ df_allmetrics = self.add_custom_models_dims(df_allmetrics)
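+        # keep just enough recent rows to build one feature vector (max lags + smoothing + diffs, with headroom)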
+ self.df_allmetrics = self.df_allmetrics.append(df_allmetrics).ffill().tail((max(self.lags_n.values()) + max(self.smooth_n.values()) + max(self.diffs_n.values())) * 2)
+
+ # get predictions
+ data_probability, data_anomaly = self.try_predict()
+
+ return data_probability, data_anomaly
+
+ def try_predict(self):
+ """Try make prediction and fall back to last known prediction if fails.
+
+ :return: (<dict>,<dict>) tuple of dictionaries, one for probability scores and the other for anomaly predictions.
+ """
+ data_probability, data_anomaly = {}, {}
+ for model in self.fitted_at.keys():
+ model_display_name = self.model_display_names[model]
+ X_model = np.nan_to_num(self.make_features(
+ self.df_allmetrics[self.df_allmetrics.columns[self.df_allmetrics.columns.str.startswith(f'{model}|')]].values,
+ model=model)[-1,:].reshape(1, -1))
+ try:
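+                # predict_proba()[-1][1] is the anomaly probability (0-1); it is scaled by 10000 here and
+                # the chart divisor of 100 (see validate_charts) renders it as a percentage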
+ data_probability[model_display_name + '_prob'] = np.nan_to_num(self.models[model].predict_proba(X_model)[-1][1]) * 10000
+ data_anomaly[model_display_name + '_anomaly'] = self.models[model].predict(X_model)[-1]
+ except Exception:
+ #self.info(e)
+ if model_display_name + '_prob' in self.data_latest:
+ #self.info(f'prediction failed for {model} at run_counter {self.runs_counter}, using last prediction instead.')
+ data_probability[model_display_name + '_prob'] = self.data_latest[model_display_name + '_prob']
+ data_anomaly[model_display_name + '_anomaly'] = self.data_latest[model_display_name + '_anomaly']
+ else:
+ #self.info(f'prediction failed for {model} at run_counter {self.runs_counter}, skipping as no previous prediction.')
+ continue
+
+ return data_probability, data_anomaly
+
+ def get_data(self):
+
+ # if not all models have been trained then train those we need to
+ if len(self.fitted_at) < len(self.models):
+ self.train(
+ models_to_train=[m for m in self.models if m not in self.fitted_at],
+ train_data_after=self.initial_train_data_after,
+ train_data_before=self.initial_train_data_before)
+ # retrain all models as per schedule from config
+ elif self.train_every_n > 0 and self.runs_counter % self.train_every_n == 0:
+ self.train()
+
+ # roll forward previous predictions around a training step to avoid the possibility of having the training itself trigger an anomaly
+ if (self.runs_counter - self.last_train_at) <= self.train_no_prediction_n:
+ data = self.data_latest
+ else:
+ data_probability, data_anomaly = self.predict()
+ if self.include_average_prob:
+ data_probability['average_prob'] = np.mean(list(data_probability.values()))
+ data = {**data_probability, **data_anomaly}
+ self.validate_charts('probability', data_probability, divisor=100)
+ self.validate_charts('anomaly', data_anomaly)
+
+ self.data_latest = data
+
+ return data
diff --git a/collectors/python.d.plugin/anomalies/anomalies.conf b/collectors/python.d.plugin/anomalies/anomalies.conf
new file mode 100644
index 000000000..9950534aa
--- /dev/null
+++ b/collectors/python.d.plugin/anomalies/anomalies.conf
@@ -0,0 +1,181 @@
+# netdata python.d.plugin configuration for anomalies
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 2
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+
+# Pull data from local Netdata node.
+local:
+ name: 'local'
+
+ # Host to pull data from.
+ host: '127.0.0.1:19999'
+
+ # Username and Password for Netdata if using basic auth.
+ # username: '???'
+ # password: '???'
+
+ # Use http or https to pull data
+ protocol: 'http'
+
+ # What charts to pull data for - A regex like 'system\..*|' or 'system\..*|apps.cpu|apps.mem' etc.
+ charts_regex: 'system\..*'
+
+ # Charts to exclude, useful if you would like to exclude some specific charts.
+ # Note: should be a ',' separated string like 'chart.name,chart.name'.
+ charts_to_exclude: 'system.uptime,system.entropy'
+
+ # What model to use - can be one of 'pca', 'hbos', 'iforest', 'cblof', 'loda', 'copod' or 'feature_bagging'.
+ # More details here: https://pyod.readthedocs.io/en/latest/pyod.models.html.
+ model: 'pca'
+
+ # Max number of observations to train on, to help cap compute cost of training model if you set a very large train_n_secs.
+ train_max_n: 100000
+
+ # How often to re-train the model (assuming update_every=1 then train_every_n=1800 represents (re)training every 30 minutes).
+ # Note: If you want to turn off re-training set train_every_n=0 and after initial training the models will not be retrained.
+ train_every_n: 1800
+
+ # The length of the window of data to train on (14400 = last 4 hours).
+ train_n_secs: 14400
+
+ # How many prediction steps after a train event to just use previous prediction value for.
+ # Used to reduce possibility of the training step itself appearing as an anomaly on the charts.
+ train_no_prediction_n: 10
+
+ # If you would like to train the model for the first time on a specific window then you can define it using the below two variables.
+ # Start of training data for initial model.
+ # initial_train_data_after: 1604578857
+
+ # End of training data for initial model.
+ # initial_train_data_before: 1604593257
+
+ # If you would like to ignore recent data in training then you can offset it by offset_n_secs.
+ offset_n_secs: 0
+
+ # How many lagged values of each dimension to include in the 'feature vector' each model is trained on.
+ lags_n: 5
+
+ # How much smoothing to apply to each dimension in the 'feature vector' each model is trained on.
+ smooth_n: 3
+
+ # How many differences to take in preprocessing your data.
+ # More info on differencing here: https://en.wikipedia.org/wiki/Autoregressive_integrated_moving_average#Differencing
+ # diffs_n=0 would mean training models on the raw values of each dimension.
+ # diffs_n=1 means everything is done in terms of differences.
+ diffs_n: 1
+
+ # What is the typical proportion of anomalies in your data on average?
+ # This parameter can control the sensitivity of your models to anomalies.
+ # Some discussion here: https://github.com/yzhao062/pyod/issues/144
+ contamination: 0.001
+
+ # Set to true to include an "average_prob" dimension on anomalies probability chart which is
+ # just the average of all anomaly probabilities at each time step
+ include_average_prob: true
+
+ # Define any custom models you would like to create anomaly probabilities for; some examples below show how.
+ # For example, the commented config below creates two custom models, one to run anomaly detection on user and system cpu for our demo servers
+ # and one on the cpu and mem apps metrics for the python.d.plugin.
+ # custom_models:
+ # - name: 'demos_cpu'
+ # dimensions: 'london.my-netdata.io::system.cpu|user,london.my-netdata.io::system.cpu|system,newyork.my-netdata.io::system.cpu|user,newyork.my-netdata.io::system.cpu|system'
+ # - name: 'apps_python_d_plugin'
+ # dimensions: 'apps.cpu|python.d.plugin,apps.mem|python.d.plugin'
+
+ # Set to true to normalize, using min-max standardization, features used for the custom models.
+ # Useful if your custom models contain dimensions on very different scales and the model you use does
+ # not internally do its own normalization. Usually best to leave as false.
+ # custom_models_normalize: false
+
+# Standalone Custom models example as an additional collector job.
+# custom:
+# name: 'custom'
+# host: '127.0.0.1:19999'
+# protocol: 'http'
+# charts_regex: 'None'
+# charts_to_exclude: 'None'
+# model: 'pca'
+# train_max_n: 100000
+# train_every_n: 1800
+# train_n_secs: 14400
+# offset_n_secs: 0
+# lags_n: 5
+# smooth_n: 3
+# diffs_n: 1
+# contamination: 0.001
+# custom_models:
+# - name: 'user_netdata'
+# dimensions: 'users.cpu|netdata,users.mem|netdata,users.threads|netdata,users.processes|netdata,users.sockets|netdata'
+# - name: 'apps_python_d_plugin'
+# dimensions: 'apps.cpu|python.d.plugin,apps.mem|python.d.plugin,apps.threads|python.d.plugin,apps.processes|python.d.plugin,apps.sockets|python.d.plugin'
+
+# Pull data from some demo nodes for cross node custom models.
+# demos:
+# name: 'demos'
+# host: '127.0.0.1:19999'
+# protocol: 'http'
+# charts_regex: 'None'
+# charts_to_exclude: 'None'
+# model: 'pca'
+# train_max_n: 100000
+# train_every_n: 1800
+# train_n_secs: 14400
+# offset_n_secs: 0
+# lags_n: 5
+# smooth_n: 3
+# diffs_n: 1
+# contamination: 0.001
+# custom_models:
+# - name: 'system.cpu'
+# dimensions: 'london.my-netdata.io::system.cpu|user,london.my-netdata.io::system.cpu|system,newyork.my-netdata.io::system.cpu|user,newyork.my-netdata.io::system.cpu|system'
+# - name: 'system.ip'
+# dimensions: 'london.my-netdata.io::system.ip|received,london.my-netdata.io::system.ip|sent,newyork.my-netdata.io::system.ip|received,newyork.my-netdata.io::system.ip|sent'
+# - name: 'system.net'
+# dimensions: 'london.my-netdata.io::system.net|received,london.my-netdata.io::system.net|sent,newyork.my-netdata.io::system.net|received,newyork.my-netdata.io::system.net|sent'
+# - name: 'system.io'
+# dimensions: 'london.my-netdata.io::system.io|in,london.my-netdata.io::system.io|out,newyork.my-netdata.io::system.io|in,newyork.my-netdata.io::system.io|out'
+
+# Example additional job if you want to also pull data from a child streaming to your
+# local parent or even a remote node so long as the Netdata REST API is accessible.
+# mychildnode1:
+# name: 'mychildnode1'
+# host: '127.0.0.1:19999/host/mychildnode1'
+# protocol: 'http'
+# charts_regex: 'system\..*'
+# charts_to_exclude: 'None'
+# model: 'pca'
+# train_max_n: 100000
+# train_every_n: 1800
+# train_n_secs: 14400
+# offset_n_secs: 0
+# lags_n: 5
+# smooth_n: 3
+# diffs_n: 1
+# contamination: 0.001
diff --git a/collectors/python.d.plugin/apache/README.md b/collectors/python.d.plugin/apache/README.md
index 8f0ec0c1c..d27525095 100644
--- a/collectors/python.d.plugin/apache/README.md
+++ b/collectors/python.d.plugin/apache/README.md
@@ -1,8 +1,14 @@
-# apache
+<!--
+title: "Apache monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/apache/README.md
+sidebar_label: "Apache"
+-->
-This module will monitor one or more Apache servers depending on configuration.
+# Apache monitoring with Netdata
-**Requirements:**
+Monitors one or more Apache servers depending on configuration.
+
+## Requirements
- apache with enabled `mod_status`
@@ -43,11 +49,19 @@ It produces the following charts:
- size_req
-## configuration
+## Configuration
+
+Edit the `python.d/apache.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/apache.conf
+```
Needs only `url` to server's `server-status?auto`
-Here is an example for 2 servers:
+Example for two servers:
```yaml
update_every : 10
diff --git a/collectors/python.d.plugin/apache/apache.chart.py b/collectors/python.d.plugin/apache/apache.chart.py
index 655616d07..ceac9ecd5 100644
--- a/collectors/python.d.plugin/apache/apache.chart.py
+++ b/collectors/python.d.plugin/apache/apache.chart.py
@@ -5,7 +5,6 @@
from bases.FrameworkServices.UrlService import UrlService
-
ORDER = [
'requests',
'connections',
@@ -38,7 +37,7 @@ CHARTS = {
]},
'bytespersec': {
'options': [None, 'Lifetime Avg. Bandwidth/s', 'kilobits/s', 'statistics',
- 'apache.bytesperreq', 'area'],
+ 'apache.bytespersec', 'area'],
'lines': [
['size_sec', None, 'absolute', 8, 1000 * 100000]
]},
diff --git a/collectors/python.d.plugin/beanstalk/README.md b/collectors/python.d.plugin/beanstalk/README.md
index c93dfa0d4..24315adb4 100644
--- a/collectors/python.d.plugin/beanstalk/README.md
+++ b/collectors/python.d.plugin/beanstalk/README.md
@@ -1,8 +1,14 @@
-# beanstalk
+<!--
+title: "Beanstalk monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/beanstalk/README.md
+sidebar_label: "Beanstalk"
+-->
-Module provides server and tube-level statistics:
+# Beanstalk monitoring with Netdata
-**Requirements:**
+Provides server and tube-level statistics.
+
+## Requirements
- `python-beanstalkc`
@@ -103,7 +109,15 @@ Module provides server and tube-level statistics:
- since
- left
-## configuration
+## Configuration
+
+Edit the `python.d/beanstalk.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/beanstalk.conf
+```
Sample:
diff --git a/collectors/python.d.plugin/beanstalk/beanstalk.chart.py b/collectors/python.d.plugin/beanstalk/beanstalk.chart.py
index 9c8319872..396543e5a 100644
--- a/collectors/python.d.plugin/beanstalk/beanstalk.chart.py
+++ b/collectors/python.d.plugin/beanstalk/beanstalk.chart.py
@@ -5,6 +5,7 @@
try:
import beanstalkc
+
BEANSTALKC = True
except ImportError:
BEANSTALKC = False
@@ -12,7 +13,6 @@ except ImportError:
from bases.FrameworkServices.SimpleService import SimpleService
from bases.loaders import load_yaml
-
ORDER = [
'cpu_usage',
'jobs_rate',
@@ -109,7 +109,7 @@ CHARTS = {
'options': [None, 'Uptime', 'seconds', 'server statistics', 'beanstalk.uptime', 'line'],
'lines': [
['uptime'],
- ]
+ ]
}
}
diff --git a/collectors/python.d.plugin/bind_rndc/README.md b/collectors/python.d.plugin/bind_rndc/README.md
index 021a5d660..2832575dd 100644
--- a/collectors/python.d.plugin/bind_rndc/README.md
+++ b/collectors/python.d.plugin/bind_rndc/README.md
@@ -1,8 +1,14 @@
-# bind_rndc
+<!--
+title: "ISC Bind monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/bind_rndc/README.md
+sidebar_label: "ISC Bind"
+-->
-Module parses bind dump file to collect real-time performance metrics
+# ISC Bind monitoring with Netdata
-**Requirements:**
+Collects name server summary performance statistics using the `rndc` tool.
+
+## Requirements
- Version of bind must be 9.6 +
- Netdata must have permissions to run `rndc stats`
@@ -49,7 +55,15 @@ It produces:
- Same as Incoming queries
-## configuration
+## Configuration
+
+Edit the `python.d/bind_rndc.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/bind_rndc.conf
+```
Sample:
diff --git a/collectors/python.d.plugin/bind_rndc/bind_rndc.chart.py b/collectors/python.d.plugin/bind_rndc/bind_rndc.chart.py
index 60f40c2f7..9d6c9fec7 100644
--- a/collectors/python.d.plugin/bind_rndc/bind_rndc.chart.py
+++ b/collectors/python.d.plugin/bind_rndc/bind_rndc.chart.py
@@ -4,13 +4,11 @@
# SPDX-License-Identifier: GPL-3.0-or-later
import os
-
from collections import defaultdict
from subprocess import Popen
-from bases.collection import find_binary
from bases.FrameworkServices.SimpleService import SimpleService
-
+from bases.collection import find_binary
update_every = 30
diff --git a/collectors/python.d.plugin/boinc/README.md b/collectors/python.d.plugin/boinc/README.md
index 260ae54b6..bd509c900 100644
--- a/collectors/python.d.plugin/boinc/README.md
+++ b/collectors/python.d.plugin/boinc/README.md
@@ -1,13 +1,24 @@
-# boinc
+<!--
+title: "BOINC monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/boinc/README.md
+sidebar_label: "BOINC"
+-->
-This module monitors task counts for the Berkely Open Infrastructure
-Networking Computing (BOINC) distributed computing client using the same
-RPC interface that the BOINC monitoring GUI does.
+# BOINC monitoring with Netdata
-It provides charts tracking the total number of tasks and active tasks,
-as well as ones tracking each of the possible states for tasks.
+Monitors task counts for the Berkeley Open Infrastructure for Network Computing (BOINC) distributed computing client using the same RPC interface that the BOINC monitoring GUI does.
-## configuration
+It provides charts tracking the total number of tasks and active tasks, as well as ones tracking each of the possible states for tasks.
+
+## Configuration
+
+Edit the `python.d/boinc.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/boinc.conf
+```
BOINC requires use of a password to access its RPC interface. You can
find this password in the `gui_rpc_auth.cfg` file in your BOINC directory.
diff --git a/collectors/python.d.plugin/boinc/boinc.chart.py b/collectors/python.d.plugin/boinc/boinc.chart.py
index e10b28cea..a31eda1c2 100644
--- a/collectors/python.d.plugin/boinc/boinc.chart.py
+++ b/collectors/python.d.plugin/boinc/boinc.chart.py
@@ -6,10 +6,8 @@
import socket
from bases.FrameworkServices.SimpleService import SimpleService
-
from third_party import boinc_client
-
ORDER = [
'tasks',
'states',
diff --git a/collectors/python.d.plugin/ceph/README.md b/collectors/python.d.plugin/ceph/README.md
index f5b36e149..5d671f2aa 100644
--- a/collectors/python.d.plugin/ceph/README.md
+++ b/collectors/python.d.plugin/ceph/README.md
@@ -1,8 +1,12 @@
-# ceph
+<!--
+title: "CEPH monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/ceph/README.md
+sidebar_label: "CEPH"
+-->
-This module monitors the ceph cluster usage and consumption data of a server.
+# CEPH monitoring with Netdata
-It produces:
+Monitors the ceph cluster usage and consumption data of a server, and produces:
- Cluster statistics (usage, available, latency, objects, read/write rate)
- OSD usage
@@ -12,7 +16,7 @@ It produces:
- Pool read/write rate
- number of objects per pool
-**Requirements:**
+## Requirements
- `rados` python module
- Granting read permissions to ceph group from keyring file
@@ -23,6 +27,14 @@ It produces:
## Configuration
+Edit the `python.d/ceph.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/ceph.conf
+```
+
Sample:
```yaml
diff --git a/collectors/python.d.plugin/ceph/ceph.chart.py b/collectors/python.d.plugin/ceph/ceph.chart.py
index fe9b2b9ab..494eef45d 100644
--- a/collectors/python.d.plugin/ceph/ceph.chart.py
+++ b/collectors/python.d.plugin/ceph/ceph.chart.py
@@ -5,6 +5,7 @@
try:
import rados
+
CEPH = True
except ImportError:
CEPH = False
@@ -30,6 +31,7 @@ ORDER = [
'pool_read_operations',
'pool_write_operations',
'osd_usage',
+ 'osd_size',
'osd_apply_latency',
'osd_commit_latency'
]
@@ -100,6 +102,10 @@ CHARTS = {
'options': [None, 'Ceph OSDs', 'KiB', 'osd', 'ceph.osd_usage', 'line'],
'lines': []
},
+ 'osd_size': {
+ 'options': [None, 'Ceph OSDs size', 'KiB', 'osd', 'ceph.osd_size', 'line'],
+ 'lines': []
+ },
'osd_apply_latency': {
'options': [None, 'Ceph OSDs apply latency', 'milliseconds', 'osd', 'ceph.apply_latency', 'line'],
'lines': []
@@ -119,6 +125,7 @@ class Service(SimpleService):
self.definitions = CHARTS
self.config_file = self.configuration.get('config_file')
self.keyring_file = self.configuration.get('keyring_file')
+ self.rados_id = self.configuration.get('rados_id', 'admin')
def check(self):
"""
@@ -147,7 +154,8 @@ class Service(SimpleService):
return False
try:
self.cluster = rados.Rados(conffile=self.config_file,
- conf=dict(keyring=self.keyring_file))
+ conf=dict(keyring=self.keyring_file),
+ rados_id=self.rados_id)
self.cluster.connect()
except rados.Error as error:
self.error(error)
@@ -161,7 +169,7 @@ class Service(SimpleService):
:return: None
"""
# Pool lines
- for pool in sorted(self._get_df()['pools'], key=lambda x:sorted(x.keys())):
+ for pool in sorted(self._get_df()['pools'], key=lambda x: sorted(x.keys())):
self.definitions['pool_usage']['lines'].append([pool['name'],
pool['name'],
'absolute'])
@@ -169,23 +177,26 @@ class Service(SimpleService):
pool['name'],
'absolute'])
self.definitions['pool_read_bytes']['lines'].append(['read_{0}'.format(pool['name']),
- pool['name'],
- 'absolute', 1, 1024])
- self.definitions['pool_write_bytes']['lines'].append(['write_{0}'.format(pool['name']),
pool['name'],
'absolute', 1, 1024])
+ self.definitions['pool_write_bytes']['lines'].append(['write_{0}'.format(pool['name']),
+ pool['name'],
+ 'absolute', 1, 1024])
self.definitions['pool_read_operations']['lines'].append(['read_operations_{0}'.format(pool['name']),
- pool['name'],
- 'absolute'])
- self.definitions['pool_write_operations']['lines'].append(['write_operations_{0}'.format(pool['name']),
pool['name'],
'absolute'])
+ self.definitions['pool_write_operations']['lines'].append(['write_operations_{0}'.format(pool['name']),
+ pool['name'],
+ 'absolute'])
# OSD lines
- for osd in sorted(self._get_osd_df()['nodes'], key=lambda x:sorted(x.keys())):
+ for osd in sorted(self._get_osd_df()['nodes'], key=lambda x: sorted(x.keys())):
self.definitions['osd_usage']['lines'].append([osd['name'],
osd['name'],
'absolute'])
+ self.definitions['osd_size']['lines'].append(['size_{0}'.format(osd['name']),
+ osd['name'],
+ 'absolute'])
self.definitions['osd_apply_latency']['lines'].append(['apply_latency_{0}'.format(osd['name']),
osd['name'],
'absolute'])
@@ -203,8 +214,10 @@ class Service(SimpleService):
df = self._get_df()
osd_df = self._get_osd_df()
osd_perf = self._get_osd_perf()
+ osd_perf_infos = get_osd_perf_infos(osd_perf)
pool_stats = self._get_osd_pool_stats()
- data.update(self._get_general(osd_perf, pool_stats))
+
+ data.update(self._get_general(osd_perf_infos, pool_stats))
for pool in df['pools']:
data.update(self._get_pool_usage(pool))
data.update(self._get_pool_objects(pool))
@@ -212,14 +225,15 @@ class Service(SimpleService):
data.update(self._get_pool_rw(pool_io))
for osd in osd_df['nodes']:
data.update(self._get_osd_usage(osd))
- for osd_apply_commit in osd_perf['osd_perf_infos']:
+ data.update(self._get_osd_size(osd))
+ for osd_apply_commit in osd_perf_infos:
data.update(self._get_osd_latency(osd_apply_commit))
return data
except (ValueError, AttributeError) as error:
self.error(error)
return None
- def _get_general(self, osd_perf, pool_stats):
+ def _get_general(self, osd_perf_infos, pool_stats):
"""
Get ceph's general usage
:return: dict
@@ -237,7 +251,7 @@ class Service(SimpleService):
write_bytes_sec += pool_rw_io_b['client_io_rate'].get('write_bytes_sec', 0)
read_op_per_sec += pool_rw_io_b['client_io_rate'].get('read_op_per_sec', 0)
write_op_per_sec += pool_rw_io_b['client_io_rate'].get('write_op_per_sec', 0)
- for perf in osd_perf['osd_perf_infos']:
+ for perf in osd_perf_infos:
apply_latency += perf['perf_stats']['apply_latency_ms']
commit_latency += perf['perf_stats']['commit_latency_ms']
@@ -291,6 +305,14 @@ class Service(SimpleService):
return {osd['name']: float(osd['kb_used'])}
@staticmethod
+ def _get_osd_size(osd):
+ """
+ Process raw data into osd dict information to get osd size (kb)
+ :return: A osd dict with osd name's key and size bytes' value
+ """
+ return {'size_{0}'.format(osd['name']): float(osd['kb'])}
+
+ @staticmethod
def _get_osd_latency(osd):
"""
Get ceph osd apply and commit latency
@@ -342,3 +364,11 @@ class Service(SimpleService):
'prefix': 'osd pool stats',
'format': 'json'
}), '')[1].decode('utf-8'))
+
+
+def get_osd_perf_infos(osd_perf):
+ # https://github.com/netdata/netdata/issues/8247
+    # module uses 'osd_perf_infos' data; it's been moved under 'osdstats' since Ceph v14.2
+ if 'osd_perf_infos' in osd_perf:
+ return osd_perf['osd_perf_infos']
+ return osd_perf['osdstats']['osd_perf_infos']
diff --git a/collectors/python.d.plugin/ceph/ceph.conf b/collectors/python.d.plugin/ceph/ceph.conf
index 4caabbf6d..81788e866 100644
--- a/collectors/python.d.plugin/ceph/ceph.conf
+++ b/collectors/python.d.plugin/ceph/ceph.conf
@@ -64,10 +64,12 @@
# config_file: 'config_file' # Ceph config file.
# keyring_file: 'keyring_file' # Ceph keyring file. netdata user must be added into ceph group
# # and keyring file must be read group permission.
+# rados_id: 'rados username' # ID used to connect to ceph cluster. Allows
+# # creating a read only key for pulling data v.s. admin
# ----------------------------------------------------------------------
# AUTO-DETECTION JOBS
# only one of them will run (they have the same name)
#
config_file: '/etc/ceph/ceph.conf'
keyring_file: '/etc/ceph/ceph.client.admin.keyring'
-
+rados_id: 'admin'
diff --git a/collectors/python.d.plugin/chrony/README.md b/collectors/python.d.plugin/chrony/README.md
index a45adb333..b1e7ec35c 100644
--- a/collectors/python.d.plugin/chrony/README.md
+++ b/collectors/python.d.plugin/chrony/README.md
@@ -1,8 +1,12 @@
-# chrony
+<!--
+title: "Chrony monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/chrony/README.md
+sidebar_label: "Chrony"
+-->
-This module monitors the precision and statistics of a local chronyd server.
+# Chrony monitoring with Netdata
-It produces:
+Monitors the precision and statistics of a local chronyd server, and produces:
- frequency
- last offset
@@ -13,11 +17,33 @@ It produces:
- skew
- system time
-**Requirements:**
+## Requirements
+
Verify that the `netdata` user can execute `chronyc tracking`. If necessary, update the `cmdallow` directive in `/etc/chrony.conf`.
+## Enable the collector
+
+The `chrony` collector is disabled by default. To enable it, use `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`, to edit the `python.d.conf` file.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d.conf
+```
+
+Change the value of the `chrony` setting to `yes`. Save the file and restart the Netdata Agent with `sudo systemctl
+restart netdata`, or the appropriate method for your system, to finish enabling the `chrony` collector.
+
## Configuration
+Edit the `python.d/chrony.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/chrony.conf
+```
+
Sample:
```yaml
@@ -29,6 +55,7 @@ local:
command: 'chronyc -n tracking'
```
----
+Save the file and restart the Netdata Agent with `sudo systemctl restart netdata`, or the appropriate method for your
+system, to finish configuring the `chrony` collector.
[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fchrony%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/couchdb/README.md b/collectors/python.d.plugin/couchdb/README.md
index 288970674..896bbdd31 100644
--- a/collectors/python.d.plugin/couchdb/README.md
+++ b/collectors/python.d.plugin/couchdb/README.md
@@ -1,6 +1,12 @@
-# couchdb
+<!--
+title: "Apache CouchDB monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/couchdb/README.md
+sidebar_label: "CouchDB"
+-->
-This module monitors vital statistics of a local Apache CouchDB 2.x server, including:
+# Apache CouchDB monitoring with Netdata
+
+Monitors vital statistics of a local Apache CouchDB 2.x server, including:
- Overall server reads/writes
- HTTP traffic breakdown
@@ -13,6 +19,14 @@ This module monitors vital statistics of a local Apache CouchDB 2.x server, incl
## Configuration
+Edit the `python.d/couchdb.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/couchdb.conf
+```
+
Sample for a local server running on port 5984:
```yaml
diff --git a/collectors/python.d.plugin/couchdb/couchdb.chart.py b/collectors/python.d.plugin/couchdb/couchdb.chart.py
index 50fe6669f..a395f356c 100644
--- a/collectors/python.d.plugin/couchdb/couchdb.chart.py
+++ b/collectors/python.d.plugin/couchdb/couchdb.chart.py
@@ -6,8 +6,8 @@
from collections import namedtuple, defaultdict
from json import loads
-from threading import Thread
from socket import gethostbyname, gaierror
+from threading import Thread
try:
from queue import Queue
@@ -16,10 +16,8 @@ except ImportError:
from bases.FrameworkServices.UrlService import UrlService
-
update_every = 1
-
METHODS = namedtuple('METHODS', ['get_data', 'url', 'stats'])
OVERVIEW_STATS = [
@@ -127,7 +125,7 @@ CHARTS = {
['couchdb_httpd_request_methods_GET', 'GET', 'incremental'],
['couchdb_httpd_request_methods_HEAD', 'HEAD', 'incremental'],
['couchdb_httpd_request_methods_OPTIONS', 'OPTIONS',
- 'incremental'],
+ 'incremental'],
['couchdb_httpd_request_methods_POST', 'POST', 'incremental'],
['couchdb_httpd_request_methods_PUT', 'PUT', 'incremental']
]
@@ -141,13 +139,13 @@ CHARTS = {
['couchdb_httpd_status_codes_201', '201 Created', 'incremental'],
['couchdb_httpd_status_codes_202', '202 Accepted', 'incremental'],
['couchdb_httpd_status_codes_2xx', 'Other 2xx Success',
- 'incremental'],
+ 'incremental'],
['couchdb_httpd_status_codes_3xx', '3xx Redirection',
- 'incremental'],
+ 'incremental'],
['couchdb_httpd_status_codes_4xx', '4xx Client error',
- 'incremental'],
+ 'incremental'],
['couchdb_httpd_status_codes_5xx', '5xx Server error',
- 'incremental']
+ 'incremental']
]
},
'open_files': {
@@ -280,19 +278,19 @@ class Service(UrlService):
if self._get_raw_data(self.url + '/' + db)]
for db in self.dbs:
self.definitions['db_sizes_file']['lines'].append(
- ['db_'+db+'_sizes_file', db, 'absolute', 1, 1000]
+ ['db_' + db + '_sizes_file', db, 'absolute', 1, 1000]
)
self.definitions['db_sizes_external']['lines'].append(
- ['db_'+db+'_sizes_external', db, 'absolute', 1, 1000]
+ ['db_' + db + '_sizes_external', db, 'absolute', 1, 1000]
)
self.definitions['db_sizes_active']['lines'].append(
- ['db_'+db+'_sizes_active', db, 'absolute', 1, 1000]
+ ['db_' + db + '_sizes_active', db, 'absolute', 1, 1000]
)
self.definitions['db_doc_counts']['lines'].append(
- ['db_'+db+'_doc_count', db, 'absolute']
+ ['db_' + db + '_doc_count', db, 'absolute']
)
self.definitions['db_doc_del_counts']['lines'].append(
- ['db_'+db+'_doc_del_count', db, 'absolute']
+ ['db_' + db + '_doc_del_count', db, 'absolute']
)
return UrlService.check(self)
diff --git a/collectors/python.d.plugin/dns_query_time/README.md b/collectors/python.d.plugin/dns_query_time/README.md
index ebf34a3d6..e1fde7471 100644
--- a/collectors/python.d.plugin/dns_query_time/README.md
+++ b/collectors/python.d.plugin/dns_query_time/README.md
@@ -1,6 +1,12 @@
-# dns_query_time
+<!--
+title: "DNS query RTT monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/dns_query_time/README.md
+sidebar_label: "DNS query RTT"
+-->
-This module provides DNS query time statistics.
+# DNS query RTT monitoring with Netdata
+
+Measures DNS query round trip time.
**Requirement:**
@@ -8,6 +14,16 @@ This module provides DNS query time statistics.
It produces one aggregate chart or one chart per DNS server, showing the query time.
+## Configuration
+
+Edit the `python.d/dns_query_time.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/dns_query_time.conf
+```
+
---
[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fdns_query_time%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/dns_query_time/dns_query_time.chart.py b/collectors/python.d.plugin/dns_query_time/dns_query_time.chart.py
index 7fe860314..7e1cb32b3 100644
--- a/collectors/python.d.plugin/dns_query_time/dns_query_time.chart.py
+++ b/collectors/python.d.plugin/dns_query_time/dns_query_time.chart.py
@@ -11,6 +11,7 @@ try:
import dns.message
import dns.query
import dns.name
+
DNS_PYTHON = True
except ImportError:
DNS_PYTHON = False
@@ -22,7 +23,6 @@ except ImportError:
from bases.FrameworkServices.SimpleService import SimpleService
-
update_every = 5
diff --git a/collectors/python.d.plugin/dnsdist/README.md b/collectors/python.d.plugin/dnsdist/README.md
index 4310fe28a..7c279efaf 100644
--- a/collectors/python.d.plugin/dnsdist/README.md
+++ b/collectors/python.d.plugin/dnsdist/README.md
@@ -1,8 +1,12 @@
-# dnsdist
+<!--
+title: "PowerDNS dnsdist monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/dnsdist/README.md
+sidebar_label: "PowerDNS dnsdist"
+-->
-This module monitors dnsdist performance and health metrics.
+# PowerDNS dnsdist monitoring with Netdata
-The module draws the following charts:
+Collects load-balancer performance and health metrics, and draws the following charts:
1. **Response latency**
@@ -47,6 +51,14 @@ The module draws the following charts:
## Configuration
+Edit the `python.d/dnsdist.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/dnsdist.conf
+```
+
```yaml
localhost:
name : 'local'
diff --git a/collectors/python.d.plugin/dnsdist/dnsdist.chart.py b/collectors/python.d.plugin/dnsdist/dnsdist.chart.py
index d60858659..7e947923f 100644
--- a/collectors/python.d.plugin/dnsdist/dnsdist.chart.py
+++ b/collectors/python.d.plugin/dnsdist/dnsdist.chart.py
@@ -5,7 +5,6 @@ from json import loads
from bases.FrameworkServices.UrlService import UrlService
-
ORDER = [
'queries',
'queries_dropped',
@@ -21,7 +20,6 @@ ORDER = [
'query_latency_avg'
]
-
CHARTS = {
'queries': {
'options': [None, 'Client queries received', 'queries/s', 'queries', 'dnsdist.queries', 'line'],
@@ -107,7 +105,7 @@ CHARTS = {
]
},
'query_latency_avg': {
- 'options': [None, 'Average latency for the last N queries', 'ms/query', 'latency',
+ 'options': [None, 'Average latency for the last N queries', 'microseconds', 'latency',
'dnsdist.query_latency_avg', 'line'],
'lines': [
['latency-avg100', '100', 'absolute'],
diff --git a/collectors/python.d.plugin/dockerd/README.md b/collectors/python.d.plugin/dockerd/README.md
index ec69262fa..178bae2cc 100644
--- a/collectors/python.d.plugin/dockerd/README.md
+++ b/collectors/python.d.plugin/dockerd/README.md
@@ -1,6 +1,12 @@
-# dockerd
+<!--
+title: "Docker Engine monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/dockerd/README.md
+sidebar_label: "Docker Engine"
+-->
-Module monitor docker health metrics.
+# Docker Engine monitoring with Netdata
+
+Collects Docker container health metrics.
**Requirement:**
@@ -20,7 +26,15 @@ Following charts are drawn:
- count
-## configuration
+## Configuration
+
+Edit the `python.d/dockerd.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/dockerd.conf
+```
```yaml
update_every : 1
diff --git a/collectors/python.d.plugin/dockerd/dockerd.chart.py b/collectors/python.d.plugin/dockerd/dockerd.chart.py
index 8bd45df9e..bd9640bbf 100644
--- a/collectors/python.d.plugin/dockerd/dockerd.chart.py
+++ b/collectors/python.d.plugin/dockerd/dockerd.chart.py
@@ -4,14 +4,14 @@
try:
import docker
+
HAS_DOCKER = True
except ImportError:
HAS_DOCKER = False
-from bases.FrameworkServices.SimpleService import SimpleService
-
from distutils.version import StrictVersion
+from bases.FrameworkServices.SimpleService import SimpleService
# charts order (can be overridden if you want less charts, or different order)
ORDER = [
@@ -44,7 +44,6 @@ CHARTS = {
}
}
-
MIN_REQUIRED_VERSION = '3.2.0'
diff --git a/collectors/python.d.plugin/dovecot/README.md b/collectors/python.d.plugin/dovecot/README.md
index 6048f1a63..730b64257 100644
--- a/collectors/python.d.plugin/dovecot/README.md
+++ b/collectors/python.d.plugin/dovecot/README.md
@@ -1,6 +1,12 @@
-# dovecot
+<!--
+title: "Dovecot monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/dovecot/README.md
+sidebar_label: "Dovecot"
+-->
-This module provides statistics information from Dovecot server.
+# Dovecot monitoring with Netdata
+
+Provides statistics from the Dovecot server.
Statistics are taken from dovecot socket by executing `EXPORT global` command.
More information about dovecot stats can be found on [project wiki page.](http://wiki2.dovecot.org/Statistics)
@@ -32,8 +38,8 @@ Module gives information with following charts:
5. **Context Switches**
- - volountary
- - involountary
+ - voluntary
+ - involuntary
6. **disk** in bytes/s
@@ -69,7 +75,15 @@ Module gives information with following charts:
- hit
- miss
-## configuration
+## Configuration
+
+Edit the `python.d/dovecot.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/dovecot.conf
+```
Sample:
diff --git a/collectors/python.d.plugin/dovecot/dovecot.chart.py b/collectors/python.d.plugin/dovecot/dovecot.chart.py
index be1fa53d5..dfaef28b5 100644
--- a/collectors/python.d.plugin/dovecot/dovecot.chart.py
+++ b/collectors/python.d.plugin/dovecot/dovecot.chart.py
@@ -5,10 +5,8 @@
from bases.FrameworkServices.SocketService import SocketService
-
UNIX_SOCKET = '/var/run/dovecot/stats'
-
ORDER = [
'sessions',
'logins',
@@ -51,7 +49,8 @@ CHARTS = {
]
},
'context_switches': {
- 'options': [None, 'Dovecot Context Switches', 'switches', 'context switches', 'dovecot.context_switches', 'line'],
+ 'options': [None, 'Dovecot Context Switches', 'switches', 'context switches', 'dovecot.context_switches',
+ 'line'],
'lines': [
['vol_cs', 'voluntary', 'absolute'],
['invol_cs', 'involuntary', 'absolute']
diff --git a/collectors/python.d.plugin/elasticsearch/README.md b/collectors/python.d.plugin/elasticsearch/README.md
index 211dfabfa..d8d7581bc 100644
--- a/collectors/python.d.plugin/elasticsearch/README.md
+++ b/collectors/python.d.plugin/elasticsearch/README.md
@@ -1,6 +1,12 @@
-# elasticsearch
+<!--
+title: "Elasticsearch monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/elasticsearch/README.md
+sidebar_label: "Elasticsearch"
+-->
-This module monitors [Elasticsearch](https://www.elastic.co/products/elasticsearch) performance and health metrics.
+# Elasticsearch monitoring with Netdata
+
+Monitors [Elasticsearch](https://www.elastic.co/products/elasticsearch) performance and health metrics.
It produces:
@@ -16,7 +22,7 @@ It produces:
- Time spent on indexing, refreshing, flushing
- Indexing and flushing latency
-3. **Memory usage and garbace collection** charts:
+3. **Memory usage and garbage collection** charts:
- JVM heap currently in use, committed
- Count of garbage collections
@@ -58,7 +64,15 @@ It produces:
- Num of replicas
- Health status
-## configuration
+## Configuration
+
+Edit the `python.d/elasticsearch.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/elasticsearch.conf
+```
Sample:
diff --git a/collectors/python.d.plugin/elasticsearch/elasticsearch.chart.py b/collectors/python.d.plugin/elasticsearch/elasticsearch.chart.py
index 8aaa08583..dddf50b4c 100644
--- a/collectors/python.d.plugin/elasticsearch/elasticsearch.chart.py
+++ b/collectors/python.d.plugin/elasticsearch/elasticsearch.chart.py
@@ -245,7 +245,7 @@ CHARTS = {
'elastic.index_translog_operations', 'area'],
'lines': [
['indices_translog_operations', 'total', 'absolute'],
- ['indices_translog_uncommitted_operations', 'uncommited', 'absolute']
+ ['indices_translog_uncommitted_operations', 'uncommitted', 'absolute']
]
},
'index_translog_size': {
@@ -253,7 +253,7 @@ CHARTS = {
'elastic.index_translog_size', 'area'],
'lines': [
['indices_translog_size_in_bytes', 'total', 'absolute', 1, 1048567],
- ['indices_translog_uncommitted_size_in_bytes', 'uncommited', 'absolute', 1, 1048567]
+ ['indices_translog_uncommitted_size_in_bytes', 'uncommitted', 'absolute', 1, 1048567]
]
},
'index_segments_count': {
@@ -295,7 +295,7 @@ CHARTS = {
'options': [None, 'JVM Heap Commit And Usage', 'MiB', 'memory usage and gc',
'elastic.jvm_heap_bytes', 'area'],
'lines': [
- ['jvm_mem_heap_committed_in_bytes', 'commited', 'absolute', 1, 1048576],
+ ['jvm_mem_heap_committed_in_bytes', 'committed', 'absolute', 1, 1048576],
['jvm_mem_heap_used_in_bytes', 'used', 'absolute', 1, 1048576]
]
},
@@ -513,6 +513,8 @@ def convert_index_store_size_to_bytes(size):
return round(float(size[:-2]) * 1024 * 1024)
elif size.endswith('gb'):
return round(float(size[:-2]) * 1024 * 1024 * 1024)
+ elif size.endswith('tb'):
+ return round(float(size[:-2]) * 1024 * 1024 * 1024 * 1024)
elif size.endswith('b'):
return round(float(size[:-1]))
return -1
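A quick sanity check of the new `tb` branch, assuming `convert_index_store_size_to_bytes` is imported from the module (the input strings are made up):

```python
# '2tb' -> 2 * 1024^4 bytes; unrecognized suffixes still fall through to -1.
assert convert_index_store_size_to_bytes('2tb') == 2 * 1024 ** 4
assert convert_index_store_size_to_bytes('unknown') == -1
```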
diff --git a/collectors/python.d.plugin/energid/README.md b/collectors/python.d.plugin/energid/README.md
index fc5101590..60c829fed 100644
--- a/collectors/python.d.plugin/energid/README.md
+++ b/collectors/python.d.plugin/energid/README.md
@@ -1,9 +1,15 @@
-# energid
+<!--
+title: "Energi Core node monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/energid/README.md
+sidebar_label: "Energi Core"
+-->
-A collector for [Energi Core](https://github.com/energicryptocurrency/energi)
-node instance monitoring.
+# Energi Core node monitoring with Netdata
-As Energi Core Gen 1 & 2 are based on the original Bitcoin code and
+Monitors blockchain, memory, network, and unspent transaction statistics.
+
+
+As [Energi Core](https://github.com/energicryptocurrency/energi) Gen 1 & 2 are based on the original Bitcoin code and
support a very similar JSON RPC interface, there is a good chance the module also works
with many other forks, including bitcoind itself.
@@ -42,6 +48,14 @@ long daemon startup.
## Configuration
+Edit the `python.d/energid.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/energid.conf
+```
+
Sample:
```yaml
diff --git a/collectors/python.d.plugin/energid/energid.chart.py b/collectors/python.d.plugin/energid/energid.chart.py
index b8aa89e50..079c32dc8 100644
--- a/collectors/python.d.plugin/energid/energid.chart.py
+++ b/collectors/python.d.plugin/energid/energid.chart.py
@@ -41,9 +41,9 @@ CHARTS = {
'mempool': {
'options': [None, 'MemPool', 'MiB', 'memory', 'energid.mempool', 'area'],
'lines': [
- ['mempool_max', 'Max', 'absolute', None, 1024*1024],
- ['mempool_current', 'Usage', 'absolute', None, 1024*1024],
- ['mempool_txsize', 'TX Size', 'absolute', None, 1024*1024],
+ ['mempool_max', 'Max', 'absolute', None, 1024 * 1024],
+ ['mempool_current', 'Usage', 'absolute', None, 1024 * 1024],
+ ['mempool_txsize', 'TX Size', 'absolute', None, 1024 * 1024],
],
},
'secmem': {
@@ -93,22 +93,23 @@ METHODS = {
'mempool_max': r['maxmempool'],
},
'getmemoryinfo': lambda r: dict([
- ('secmem_' + k, v) for (k,v) in r['locked'].items()
+ ('secmem_' + k, v) for (k, v) in r['locked'].items()
]),
'getnetworkinfo': lambda r: {
- 'network_timeoffset' : r['timeoffset'],
+ 'network_timeoffset': r['timeoffset'],
'network_connections': r['connections'],
},
'gettxoutsetinfo': lambda r: {
- 'utxo_count' : r['txouts'],
- 'utxo_xfers' : r['transactions'],
- 'utxo_size' : r['disk_size'],
- 'utxo_amount' : r['total_amount'],
+ 'utxo_count': r['txouts'],
+ 'utxo_xfers': r['transactions'],
+ 'utxo_size': r['disk_size'],
+ 'utxo_amount': r['total_amount'],
},
}
JSON_RPC_VERSION = '1.1'
+
class Service(UrlService):
def __init__(self, configuration=None, name=None):
UrlService.__init__(self, configuration=configuration, name=name)
diff --git a/collectors/python.d.plugin/example/README.md b/collectors/python.d.plugin/example/README.md
index 699ebe69c..561ea62ed 100644
--- a/collectors/python.d.plugin/example/README.md
+++ b/collectors/python.d.plugin/example/README.md
@@ -1,4 +1,9 @@
-# example
+<!--
+title: "Example"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/example/README.md
+-->
+
+# Example
An example python data collection module.
You can use this example to help you [write a new Python module](../#how-to-write-a-new-module).
diff --git a/collectors/python.d.plugin/example/example.chart.py b/collectors/python.d.plugin/example/example.chart.py
index cc8c18759..61ae47f22 100644
--- a/collectors/python.d.plugin/example/example.chart.py
+++ b/collectors/python.d.plugin/example/example.chart.py
@@ -7,7 +7,6 @@ from random import SystemRandom
from bases.FrameworkServices.SimpleService import SimpleService
-
priority = 90000
ORDER = [
diff --git a/collectors/python.d.plugin/exim/README.md b/collectors/python.d.plugin/exim/README.md
index 985bd6e36..240aa7bed 100644
--- a/collectors/python.d.plugin/exim/README.md
+++ b/collectors/python.d.plugin/exim/README.md
@@ -1,8 +1,33 @@
-# exim
+<!--
+title: "Exim monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/exim/README.md
+sidebar_label: "Exim"
+-->
+
+# Exim monitoring with Netdata
Simple module executing `exim -bpc` to grab exim queue.
This command can take a lot of time to finish its execution thus it is not recommended to run it every second.
+## Requirements
+
+The module uses the `exim` binary, which can only be executed as root by default. We need to allow other users to run the `exim` binary. We solve that by adding the `queue_list_requires_admin` statement to the exim configuration and setting it to `false`, because it is `true` by default. On many Linux distributions, the default location of the `exim` configuration is `/etc/exim.conf`.
+
+1. Edit the `exim` configuration with your preferred editor and add:
+`queue_list_requires_admin = false`
+2. Restart `exim` and Netdata
+
+*WHM (CPanel) server*
+
+On a WHM server, you can reconfigure `exim` over the WHM interface with the following steps.
+
+1. Log in to WHM
+2. Navigate to Service Configuration --> Exim Configuration Manager --> tab Advanced Editor
+3. Scroll down to the button **Add additional configuration setting** and click on it.
+4. In the new dropdown that appears above, find and choose `queue_list_requires_admin` and set it to `false`.
+5. Scroll to the end and click the **Save** button.
+
It produces only one chart:
1. **Exim Queue Emails**
diff --git a/collectors/python.d.plugin/exim/exim.chart.py b/collectors/python.d.plugin/exim/exim.chart.py
index 68b7b5cfb..7238a1bea 100644
--- a/collectors/python.d.plugin/exim/exim.chart.py
+++ b/collectors/python.d.plugin/exim/exim.chart.py
@@ -5,7 +5,6 @@
from bases.FrameworkServices.ExecutableService import ExecutableService
-
EXIM_COMMAND = 'exim -bpc'
ORDER = [
diff --git a/collectors/python.d.plugin/fail2ban/README.md b/collectors/python.d.plugin/fail2ban/README.md
index 1ab0f6f63..c1ad994a5 100644
--- a/collectors/python.d.plugin/fail2ban/README.md
+++ b/collectors/python.d.plugin/fail2ban/README.md
@@ -1,14 +1,28 @@
-# fail2ban
+<!--
+title: "Fail2ban monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/fail2ban/README.md
+sidebar_label: "Fail2ban"
+-->
-Module monitor fail2ban log file to show all bans for all active jails
+# Fail2ban monitoring with Netdata
-**Requirements:**
+Monitors the fail2ban log file to show all bans for all active jails.
+
+## Requirements
- fail2ban.log file MUST BE readable by Netdata (A good idea is to add **create 0640 root netdata** to fail2ban conf at logrotate.d)
It produces one chart with multiple lines (one line per jail)
-## configuration
+## Configuration
+
+Edit the `python.d/fail2ban.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/fail2ban.conf
+```
Sample:
diff --git a/collectors/python.d.plugin/fail2ban/fail2ban.chart.py b/collectors/python.d.plugin/fail2ban/fail2ban.chart.py
index 9f5f2dcc4..99dbf79dd 100644
--- a/collectors/python.d.plugin/fail2ban/fail2ban.chart.py
+++ b/collectors/python.d.plugin/fail2ban/fail2ban.chart.py
@@ -3,15 +3,13 @@
# Author: ilyam8
# SPDX-License-Identifier: GPL-3.0-or-later
-import re
import os
-
+import re
from collections import defaultdict
from glob import glob
from bases.FrameworkServices.LogService import LogService
-
ORDER = [
'jails_bans',
'jails_in_jail',
@@ -25,13 +23,13 @@ def charts(jails):
ch = {
ORDER[0]: {
- 'options': [None, 'Jails Ban Rate', 'bans/s', 'bans', 'jail.bans', 'line'],
- 'lines': []
+ 'options': [None, 'Jails Ban Rate', 'bans/s', 'bans', 'jail.bans', 'line'],
+ 'lines': []
},
ORDER[1]: {
- 'options': [None, 'Banned IPs (since the last restart of netdata)', 'IPs', 'in jail',
- 'jail.in_jail', 'line'],
- 'lines': []
+ 'options': [None, 'Banned IPs (since the last restart of netdata)', 'IPs', 'in jail',
+ 'jail.in_jail', 'line'],
+ 'lines': []
},
}
for jail in jails:
@@ -52,7 +50,7 @@ def charts(jails):
return ch
-RE_JAILS = re.compile(r'\[([a-zA-Z0-9_-]+)\][^\[\]]+?enabled\s+= (true|false)')
+RE_JAILS = re.compile(r'\[([a-zA-Z0-9_-]+)\][^\[\]]+?enabled\s+= +(true|yes|false|no)')
# Example:
# 2018-09-12 11:45:53,715 fail2ban.actions[25029]: WARNING [ssh] Unban 195.201.88.33
@@ -198,9 +196,9 @@ class Service(LogService):
if name in exclude:
continue
- if status == 'true' and name not in active_jails:
+ if status in ('true', 'yes') and name not in active_jails:
active_jails.append(name)
- elif status == 'false' and name in active_jails:
+ elif status in ('false', 'no') and name in active_jails:
active_jails.remove(name)
return active_jails or DEFAULT_JAILS
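A small standalone sketch of what the widened jail regex now accepts; the `jail.local` fragment below is made up for illustration.

```python
import re

RE_JAILS = re.compile(r'\[([a-zA-Z0-9_-]+)\][^\[\]]+?enabled\s+= +(true|yes|false|no)')

# Illustrative jail.local fragment: fail2ban allows yes/no as well as true/false.
conf = """
[sshd]
enabled = yes

[nginx-http-auth]
enabled = false
"""

print(RE_JAILS.findall(conf))  # [('sshd', 'yes'), ('nginx-http-auth', 'false')]
```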
diff --git a/collectors/python.d.plugin/freeradius/README.md b/collectors/python.d.plugin/freeradius/README.md
index 3a2cdf9b4..2993c8952 100644
--- a/collectors/python.d.plugin/freeradius/README.md
+++ b/collectors/python.d.plugin/freeradius/README.md
@@ -1,4 +1,10 @@
-# freeradius
+<!--
+title: "FreeRADIUS monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/freeradius/README.md
+sidebar_label: "FreeRADIUS"
+-->
+
+# FreeRADIUS monitoring with Netdata
Uses the `radclient` command to provide freeradius statistics. It is not recommended to run it every second.
@@ -44,7 +50,15 @@ It produces:
- proxy-acct-malformed-requests
- proxy-acct-unknown-typesa
-## configuration
+## Configuration
+
+Edit the `python.d/freeradius.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/freeradius.conf
+```
Sample:
diff --git a/collectors/python.d.plugin/freeradius/freeradius.chart.py b/collectors/python.d.plugin/freeradius/freeradius.chart.py
index 9022d5e60..161d57e07 100644
--- a/collectors/python.d.plugin/freeradius/freeradius.chart.py
+++ b/collectors/python.d.plugin/freeradius/freeradius.chart.py
@@ -6,8 +6,8 @@
import re
from subprocess import Popen, PIPE
-from bases.collection import find_binary
from bases.FrameworkServices.SimpleService import SimpleService
+from bases.collection import find_binary
update_every = 15
diff --git a/collectors/python.d.plugin/gearman/README.md b/collectors/python.d.plugin/gearman/README.md
index cbb4da3e2..b9fc914bf 100644
--- a/collectors/python.d.plugin/gearman/README.md
+++ b/collectors/python.d.plugin/gearman/README.md
@@ -1,8 +1,12 @@
-# Gearman
+<!--
+title: "Gearman monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/gearman/README.md
+sidebar_label: "Gearman"
+-->
-Module monitors Gearman worker statistics. A chart
-is shown for each job as well as one showing a summary
-of all workers.
+# Gearman monitoring with Netdata
+
+Monitors Gearman worker statistics. A chart is shown for each job as well as one showing a summary of all workers.
Note: Charts may show as a line graph rather than an area
graph if you load Netdata with no jobs running. To change
@@ -20,7 +24,15 @@ It produces:
* Workers idle
* Workers running
-### configuration
+## Configuration
+
+Edit the `python.d/gearman.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/gearman.conf
+```
```yaml
localhost:
@@ -36,4 +48,4 @@ localhost:
When no configuration file is found, module tries to connect to TCP/IP socket: `localhost:4730`.
----
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fgearman%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/gearman/gearman.chart.py b/collectors/python.d.plugin/gearman/gearman.chart.py
index 26f3533c4..5e280a4d8 100644
--- a/collectors/python.d.plugin/gearman/gearman.chart.py
+++ b/collectors/python.d.plugin/gearman/gearman.chart.py
@@ -4,9 +4,9 @@
# Gearman Netdata Plugin
-from bases.FrameworkServices.SocketService import SocketService
from copy import deepcopy
+from bases.FrameworkServices.SocketService import SocketService
CHARTS = {
'total_workers': {
@@ -29,6 +29,7 @@ def job_chart_template(job_name):
]
}
+
def build_result_dict(job):
"""
Get the status for each job
@@ -46,6 +47,7 @@ def build_result_dict(job):
'{0}_running'.format(job['job_name']): running,
}
+
def parse_worker_data(job):
job_name = job[0]
job_metrics = job[1:]
@@ -119,6 +121,7 @@ class Service(SocketService):
Example output returned from
_get_raw_data():
+ prefix generic_worker4 78 78 500
generic_worker2 78 78 500
generic_worker3 0 0 760
generic_worker1 0 0 500
@@ -135,13 +138,24 @@ class Service(SocketService):
self.debug("Gearman returned no data")
raise GearmanReadException()
- job_lines = raw.splitlines()[:-1]
- job_lines = [job.split() for job in sorted(job_lines)]
+ workers = list()
+
+ for line in raw.splitlines()[:-1]:
+ parts = line.split()
+ if not parts:
+ continue
+
+ name = '_'.join(parts[:-3])
+ try:
+ values = [int(w) for w in parts[-3:]]
+ except ValueError:
+ continue
- for line in job_lines:
- line[1:] = map(int, line[1:])
+ w = [name]
+ w.extend(values)
+ workers.append(w)
- return job_lines
+ return workers
def process_jobs(self, active_jobs):
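A minimal sketch of the new per-line parsing, using the sample status line from the docstring above; worker names that contain spaces are collapsed with underscores.

```python
# Status line taken from the docstring example above.
line = 'prefix generic_worker4 78 78 500'

parts = line.split()
name = '_'.join(parts[:-3])              # 'prefix_generic_worker4'
values = [int(w) for w in parts[-3:]]    # [78, 78, 500]
print([name] + values)                   # ['prefix_generic_worker4', 78, 78, 500]
```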
diff --git a/collectors/python.d.plugin/go_expvar/README.md b/collectors/python.d.plugin/go_expvar/README.md
index 7d78fabd0..a73610e7a 100644
--- a/collectors/python.d.plugin/go_expvar/README.md
+++ b/collectors/python.d.plugin/go_expvar/README.md
@@ -1,11 +1,14 @@
-# go_expvar
+<!--
+title: "Go applications monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/go_expvar/README.md
+sidebar_label: "Go applications"
+-->
-The `go_expvar` module can monitor any Go application that exposes its metrics with the use of
-`expvar` package from the Go standard library.
+# Go applications monitoring with Netdata
-`go_expvar` produces charts for Go runtime memory statistics and optionally any number of custom charts.
+Monitors any Go application that exposes its metrics with the use of the `expvar` package from the Go standard library. The module produces charts for Go runtime memory statistics and, optionally, any number of custom charts.
-For the memory statistics, it produces the following charts:
+The `go_expvar` module produces the following charts:
1. **Heap allocations** in kB
@@ -36,7 +39,7 @@ For the memory statistics, it produces the following charts:
- avg: average duration of all GC stop-the-world pauses
-## Monitoring Go Applications
+## Monitoring Go applications
Netdata can be used to monitor running Go applications that expose their metrics with
the use of the [expvar package](https://golang.org/pkg/expvar/) included in Go standard library.
@@ -66,7 +69,7 @@ Sample output:
```json
{
"cmdline": ["./expvar-demo-binary"],
-"memstats": {"Alloc":630856,"TotalAlloc":630856,"Sys":3346432,"Lookups":27, <ommited for brevity>}
+"memstats": {"Alloc":630856,"TotalAlloc":630856,"Sys":3346432,"Lookups":27, <omitted for brevity>}
}
```
@@ -112,9 +115,8 @@ the use of `netdata`s `go_expvar` module.
### Using Netdata go_expvar module
-The `go_expvar` module is disabled by default. To enable it, edit [`python.d.conf`](../python.d.conf)
-(to edit it on your system run `/etc/netdata/edit-config python.d.conf`), and change the `go_expvar`
-variable to `yes`:
+The `go_expvar` module is disabled by default. To enable it, edit `python.d.conf` (to edit it on your system run
+`/etc/netdata/edit-config python.d.conf`), and change the `go_expvar` variable to `yes`:
```
# Enable / Disable python.d.plugin modules
@@ -130,10 +132,9 @@ go_expvar: yes
...
```
-Next, we need to edit the module configuration file (found at [`/etc/netdata/python.d/go_expvar.conf`](go_expvar.conf) by default)
-(to edit it on your system run `/etc/netdata/edit-config python.d/go_expvar.conf`).
-The module configuration consists of jobs, where each job can be used to monitor a separate Go application.
-Let's see a sample job configuration:
+Next, we need to edit the module configuration file (found at `/etc/netdata/python.d/go_expvar.conf` by default) (to
+edit it on your system run `/etc/netdata/edit-config python.d/go_expvar.conf`). The module configuration consists of
+jobs, where each job can be used to monitor a separate Go application. Let's see a sample job configuration:
```
# /etc/netdata/python.d/go_expvar.conf
@@ -208,8 +209,8 @@ See [this issue](https://github.com/netdata/netdata/pull/1902#issuecomment-28449
Please see these two links to the official Netdata documentation for more information about the values:
-- [External plugins - charts](../../plugins.d/#chart)
-- [Chart variables](../#global-variables-order-and-chart)
+- [External plugins - charts](/collectors/plugins.d/README.md#chart)
+- [Chart variables](/collectors/python.d.plugin/README.md#global-variables-order-and-chart)
**Line definitions**
@@ -232,7 +233,7 @@ hidden: False
```
Please see the following link for more information about the options and their default values:
-[External plugins - dimensions](../../plugins.d/#dimension)
+[External plugins - dimensions](/collectors/plugins.d/README.md#dimension)
Apart from top-level expvars, this plugin can also parse expvars stored in a multi-level map;
All dicts in the resulting JSON document are then flattened to one level.
@@ -251,7 +252,28 @@ In the above case, the exported variables will be available under `runtime.gorou
`counters.cnt1` and `counters.cnt2` expvar_keys. If the flattening results in a key collision,
the first defined key wins and all subsequent keys with the same name are ignored.
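For intuition, a rough sketch of the flattening described above — an assumed helper, not the module's actual code; collision handling is omitted.

```python
# Assumed illustration of flattening a multi-level expvar map to dotted keys.
def flatten(d, prefix=''):
    out = {}
    for key, value in d.items():
        full_key = prefix + key
        if isinstance(value, dict):
            out.update(flatten(value, full_key + '.'))
        else:
            out[full_key] = value
    return out

print(flatten({'runtime': {'goroutines': 5}, 'counters': {'cnt1': 1, 'cnt2': 2}}))
# {'runtime.goroutines': 5, 'counters.cnt1': 1, 'counters.cnt2': 2}
```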
-**Configuration example**
+## Enable the collector
+
+The `go_expvar` collector is disabled by default. To enable it, use `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`, to edit the `python.d.conf` file.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d.conf
+```
+
+Change the value of the `go_expvar` setting to `yes`. Save the file and restart the Netdata Agent with `sudo systemctl
+restart netdata`, or the appropriate method for your system, to finish enabling the `go_expvar` collector.
+
+## Configuration
+
+Edit the `python.d/go_expvar.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/go_expvar.conf
+```
The configuration below matches the second Go application described above.
Netdata will monitor and chart memory stats for the application, as well as a custom chart of
diff --git a/collectors/python.d.plugin/go_expvar/go_expvar.chart.py b/collectors/python.d.plugin/go_expvar/go_expvar.chart.py
index e82a87761..f9bbdc164 100644
--- a/collectors/python.d.plugin/go_expvar/go_expvar.chart.py
+++ b/collectors/python.d.plugin/go_expvar/go_expvar.chart.py
@@ -4,13 +4,12 @@
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import division
-import json
+import json
from collections import namedtuple
from bases.FrameworkServices.UrlService import UrlService
-
MEMSTATS_ORDER = [
'memstats_heap',
'memstats_stack',
diff --git a/collectors/python.d.plugin/haproxy/README.md b/collectors/python.d.plugin/haproxy/README.md
index c4bb0447e..33d34f1ad 100644
--- a/collectors/python.d.plugin/haproxy/README.md
+++ b/collectors/python.d.plugin/haproxy/README.md
@@ -1,6 +1,12 @@
-# haproxy
+<!--
+title: "HAProxy monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/haproxy/README.md
+sidebar_label: "HAProxy"
+-->
-Module monitors frontend and backend metrics such as bytes in, bytes out, sessions current, sessions in queue current.
+# HAProxy monitoring with Netdata
+
+Monitors frontend and backend metrics such as bytes in, bytes out, current sessions, and current sessions in queue.
It also monitors health metrics such as backend server status (server check should be used).
Plugin can obtain data from url **OR** unix socket.
@@ -28,7 +34,15 @@ It produces:
- number of failed servers for every backend (in DOWN state)
-## configuration
+## Configuration
+
+Edit the `python.d/haproxy.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/haproxy.conf
+```
Sample:
diff --git a/collectors/python.d.plugin/haproxy/haproxy.chart.py b/collectors/python.d.plugin/haproxy/haproxy.chart.py
index 8df712943..6f94c9a07 100644
--- a/collectors/python.d.plugin/haproxy/haproxy.chart.py
+++ b/collectors/python.d.plugin/haproxy/haproxy.chart.py
@@ -179,7 +179,6 @@ CHARTS = {
}
}
-
METRICS = {
'bin': {'algorithm': 'incremental', 'divisor': 1024},
'bout': {'algorithm': 'incremental', 'divisor': 1024},
@@ -193,7 +192,6 @@ METRICS = {
'hrsp_other': {'algorithm': 'incremental', 'divisor': 1}
}
-
BACKEND_METRICS = {
'qtime': {'algorithm': 'absolute', 'divisor': 1},
'ctime': {'algorithm': 'absolute', 'divisor': 1},
@@ -201,7 +199,6 @@ BACKEND_METRICS = {
'ttime': {'algorithm': 'absolute', 'divisor': 1}
}
-
REGEX = dict(url=re_compile(r'idle = (?P<idle>[0-9]+)'),
socket=re_compile(r'Idle_pct: (?P<idle>[0-9]+)'))
@@ -309,7 +306,7 @@ class Service(UrlService, SocketService):
name, METRICS[metric]['algorithm'], 1,
METRICS[metric]['divisor']])
self.definitions['fhrsp_total']['lines'].append(['_'.join(['frontend', 'hrsp_total', idx]),
- name, 'incremental', 1, 1])
+ name, 'incremental', 1, 1])
for back in self.data['backend']:
name, idx = back['# pxname'], back['# pxname'].replace('.', '_')
for metric in METRICS:
@@ -317,7 +314,7 @@ class Service(UrlService, SocketService):
name, METRICS[metric]['algorithm'], 1,
METRICS[metric]['divisor']])
self.definitions['bhrsp_total']['lines'].append(['_'.join(['backend', 'hrsp_total', idx]),
- name, 'incremental', 1, 1])
+ name, 'incremental', 1, 1])
for metric in BACKEND_METRICS:
self.definitions['b' + metric]['lines'].append(['_'.join(['backend', metric, idx]),
name, BACKEND_METRICS[metric]['algorithm'], 1,
diff --git a/collectors/python.d.plugin/hddtemp/README.md b/collectors/python.d.plugin/hddtemp/README.md
index 03474c893..aaaf21421 100644
--- a/collectors/python.d.plugin/hddtemp/README.md
+++ b/collectors/python.d.plugin/hddtemp/README.md
@@ -1,13 +1,27 @@
-# hddtemp
+<!--
+title: "Hard drive temperature monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/hddtemp/README.md
+sidebar_label: "Hard drive temperature"
+-->
-Module monitors disk temperatures from one or more hddtemp daemons.
+# Hard drive temperature monitoring with Netdata
+
+Monitors disk temperatures from one or more `hddtemp` daemons.
**Requirement:**
Running `hddtemp` in daemonized mode with access on tcp port
It produces one chart **Temperature** with dynamic number of dimensions (one per disk)
-## configuration
+## Configuration
+
+Edit the `python.d/hddtemp.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/hddtemp.conf
+```
Sample:
diff --git a/collectors/python.d.plugin/hddtemp/hddtemp.chart.py b/collectors/python.d.plugin/hddtemp/hddtemp.chart.py
index b5aaaeb39..6427aa180 100644
--- a/collectors/python.d.plugin/hddtemp/hddtemp.chart.py
+++ b/collectors/python.d.plugin/hddtemp/hddtemp.chart.py
@@ -6,12 +6,10 @@
import re
-
from copy import deepcopy
from bases.FrameworkServices.SocketService import SocketService
-
ORDER = [
'temperatures',
]
@@ -30,7 +28,7 @@ class Disk:
def __init__(self, id_, name, temp):
self.id = id_.split('/')[-1]
self.name = name.replace(' ', '_')
- self.temp = temp if temp.isdigit() else 0
+ self.temp = temp if temp.isdigit() else None
def __repr__(self):
return self.id
diff --git a/collectors/python.d.plugin/hpssa/Makefile.inc b/collectors/python.d.plugin/hpssa/Makefile.inc
new file mode 100644
index 000000000..1c04aa49c
--- /dev/null
+++ b/collectors/python.d.plugin/hpssa/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += hpssa/hpssa.chart.py
+dist_pythonconfig_DATA += hpssa/hpssa.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += hpssa/README.md hpssa/Makefile.inc
+
diff --git a/collectors/python.d.plugin/hpssa/README.md b/collectors/python.d.plugin/hpssa/README.md
new file mode 100644
index 000000000..2079ff2ad
--- /dev/null
+++ b/collectors/python.d.plugin/hpssa/README.md
@@ -0,0 +1,61 @@
+<!--
+title: "HP Smart Storage Arrays monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/hpssa/README.md
+sidebar_label: "HP Smart Storage Arrays"
+-->
+
+# HP Smart Storage Arrays monitoring with Netdata
+
+Monitors controller, cache module, logical and physical drive state, and temperature using the `ssacli` tool.
+
+## Requirements
+
+This module uses `ssacli`, which can only be executed by root. It uses
+`sudo` and assumes that it is configured such that the `netdata` user can
+execute `ssacli` as root without a password.
+
+Add to `sudoers`:
+
+```
+netdata ALL=(root) NOPASSWD: /path/to/ssacli
+```
+
+To collect metrics, the module executes: `sudo -n ssacli ctrl all show config detail`
+
+This module produces:
+
+1. Controller state and temperature
+2. Cache module state and temperature
+3. Logical drive state
+4. Physical drive state and temperature
+
+## Enable the collector
+
+The `hpssa` collector is disabled by default. To enable it, use `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`, to edit the `python.d.conf` file.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d.conf
+```
+
+Change the value of the `hpssa` setting to `yes`. Save the file and restart the Netdata Agent with `sudo systemctl
+restart netdata`, or the appropriate method for your system, to finish enabling the `hpssa` collector.
+
+## Configuration
+
+Edit the `python.d/hpssa.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/hpssa.conf
+```
+
+If `ssacli` cannot be found in the `PATH`, configure it in `hpssa.conf`.
+
+```yaml
+ssacli_path: /usr/sbin/ssacli
+```
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fhpssa%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/hpssa/hpssa.chart.py b/collectors/python.d.plugin/hpssa/hpssa.chart.py
new file mode 100644
index 000000000..ce1b43009
--- /dev/null
+++ b/collectors/python.d.plugin/hpssa/hpssa.chart.py
@@ -0,0 +1,395 @@
+# -*- coding: utf-8 -*-
+# Description: hpssa netdata python.d module
+# Author: Peter Gnodde (gnoddep)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import os
+import re
+from copy import deepcopy
+
+from bases.FrameworkServices.ExecutableService import ExecutableService
+from bases.collection import find_binary
+
+disabled_by_default = True
+update_every = 5
+
+ORDER = [
+ 'ctrl_status',
+ 'ctrl_temperature',
+ 'ld_status',
+ 'pd_status',
+ 'pd_temperature',
+]
+
+CHARTS = {
+ 'ctrl_status': {
+ 'options': [
+ None,
+ 'Status 1 is OK, Status 0 is not OK',
+ 'Status',
+ 'Controller',
+ 'hpssa.ctrl_status',
+ 'line'
+ ],
+ 'lines': []
+ },
+ 'ctrl_temperature': {
+ 'options': [
+ None,
+ 'Temperature',
+ 'Celsius',
+ 'Controller',
+ 'hpssa.ctrl_temperature',
+ 'line'
+ ],
+ 'lines': []
+ },
+ 'ld_status': {
+ 'options': [
+ None,
+ 'Status 1 is OK, Status 0 is not OK',
+ 'Status',
+ 'Logical drives',
+ 'hpssa.ld_status',
+ 'line'
+ ],
+ 'lines': []
+ },
+ 'pd_status': {
+ 'options': [
+ None,
+ 'Status 1 is OK, Status 0 is not OK',
+ 'Status',
+ 'Physical drives',
+ 'hpssa.pd_status',
+ 'line'
+ ],
+ 'lines': []
+ },
+ 'pd_temperature': {
+ 'options': [
+ None,
+ 'Temperature',
+ 'Celsius',
+ 'Physical drives',
+ 'hpssa.pd_temperature',
+ 'line'
+ ],
+ 'lines': []
+ }
+}
+
+adapter_regex = re.compile(r'^(?P<adapter_type>.+) in Slot (?P<slot>\d+)')
+ignored_sections_regex = re.compile(
+ r'''
+ ^
+ Physical[ ]Drives
+ | None[ ]attached
+ | (?:Expander|Enclosure|SEP|Port[ ]Name:)[ ].+
+ | .+[ ]at[ ]Port[ ]\S+,[ ]Box[ ]\d+,[ ].+
+ | Mirror[ ]Group[ ]\d+:
+ $
+ ''',
+ re.X
+)
+mirror_group_regex = re.compile(r'^Mirror Group \d+:$')
+array_regex = re.compile(r'^Array: (?P<id>[A-Z]+)$')
+drive_regex = re.compile(
+ r'''
+ ^
+ Logical[ ]Drive:[ ](?P<logical_drive_id>\d+)
+ | physicaldrive[ ](?P<fqn>[^:]+:\d+:\d+)
+ $
+ ''',
+ re.X
+)
+key_value_regex = re.compile(r'^(?P<key>[^:]+): ?(?P<value>.*)$')
+ld_status_regex = re.compile(r'^Status: (?P<status>[^,]+)(?:, (?P<percentage>[0-9.]+)% complete)?$')
+error_match = re.compile(r'Error:')
+
+
+class HPSSAException(Exception):
+ pass
+
+
+class HPSSA(object):
+ def __init__(self, lines):
+ self.lines = [line.strip() for line in lines if line.strip()]
+ self.current_line = 0
+ self.adapters = []
+ self.parse()
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ if self.current_line == len(self.lines):
+ raise StopIteration
+
+ line = self.lines[self.current_line]
+ self.current_line += 1
+
+ return line
+
+ def next(self):
+ """
+ This is for Python 2.7 compatibility
+ """
+ return self.__next__()
+
+ def rewind(self):
+ self.current_line = max(self.current_line - 1, 0)
+
+ @staticmethod
+ def match_any(line, *regexes):
+ return any([regex.match(line) for regex in regexes])
+
+ def parse(self):
+ for line in self:
+ match = adapter_regex.match(line)
+ if match:
+ self.adapters.append(self.parse_adapter(**match.groupdict()))
+
+ def parse_adapter(self, slot, adapter_type):
+ adapter = {
+ 'slot': int(slot),
+ 'type': adapter_type,
+
+ 'controller': {
+ 'status': None,
+ 'temperature': None,
+ },
+ 'cache': {
+ 'present': False,
+ 'status': None,
+ 'temperature': None,
+ },
+ 'battery': {
+ 'status': None,
+ 'count': 0,
+ },
+
+ 'logical_drives': [],
+ 'physical_drives': [],
+ }
+
+ for line in self:
+ if error_match.match(line):
+ raise HPSSAException('Error: {}'.format(line))
+ elif adapter_regex.match(line):
+ self.rewind()
+ break
+ elif array_regex.match(line):
+ self.parse_array(adapter)
+ elif line == 'Unassigned' or line == 'HBA Drives':
+ self.parse_physical_drives(adapter)
+ elif ignored_sections_regex.match(line):
+ self.parse_ignored_section()
+ else:
+ match = key_value_regex.match(line)
+ if match:
+ key, value = match.group('key', 'value')
+ if key == 'Controller Status':
+ adapter['controller']['status'] = value == 'OK'
+ elif key == 'Controller Temperature (C)':
+ adapter['controller']['temperature'] = int(value)
+ elif key == 'Cache Board Present':
+ adapter['cache']['present'] = value == 'True'
+ elif key == 'Cache Status':
+ adapter['cache']['status'] = value == 'OK'
+ elif key == 'Cache Module Temperature (C)':
+ adapter['cache']['temperature'] = int(value)
+ elif key == 'Battery/Capacitor Count':
+ adapter['battery']['count'] = int(value)
+ elif key == 'Battery/Capacitor Status':
+ adapter['battery']['status'] = value == 'OK'
+ else:
+ raise HPSSAException('Cannot parse line: {}'.format(line))
+
+ return adapter
+
+ def parse_array(self, adapter):
+ for line in self:
+ if HPSSA.match_any(line, adapter_regex, array_regex, ignored_sections_regex):
+ self.rewind()
+ break
+
+ match = drive_regex.match(line)
+ if match:
+ data = match.groupdict()
+ if data['logical_drive_id']:
+ self.parse_logical_drive(adapter, int(data['logical_drive_id']))
+ else:
+ self.parse_physical_drive(adapter, data['fqn'])
+ elif not key_value_regex.match(line):
+ self.rewind()
+ break
+
+ def parse_physical_drives(self, adapter):
+ for line in self:
+ match = drive_regex.match(line)
+ if match:
+ self.parse_physical_drive(adapter, match.group('fqn'))
+ else:
+ self.rewind()
+ break
+
+ def parse_logical_drive(self, adapter, logical_drive_id):
+ ld = {
+ 'id': logical_drive_id,
+ 'status': None,
+ 'status_complete': None,
+ }
+
+ for line in self:
+ if mirror_group_regex.match(line):
+ self.parse_ignored_section()
+ continue
+
+ match = ld_status_regex.match(line)
+ if match:
+ ld['status'] = match.group('status') == 'OK'
+
+ if match.group('percentage'):
+ ld['status_complete'] = float(match.group('percentage')) / 100
+ elif HPSSA.match_any(line, adapter_regex, array_regex, drive_regex, ignored_sections_regex) \
+ or not key_value_regex.match(line):
+ self.rewind()
+ break
+
+ adapter['logical_drives'].append(ld)
+
+ def parse_physical_drive(self, adapter, fqn):
+ pd = {
+ 'fqn': fqn,
+ 'status': None,
+ 'temperature': None,
+ }
+
+ for line in self:
+ if HPSSA.match_any(line, adapter_regex, array_regex, drive_regex, ignored_sections_regex):
+ self.rewind()
+ break
+
+ match = key_value_regex.match(line)
+ if match:
+ key, value = match.group('key', 'value')
+ if key == 'Status':
+ pd['status'] = value == 'OK'
+ elif key == 'Current Temperature (C)':
+ pd['temperature'] = int(value)
+ else:
+ self.rewind()
+ break
+
+ adapter['physical_drives'].append(pd)
+
+ def parse_ignored_section(self):
+ for line in self:
+ if HPSSA.match_any(line, adapter_regex, array_regex, drive_regex, ignored_sections_regex) \
+ or not key_value_regex.match(line):
+ self.rewind()
+ break
+
+
+class Service(ExecutableService):
+ def __init__(self, configuration=None, name=None):
+ super(Service, self).__init__(configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = deepcopy(CHARTS)
+ self.ssacli_path = self.configuration.get('ssacli_path', 'ssacli')
+ self.use_sudo = self.configuration.get('use_sudo', True)
+ self.cmd = []
+
+ def get_adapters(self):
+ try:
+ adapters = HPSSA(self._get_raw_data(command=self.cmd)).adapters
+ if not adapters:
+ # If no adapters are returned, run the command again but capture stderr
+ err = self._get_raw_data(command=self.cmd, stderr=True)
+ if err:
+ raise HPSSAException('Error executing cmd {}: {}'.format(' '.join(self.cmd), '\n'.join(err)))
+ return adapters
+ except HPSSAException as ex:
+ self.error(ex)
+ return []
+
+ def check(self):
+ if not os.path.isfile(self.ssacli_path):
+ ssacli_path = find_binary(self.ssacli_path)
+ if ssacli_path:
+ self.ssacli_path = ssacli_path
+ else:
+ self.error('Cannot locate "{}" binary'.format(self.ssacli_path))
+ return False
+
+ if self.use_sudo:
+ sudo = find_binary('sudo')
+ if not sudo:
+ self.error('Cannot locate "{}" binary'.format('sudo'))
+ return False
+
+ allowed = self._get_raw_data(command=[sudo, '-n', '-l', self.ssacli_path])
+ if not allowed or allowed[0].strip() != os.path.realpath(self.ssacli_path):
+ self.error('Not allowed to run sudo for command {}'.format(self.ssacli_path))
+ return False
+
+ self.cmd = [sudo, '-n']
+
+ self.cmd.extend([self.ssacli_path, 'ctrl', 'all', 'show', 'config', 'detail'])
+ self.info('Command: {}'.format(self.cmd))
+
+ adapters = self.get_adapters()
+
+ self.info('Discovered adapters: {}'.format([adapter['type'] for adapter in adapters]))
+ if not adapters:
+ self.error('No adapters discovered')
+ return False
+
+ return True
+
+ def get_data(self):
+ netdata = {}
+
+ for adapter in self.get_adapters():
+ status_key = '{}_status'.format(adapter['slot'])
+ temperature_key = '{}_temperature'.format(adapter['slot'])
+ ld_key = 'ld_{}_'.format(adapter['slot'])
+
+ data = {
+ 'ctrl_status': {
+ 'ctrl_' + status_key: adapter['controller']['status'],
+ 'cache_' + status_key: adapter['cache']['present'] and adapter['cache']['status'],
+ 'battery_' + status_key:
+ adapter['battery']['status'] if adapter['battery']['count'] > 0 else None
+ },
+
+ 'ctrl_temperature': {
+ 'ctrl_' + temperature_key: adapter['controller']['temperature'],
+ 'cache_' + temperature_key: adapter['cache']['temperature'],
+ },
+
+ 'ld_status': {
+ ld_key + '{}_status'.format(ld['id']): ld['status'] for ld in adapter['logical_drives']
+ },
+
+ 'pd_status': {},
+ 'pd_temperature': {},
+ }
+
+ for pd in adapter['physical_drives']:
+ pd_key = 'pd_{}_{}'.format(adapter['slot'], pd['fqn'])
+ data['pd_status'][pd_key + '_status'] = pd['status']
+ data['pd_temperature'][pd_key + '_temperature'] = pd['temperature']
+
+ for chart, dimension_data in data.items():
+ for dimension_id, value in dimension_data.items():
+ if value is None:
+ continue
+
+ if dimension_id not in self.charts[chart]:
+ self.charts[chart].add_dimension([dimension_id])
+
+ netdata[dimension_id] = value
+
+ return netdata
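To make the parser's expected input concrete, here is a tiny, made-up fragment in the shape the regexes above match; real `ssacli ctrl all show config detail` output is far longer.

```python
# Made-up fragment in the shape the parser expects; not real ssacli output.
sample = """
Smart Array P440ar in Slot 0
   Controller Status: OK
   Controller Temperature (C): 42
"""

hpssa = HPSSA(sample.splitlines())
print(hpssa.adapters[0]['controller'])  # {'status': True, 'temperature': 42}
```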
diff --git a/collectors/python.d.plugin/unbound/unbound.conf b/collectors/python.d.plugin/hpssa/hpssa.conf
index 68561366b..cc50c9836 100644
--- a/collectors/python.d.plugin/unbound/unbound.conf
+++ b/collectors/python.d.plugin/hpssa/hpssa.conf
@@ -1,17 +1,9 @@
-# netdata python.d.plugin configuration for unbound
+# netdata python.d.plugin configuration for hpssa
#
# This file is in YaML format. Generally the format is:
#
# name: value
#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
# ----------------------------------------------------------------------
# Global Variables
@@ -20,7 +12,7 @@
# update_every sets the default data collection frequency.
# If unset, the python.d.plugin default is used.
-# update_every: 1
+# update_every: 5
# priority controls the order of charts at the netdata dashboard.
# Lower numbers move the charts towards the top of the page.
@@ -54,32 +46,16 @@
# name: myname # the JOB's name as it will appear at the
# # dashboard (by default is the job_name)
# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
+# update_every: 5 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
-# Additionally to the above, unbound also supports the following:
-#
-# host: localhost # The host to connect to.
-# port: 8953 # WHat port to use (defaults to 8953)
-# socket: /path/to/socket # A path to a UNIX socket to use instead
-# # of a TCP connection
-# tls_key_file: /path/to/key # The key file to use for authentication
-# tls_cert_file: /path/to/key # The certificate to use for authentication
-# extended: false # Whether to collect extended stats or not
-# per_thread: false # Whether to show charts for per-thread stats
+# Additionally to the above, hpssa also supports the following:
#
-# In addition to the above, you can set the following to try and
-# auto-detect most settings based on the unbound configuration:
-#
-# ubconf: /etc/unbound/unbound.conf
-#
-# Note that the SSL key and certificate need to be readable by the user
-# unbound runs as if you're using the regular control interface.
-# If you're using a UNIX socket, that has to be readable by the netdata user.
+# ssacli_path: /usr/sbin/ssacli # The path to the ssacli executable
+# use_sudo: True # Whether to use sudo or not
+# ----------------------------------------------------------------------
-# The following should work for most users if they have unbound configured
-# correctly.
-local:
- ubconf: /etc/unbound/unbound.conf
+# ssacli_path: /usr/sbin/ssacli
+# use_sudo: True
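+#
+# A hypothetical job overriding these defaults might look like the following
+# (the job name and binary path are illustrative only):
+#
+# array:
+#   ssacli_path: /usr/local/sbin/ssacli
+#   use_sudo: True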
diff --git a/collectors/python.d.plugin/httpcheck/README.md b/collectors/python.d.plugin/httpcheck/README.md
index 99b28cfeb..55aad52f0 100644
--- a/collectors/python.d.plugin/httpcheck/README.md
+++ b/collectors/python.d.plugin/httpcheck/README.md
@@ -1,6 +1,12 @@
-# httpcheck
+<!--
+title: "HTTP endpoint monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/httpcheck/README.md
+sidebar_label: "HTTP endpoints"
+-->
-Module monitors remote http server for availability and response time.
+# HTTP endpoint monitoring with Netdata
+
+Monitors a remote HTTP server for availability and response time.
Following charts are drawn per job:
@@ -17,7 +23,15 @@ Following charts are drawn per job:
- Connection failed: port not listening or blocked
- Connection timed out: host or port unreachable
-## configuration
+## Configuration
+
+Edit the `python.d/httpcheck.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/httpcheck.conf
+```
Sample configuration and their default values.
@@ -32,7 +46,7 @@ server:
redirect: yes # optional
```
-### notes
+### Notes
- The status chart is primarily intended for alarms, badges or for access via API.
- A system/service/firewall might block Netdata's access if a portscan or
diff --git a/collectors/python.d.plugin/httpcheck/httpcheck.chart.py b/collectors/python.d.plugin/httpcheck/httpcheck.chart.py
index fd51370da..75718bb60 100644
--- a/collectors/python.d.plugin/httpcheck/httpcheck.chart.py
+++ b/collectors/python.d.plugin/httpcheck/httpcheck.chart.py
@@ -3,9 +3,10 @@
# Original Author: ccremer (github.com/ccremer)
# SPDX-License-Identifier: GPL-3.0-or-later
-import urllib3
import re
+import urllib3
+
try:
from time import monotonic as time
except ImportError:
diff --git a/collectors/python.d.plugin/httpcheck/httpcheck.conf b/collectors/python.d.plugin/httpcheck/httpcheck.conf
index 1e1dd0205..95adba270 100644
--- a/collectors/python.d.plugin/httpcheck/httpcheck.conf
+++ b/collectors/python.d.plugin/httpcheck/httpcheck.conf
@@ -70,9 +70,12 @@ chart_cleanup: 0
# url: 'http[s]://host-ip-or-dns[:port][path]'
# # [required] the remote host url to connect to. If [:port] is missing, it defaults to 80
# # for HTTP and 443 for HTTPS. [path] is optional too, defaults to /
+# header: {'Content-Type': 'application/json'}
+# # [optional] the HTTP header sent with the request.
# method: GET # [optional] the HTTP request method (POST, PUT, DELETE, HEAD etc.)
# redirect: yes # [optional] If the remote host returns 3xx status codes, the redirection url will be
# # followed (default).
+# body: {'key': 'value'} # [optional] the body sent with the request (e.g. POST, PUT, PATCH).
# status_accepted: # [optional] By default, 200 is accepted. Anything else will result in 'bad status' in the
# # status chart, however: The response time will still be > 0, since the
# # host responded with something.
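+#
+# A hypothetical job using the new `header` and `body` options might look like
+# the following (job name, URL and payload are illustrative only):
+#
+# json_api:
+#   url: 'http://127.0.0.1:8080/api/health'
+#   method: POST
+#   header: {'Content-Type': 'application/json'}
+#   body: {'key': 'value'}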
diff --git a/collectors/python.d.plugin/icecast/README.md b/collectors/python.d.plugin/icecast/README.md
index eabfee0a6..90cdaa5dc 100644
--- a/collectors/python.d.plugin/icecast/README.md
+++ b/collectors/python.d.plugin/icecast/README.md
@@ -1,8 +1,14 @@
-# icecast
+<!--
+title: "Icecast monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/icecast/README.md
+sidebar_label: "Icecast"
+-->
-This module will monitor number of listeners for active sources.
+# Icecast monitoring with Netdata
-**Requirements:**
+Monitors the number of listeners for active sources.
+
+## Requirements
- icecast version >= 2.4.0
@@ -12,7 +18,15 @@ It produces the following charts:
- source number
-## configuration
+## Configuration
+
+Edit the `python.d/icecast.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/icecast.conf
+```
Needs only `url` to server's `/status-json.xsl`
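+
+A minimal job might therefore look like the following (host and port are
+illustrative; 8000 is the usual Icecast default):
+
+```yaml
+local:
+  url: 'http://localhost:8000/status-json.xsl'
+```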
diff --git a/collectors/python.d.plugin/icecast/icecast.chart.py b/collectors/python.d.plugin/icecast/icecast.chart.py
index e56e506e3..a967d1779 100644
--- a/collectors/python.d.plugin/icecast/icecast.chart.py
+++ b/collectors/python.d.plugin/icecast/icecast.chart.py
@@ -7,7 +7,6 @@ import json
from bases.FrameworkServices.UrlService import UrlService
-
ORDER = [
'listeners',
]
diff --git a/collectors/python.d.plugin/ipfs/README.md b/collectors/python.d.plugin/ipfs/README.md
index 639631501..4d3b0ecbe 100644
--- a/collectors/python.d.plugin/ipfs/README.md
+++ b/collectors/python.d.plugin/ipfs/README.md
@@ -1,26 +1,49 @@
-# ipfs
+<!--
+title: "IPFS monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/ipfs/README.md
+sidebar_label: "IPFS"
+-->
-Module monitors [IPFS](https://ipfs.io) basic information.
+# IPFS monitoring with Netdata
-1. **Bandwidth** in kbits/s
+Collects [`IPFS`](https://ipfs.io) basic information such as file system bandwidth, peers, and repo metrics.
- - in
- - out
+## Charts
-2. **Peers**
+It produces the following charts:
- - peers
+- Bandwidth in `kilobits/s`
+- Peers in `peers`
+- Repo Size in `GiB`
+- Repo Objects in `objects`
-## configuration
+## Configuration
-Only url to IPFS server is needed.
+Edit the `python.d/ipfs.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
-Sample:
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/ipfs.conf
+```
+
+---
+
+Calls to the following endpoints are disabled due to `IPFS` bugs:
+
+- `/api/v0/stats/repo` (https://github.com/ipfs/go-ipfs/issues/3874)
+- `/api/v0/pin/ls` (https://github.com/ipfs/go-ipfs/issues/7528)
+
+They can be enabled in the collector configuration file, as sketched below.
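+
+A hypothetical job with both endpoints turned back on might look like this (a
+sketch only; the bugs linked above are the reason they default to off):
+
+```yaml
+localhost:
+  url: 'http://localhost:5001'
+  repoapi: yes
+  pinapi: yes
+```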
+
+The configuration needs only the `url` of the `IPFS` server; here is an example for two `IPFS` instances:
```yaml
localhost:
- name : 'local'
- url : 'http://localhost:5001'
+ url: 'http://localhost:5001'
+
+remote:
+ url: 'http://203.0.113.10:5001'
```
---
diff --git a/collectors/python.d.plugin/ipfs/ipfs.chart.py b/collectors/python.d.plugin/ipfs/ipfs.chart.py
index 8c89b4be1..abfc9c492 100644
--- a/collectors/python.d.plugin/ipfs/ipfs.chart.py
+++ b/collectors/python.d.plugin/ipfs/ipfs.chart.py
@@ -7,7 +7,6 @@ import json
from bases.FrameworkServices.UrlService import UrlService
-
ORDER = [
'bandwidth',
'peers',
@@ -64,7 +63,9 @@ class Service(UrlService):
self.order = ORDER
self.definitions = CHARTS
self.baseurl = self.configuration.get('url', 'http://localhost:5001')
+ self.method = "POST"
self.do_pinapi = self.configuration.get('pinapi')
+ self.do_repoapi = self.configuration.get('repoapi')
self.__storage_max = None
def _get_json(self, sub_url):
@@ -89,7 +90,7 @@ class Service(UrlService):
if store_max.endswith('b'):
val, units = store_max[:-2], store_max[-2]
if units in SI_zeroes:
- val += '0'*SI_zeroes[units]
+ val += '0' * SI_zeroes[units]
store_max = val
try:
store_max = int(store_max)
@@ -110,17 +111,33 @@ class Service(UrlService):
# suburl : List of (result-key, original-key, transform-func)
cfg = {
'/api/v0/stats/bw':
- [('in', 'RateIn', int), ('out', 'RateOut', int)],
+ [
+ ('in', 'RateIn', int),
+ ('out', 'RateOut', int),
+ ],
'/api/v0/swarm/peers':
- [('peers', 'Peers', len)],
- '/api/v0/stats/repo':
- [('size', 'RepoSize', int), ('objects', 'NumObjects', int), ('avail', 'StorageMax', self._storagemax)],
+ [
+ ('peers', 'Peers', len),
+ ],
}
+ if self.do_repoapi:
+ cfg.update({
+ '/api/v0/stats/repo':
+ [
+ ('size', 'RepoSize', int),
+ ('objects', 'NumObjects', int),
+ ('avail', 'StorageMax', self._storagemax),
+ ],
+ })
+
if self.do_pinapi:
- cfg.update({
- '/api/v0/pin/ls':
- [('pinned', 'Keys', len), ('recursive_pins', 'Keys', self._recursive_pins)]
- })
+ cfg.update({
+ '/api/v0/pin/ls':
+ [
+ ('pinned', 'Keys', len),
+ ('recursive_pins', 'Keys', self._recursive_pins),
+ ]
+ })
r = dict()
for suburl in cfg:
in_json = self._get_json(suburl)
diff --git a/collectors/python.d.plugin/ipfs/ipfs.conf b/collectors/python.d.plugin/ipfs/ipfs.conf
index c7e186487..8b167b399 100644
--- a/collectors/python.d.plugin/ipfs/ipfs.conf
+++ b/collectors/python.d.plugin/ipfs/ipfs.conf
@@ -62,6 +62,10 @@
# Additionally to the above, ipfs also supports the following:
#
# url: 'URL' # URL to the IPFS API
+# repoapi: no # Collect repo metrics
+# # Currently defaults to disabled due to IPFS Bug
+# # https://github.com/ipfs/go-ipfs/issues/7528
+# # resulting in very high CPU Usage
# pinapi: no # Set status of IPFS pinned object polling
# # Currently defaults to disabled due to IPFS Bug
# # https://github.com/ipfs/go-ipfs/issues/3874
@@ -72,6 +76,7 @@
# only one of them will run (they have the same name)
localhost:
- name : 'local'
- url : 'http://localhost:5001'
- pinapi : no
+ name: 'local'
+ url: 'http://localhost:5001'
+ repoapi: no
+ pinapi: no
diff --git a/collectors/python.d.plugin/isc_dhcpd/README.md b/collectors/python.d.plugin/isc_dhcpd/README.md
index f90cd041e..5830bd63e 100644
--- a/collectors/python.d.plugin/isc_dhcpd/README.md
+++ b/collectors/python.d.plugin/isc_dhcpd/README.md
@@ -1,11 +1,18 @@
-# isc_dhcpd
+<!--
+title: "ISC DHCP monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/isc_dhcpd/README.md
+sidebar_label: "ISC DHCP"
+-->
-Module monitor leases database to show all active leases for given pools.
+# ISC DHCP monitoring with Netdata
-**Requirements:**
+Monitors the leases database to show all active leases for given pools.
+
+## Requirements
- dhcpd leases file MUST BE readable by Netdata
- pools MUST BE in CIDR format
+- the `python-ipaddress` package is needed when using Python 2
It produces:
@@ -21,17 +28,28 @@ It produces:
- leases (number of active leases in pool)
-## configuration
+## Configuration
+
+Edit the `python.d/isc_dhcpd.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/isc_dhcpd.conf
+```
Sample:
```yaml
local:
- leases_path : '/var/lib/dhcp/dhcpd.leases'
- pools : '192.168.3.0/24 192.168.4.0/24 192.168.5.0/24'
+ leases_path: '/var/lib/dhcp/dhcpd.leases'
+ pools:
+ office: '192.168.2.0/24' # name(dimension): pool in CIDR format
+ wifi: '192.168.3.10-192.168.3.20' # name(dimension): pool in IP Range format
+ 192.168.4.0/24: '192.168.4.0/24' # name(dimension): pool in CIDR format
+ wifi-guest: '192.168.5.0/24 192.168.6.10-192.168.6.20' # name(dimension): pool in CIDR + IP Range format
```
-In case of python2 you need to install `py2-ipaddress` to make plugin work.
 The module will not work if no configuration is given.
---
diff --git a/collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.chart.py b/collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.chart.py
index a29439251..099c7d4e9 100644
--- a/collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.chart.py
+++ b/collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.chart.py
@@ -7,9 +7,9 @@ import os
import re
import time
-
try:
import ipaddress
+
HAVE_IP_ADDRESS = True
except ImportError:
HAVE_IP_ADDRESS = False
@@ -19,7 +19,6 @@ from copy import deepcopy
from bases.FrameworkServices.SimpleService import SimpleService
-
ORDER = [
'pools_utilization',
'pools_active_leases',
@@ -46,6 +45,19 @@ CHARTS = {
}
}
+POOL_CIDR = "CIDR"
+POOL_IP_RANGE = "IP_RANGE"
+POOL_UNKNOWN = "UNKNOWN"
+
+def detect_ip_type(ip):
+ ip_type = ip.split("-")
+ if len(ip_type) == 1:
+ return POOL_CIDR
+ elif len(ip_type) == 2:
+ return POOL_IP_RANGE
+ else:
+ return POOL_UNKNOWN
+
class DhcpdLeasesFile:
def __init__(self, path):
@@ -87,6 +99,32 @@ class Pool:
def __init__(self, name, network):
self.id = re.sub(r'[:/.-]+', '_', name)
self.name = name
+
+ self.networks = list()
+ for network in network.split(" "):
+ if not network:
+ continue
+
+ ip_type = detect_ip_type(ip=network)
+ if ip_type == POOL_CIDR:
+ self.networks.append(PoolCIDR(network=network))
+ elif ip_type == POOL_IP_RANGE:
+ self.networks.append(PoolIPRange(ip_range=network))
+ else:
+ raise ValueError('Network ({0}) incorrect syntax, expect CIDR or IPRange format.'.format(network))
+
+ def num_hosts(self):
+ return sum([network.num_hosts() for network in self.networks])
+
+ def __contains__(self, item):
+ for network in self.networks:
+ if item in network:
+ return True
+ return False
+
+
+class PoolCIDR:
+ def __init__(self, network):
self.network = ipaddress.ip_network(address=u'%s' % network)
def num_hosts(self):
@@ -96,6 +134,30 @@ class Pool:
return item.address in self.network
+class PoolIPRange:
+ def __init__(self, ip_range):
+ ip_range = ip_range.split("-")
+ self.networks = list(self._summarize_address_range(ip_range[0], ip_range[1]))
+
+ @staticmethod
+ def ip_address(ip):
+ return ipaddress.ip_address(u'%s' % ip)
+
+ def _summarize_address_range(self, first, last):
+ address_first = self.ip_address(first)
+ address_last = self.ip_address(last)
+ return ipaddress.summarize_address_range(address_first, address_last)
+
+ def num_hosts(self):
+ return sum([network.num_addresses for network in self.networks])
+
+ def __contains__(self, item):
+ for network in self.networks:
+ if item.address in network:
+ return True
+ return False
+
+
class Lease:
def __init__(self, address, ends, state):
self.address = ipaddress.ip_address(address=u'%s' % address)
diff --git a/collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.conf b/collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.conf
index 8dcb5082f..c700947b4 100644
--- a/collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.conf
+++ b/collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.conf
@@ -63,9 +63,10 @@
#
# leases_path: 'PATH' # the path to dhcpd.leases file
# pools:
-# office: '192.168.2.0/24' # name(dimension): pool in CIDR format
-# wifi: '192.168.3.0/24' # name(dimension): pool in CIDR format
-# 192.168.4.0/24: '192.168.4.0/24' # name(dimension): pool in CIDR format
+# office: '192.168.2.0/24' # name(dimension): pool in CIDR format
+# wifi: '192.168.3.10-192.168.3.20' # name(dimension): pool in IP Range format
+# 192.168.4.0/24: '192.168.4.0/24' # name(dimension): pool in CIDR format
+# wifi-guest: '192.168.5.0/24 192.168.6.10-192.168.6.20' # name(dimension): pool in CIDR + IP Range format
#
#-----------------------------------------------------------------------
# IMPORTANT notes
diff --git a/collectors/python.d.plugin/litespeed/README.md b/collectors/python.d.plugin/litespeed/README.md
index 586973bf0..2225773b7 100644
--- a/collectors/python.d.plugin/litespeed/README.md
+++ b/collectors/python.d.plugin/litespeed/README.md
@@ -1,6 +1,12 @@
-# litespeed
+<!--
+title: "LiteSpeed monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/litespeed/README.md
+sidebar_label: "LiteSpeed"
+-->
-Module monitor litespeed web server performance metrics.
+# LiteSpeed monitoring with Netdata
+
+Collects web server performance metrics for network, connections, requests, and cache.
It produces:
@@ -44,7 +50,15 @@ It produces:
- hits
-## configuration
+## Configuration
+
+Edit the `python.d/litespeed.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/litespeed.conf
+```
```yaml
local:
diff --git a/collectors/python.d.plugin/litespeed/litespeed.chart.py b/collectors/python.d.plugin/litespeed/litespeed.chart.py
index 4b67ffb8a..7ef8189ea 100644
--- a/collectors/python.d.plugin/litespeed/litespeed.chart.py
+++ b/collectors/python.d.plugin/litespeed/litespeed.chart.py
@@ -1,30 +1,28 @@
# -*- coding: utf-8 -*-
# Description: litespeed netdata python.d module
-# Author: Ilya Maschenko (ilyam8)
+# Author: Ilya Mashchenko (ilyam8)
# SPDX-License-Identifier: GPL-3.0-or-later
import glob
-import re
import os
-
+import re
from collections import namedtuple
from bases.FrameworkServices.SimpleService import SimpleService
-
update_every = 10
# charts order (can be overridden if you want less charts, or different order)
ORDER = [
- 'net_throughput_http', # net throughput
+ 'net_throughput_http', # net throughput
'net_throughput_https', # net throughput
- 'connections_http', # connections
- 'connections_https', # connections
- 'requests', # requests
- 'requests_processing', # requests
- 'pub_cache_hits', # cache
- 'private_cache_hits', # cache
- 'static_hits', # static
+ 'connections_http', # connections
+ 'connections_https', # connections
+ 'requests', # requests
+ 'requests_processing', # requests
+ 'pub_cache_hits', # cache
+ 'private_cache_hits', # cache
+ 'static_hits', # static
]
CHARTS = {
@@ -178,7 +176,7 @@ class Service(SimpleService):
def parse_file(data, lines):
for line in lines:
- if not line.startswith(('BPS_IN:', 'MAXCONN:', 'REQ_RATE []:')):
+ if not line.startswith(('BPS_IN:', 'MAXCONN:', 'PLAINCONN:', 'REQ_RATE []:')):
continue
m = dict(RE.findall(line))
for v in T:
diff --git a/collectors/python.d.plugin/logind/README.md b/collectors/python.d.plugin/logind/README.md
index 5aa1fa627..3e2d4c190 100644
--- a/collectors/python.d.plugin/logind/README.md
+++ b/collectors/python.d.plugin/logind/README.md
@@ -1,6 +1,12 @@
-# logind
+<!--
+title: "systemd-logind monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/logind/README.md
+sidebar_label: "systemd-logind"
+-->
-This module monitors active sessions, users, and seats tracked by systemd-logind or elogind.
+# systemd-logind monitoring with Netdata
+
+Monitors active sessions, users, and seats tracked by `systemd-logind` or `elogind`.
It provides the following charts:
@@ -20,9 +26,22 @@ It provides the following charts:
- Seats
-## configuration
+## Enable the collector
+
+The `logind` collector is disabled by default. To enable it, use `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`, to edit the `python.d.conf` file.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d.conf
+```
+
+Change the value of the `logind` setting to `yes`. Save the file and restart the Netdata Agent with `sudo systemctl
+restart netdata`, or the appropriate method for your system, to finish enabling the `logind` collector.
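+
+After the edit, the relevant `python.d.conf` fragment is just the module toggle:
+
+```yaml
+logind: yes
+```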
-This module needs no configuration. Just make sure the `netdata` user
+## Configuration
+
+This module needs no configuration. Just make sure the `netdata` user
can run the `loginctl` command and get a session list without having to
specify a path.
@@ -35,7 +54,15 @@ specify it using the `command` key like so:
command: '/path/to/other/command'
```
-## notes
+Edit the `python.d/logind.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/logind.conf
+```
+
+## Notes
- This module's ability to track logins is dependent on what PAM services
are configured to register sessions with logind. In particular, for
diff --git a/collectors/python.d.plugin/megacli/README.md b/collectors/python.d.plugin/megacli/README.md
index afc8cbda6..400a45973 100644
--- a/collectors/python.d.plugin/megacli/README.md
+++ b/collectors/python.d.plugin/megacli/README.md
@@ -1,12 +1,25 @@
-# megacli
+<!--
+title: "MegaRAID controller monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/megacli/README.md
+sidebar_label: "MegaRAID controllers"
+-->
-Module collects adapter, physical drives and battery stats.
+# MegaRAID controller monitoring with Netdata
-**Requirements:**
+Collects adapter, physical drives and battery stats.
+
+## Requirements
+
+Uses the `megacli` program, which can only be executed by root. It uses
+`sudo` and assumes that it is configured such that the `netdata` user can
+execute `megacli` as root without a password.
+
+Add to `sudoers`:
+
+```
+netdata ALL=(root) NOPASSWD: /path/to/megacli
+```
-- `megacli` program
-- `sudo` program
-- `netdata` user needs to be able to be able to sudo the `megacli` program without password
To grab stats it executes:
@@ -25,19 +38,9 @@ It produces:
5. **Battery Cycle Count**
-## prerequisite
-This module uses `megacli` which can only be executed by root. It uses
-`sudo` and assumes that it is configured such that the `netdata` user can
-execute `megacli` as root without password.
-
-Add to `sudoers`:
-
-```
-netdata ALL=(root) NOPASSWD: /path/to/megacli
-```
-### configuration
+## Configuration
**megacli** is disabled by default. Should be explicitly enabled in `python.d.conf`.
@@ -45,7 +48,15 @@ netdata ALL=(root) NOPASSWD: /path/to/megacli
megacli: yes
```
-Battery stats disabled by default. To enable them modify `megacli.conf`.
+Edit the `python.d/megacli.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/megacli.conf
+```
+
+Battery stats are disabled by default. To enable them, modify `megacli.conf`:
```yaml
do_battery: yes
diff --git a/collectors/python.d.plugin/megacli/megacli.chart.py b/collectors/python.d.plugin/megacli/megacli.chart.py
index 4872eab80..ef35ff63f 100644
--- a/collectors/python.d.plugin/megacli/megacli.chart.py
+++ b/collectors/python.d.plugin/megacli/megacli.chart.py
@@ -9,7 +9,6 @@ import re
from bases.FrameworkServices.ExecutableService import ExecutableService
from bases.collection import find_binary
-
disabled_by_default = True
update_every = 5
@@ -27,7 +26,7 @@ def adapter_charts(ads):
'adapter_degraded': {
'options': [None, 'Adapter State', 'is degraded', 'adapter', 'megacli.adapter_degraded', 'line'],
'lines': dims(ads)
- },
+ },
}
return order, charts
@@ -111,7 +110,7 @@ def find_adapters(d):
def find_pds(d):
- keys = ('Slot Number', 'Media Error Count', 'Predictive Failure Count')
+ keys = ('Slot Number', 'Media Error Count', 'Predictive Failure Count')
d = ' '.join(v.strip() for v in d if v.startswith(keys))
return [PD(*v) for v in RE_VD.findall(d)]
diff --git a/collectors/python.d.plugin/memcached/README.md b/collectors/python.d.plugin/memcached/README.md
index 169a5f7bf..abd93fd01 100644
--- a/collectors/python.d.plugin/memcached/README.md
+++ b/collectors/python.d.plugin/memcached/README.md
@@ -1,6 +1,13 @@
-# memcached
+<!--
+title: "Memcached monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/memcached/README.md
+sidebar_label: "Memcached"
+-->
+
+# Memcached monitoring with Netdata
+
+Collects memory-caching system performance metrics. It reads the server's response to the `stats` command ([stats interface](https://github.com/memcached/memcached/wiki/Commands#stats)).
-Memcached monitoring module. Data grabbed from [stats interface](https://github.com/memcached/memcached/wiki/Commands#stats).
1. **Network** in kilobytes/s
@@ -66,7 +73,15 @@ Memcached monitoring module. Data grabbed from [stats interface](https://github.
- rate
-## configuration
+## Configuration
+
+Edit the `python.d/memcached.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/memcached.conf
+```
Sample:
diff --git a/collectors/python.d.plugin/memcached/memcached.chart.py b/collectors/python.d.plugin/memcached/memcached.chart.py
index 9803dbb09..bb656a2d6 100644
--- a/collectors/python.d.plugin/memcached/memcached.chart.py
+++ b/collectors/python.d.plugin/memcached/memcached.chart.py
@@ -5,7 +5,6 @@
from bases.FrameworkServices.SocketService import SocketService
-
ORDER = [
'cache',
'net',
diff --git a/collectors/python.d.plugin/mongodb/README.md b/collectors/python.d.plugin/mongodb/README.md
index fd694c1e5..c0df123d7 100644
--- a/collectors/python.d.plugin/mongodb/README.md
+++ b/collectors/python.d.plugin/mongodb/README.md
@@ -1,8 +1,14 @@
-# mongodb
+<!--
+title: "MongoDB monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/mongodb/README.md
+sidebar_label: "MongoDB"
+-->
-Module monitor mongodb performance and health metrics
+# MongoDB monitoring with Netdata
-**Requirements:**
+Monitors performance and health metrics of MongoDB.
+
+## Requirements
- `python-pymongo` package v2.4+.
@@ -74,7 +80,7 @@ Number of charts depends on mongodb version, storage engine and other features (
13. **Cache metrics** (WiredTiger):
- percentage of bytes currently in the cache (amount of space taken by cached data)
- - percantage of tracked dirty bytes in the cache (amount of space taken by dirty data)
+ - percentage of tracked dirty bytes in the cache (amount of space taken by dirty data)
14. **Pages evicted from cache** (WiredTiger):
@@ -175,7 +181,15 @@ db.createUser({
})
```
-### configuration
+## Configuration
+
+Edit the `python.d/mongodb.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/mongodb.conf
+```
Sample:
diff --git a/collectors/python.d.plugin/mongodb/mongodb.chart.py b/collectors/python.d.plugin/mongodb/mongodb.chart.py
index 0dbe82ff9..2e6fb220a 100644
--- a/collectors/python.d.plugin/mongodb/mongodb.chart.py
+++ b/collectors/python.d.plugin/mongodb/mongodb.chart.py
@@ -12,13 +12,13 @@ from sys import exc_info
try:
from pymongo import MongoClient, ASCENDING, DESCENDING
from pymongo.errors import PyMongoError
+
PYMONGO = True
except ImportError:
PYMONGO = False
from bases.FrameworkServices.SimpleService import SimpleService
-
REPL_SET_STATES = [
('1', 'primary'),
('8', 'down'),
diff --git a/collectors/python.d.plugin/monit/README.md b/collectors/python.d.plugin/monit/README.md
index a54b9d67f..fe1389687 100644
--- a/collectors/python.d.plugin/monit/README.md
+++ b/collectors/python.d.plugin/monit/README.md
@@ -1,4 +1,10 @@
-# monit
+<!--
+title: "Monit monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/monit/README.md
+sidebar_label: "Monit"
+-->
+
+# Monit monitoring with Netdata
Monit monitoring module. Data is grabbed from stats XML interface (exists for a long time, but not mentioned in official documentation). Mostly this plugin shows statuses of monit targets, i.e. [statuses of specified checks](https://mmonit.com/monit/documentation/monit.html#Service-checks).
@@ -19,7 +25,15 @@ Monit monitoring module. Data is grabbed from stats XML interface (exists for a
- Hosts (+latency)
- Network interfaces
-## configuration
+## Configuration
+
+Edit the `python.d/monit.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/monit.conf
+```
Sample:
diff --git a/collectors/python.d.plugin/monit/monit.chart.py b/collectors/python.d.plugin/monit/monit.chart.py
index 9f3270572..bfc182349 100644
--- a/collectors/python.d.plugin/monit/monit.chart.py
+++ b/collectors/python.d.plugin/monit/monit.chart.py
@@ -4,12 +4,10 @@
# SPDX-License-Identifier: GPL-3.0-or-later
import xml.etree.ElementTree as ET
-
from collections import namedtuple
from bases.FrameworkServices.UrlService import UrlService
-
MonitType = namedtuple('MonitType', ('index', 'name'))
# see enum Service_Type from monit.h (https://bitbucket.org/tildeslash/monit/src/master/src/monit.h)
@@ -122,7 +120,7 @@ CHARTS = {
class BaseMonitService(object):
- def __init__(self, typ, name, status, monitor):
+ def __init__(self, typ, name, status, monitor):
self.type = typ
self.name = name
self.status = status
@@ -153,12 +151,21 @@ class BaseMonitService(object):
class ProcessMonitService(BaseMonitService):
- def __init__(self, typ, name, status, monitor):
+ def __init__(self, typ, name, status, monitor):
super(ProcessMonitService, self).__init__(typ, name, status, monitor)
self.uptime = None
self.threads = None
self.children = None
+ def __eq__(self, other):
+ return super(ProcessMonitService, self).__eq__(other)
+
+ def __ne__(self, other):
+ return super(ProcessMonitService, self).__ne__(other)
+
+ def __hash__(self):
+ return super(ProcessMonitService, self).__hash__()
+
def uptime_key(self):
return 'process_uptime_{0}'.format(self.name)
@@ -183,16 +190,25 @@ class ProcessMonitService(BaseMonitService):
class HostMonitService(BaseMonitService):
- def __init__(self, typ, name, status, monitor):
+ def __init__(self, typ, name, status, monitor):
super(HostMonitService, self).__init__(typ, name, status, monitor)
self.latency = None
+ def __eq__(self, other):
+ return super(HostMonitService, self).__eq__(other)
+
+ def __ne__(self, other):
+ return super(HostMonitService, self).__ne__(other)
+
+ def __hash__(self):
+ return super(HostMonitService, self).__hash__()
+
def latency_key(self):
return 'host_latency_{0}'.format(self.name)
def data(self):
base_data = super(HostMonitService, self).data()
- latency = float(self.latency) * 1000000 if self.latency else None
+ latency = float(self.latency) * 1000000 if self.latency else None
data = {self.latency_key(): latency}
data.update(base_data)
diff --git a/collectors/python.d.plugin/mysql/README.md b/collectors/python.d.plugin/mysql/README.md
index 7dca8f406..d8d3c1d0b 100644
--- a/collectors/python.d.plugin/mysql/README.md
+++ b/collectors/python.d.plugin/mysql/README.md
@@ -1,8 +1,14 @@
-# mysql
+<!--
+title: "MySQL monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/mysql/README.md
+sidebar_label: "MySQL"
+-->
-Module monitors one or more mysql servers
+# MySQL monitoring with Netdata
-**Requirements:**
+Monitors one or more MySQL servers.
+
+## Requirements
- python library [MySQLdb](https://github.com/PyMySQL/mysqlclient-python) (faster) or [PyMySQL](https://github.com/PyMySQL/PyMySQL) (slower)
- `netdata` local user to connect to the MySQL server.
@@ -39,7 +45,7 @@ This module will produce following charts (if data is available):
- cache hits
- replace
-4. **Handlerse** in handlers/s
+4. **Handlers** in handlers/s
- commit
- delete
@@ -61,7 +67,7 @@ This module will produce following charts (if data is available):
- immediate
- waited
-6. **Table Select Join Issuess** in joins/s
+6. **Table Select Join Issues** in joins/s
- full join
- full range join
@@ -69,7 +75,7 @@ This module will produce following charts (if data is available):
- range check
- scan
-7. **Table Sort Issuess** in joins/s
+7. **Table Sort Issues** in joins/s
- merge passes
- range
@@ -158,7 +164,7 @@ This module will produce following charts (if data is available):
- updated
- deleted
-24. **InnoDB Buffer Pool Pagess** in pages
+24. **InnoDB Buffer Pool Pages** in pages
- data
- dirty
@@ -328,7 +334,15 @@ This module will produce following charts (if data is available):
- update
- other
-## configuration
+## Configuration
+
+Edit the `python.d/mysql.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/mysql.conf
+```
You can provide, per server, the following:
@@ -371,7 +385,7 @@ remote:
```
If no configuration is given, the module will attempt to connect to MySQL server via a unix socket at
-`/var/run/mysqld/mysqld.sock` without password and with username `root`.
+`/var/run/mysqld/mysqld.sock` without a password, using the username `root` or `netdata` (the `netdata` user should have been granted permissions as described in the Requirements section of this document).
`userstats` graph works only if you enable the plugin in MariaDB server and set proper MySQL privileges (SUPER or
PROCESS). For more details, please check the [MariaDB User Statistics
diff --git a/collectors/python.d.plugin/mysql/mysql.chart.py b/collectors/python.d.plugin/mysql/mysql.chart.py
index f37315479..1737e16b4 100644
--- a/collectors/python.d.plugin/mysql/mysql.chart.py
+++ b/collectors/python.d.plugin/mysql/mysql.chart.py
@@ -6,7 +6,6 @@
from bases.FrameworkServices.MySQLService import MySQLService
-
# query executed on MySQL server
QUERY_GLOBAL = 'SHOW GLOBAL STATUS;'
QUERY_SLAVE = 'SHOW SLAVE STATUS;'
@@ -348,7 +347,7 @@ CHARTS = {
]
},
'threads_creation_rate': {
- 'options': [None, 'Threads Creation Rate', 'threads/s', 'threads', 'mysql.threads', 'line'],
+ 'options': [None, 'Threads Creation Rate', 'threads/s', 'threads', 'mysql.threads_creation_rate', 'line'],
'lines': [
['Threads_created', 'created', 'incremental'],
]
diff --git a/collectors/python.d.plugin/nginx/README.md b/collectors/python.d.plugin/nginx/README.md
index ebbdb0f25..b55b01e7c 100644
--- a/collectors/python.d.plugin/nginx/README.md
+++ b/collectors/python.d.plugin/nginx/README.md
@@ -1,8 +1,14 @@
-# nginx
+<!--
+title: "NGINX monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/nginx/README.md
+sidebar_label: "NGINX"
+-->
-This module will monitor one or more nginx servers depending on configuration. Servers can be either local or remote.
+# NGINX monitoring with Netdata
-**Requirements:**
+Monitors one or more NGINX servers depending on configuration. Servers can be either local or remote.
+
+## Requirements
- nginx with configured 'ngx_http_stub_status_module'
- 'location /stub_status'
@@ -30,9 +36,17 @@ It produces following charts:
- accepts
- handled
-## configuration
+## Configuration
+
+Edit the `python.d/nginx.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/nginx.conf
+```
-Needs only `url` to server's `stub_status`
+Needs only the `url` to the server's `stub_status`.
Here is an example for local server:
diff --git a/collectors/python.d.plugin/nginx/nginx.chart.py b/collectors/python.d.plugin/nginx/nginx.chart.py
index 84a5985e4..7548d6a42 100644
--- a/collectors/python.d.plugin/nginx/nginx.chart.py
+++ b/collectors/python.d.plugin/nginx/nginx.chart.py
@@ -5,7 +5,6 @@
from bases.FrameworkServices.UrlService import UrlService
-
ORDER = [
'connections',
'requests',
diff --git a/collectors/python.d.plugin/nginx_plus/README.md b/collectors/python.d.plugin/nginx_plus/README.md
index 1110c5524..2580740c3 100644
--- a/collectors/python.d.plugin/nginx_plus/README.md
+++ b/collectors/python.d.plugin/nginx_plus/README.md
@@ -1,7 +1,12 @@
-# nginx_plus
+<!--
+title: "NGINX Plus monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/nginx_plus/README.md
+sidebar_label: "NGINX Plus"
+-->
-This module will monitor one or more nginx_plus servers depending on configuration.
-Servers can be either local or remote.
+# NGINX Plus monitoring with Netdata
+
+Monitors one or more NGINX Plus servers depending on configuration. Servers can be either local or remote.
Example nginx_plus configuration can be found in 'python.d/nginx_plus.conf'
@@ -134,11 +139,19 @@ For every cache:
- usage
-## configuration
+## Configuration
+
+Edit the `python.d/nginx_plus.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/nginx_plus.conf
+```
-Needs only `url` to server's `status`
+Needs only the `url` to the server's `status`.
-Here is an example for local server:
+Here is an example for a local server:
```yaml
local:
diff --git a/collectors/python.d.plugin/nginx_plus/nginx_plus.chart.py b/collectors/python.d.plugin/nginx_plus/nginx_plus.chart.py
index 6cf35cb13..a6c035f68 100644
--- a/collectors/python.d.plugin/nginx_plus/nginx_plus.chart.py
+++ b/collectors/python.d.plugin/nginx_plus/nginx_plus.chart.py
@@ -16,7 +16,6 @@ except ImportError:
from bases.FrameworkServices.UrlService import UrlService
-
ORDER = [
'requests_total',
'requests_current',
diff --git a/collectors/python.d.plugin/nsd/README.md b/collectors/python.d.plugin/nsd/README.md
index 61bc5c458..1e7b240e7 100644
--- a/collectors/python.d.plugin/nsd/README.md
+++ b/collectors/python.d.plugin/nsd/README.md
@@ -1,8 +1,14 @@
-# nsd
+<!--
+title: "NSD monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/nsd/README.md
+sidebar_label: "NSD"
+-->
-Module uses the `nsd-control stats_noreset` command to provide `nsd` statistics.
+# NSD monitoring with Netdata
-**Requirements:**
+Uses the `nsd-control stats_noreset` command to provide `nsd` statistics.
+
+## Requirements
- Version of `nsd` must be 4.0+
- Netdata must have permissions to run `nsd-control stats_noreset`
diff --git a/collectors/python.d.plugin/nsd/nsd.chart.py b/collectors/python.d.plugin/nsd/nsd.chart.py
index 77b0d7bbf..6f9b2cec8 100644
--- a/collectors/python.d.plugin/nsd/nsd.chart.py
+++ b/collectors/python.d.plugin/nsd/nsd.chart.py
@@ -7,7 +7,6 @@ import re
from bases.FrameworkServices.ExecutableService import ExecutableService
-
update_every = 30
NSD_CONTROL_COMMAND = 'nsd-control stats_noreset'
diff --git a/collectors/python.d.plugin/ntpd/README.md b/collectors/python.d.plugin/ntpd/README.md
index d4d0dc60d..0b08f12b8 100644
--- a/collectors/python.d.plugin/ntpd/README.md
+++ b/collectors/python.d.plugin/ntpd/README.md
@@ -1,8 +1,14 @@
-# ntpd
+<!--
+title: "NTP daemon monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/ntpd/README.md
+sidebar_label: "NTP daemon"
+-->
-Module monitors the system variables of the local `ntpd` daemon (optional incl. variables of the polled peers) using the NTP Control Message Protocol via UDP socket, similar to `ntpq`, the [standard NTP query program](http://doc.ntp.org/current-stable/ntpq.html).
+# NTP daemon monitoring with Netdata
-**Requirements:**
+Monitors the system variables of the local `ntpd` daemon (optionally including variables of the polled peers) using the NTP Control Message Protocol via a UDP socket, similar to `ntpq`, the [standard NTP query program](http://doc.ntp.org/current-stable/ntpq.html).
+
+## Requirements
- Version: `NTPv4`
- Local interrogation allowed in `/etc/ntp.conf` (default):
@@ -41,7 +47,15 @@ It produces:
- ppoll
- precision
-## configuration
+## Configuration
+
+Edit the `python.d/ntpd.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/ntpd.conf
+```
Sample:
diff --git a/collectors/python.d.plugin/ntpd/ntpd.chart.py b/collectors/python.d.plugin/ntpd/ntpd.chart.py
index e33332cb3..275d2276c 100644
--- a/collectors/python.d.plugin/ntpd/ntpd.chart.py
+++ b/collectors/python.d.plugin/ntpd/ntpd.chart.py
@@ -4,12 +4,11 @@
# Author: Ilya Mashchenko (ilyam8)
# SPDX-License-Identifier: GPL-3.0-or-later
-import struct
import re
+import struct
from bases.FrameworkServices.SocketService import SocketService
-
# NTP Control Message Protocol constants
MODE = 6
HEADER_FORMAT = '!BBHHHHH'
diff --git a/collectors/python.d.plugin/nvidia_smi/README.md b/collectors/python.d.plugin/nvidia_smi/README.md
index 71e3e2889..9bfb2094b 100644
--- a/collectors/python.d.plugin/nvidia_smi/README.md
+++ b/collectors/python.d.plugin/nvidia_smi/README.md
@@ -1,42 +1,58 @@
-# nvidia_smi
+<!--
+title: "Nvidia GPU monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/nvidia_smi/README.md
+sidebar_label: "Nvidia GPUs"
+-->
-This module monitors the `nvidia-smi` cli tool.
+# Nvidia GPU monitoring with Netdata
-**Requirements and Notes:**
+Monitors performance metrics (memory usage, fan speed, PCIe bandwidth utilization, temperature, and more) using the `nvidia-smi` CLI tool.
-- You must have the `nvidia-smi` tool installed and your NVIDIA GPU(s) must support the tool. Mostly the newer high end models used for AI / ML and Crypto or Pro range, read more about [nvidia_smi](https://developer.nvidia.com/nvidia-system-management-interface).
-- You must enable this plugin as its disabled by default due to minor performance issues.
+## Requirements and Notes
+- You must have the `nvidia-smi` tool installed and your NVIDIA GPU(s) must support it; this mostly means the newer high-end models used for AI/ML and crypto, or the Pro range. Read more about [nvidia_smi](https://developer.nvidia.com/nvidia-system-management-interface).
+- You must enable this plugin, as it is disabled by default due to minor performance issues; see the `python.d.conf` snippet after this list.
- On some systems when the GPU is idle the `nvidia-smi` tool unloads and there is added latency again when it is next queried. If you are running GPUs under constant workload this isn't likely to be an issue.
-
- Currently the `nvidia-smi` tool is being queried via cli. Updating the plugin to use the nvidia c/c++ API directly should resolve this issue. See discussion here: <https://github.com/netdata/netdata/pull/4357>
-
- Contributions are welcome.
-
- Make sure `netdata` user can execute `/usr/bin/nvidia-smi` or wherever your binary is.
-
+- If the `nvidia-smi` process [is not killed after a netdata restart](https://github.com/netdata/netdata/issues/7143), you need to turn off `loop_mode`.
- `poll_seconds` is how often in seconds the tool is polled for as an integer.
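+
+Since the module is disabled by default, the `python.d.conf` change needed to
+enable it is just the module toggle:
+
+```yaml
+nvidia_smi: yes
+```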
-It produces:
+## Charts
+
+It produces the following charts:
-1. Per GPU
+- PCI Express Bandwidth Utilization in `KiB/s`
+- Fan Speed in `percentage`
+- GPU Utilization in `percentage`
+- Memory Bandwidth Utilization in `percentage`
+- Encoder/Decoder Utilization in `percentage`
+- Memory Usage in `MiB`
+- Temperature in `celsius`
+- Clock Frequencies in `MHz`
+- Power Utilization in `Watts`
+- Memory Used by Each Process in `MiB`
+- Memory Used by Each User in `MiB`
+- Number of User on GPU in `num`
- - GPU utilization
- - memory allocation
- - memory utilization
- - fan speed
- - power usage
- - temperature
- - clock speed
- - PCI bandwidth
+## Configuration
-## configuration
+Edit the `python.d/nvidia_smi.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/nvidia_smi.conf
+```
Sample:
```yaml
-poll_seconds: 1
+loop_mode : yes
+poll_seconds : 1
+exclude_zero_memory_users : yes
```
[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fnvidia_smi%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/nvidia_smi/nvidia_smi.chart.py b/collectors/python.d.plugin/nvidia_smi/nvidia_smi.chart.py
index 0bea268ef..9c69586dd 100644
--- a/collectors/python.d.plugin/nvidia_smi/nvidia_smi.chart.py
+++ b/collectors/python.d.plugin/nvidia_smi/nvidia_smi.chart.py
@@ -2,21 +2,22 @@
# Description: nvidia-smi netdata python.d module
# Original Author: Steven Noonan (tycho)
# Author: Ilya Mashchenko (ilyam8)
+# User Memory Stat Author: Guido Scatena (scatenag)
import subprocess
import threading
+import os
+import pwd
+
import xml.etree.ElementTree as et
-from bases.collection import find_binary
from bases.FrameworkServices.SimpleService import SimpleService
+from bases.collection import find_binary
disabled_by_default = True
-
NVIDIA_SMI = 'nvidia-smi'
-BAD_VALUE = 'N/A'
-
EMPTY_ROW = ''
EMPTY_ROW_LIMIT = 500
POLLER_BREAK_ROW = '</nvidia_smi_log>'
@@ -31,6 +32,8 @@ TEMPERATURE = 'temperature'
CLOCKS = 'clocks'
POWER = 'power'
PROCESSES_MEM = 'processes_mem'
+USER_MEM = 'user_mem'
+USER_NUM = 'user_num'
ORDER = [
PCI_BANDWIDTH,
@@ -43,6 +46,8 @@ ORDER = [
CLOCKS,
POWER,
PROCESSES_MEM,
+ USER_MEM,
+ USER_NUM,
]
@@ -76,7 +81,8 @@ def gpu_charts(gpu):
]
},
ENCODER_UTIL: {
- 'options': [None, 'Encoder/Decoder Utilization', 'percentage', fam, 'nvidia_smi.encoder_utilization', 'line'],
+ 'options': [None, 'Encoder/Decoder Utilization', 'percentage', fam, 'nvidia_smi.encoder_utilization',
+ 'line'],
'lines': [
['encoder_util', 'encoder'],
['decoder_util', 'decoder'],
@@ -114,6 +120,16 @@ def gpu_charts(gpu):
'options': [None, 'Memory Used by Each Process', 'MiB', fam, 'nvidia_smi.processes_mem', 'stacked'],
'lines': []
},
+ USER_MEM: {
+ 'options': [None, 'Memory Used by Each User', 'MiB', fam, 'nvidia_smi.user_mem', 'stacked'],
+ 'lines': []
+ },
+ USER_NUM: {
+ 'options': [None, 'Number of User on GPU', 'num', fam, 'nvidia_smi.user_num', 'line'],
+ 'lines': [
+ ['user_num', 'users'],
+ ]
+ },
}
idx = gpu.num
@@ -212,6 +228,7 @@ def handle_attr_error(method):
return method(*args, **kwargs)
except AttributeError:
return None
+
return on_call
@@ -221,13 +238,66 @@ def handle_value_error(method):
return method(*args, **kwargs)
except ValueError:
return None
+
return on_call
+HOST_PREFIX = os.getenv('NETDATA_HOST_PREFIX')
+ETC_PASSWD_PATH = '/etc/passwd'
+PROC_PATH = '/proc'
+
+IS_INSIDE_DOCKER = False
+
+if HOST_PREFIX:
+ ETC_PASSWD_PATH = os.path.join(HOST_PREFIX, ETC_PASSWD_PATH[1:])
+ PROC_PATH = os.path.join(HOST_PREFIX, PROC_PATH[1:])
+ IS_INSIDE_DOCKER = True
+
+
+def read_passwd_file():
+ data = dict()
+ with open(ETC_PASSWD_PATH, 'r') as f:
+ for line in f:
+ line = line.strip()
+ if line.startswith("#"):
+ continue
+ fields = line.split(":")
+ # name, passwd, uid, gid, comment, home_dir, shell
+ if len(fields) != 7:
+ continue
+ # uid, guid
+ fields[2], fields[3] = int(fields[2]), int(fields[3])
+ data[fields[2]] = fields
+ return data
+
+
+def read_passwd_file_safe():
+ try:
+ if IS_INSIDE_DOCKER:
+ return read_passwd_file()
+ return dict((k[2], k) for k in pwd.getpwall())
+ except (OSError, IOError):
+ return dict()
+
+
+def get_username_by_pid_safe(pid, passwd_file):
+ path = os.path.join(PROC_PATH, pid)
+ try:
+ uid = os.stat(path).st_uid
+ except (OSError, IOError):
+ return ''
+
+ try:
+ return passwd_file[uid][0]
+ except KeyError:
+ return str(uid)
+
+
class GPU:
- def __init__(self, num, root):
+ def __init__(self, num, root, exclude_zero_memory_users=False):
self.num = num
self.root = root
+ self.exclude_zero_memory_users = exclude_zero_memory_users
def id(self):
return self.root.get('id')
@@ -301,15 +371,22 @@ class GPU:
@handle_attr_error
def processes(self):
- p_nodes = self.root.find('processes').findall('process_info')
- ps = []
- for p in p_nodes:
- ps.append({
- 'pid': p.find('pid').text,
- 'process_name': p.find('process_name').text,
- 'used_memory': int(p.find('used_memory').text.split()[0]),
+ processes_info = self.root.find('processes').findall('process_info')
+ if not processes_info:
+ return list()
+
+ passwd_file = read_passwd_file_safe()
+ processes = list()
+
+ for info in processes_info:
+ pid = info.find('pid').text
+ processes.append({
+ 'pid': int(pid),
+ 'process_name': info.find('process_name').text,
+ 'used_memory': int(info.find('used_memory').text.split()[0]),
+ 'username': get_username_by_pid_safe(pid, passwd_file),
})
- return ps
+ return processes
def data(self):
data = {
@@ -330,11 +407,21 @@ class GPU:
'power_draw': self.power_draw(),
}
processes = self.processes() or []
- data.update({'process_mem_{0}'.format(p['pid']): p['used_memory'] for p in processes})
-
- return dict(
- ('gpu{0}_{1}'.format(self.num, k), v) for k, v in data.items() if v is not None and v != BAD_VALUE
- )
+ users = set()
+ for p in processes:
+ data['process_mem_{0}'.format(p['pid'])] = p['used_memory']
+ if p['username']:
+ if self.exclude_zero_memory_users and p['used_memory'] == 0:
+ continue
+ users.add(p['username'])
+ key = 'user_mem_{0}'.format(p['username'])
+ if key in data:
+ data[key] += p['used_memory']
+ else:
+ data[key] = p['used_memory']
+ data['user_num'] = len(users)
+
+ return dict(('gpu{0}_{1}'.format(self.num, k), v) for k, v in data.items())
class Service(SimpleService):
@@ -342,10 +429,12 @@ class Service(SimpleService):
super(Service, self).__init__(configuration=configuration, name=name)
self.order = list()
self.definitions = dict()
+ self.loop_mode = configuration.get('loop_mode', True)
poll = int(configuration.get('poll_seconds', 1))
+ self.exclude_zero_memory_users = configuration.get('exclude_zero_memory_users', False)
self.poller = NvidiaSMIPoller(poll)
- def get_data(self):
+ def get_data_loop_mode(self):
if not self.poller.is_started():
self.poller.start()
@@ -353,7 +442,17 @@ class Service(SimpleService):
self.debug('poller is off')
return None
- last_data = self.poller.data()
+ return self.poller.data()
+
+ def get_data_normal_mode(self):
+ return self.poller.run_once()
+
+ def get_data(self):
+ if self.loop_mode:
+ last_data = self.get_data_loop_mode()
+ else:
+ last_data = self.get_data_normal_mode()
+
if not last_data:
return None
@@ -363,9 +462,13 @@ class Service(SimpleService):
data = dict()
for idx, root in enumerate(parsed.findall('gpu')):
- gpu = GPU(idx, root)
- data.update(gpu.data())
+ gpu = GPU(idx, root, self.exclude_zero_memory_users)
+ gpu_data = gpu.data()
+ # self.debug(gpu_data)
+ gpu_data = dict((k, v) for k, v in gpu_data.items() if is_gpu_data_value_valid(v))
+ data.update(gpu_data)
self.update_processes_mem_chart(gpu)
+ self.update_processes_user_mem_chart(gpu)
return data or None
@@ -384,6 +487,24 @@ class Service(SimpleService):
if dim.id not in active_dim_ids:
chart.del_dimension(dim.id, hide=False)
+ def update_processes_user_mem_chart(self, gpu):
+ ps = gpu.processes()
+ if not ps:
+ return
+ chart = self.charts['gpu{0}_{1}'.format(gpu.num, USER_MEM)]
+ active_dim_ids = []
+ for p in ps:
+ if not p.get('username'):
+ continue
+ dim_id = 'gpu{0}_user_mem_{1}'.format(gpu.num, p['username'])
+ active_dim_ids.append(dim_id)
+ if dim_id not in chart:
+ chart.add_dimension([dim_id, '{0}'.format(p['username'])])
+
+ for dim in chart:
+ if dim.id not in active_dim_ids:
+ chart.del_dimension(dim.id, hide=False)
+
def check(self):
if not self.poller.has_smi():
self.error("couldn't find '{0}' binary".format(NVIDIA_SMI))
@@ -419,3 +540,11 @@ class Service(SimpleService):
order, charts = gpu_charts(GPU(idx, root))
self.order.extend(order)
self.definitions.update(charts)
+
+
+def is_gpu_data_value_valid(value):
+ try:
+ int(value)
+ except (TypeError, ValueError):
+ return False
+ return True
diff --git a/collectors/python.d.plugin/nvidia_smi/nvidia_smi.conf b/collectors/python.d.plugin/nvidia_smi/nvidia_smi.conf
index 53e544a5d..3d2a30d41 100644
--- a/collectors/python.d.plugin/nvidia_smi/nvidia_smi.conf
+++ b/collectors/python.d.plugin/nvidia_smi/nvidia_smi.conf
@@ -61,6 +61,8 @@
#
# Additionally to the above, example also supports the following:
#
-# poll_seconds: SECONDS # default is 1. Sets the frequency of seconds the nvidia-smi tool is polled.
+# loop_mode: yes/no # default is yes. If set to yes, `nvidia-smi` is executed in a separate thread using the `-l` option.
+# poll_seconds: SECONDS # default is 1. Sets how often, in seconds, the nvidia-smi tool is polled in loop mode.
+# exclude_zero_memory_users: yes/no # default is no. Whether to collect user metrics for processes with 0 MiB of memory allocated.
#
# ----------------------------------------------------------------------
diff --git a/collectors/python.d.plugin/openldap/README.md b/collectors/python.d.plugin/openldap/README.md
index f1f9de581..4942d0f39 100644
--- a/collectors/python.d.plugin/openldap/README.md
+++ b/collectors/python.d.plugin/openldap/README.md
@@ -1,6 +1,12 @@
-# openldap
+<!--
+title: "OpenLDAP monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/openldap/README.md
+sidebar_label: "OpenLDAP"
+-->
-This module provides statistics information from openldap (slapd) server.
+# OpenLDAP monitoring with Netdata
+
+Provides statistics from the OpenLDAP (`slapd`) server.
Statistics are taken from LDAP monitoring interface. Manual page, slapd-monitor(5) is available.
**Requirement:**
@@ -47,7 +53,15 @@ Statistics are taken from LDAP monitoring interface. Manual page, slapd-monitor(
- read
- write
-### configuration
+## Configuration
+
+Edit the `python.d/openldap.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/openldap.conf
+```
Sample:
diff --git a/collectors/python.d.plugin/openldap/openldap.chart.py b/collectors/python.d.plugin/openldap/openldap.chart.py
index 3266ce400..aba143954 100644
--- a/collectors/python.d.plugin/openldap/openldap.chart.py
+++ b/collectors/python.d.plugin/openldap/openldap.chart.py
@@ -5,18 +5,19 @@
try:
import ldap
+
HAS_LDAP = True
except ImportError:
HAS_LDAP = False
from bases.FrameworkServices.SimpleService import SimpleService
-
DEFAULT_SERVER = 'localhost'
DEFAULT_PORT = '389'
DEFAULT_TLS = False
DEFAULT_CERT_CHECK = True
DEFAULT_TIMEOUT = 1
+DEFAULT_START_TLS = False
ORDER = [
'total_connections',
@@ -49,7 +50,7 @@ CHARTS = {
]
},
'referrals_sent': {
- 'options': [None, 'Referrals', 'referals/s', 'ldap', 'openldap.referrals', 'line'],
+ 'options': [None, 'Referrals', 'referrals/s', 'ldap', 'openldap.referrals', 'line'],
'lines': [
['referrals_sent', 'sent', 'incremental']
]
@@ -110,7 +111,7 @@ SEARCH_LIST = {
'add_operations': (
'cn=Add,cn=Operations,cn=Monitor', 'monitorOpInitiated',
),
- 'delete_operations': (
+ 'delete_operations': (
'cn=Delete,cn=Operations,cn=Monitor', 'monitorOpCompleted',
),
'modify_operations': (
@@ -143,6 +144,7 @@ class Service(SimpleService):
self.timeout = configuration.get('timeout', DEFAULT_TIMEOUT)
self.use_tls = configuration.get('use_tls', DEFAULT_TLS)
self.cert_check = configuration.get('cert_check', DEFAULT_CERT_CHECK)
+ self.use_start_tls = configuration.get('use_start_tls', DEFAULT_START_TLS)
self.alive = False
self.conn = None
@@ -159,8 +161,13 @@ class Service(SimpleService):
else:
self.conn = ldap.initialize('ldap://%s:%s' % (self.server, self.port))
self.conn.set_option(ldap.OPT_NETWORK_TIMEOUT, self.timeout)
- if self.use_tls and not self.cert_check:
+ if (self.use_tls or self.use_start_tls) and not self.cert_check:
self.conn.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER)
+ if self.use_start_tls or self.use_tls:
+ self.conn.set_option(ldap.OPT_X_TLS_NEWCTX, 0)
+ if self.use_start_tls:
+ self.conn.protocol_version = ldap.VERSION3
+ self.conn.start_tls_s()
if self.username and self.password:
self.conn.simple_bind(self.username, self.password)
except ldap.LDAPError as error:
@@ -193,17 +200,17 @@ class Service(SimpleService):
num = self.conn.search(dn, ldap.SCOPE_BASE, 'objectClass=*', [attr, ])
result_type, result_data = self.conn.result(num, 1)
except ldap.LDAPError as error:
- self.error("Empty result. Check bind username/password. Message: ",error)
+ self.error("Empty result. Check bind username/password. Message: ", error)
self.alive = False
return None
+ if result_type != 101:
+ continue
+
try:
- if result_type == 101:
- val = int(result_data[0][1].values()[0][0])
+ data[key] = int(list(result_data[0][1].values())[0][0])
except (ValueError, IndexError) as error:
self.debug(error)
continue
- data[key] = val
-
return data
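The new `use_start_tls` branch boils down to the python-ldap call sequence below. This is a standalone sketch assuming a local slapd reachable over plain `ldap://`; the bind DN and password are placeholders, and it uses the synchronous `simple_bind_s()` instead of the module's asynchronous `simple_bind()`:

```python
import ldap

conn = ldap.initialize('ldap://localhost:389')
conn.set_option(ldap.OPT_NETWORK_TIMEOUT, 1)

cert_check = False  # mirrors `cert_check: no` in openldap.conf
if not cert_check:
    conn.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER)
conn.set_option(ldap.OPT_X_TLS_NEWCTX, 0)  # apply the TLS options to a fresh context

conn.protocol_version = ldap.VERSION3
conn.start_tls_s()  # upgrade the plain ldap:// connection to TLS
conn.simple_bind_s('cn=netdata,dc=example,dc=com', 'password')  # placeholder credentials
```

In contrast, `use_tls` keeps the pre-existing `ldaps://` path and only shares the certificate-check and TLS-context options with the StartTLS path.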
diff --git a/collectors/python.d.plugin/openldap/openldap.conf b/collectors/python.d.plugin/openldap/openldap.conf
index 73e8636ed..5fd99a525 100644
--- a/collectors/python.d.plugin/openldap/openldap.conf
+++ b/collectors/python.d.plugin/openldap/openldap.conf
@@ -65,10 +65,11 @@ update_every: 10
# Set here your LDAP connection settings
-#username : "cn=admin,dc=example,dc=com" # The bind user with right to access monitor statistics
-#password : "yourpass" # The password for the binded user
-#server : 'localhost' # The listening address of the LDAP server. In case of TLS, use the hostname which the certificate is published for.
-#port : 389 # The listening port of the LDAP server. Change to 636 port in case of TLS connection
-#use_tls : False # Make True if a TLS connection is used
-#cert_check : True # False if you want to ignore certificate check
-#timeout : 1 # Seconds to timeout if no connection exi
+#username : "cn=admin,dc=example,dc=com" # The bind user with rights to access monitor statistics
+#password : "yourpass" # The password for the bound user
+#server : 'localhost' # The listening address of the LDAP server. In case of TLS, use the hostname which the certificate is published for.
+#port : 389 # The listening port of the LDAP server. Change to 636 port in case of TLS connection
+#use_tls : False # Make True if a TLS connection is used over ldaps://
+#use_start_tls: False # Make True if a TLS connection is used over ldap://
+#cert_check : True # False if you want to ignore certificate check
+#timeout : 1 # Seconds to timeout if no connection exi
diff --git a/collectors/python.d.plugin/oracledb/README.md b/collectors/python.d.plugin/oracledb/README.md
index 708f261d9..d61c7d2ad 100644
--- a/collectors/python.d.plugin/oracledb/README.md
+++ b/collectors/python.d.plugin/oracledb/README.md
@@ -1,8 +1,14 @@
-# oracledb
+<!--
+title: "OracleDB monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/oracledb/README.md
+sidebar_label: "OracleDB"
+-->
-Module monitor oracledb performance and health metrics.
+# OracleDB monitoring with Netdata
-**Requirements:**
+Monitors the performance and health metrics of the Oracle database.
+
+## Requirements
- `cx_Oracle` package.
- Oracle Client (using `cx_Oracle` requires Oracle Client libraries to be installed).
@@ -35,14 +41,19 @@ It produces following charts:
- Size
- Usage
- Usage In Percent
+- allocated space
+ - Size
+ - Usage
+ - Usage In Percent
## prerequisite
To use the Oracle module do the following:
-1. Install `cx_Oracle` package ([link](https://cx-oracle.readthedocs.io/en/latest/installation.html#install-cx-oracle)).
+1. Install `cx_Oracle` package ([link](https://cx-oracle.readthedocs.io/en/latest/user_guide/installation.html)).
-2. Install Oracle Client libraries ([link](https://cx-oracle.readthedocs.io/en/latest/installation.html#install-oracle-client)).
+2. Install Oracle Client libraries
+ ([link](https://cx-oracle.readthedocs.io/en/latest/user_guide/installation.html#install-oracle-client)).
3. Create a read-only `netdata` user with proper access to your Oracle Database Server.
@@ -57,7 +68,15 @@ GRANT CONNECT TO netdata;
GRANT SELECT_CATALOG_ROLE TO netdata;
```
-### configuration
+## Configuration
+
+Edit the `python.d/oracledb.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/oracledb.conf
+```
```yaml
local:
diff --git a/collectors/python.d.plugin/oracledb/oracledb.chart.py b/collectors/python.d.plugin/oracledb/oracledb.chart.py
index 9490b6218..28ef8db10 100644
--- a/collectors/python.d.plugin/oracledb/oracledb.chart.py
+++ b/collectors/python.d.plugin/oracledb/oracledb.chart.py
@@ -9,11 +9,11 @@ from bases.FrameworkServices.SimpleService import SimpleService
try:
import cx_Oracle
+
HAS_ORACLE = True
except ImportError:
HAS_ORACLE = False
-
ORDER = [
'session_count',
'session_limit_usage',
@@ -34,6 +34,9 @@ ORDER = [
'tablespace_size',
'tablespace_usage',
'tablespace_usage_in_percent',
+ 'allocated_size',
+ 'allocated_usage',
+ 'allocated_usage_in_percent',
]
CHARTS = {
@@ -170,9 +173,20 @@ CHARTS = {
'options': [None, 'Usage', '%', 'tablespace', 'oracledb.tablespace_usage_in_percent', 'line'],
'lines': [],
},
+ 'allocated_size': {
+ 'options': [None, 'Size', 'B', 'tablespace', 'oracledb.allocated_size', 'line'],
+ 'lines': [],
+ },
+ 'allocated_usage': {
+ 'options': [None, 'Usage', 'B', 'tablespace', 'oracledb.allocated_usage', 'line'],
+ 'lines': [],
+ },
+ 'allocated_usage_in_percent': {
+ 'options': [None, 'Usage', '%', 'tablespace', 'oracledb.allocated_usage_in_percent', 'line'],
+ 'lines': [],
+ },
}
-
CX_CONNECT_STRING = "{0}/{1}@//{2}/{3}"
QUERY_SYSTEM = '''
@@ -194,6 +208,27 @@ FROM
dba_tablespace_usage_metrics m
JOIN dba_tablespaces t ON m.tablespace_name = t.tablespace_name
'''
+QUERY_ALLOCATED = '''
+SELECT
+ nvl(b.tablespace_name,nvl(a.tablespace_name,'UNKNOWN')) tablespace_name,
+ bytes_alloc used_bytes,
+ bytes_alloc-nvl(bytes_free,0) max_bytes,
+ ((bytes_alloc-nvl(bytes_free,0))/ bytes_alloc)*100 used_percent
+FROM
+ (SELECT
+ sum(bytes) bytes_free,
+ tablespace_name
+ FROM sys.dba_free_space
+ GROUP BY tablespace_name
+ ) a,
+ (SELECT
+ sum(bytes) bytes_alloc,
+ tablespace_name
+ FROM sys.dba_data_files
+ GROUP BY tablespace_name
+ ) b
+WHERE a.tablespace_name (+) = b.tablespace_name
+'''
QUERY_ACTIVITIES_COUNT = '''
SELECT
name,
@@ -398,6 +433,26 @@ class Service(SimpleService):
data['{0}_tablespace_used'.format(name)] = int(used * 1000)
data['{0}_tablespace_used_in_percent'.format(name)] = int(used_in_percent * 1000)
+ # ALLOCATED SPACE
+ try:
+ rv = self.gather_allocated_metrics()
+ except cx_Oracle.Error as error:
+ self.error(error)
+ self.alive = False
+ return None
+ else:
+ for name, offline, size, used, used_in_percent in rv:
+ # TODO: skip offline?
+ if not (not offline and self.charts):
+ continue
+ # TODO: remove inactive?
+ if name not in self.active_tablespaces:
+ self.active_tablespaces.add(name)
+ self.add_tablespace_to_charts(name)
+ data['{0}_allocated_size'.format(name)] = int(size * 1000)
+ data['{0}_allocated_used'.format(name)] = int(used * 1000)
+ data['{0}_allocated_used_in_percent'.format(name)] = int(used_in_percent * 1000)
+
return data or None
def gather_system_metrics(self):
@@ -613,6 +668,44 @@ class Service(SimpleService):
)
return metrics
+ def gather_allocated_metrics(self):
+ """
+ :return:
+
+ [['SYSTEM', 874250240.0, 3233169408.0, 27.040038107400033, 0],
+ ['SYSAUX', 498860032.0, 3233169408.0, 15.429443033997678, 0],
+ ['TEMP', 0.0, 3233177600.0, 0.0, 0],
+ ['USERS', 1048576.0, 3233169408.0, 0.03243182981397305, 0]]
+ """
+ metrics = list()
+ with self.conn.cursor() as cursor:
+ cursor.execute(QUERY_ALLOCATED)
+ for tablespace_name, used_bytes, max_bytes, used_percent in cursor.fetchall():
+ if used_bytes is None:
+ offline = True
+ used = 0
+ else:
+ offline = False
+ used = float(used_bytes)
+ if max_bytes is None:
+ size = 0
+ else:
+ size = float(max_bytes)
+ if used_percent is None:
+ used_percent = 0
+ else:
+ used_percent = float(used_percent)
+ metrics.append(
+ [
+ tablespace_name,
+ offline,
+ size,
+ used,
+ used_percent,
+ ]
+ )
+ return metrics
+
def gather_wait_time_metrics(self):
"""
:return:
@@ -712,3 +805,27 @@ class Service(SimpleService):
1,
1000,
])
+ self.charts['allocated_size'].add_dimension(
+ [
+ '{0}_allocated_size'.format(name),
+ name,
+ 'absolute',
+ 1,
+ 1000,
+ ])
+ self.charts['allocated_usage'].add_dimension(
+ [
+ '{0}_allocated_used'.format(name),
+ name,
+ 'absolute',
+ 1,
+ 1000,
+ ])
+ self.charts['allocated_usage_in_percent'].add_dimension(
+ [
+ '{0}_allocated_used_in_percent'.format(name),
+ name,
+ 'absolute',
+ 1,
+ 1000,
+ ])
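The allocated-space support above multiplies every value by 1000 and charts it with a matching divisor of 1000. A simplified sketch of how one `QUERY_ALLOCATED` row maps onto chart data keys; it skips the offline and active-tablespace bookkeeping done by the service:

```python
def allocated_row_to_data(tablespace_name, used_bytes, max_bytes, used_percent):
    # None means the tablespace reported nothing; treat it as zero, like the collector does.
    size = float(max_bytes or 0)
    used = float(used_bytes or 0)
    pct = float(used_percent or 0)
    return {
        '{0}_allocated_size'.format(tablespace_name): int(size * 1000),
        '{0}_allocated_used'.format(tablespace_name): int(used * 1000),
        '{0}_allocated_used_in_percent'.format(tablespace_name): int(pct * 1000),
    }


# Illustrative row, shaped like the docstring sample in gather_allocated_metrics().
print(allocated_row_to_data('USERS', 1048576.0, 3233169408.0, 0.0324))
```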
diff --git a/collectors/python.d.plugin/ovpn_status_log/README.md b/collectors/python.d.plugin/ovpn_status_log/README.md
index 053e3f0de..8fa8cb833 100644
--- a/collectors/python.d.plugin/ovpn_status_log/README.md
+++ b/collectors/python.d.plugin/ovpn_status_log/README.md
@@ -1,8 +1,14 @@
-# ovpn_status_log
+<!--
+title: "OpenVPN monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/ovpn_status_log/README.md
+sidebar_label: "OpenVPN"
+-->
-Module monitor openvpn-status log file.
+# OpenVPN monitoring with Netdata
-**Requirements:**
+Parses server log files and provides summary (client, traffic) metrics.
+
+## Requirements
- If you are running multiple OpenVPN instances out of the same directory, MAKE SURE TO EDIT DIRECTIVES which create output files
so that multiple instances do not overwrite each other's output files.
@@ -22,7 +28,15 @@ It produces:
- in
- out
-## configuration
+## Configuration
+
+Edit the `python.d/ovpn_status_log.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/ovpn_status_log.conf
+```
Sample:
diff --git a/collectors/python.d.plugin/ovpn_status_log/ovpn_status_log.chart.py b/collectors/python.d.plugin/ovpn_status_log/ovpn_status_log.chart.py
index f094ab7c1..cfc87be36 100644
--- a/collectors/python.d.plugin/ovpn_status_log/ovpn_status_log.chart.py
+++ b/collectors/python.d.plugin/ovpn_status_log/ovpn_status_log.chart.py
@@ -7,7 +7,6 @@ import re
from bases.FrameworkServices.SimpleService import SimpleService
-
update_every = 10
ORDER = [
@@ -72,7 +71,7 @@ class Service(SimpleService):
break
if found:
return True
- self.error('Failed to parse ovpenvpn log file')
+ self.error('Failed to parse openvpn log file')
return False
def _get_raw_data(self):
diff --git a/collectors/python.d.plugin/phpfpm/README.md b/collectors/python.d.plugin/phpfpm/README.md
index 5f8284330..9d0dbb580 100644
--- a/collectors/python.d.plugin/phpfpm/README.md
+++ b/collectors/python.d.plugin/phpfpm/README.md
@@ -1,41 +1,47 @@
-# phpfpm
+<!--
+title: "PHP-FPM monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/phpfpm/README.md
+sidebar_label: "PHP-FPM"
+-->
-This module will monitor one or more php-fpm instances depending on configuration.
+# PHP-FPM monitoring with Netdata
-**Requirements:**
+Monitors one or more PHP-FPM instances depending on configuration.
-- php-fpm with enabled `status` page
-- access to `status` page via web server
-
-It produces following charts:
+## Requirements
-1. **Active Connections**
-
- - active
- - maxActive
- - idle
+- `PHP-FPM` with [enabled `status` page](https://easyengine.io/tutorials/php/fpm-status-page/)
+- access to `status` page via web server
-2. **Requests** in requests/s
+## Charts
- - requests
+It produces following charts:
-3. **Performance**
+- Active Connections in `connections`
+- Requests in `requests/s`
+- Performance in `status`
+- Requests Duration Among All Idle Processes in `milliseconds`
+- Last Request CPU Usage Among All Idle Processes in `percentage`
+- Last Request Memory Usage Among All Idle Processes in `KB`
- - reached
- - slow
+## Configuration
-## configuration
+Edit the `python.d/phpfpm.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
-Needs only `url` to server's `status`
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/phpfpm.conf
+```
-Here is an example for local instance:
+You only need to provide the `url` to the server's `status` page. Here is an example for local and remote instances:
```yaml
-update_every : 3
-priority : 90100
-
local:
- url : 'http://localhost/status'
+ url : 'http://localhost/status?full&json'
+
+remote:
+ url : 'http://203.0.113.10/status?full&json'
```
Without configuration, module attempts to connect to `http://localhost/status`
diff --git a/collectors/python.d.plugin/phpfpm/phpfpm.chart.py b/collectors/python.d.plugin/phpfpm/phpfpm.chart.py
index d0e9960e0..226df99c6 100644
--- a/collectors/python.d.plugin/phpfpm/phpfpm.chart.py
+++ b/collectors/python.d.plugin/phpfpm/phpfpm.chart.py
@@ -9,7 +9,6 @@ import re
from bases.FrameworkServices.UrlService import UrlService
-
REGEX = re.compile(r'([a-z][a-z ]+): ([\d.]+)')
POOL_INFO = [
@@ -71,7 +70,8 @@ CHARTS = {
]
},
'request_duration': {
- 'options': [None, 'PHP-FPM Request Duration', 'milliseconds', 'request duration', 'phpfpm.request_duration',
+ 'options': [None, 'PHP-FPM Requests Duration Among All Idle Processes', 'milliseconds', 'request duration',
+ 'phpfpm.request_duration',
'line'],
'lines': [
['minReqDur', 'min', 'absolute', 1, 1000],
@@ -80,7 +80,8 @@ CHARTS = {
]
},
'request_cpu': {
- 'options': [None, 'PHP-FPM Request CPU', 'percentage', 'request CPU', 'phpfpm.request_cpu', 'line'],
+ 'options': [None, 'PHP-FPM Last Request CPU Usage Among All Idle Processes', 'percentage', 'request CPU',
+ 'phpfpm.request_cpu', 'line'],
'lines': [
['minReqCpu', 'min'],
['maxReqCpu', 'max'],
@@ -88,7 +89,8 @@ CHARTS = {
]
},
'request_mem': {
- 'options': [None, 'PHP-FPM Request Memory', 'KB', 'request memory', 'phpfpm.request_mem', 'line'],
+ 'options': [None, 'PHP-FPM Last Request Memory Usage Among All Idle Processes', 'KB', 'request memory',
+ 'phpfpm.request_mem', 'line'],
'lines': [
['minReqMem', 'min', 'absolute', 1, 1024],
['maxReqMem', 'max', 'absolute', 1, 1024],
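The `REGEX` shown in the hunk's context parses the plain-text form of the PHP-FPM status page (the per-process metrics behind the renamed charts come from the `?full&json` variant instead). A short sketch of what that regex extracts from an illustrative plain-text status page:

```python
import re

REGEX = re.compile(r'([a-z][a-z ]+): ([\d.]+)')

# Illustrative excerpt of a pool status page; a real pool exposes more fields.
sample_status = """
pool: www
accepted conn: 12073
listen queue: 0
max listen queue: 1
idle processes: 2
active processes: 1
"""

# Only "key: numeric value" pairs match, so the non-numeric 'pool' line is skipped.
print(dict(REGEX.findall(sample_status)))
# {'accepted conn': '12073', 'listen queue': '0', 'max listen queue': '1',
#  'idle processes': '2', 'active processes': '1'}
```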
diff --git a/collectors/python.d.plugin/portcheck/README.md b/collectors/python.d.plugin/portcheck/README.md
index 2bbea10c7..35521b2ad 100644
--- a/collectors/python.d.plugin/portcheck/README.md
+++ b/collectors/python.d.plugin/portcheck/README.md
@@ -1,6 +1,12 @@
-# portcheck
+<!--
+title: "TCP endpoint monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/portcheck/README.md
+sidebar_label: "TCP endpoints"
+-->
-Module monitors a remote TCP service.
+# TCP endpoint monitoring with Netdata
+
+Monitors TCP endpoint availability and response time.
Following charts are drawn per host:
@@ -16,7 +22,15 @@ Following charts are drawn per host:
- Connection refused: port not listening or blocked
- Connection timed out: host or port unreachable
-## configuration
+## Configuration
+
+Edit the `python.d/portcheck.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/portcheck.conf
+```
```yaml
server:
diff --git a/collectors/python.d.plugin/portcheck/portcheck.chart.py b/collectors/python.d.plugin/portcheck/portcheck.chart.py
index 8479e38e4..818ac765d 100644
--- a/collectors/python.d.plugin/portcheck/portcheck.chart.py
+++ b/collectors/python.d.plugin/portcheck/portcheck.chart.py
@@ -12,7 +12,6 @@ except ImportError:
from bases.FrameworkServices.SimpleService import SimpleService
-
PORT_LATENCY = 'connect'
PORT_SUCCESS = 'success'
diff --git a/collectors/python.d.plugin/postfix/README.md b/collectors/python.d.plugin/postfix/README.md
index 5d2822c1d..53073ea8d 100644
--- a/collectors/python.d.plugin/postfix/README.md
+++ b/collectors/python.d.plugin/postfix/README.md
@@ -1,6 +1,14 @@
-# postfix
+<!--
+title: "Postfix monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/postfix/README.md
+sidebar_label: "Postfix"
+-->
-Simple module executing `postfix -p` to grab postfix queue.
+# Postfix monitoring with Netdata
+
+Monitors MTA email queue statistics using the `postqueue` tool.
+
+The module executes `postqueue -p` to read the Postfix queue.
It produces only two charts:
diff --git a/collectors/python.d.plugin/postgres/README.md b/collectors/python.d.plugin/postgres/README.md
index 29dd85a5c..dc9b18467 100644
--- a/collectors/python.d.plugin/postgres/README.md
+++ b/collectors/python.d.plugin/postgres/README.md
@@ -1,10 +1,16 @@
-# postgres
+<!--
+title: "PostgreSQL monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/postgres/README.md
+sidebar_label: "PostgreSQL"
+-->
-Module monitors one or more postgres servers.
+# PostgreSQL monitoring with Netdata
-**Requirements:**
+Collects database health and performance metrics.
-- `python-psycopg2` package. You have to install it manually.
+## Requirements
+
+- `python-psycopg2` package. You have to install it manually and make sure that it is available to the `netdata` user, either using `pip`, the package manager of your Linux distribution, or any other method you prefer.
Following charts are drawn:
@@ -16,50 +22,63 @@ Following charts are drawn:
- active
-3. **Write-Ahead Logging Statistics** files/s
+3. **Current Backend Process Usage** percentage
+
+ - used
+ - available
+
+4. **Write-Ahead Logging Statistics** files/s
- total
- ready
- done
-4. **Checkpoints** writes/s
+5. **Checkpoints** writes/s
- scheduled
- requested
-5. **Current connections to db** count
+6. **Current connections to db** count
- connections
-6. **Tuples returned from db** tuples/s
+7. **Tuples returned from db** tuples/s
- sequential
- bitmap
-7. **Tuple reads from db** reads/s
+8. **Tuple reads from db** reads/s
- disk
- cache
-8. **Transactions on db** transactions/s
+9. **Transactions on db** transactions/s
- committed
- rolled back
-9. **Tuples written to db** writes/s
+10. **Tuples written to db** writes/s
- inserted
- updated
- deleted
- conflicts
-10. **Locks on db** count per type
+11. **Locks on db** count per type
- locks
-## configuration
+## Configuration
-For all available options please see module [configuration file](postgres.conf).
+Edit the `python.d/postgres.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/postgres.conf
+```
+
+When no configuration file is found, the module tries to connect to the TCP/IP socket `localhost:5432`.
```yaml
socket:
@@ -75,8 +94,6 @@ tcp:
port : 5432
```
-When no configuration file is found, module tries to connect to TCP/IP socket: `localhost:5432`.
-
---
[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fpostgres%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/postgres/postgres.chart.py b/collectors/python.d.plugin/postgres/postgres.chart.py
index 9e3020358..bd28dd9b7 100644
--- a/collectors/python.d.plugin/postgres/postgres.chart.py
+++ b/collectors/python.d.plugin/postgres/postgres.chart.py
@@ -39,6 +39,7 @@ CONN_PARAM_SSL_KEY = 'sslkey'
QUERY_NAME_WAL = 'WAL'
QUERY_NAME_ARCHIVE = 'ARCHIVE'
QUERY_NAME_BACKENDS = 'BACKENDS'
+QUERY_NAME_BACKEND_USAGE = 'BACKEND_USAGE'
QUERY_NAME_TABLE_STATS = 'TABLE_STATS'
QUERY_NAME_INDEX_STATS = 'INDEX_STATS'
QUERY_NAME_DATABASE = 'DATABASE'
@@ -76,6 +77,10 @@ METRICS = {
'backends_active',
'backends_idle'
],
+ QUERY_NAME_BACKEND_USAGE: [
+ 'available',
+ 'used'
+ ],
QUERY_NAME_INDEX_STATS: [
'index_count',
'index_size'
@@ -139,6 +144,10 @@ METRICS = {
NO_VERSION = 0
DEFAULT = 'DEFAULT'
+V72 = 'V72'
+V82 = 'V82'
+V91 = 'V91'
+V92 = 'V92'
V96 = 'V96'
V10 = 'V10'
V11 = 'V11'
@@ -235,6 +244,76 @@ FROM pg_stat_activity;
""",
}
+QUERY_BACKEND_USAGE = {
+ DEFAULT: """
+SELECT
+ COUNT(1) as used,
+ current_setting('max_connections')::int - current_setting('superuser_reserved_connections')::int
+ - COUNT(1) AS available
+FROM pg_catalog.pg_stat_activity
+WHERE backend_type IN ('client backend', 'background worker');
+""",
+ V10: """
+SELECT
+ SUM(s.conn) as used,
+ current_setting('max_connections')::int - current_setting('superuser_reserved_connections')::int
+ - SUM(s.conn) AS available
+FROM (
+ SELECT 's' as type, COUNT(1) as conn
+ FROM pg_catalog.pg_stat_activity
+ WHERE backend_type IN ('client backend', 'background worker')
+ UNION ALL
+ SELECT 'r', COUNT(1)
+ FROM pg_catalog.pg_stat_replication
+) as s;
+""",
+ V92: """
+SELECT
+ SUM(s.conn) as used,
+ current_setting('max_connections')::int - current_setting('superuser_reserved_connections')::int
+ - SUM(s.conn) AS available
+FROM (
+ SELECT 's' as type, COUNT(1) as conn
+ FROM pg_catalog.pg_stat_activity
+ WHERE query NOT LIKE 'autovacuum: %%'
+ UNION ALL
+ SELECT 'r', COUNT(1)
+ FROM pg_catalog.pg_stat_replication
+) as s;
+""",
+ V91: """
+SELECT
+ SUM(s.conn) as used,
+ current_setting('max_connections')::int - current_setting('superuser_reserved_connections')::int
+ - SUM(s.conn) AS available
+FROM (
+ SELECT 's' as type, COUNT(1) as conn
+ FROM pg_catalog.pg_stat_activity
+ WHERE current_query NOT LIKE 'autovacuum: %%'
+ UNION ALL
+ SELECT 'r', COUNT(1)
+ FROM pg_catalog.pg_stat_replication
+) as s;
+""",
+ V82: """
+SELECT
+ COUNT(1) as used,
+ current_setting('max_connections')::int - current_setting('superuser_reserved_connections')::int
+ - COUNT(1) AS available
+FROM pg_catalog.pg_stat_activity
+WHERE current_query NOT LIKE 'autovacuum: %%';
+""",
+ V72: """
+SELECT
+ COUNT(1) as used,
+ current_setting('max_connections')::int - current_setting('superuser_reserved_connections')::int
+ - COUNT(1) AS available
+FROM pg_catalog.pg_stat_activity s
+JOIN pg_catalog.pg_database d ON d.oid = s.datid
+WHERE d.datallowconn;
+""",
+}
+
QUERY_TABLE_STATS = {
DEFAULT: """
SELECT
@@ -315,7 +394,7 @@ FROM pg_stat_database
WHERE
has_database_privilege(
(SELECT current_user), datname, 'connect')
- AND NOT datname ~* '^template\d ';
+ AND NOT datname ~* '^template\d';
""",
}
@@ -528,10 +607,21 @@ SELECT
""",
}
-
def query_factory(name, version=NO_VERSION):
if name == QUERY_NAME_BACKENDS:
return QUERY_BACKEND[DEFAULT]
+ elif name == QUERY_NAME_BACKEND_USAGE:
+ if version < 80200:
+ return QUERY_BACKEND_USAGE[V72]
+ if version < 90100:
+ return QUERY_BACKEND_USAGE[V82]
+ if version < 90200:
+ return QUERY_BACKEND_USAGE[V91]
+ if version < 100000:
+ return QUERY_BACKEND_USAGE[V92]
+ elif version < 120000:
+ return QUERY_BACKEND_USAGE[V10]
+ return QUERY_BACKEND_USAGE[DEFAULT]
elif name == QUERY_NAME_TABLE_STATS:
return QUERY_TABLE_STATS[DEFAULT]
elif name == QUERY_NAME_INDEX_STATS:
@@ -588,6 +678,7 @@ ORDER = [
'db_stat_connections',
'database_size',
'backend_process',
+ 'backend_usage',
'index_count',
'index_size',
'table_count',
@@ -674,6 +765,13 @@ CHARTS = {
['backends_idle', 'idle', 'absolute']
]
},
+ 'backend_usage': {
+ 'options': [None, '% of Connections in use', 'percentage', 'backend processes', 'postgres.backend_usage', 'stacked'],
+ 'lines': [
+ ['available', 'available', 'percentage-of-absolute-row'],
+ ['used', 'used', 'percentage-of-absolute-row']
+ ]
+ },
'index_count': {
'options': [None, 'Total indexes', 'index', 'indexes', 'postgres.index_count', 'line'],
'lines': [
@@ -970,6 +1068,7 @@ class Service(SimpleService):
def populate_queries(self):
self.queries[query_factory(QUERY_NAME_DATABASE)] = METRICS[QUERY_NAME_DATABASE]
self.queries[query_factory(QUERY_NAME_BACKENDS)] = METRICS[QUERY_NAME_BACKENDS]
+ self.queries[query_factory(QUERY_NAME_BACKEND_USAGE, self.server_version)] = METRICS[QUERY_NAME_BACKEND_USAGE]
self.queries[query_factory(QUERY_NAME_LOCKS)] = METRICS[QUERY_NAME_LOCKS]
self.queries[query_factory(QUERY_NAME_BGWRITER)] = METRICS[QUERY_NAME_BGWRITER]
self.queries[query_factory(QUERY_NAME_DIFF_LSN, self.server_version)] = METRICS[QUERY_NAME_WAL_WRITES]
@@ -1063,7 +1162,7 @@ def zero_lock_types(databases):
def hide_password(config):
- return dict((k, v if k != 'password' else '*****') for k, v in config.items())
+ return dict((k, v if k != 'password' or not v else '*****') for k, v in config.items())
def add_database_lock_chart(order, definitions, database_name):
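`query_factory()` now selects a `BACKEND_USAGE` query by the integer server version (in `server_version_num` style, for example `90603` for 9.6.3). A small sketch of that dispatch with illustrative version numbers:

```python
def backend_usage_variant(version):
    # Mirrors the branching added to query_factory() above.
    if version < 80200:
        return 'V72'
    if version < 90100:
        return 'V82'
    if version < 90200:
        return 'V91'
    if version < 100000:
        return 'V92'
    if version < 120000:
        return 'V10'
    return 'DEFAULT'


for v in (80100, 90006, 90105, 90603, 110010, 120004):
    print(v, '->', backend_usage_variant(v))
# prints V72, V82, V91, V92, V10 and DEFAULT respectively
```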
diff --git a/collectors/python.d.plugin/postgres/postgres.conf b/collectors/python.d.plugin/postgres/postgres.conf
index 3dd461408..1970a7a27 100644
--- a/collectors/python.d.plugin/postgres/postgres.conf
+++ b/collectors/python.d.plugin/postgres/postgres.conf
@@ -81,7 +81,7 @@
# sslkey : path/to/key # the location of the client key file
#
# SSL connection parameters description: https://www.postgresql.org/docs/current/libpq-ssl.html
-#
+#
# Additionally, the following options allow selective disabling of charts
#
# table_stats : false
@@ -93,6 +93,10 @@
# a postgres user for netdata and add its password below to allow
# netdata connect.
#
+# Please note that when Postgres runs inside a container, the client
+# (Netdata) is not considered local unless it runs inside the same
+# container.
+#
# Postgres supported versions are :
# - 9.3 (without autovacuum)
# - 9.4
@@ -116,6 +120,7 @@ tcp:
name : 'local'
database : 'postgres'
user : 'postgres'
+ password : 'postgres'
host : 'localhost'
port : 5432
@@ -123,6 +128,7 @@ tcpipv4:
name : 'local'
database : 'postgres'
user : 'postgres'
+ password : 'postgres'
host : '127.0.0.1'
port : 5432
@@ -130,5 +136,6 @@ tcpipv6:
name : 'local'
database : 'postgres'
user : 'postgres'
+ password : 'postgres'
host : '::1'
port : 5432
diff --git a/collectors/python.d.plugin/powerdns/README.md b/collectors/python.d.plugin/powerdns/README.md
index ac6e12f3a..610a665de 100644
--- a/collectors/python.d.plugin/powerdns/README.md
+++ b/collectors/python.d.plugin/powerdns/README.md
@@ -1,6 +1,12 @@
-# powerdns
+<!--
+title: "PowerDNS monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/powerdns/README.md
+sidebar_label: "PowerDNS"
+-->
-Module monitor powerdns performance and health metrics.
+# PowerDNS monitoring with Netdata
+
+Monitors authoritative server and recursor statistics.
Powerdns charts:
@@ -75,7 +81,15 @@ Powerdns charts:
- packetcache-entries
- negcache-entries
-## configuration
+## Configuration
+
+Edit the `python.d/powerdns.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/powerdns.conf
+```
```yaml
local:
diff --git a/collectors/python.d.plugin/powerdns/powerdns.chart.py b/collectors/python.d.plugin/powerdns/powerdns.chart.py
index bcf9f0d2d..b951e0c1a 100644
--- a/collectors/python.d.plugin/powerdns/powerdns.chart.py
+++ b/collectors/python.d.plugin/powerdns/powerdns.chart.py
@@ -8,7 +8,6 @@ from json import loads
from bases.FrameworkServices.UrlService import UrlService
-
ORDER = [
'questions',
'cache_usage',
diff --git a/collectors/python.d.plugin/proxysql/README.md b/collectors/python.d.plugin/proxysql/README.md
index 23a67751a..f1b369a44 100644
--- a/collectors/python.d.plugin/proxysql/README.md
+++ b/collectors/python.d.plugin/proxysql/README.md
@@ -1,6 +1,21 @@
-# proxysql
+<!--
+title: "ProxySQL monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/proxysql/README.md
+sidebar_label: "ProxySQL"
+-->
-This module monitors proxysql backend and frontend performance metrics.
+# ProxySQL monitoring with Netdata
+
+Monitors database backend and frontend performance metrics.
+
+## Requirements
+
+- python library [MySQLdb](https://github.com/PyMySQL/mysqlclient-python) (faster) or [PyMySQL](https://github.com/PyMySQL/PyMySQL) (slower)
+- a local `netdata` user able to connect to the ProxySQL server.
+
+To create the `netdata` user, follow [the documentation](https://github.com/sysown/proxysql/wiki/Users-configuration#creating-a-new-user).
+
+## Charts
It produces:
@@ -16,7 +31,7 @@ It produces:
- questions: total number of queries sent from frontends
- slow_queries: number of queries that ran for longer than the threshold in milliseconds defined in global variable `mysql-long_query_time`
-3. **Overall Bandwith (backends)**
+3. **Overall Bandwidth (backends)**
- in
- out
@@ -30,7 +45,7 @@ It produces:
- `4=OFFLINE_HARD`: when a server is put into OFFLINE_HARD mode, the existing connections are dropped, while new incoming connections aren't accepted either. This is equivalent to deleting the server from a hostgroup, or temporarily taking it out of the hostgroup for maintenance work
- `-1`: Unknown status
-5. **Bandwith (backends)**
+5. **Bandwidth (backends)**
- Backends
- in
@@ -65,7 +80,15 @@ It produces:
- Commands
- 100us, 500us, ..., 10s, inf: the total number of commands of the given type which executed within the specified time limit and the previous one.
-## configuration
+## Configuration
+
+Edit the `python.d/proxysql.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/proxysql.conf
+```
```yaml
tcpipv4:
diff --git a/collectors/python.d.plugin/proxysql/proxysql.chart.py b/collectors/python.d.plugin/proxysql/proxysql.chart.py
index c97147486..982c28ee7 100644
--- a/collectors/python.d.plugin/proxysql/proxysql.chart.py
+++ b/collectors/python.d.plugin/proxysql/proxysql.chart.py
@@ -189,7 +189,8 @@ CHARTS = {
'lines': []
},
'commands_duration': {
- 'options': [None, 'ProxySQL Commands Duration', 'milliseconds', 'commands', 'proxysql.commands_duration', 'line'],
+ 'options': [None, 'ProxySQL Commands Duration', 'milliseconds', 'commands', 'proxysql.commands_duration',
+ 'line'],
'lines': []
}
}
@@ -289,7 +290,7 @@ class Service(MySQLService):
@staticmethod
def histogram_chart(cmd):
return [
- 'commands_historgram_' + cmd['name'],
+ 'commands_histogram_' + cmd['name'],
None,
'ProxySQL {0} Command Histogram'.format(cmd['name'].title()),
'commands',
diff --git a/collectors/python.d.plugin/puppet/README.md b/collectors/python.d.plugin/puppet/README.md
index 295db0140..9b7c0a2c3 100644
--- a/collectors/python.d.plugin/puppet/README.md
+++ b/collectors/python.d.plugin/puppet/README.md
@@ -1,4 +1,10 @@
-# puppet
+<!--
+title: "Puppet monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/puppet/README.md
+sidebar_label: "Puppet"
+-->
+
+# Puppet monitoring with Netdata
Monitor status of Puppet Server and Puppet DB.
@@ -24,7 +30,15 @@ Following charts are drawn:
- max
- used
-## configuration
+## Configuration
+
+Edit the `python.d/puppet.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/puppet.conf
+```
```yaml
puppetdb:
diff --git a/collectors/python.d.plugin/puppet/puppet.chart.py b/collectors/python.d.plugin/puppet/puppet.chart.py
index 30e219da4..f8adf6006 100644
--- a/collectors/python.d.plugin/puppet/puppet.chart.py
+++ b/collectors/python.d.plugin/puppet/puppet.chart.py
@@ -12,14 +12,12 @@
#
import socket
-
from json import loads
from bases.FrameworkServices.UrlService import UrlService
update_every = 5
-
MiB = 1 << 20
CPU_SCALE = 1000
@@ -83,7 +81,7 @@ class Service(UrlService):
# NOTE: there are several ways to retrieve data
# 1. Only PE versions:
# https://puppet.com/docs/pe/2018.1/api_status/status_api_metrics_endpoints.html
- # 2. Inidividual Metrics API (JMX):
+ # 2. Individual Metrics API (JMX):
# https://puppet.com/docs/pe/2018.1/api_status/metrics_api.html
# 3. Extended status at debug level:
# https://puppet.com/docs/pe/2018.1/api_status/status_api_json_endpoints.html
@@ -108,8 +106,8 @@ class Service(UrlService):
non_heap_mem = jvm_metrics['non-heap-memory']
for k in ['max', 'committed', 'used', 'init']:
- data['jvm_heap_'+k] = heap_mem[k]
- data['jvm_nonheap_'+k] = non_heap_mem[k]
+ data['jvm_heap_' + k] = heap_mem[k]
+ data['jvm_nonheap_' + k] = non_heap_mem[k]
fd_open = jvm_metrics['file-descriptors']
data['fd_max'] = fd_open['max']
diff --git a/collectors/python.d.plugin/python.d.conf b/collectors/python.d.plugin/python.d.conf
index 08d59c4d3..61cfd6093 100644
--- a/collectors/python.d.plugin/python.d.conf
+++ b/collectors/python.d.plugin/python.d.conf
@@ -29,7 +29,9 @@ gc_interval: 300
# apache_cache has been replaced by web_log
# adaptec_raid: yes
+# alarms: yes
# am2320: yes
+# anomalies: no
apache_cache: no
# beanstalk: yes
# bind_rndc: yes
@@ -58,6 +60,7 @@ gunicorn_log: no
# haproxy: yes
# hddtemp: yes
# httpcheck: yes
+hpssa: no
# icecast: yes
# ipfs: yes
# isc_dhcpd: yes
diff --git a/collectors/python.d.plugin/python.d.plugin b/collectors/python.d.plugin/python.d.plugin
deleted file mode 100644
index 7b27acdd5..000000000
--- a/collectors/python.d.plugin/python.d.plugin
+++ /dev/null
@@ -1,784 +0,0 @@
-#!/usr/bin/env bash
-'''':;
-pybinary=$(which python || which python3 || which python2)
-filtered=()
-for arg in "$@"
-do
- case $arg in
- -p*) pybinary=${arg:2}
- shift 1 ;;
- *) filtered+=("$arg") ;;
- esac
-done
-if [ "$pybinary" = "" ]
-then
- echo "ERROR python IS NOT AVAILABLE IN THIS SYSTEM"
- exit 1
-fi
-exec "$pybinary" "$0" "${filtered[@]}" # '''
-
-# -*- coding: utf-8 -*-
-# Description:
-# Author: Pawel Krupa (paulfantom)
-# Author: Ilya Mashchenko (l2isbad)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-import collections
-import copy
-import gc
-import json
-import os
-import pprint
-import re
-import sys
-import time
-import threading
-import types
-
-try:
- from queue import Queue
-except ImportError:
- from Queue import Queue
-
-PY_VERSION = sys.version_info[:2] # (major=3, minor=7, micro=3, releaselevel='final', serial=0)
-
-
-if PY_VERSION > (3, 1):
- from importlib.machinery import SourceFileLoader
-else:
- from imp import load_source as SourceFileLoader
-
-
-ENV_NETDATA_USER_CONFIG_DIR = 'NETDATA_USER_CONFIG_DIR'
-ENV_NETDATA_STOCK_CONFIG_DIR = 'NETDATA_STOCK_CONFIG_DIR'
-ENV_NETDATA_PLUGINS_DIR = 'NETDATA_PLUGINS_DIR'
-ENV_NETDATA_LIB_DIR = 'NETDATA_LIB_DIR'
-ENV_NETDATA_UPDATE_EVERY = 'NETDATA_UPDATE_EVERY'
-
-
-def add_pythond_packages():
- pluginsd = os.getenv(ENV_NETDATA_PLUGINS_DIR, os.path.dirname(__file__))
- pythond = os.path.abspath(pluginsd + '/../python.d')
- packages = os.path.join(pythond, 'python_modules')
- sys.path.append(packages)
-
-
-add_pythond_packages()
-
-
-from bases.collection import safe_print
-from bases.loggers import PythonDLogger
-from bases.loaders import load_config
-
-try:
- from collections import OrderedDict
-except ImportError:
- from third_party.ordereddict import OrderedDict
-
-
-def dirs():
- var_lib = os.getenv(
- ENV_NETDATA_LIB_DIR,
- '/var/lib/netdata',
- )
- plugin_user_config = os.getenv(
- ENV_NETDATA_USER_CONFIG_DIR,
- '/etc/netdata',
- )
- plugin_stock_config = os.getenv(
- ENV_NETDATA_STOCK_CONFIG_DIR,
- '/usr/lib/netdata/conf.d',
- )
- pluginsd = os.getenv(
- ENV_NETDATA_PLUGINS_DIR,
- os.path.dirname(__file__),
- )
- modules_user_config = os.path.join(plugin_user_config, 'python.d')
- modules_stock_config = os.path.join(plugin_stock_config, 'python.d')
- modules = os.path.abspath(pluginsd + '/../python.d')
-
- Dirs = collections.namedtuple(
- 'Dirs',
- [
- 'plugin_user_config',
- 'plugin_stock_config',
- 'modules_user_config',
- 'modules_stock_config',
- 'modules',
- 'var_lib',
- ]
- )
- return Dirs(
- plugin_user_config,
- plugin_stock_config,
- modules_user_config,
- modules_stock_config,
- modules,
- var_lib,
- )
-
-
-DIRS = dirs()
-
-IS_ATTY = sys.stdout.isatty()
-
-MODULE_SUFFIX = '.chart.py'
-
-
-def available_modules():
- obsolete = (
- 'apache_cache', # replaced by web_log
- 'cpuidle', # rewritten in C
- 'cpufreq', # rewritten in C
- 'gunicorn_log', # replaced by web_log
- 'linux_power_supply', # rewritten in C
- 'nginx_log', # replaced by web_log
- 'mdstat', # rewritten in C
- 'sslcheck', # rewritten in Go, memory leak bug https://github.com/netdata/netdata/issues/5624
- 'unbound', # rewritten in Go
- )
-
- files = sorted(os.listdir(DIRS.modules))
- modules = [m[:-len(MODULE_SUFFIX)] for m in files if m.endswith(MODULE_SUFFIX)]
- avail = [m for m in modules if m not in obsolete]
- return tuple(avail)
-
-
-AVAILABLE_MODULES = available_modules()
-
-JOB_BASE_CONF = {
- 'update_every': int(os.getenv(ENV_NETDATA_UPDATE_EVERY, 1)),
- 'priority': 60000,
- 'autodetection_retry': 0,
- 'chart_cleanup': 10,
- 'penalty': True,
- 'name': str(),
-}
-
-PLUGIN_BASE_CONF = {
- 'enabled': True,
- 'default_run': True,
- 'gc_run': True,
- 'gc_interval': 300,
-}
-
-
-def multi_path_find(name, *paths):
- for path in paths:
- abs_name = os.path.join(path, name)
- if os.path.isfile(abs_name):
- return abs_name
- return str()
-
-
-def load_module(name):
- abs_path = os.path.join(DIRS.modules, '{0}{1}'.format(name, MODULE_SUFFIX))
- module = SourceFileLoader(name, abs_path)
- if isinstance(module, types.ModuleType):
- return module
- return module.load_module()
-
-
-class ModuleConfig:
- def __init__(self, name, config=None):
- self.name = name
- self.config = config or OrderedDict()
-
- def load(self, abs_path):
- self.config.update(load_config(abs_path) or dict())
-
- def defaults(self):
- keys = (
- 'update_every',
- 'priority',
- 'autodetection_retry',
- 'chart_cleanup',
- 'penalty',
- )
- return dict((k, self.config[k]) for k in keys if k in self.config)
-
- def create_job(self, job_name, job_config=None):
- job_config = job_config or dict()
-
- config = OrderedDict()
- config.update(job_config)
- config['job_name'] = job_name
- for k, v in self.defaults().items():
- config.setdefault(k, v)
-
- return config
-
- def job_names(self):
- return [v for v in self.config if isinstance(self.config.get(v), dict)]
-
- def single_job(self):
- return [self.create_job(self.name, self.config)]
-
- def multi_job(self):
- return [self.create_job(n, self.config[n]) for n in self.job_names()]
-
- def create_jobs(self):
- return self.multi_job() or self.single_job()
-
-
-class JobsConfigsBuilder:
- def __init__(self, config_dirs):
- self.config_dirs = config_dirs
- self.log = PythonDLogger()
- self.job_defaults = None
- self.module_defaults = None
- self.min_update_every = None
-
- def load_module_config(self, module_name):
- name = '{0}.conf'.format(module_name)
- self.log.debug("[{0}] looking for '{1}' in {2}".format(module_name, name, self.config_dirs))
- config = ModuleConfig(module_name)
-
- abs_path = multi_path_find(name, *self.config_dirs)
- if not abs_path:
- self.log.warning("[{0}] '{1}' was not found".format(module_name, name))
- return config
-
- self.log.debug("[{0}] loading '{1}'".format(module_name, abs_path))
- try:
- config.load(abs_path)
- except Exception as error:
- self.log.error("[{0}] error on loading '{1}' : {2}".format(module_name, abs_path, repr(error)))
- return None
-
- self.log.debug("[{0}] '{1}' is loaded".format(module_name, abs_path))
- return config
-
- @staticmethod
- def apply_defaults(jobs, defaults):
- if defaults is None:
- return
- for k, v in defaults.items():
- for job in jobs:
- job.setdefault(k, v)
-
- def set_min_update_every(self, jobs, min_update_every):
- if min_update_every is None:
- return
- for job in jobs:
- if 'update_every' in job and job['update_every'] < self.min_update_every:
- job['update_every'] = self.min_update_every
-
- def build(self, module_name):
- config = self.load_module_config(module_name)
- if config is None:
- return None
-
- configs = config.create_jobs()
- self.log.info("[{0}] built {1} job(s) configs".format(module_name, len(configs)))
-
- self.apply_defaults(configs, self.module_defaults)
- self.apply_defaults(configs, self.job_defaults)
- self.set_min_update_every(configs, self.min_update_every)
-
- return configs
-
-
-JOB_STATUS_ACTIVE = 'active'
-JOB_STATUS_RECOVERING = 'recovering'
-JOB_STATUS_DROPPED = 'dropped'
-JOB_STATUS_INIT = 'initial'
-
-
-class Job(threading.Thread):
- inf = -1
-
- def __init__(self, service, module_name, config):
- threading.Thread.__init__(self)
- self.daemon = True
- self.service = service
- self.module_name = module_name
- self.config = config
- self.real_name = config['job_name']
- self.actual_name = config['override_name'] or self.real_name
- self.autodetection_retry = config['autodetection_retry']
- self.checks = self.inf
- self.job = None
- self.status = JOB_STATUS_INIT
-
- def is_inited(self):
- return self.job is not None
-
- def init(self):
- self.job = self.service(configuration=copy.deepcopy(self.config))
-
- def check(self):
- ok = self.job.check()
- self.checks -= self.checks != self.inf and not ok
- return ok
-
- def create(self):
- self.job.create()
-
- def need_to_recheck(self):
- return self.autodetection_retry != 0 and self.checks != 0
-
- def run(self):
- self.job.run()
-
-
-class ModuleSrc:
- def __init__(self, name):
- self.name = name
- self.src = None
-
- def load(self):
- self.src = load_module(self.name)
-
- def get(self, key):
- return getattr(self.src, key, None)
-
- def service(self):
- return self.get('Service')
-
- def defaults(self):
- keys = (
- 'update_every',
- 'priority',
- 'autodetection_retry',
- 'chart_cleanup',
- 'penalty',
- )
- return dict((k, self.get(k)) for k in keys if self.get(k) is not None)
-
- def is_disabled_by_default(self):
- return bool(self.get('disabled_by_default'))
-
-
-class JobsStatuses:
- def __init__(self):
- self.items = OrderedDict()
-
- def dump(self):
- return json.dumps(self.items, indent=2)
-
- def get(self, module_name, job_name):
- if module_name not in self.items:
- return None
- return self.items[module_name].get(job_name)
-
- def has(self, module_name, job_name):
- return self.get(module_name, job_name) is not None
-
- def from_file(self, path):
- with open(path) as f:
- data = json.load(f)
- return self.from_json(data)
-
- @staticmethod
- def from_json(items):
- if not isinstance(items, dict):
- raise Exception('items obj has wrong type : {0}'.format(type(items)))
- if not items:
- return JobsStatuses()
-
- v = OrderedDict()
- for mod_name in sorted(items):
- if not items[mod_name]:
- continue
- v[mod_name] = OrderedDict()
- for job_name in sorted(items[mod_name]):
- v[mod_name][job_name] = items[mod_name][job_name]
-
- rv = JobsStatuses()
- rv.items = v
- return rv
-
- @staticmethod
- def from_jobs(jobs):
- v = OrderedDict()
- for job in jobs:
- status = job.status
- if status not in (JOB_STATUS_ACTIVE, JOB_STATUS_RECOVERING):
- continue
- if job.module_name not in v:
- v[job.module_name] = OrderedDict()
- v[job.module_name][job.real_name] = status
-
- rv = JobsStatuses()
- rv.items = v
- return rv
-
-
-class StdoutSaver:
- @staticmethod
- def save(dump):
- print(dump)
-
-
-class CachedFileSaver:
- def __init__(self, path):
- self.last_save_success = False
- self.last_saved_dump = str()
- self.path = path
-
- def save(self, dump):
- if self.last_save_success and self.last_saved_dump == dump:
- return
- try:
- with open(self.path, 'w') as out:
- out.write(dump)
- except Exception:
- self.last_save_success = False
- raise
- self.last_saved_dump = dump
- self.last_save_success = True
-
-
-class PluginConfig(dict):
- def __init__(self, *args):
- dict.__init__(self, *args)
-
- def is_module_explicitly_enabled(self, module_name):
- return self._is_module_enabled(module_name, True)
-
- def is_module_enabled(self, module_name):
- return self._is_module_enabled(module_name, False)
-
- def _is_module_enabled(self, module_name, explicit):
- if module_name in self:
- return self[module_name]
- if explicit:
- return False
- return self['default_run']
-
-
-class Plugin:
- config_name = 'python.d.conf'
- jobs_status_dump_name = 'pythond-jobs-statuses.json'
-
- def __init__(self, modules_to_run, min_update_every):
- self.modules_to_run = modules_to_run
- self.min_update_every = min_update_every
- self.config = PluginConfig(PLUGIN_BASE_CONF)
- self.log = PythonDLogger()
- self.started_jobs = collections.defaultdict(dict)
- self.jobs = list()
- self.saver = None
- self.runs = 0
-
- def load_config(self):
- paths = [
- DIRS.plugin_user_config,
- DIRS.plugin_stock_config,
- ]
- self.log.debug("looking for '{0}' in {1}".format(self.config_name, paths))
- abs_path = multi_path_find(self.config_name, *paths)
- if not abs_path:
- self.log.warning("'{0}' was not found, using defaults".format(self.config_name))
- return True
-
- self.log.debug("loading '{0}'".format(abs_path))
- try:
- config = load_config(abs_path)
- except Exception as error:
- self.log.error("error on loading '{0}' : {1}".format(abs_path, repr(error)))
- return False
-
- self.log.debug("'{0}' is loaded".format(abs_path))
- self.config.update(config)
- return True
-
- def load_job_statuses(self):
- self.log.debug("looking for '{0}' in {1}".format(self.jobs_status_dump_name, DIRS.var_lib))
- abs_path = multi_path_find(self.jobs_status_dump_name, DIRS.var_lib)
- if not abs_path:
- self.log.warning("'{0}' was not found".format(self.jobs_status_dump_name))
- return
-
- self.log.debug("loading '{0}'".format(abs_path))
- try:
- statuses = JobsStatuses().from_file(abs_path)
- except Exception as error:
- self.log.warning("error on loading '{0}' : {1}".format(abs_path, repr(error)))
- return None
- self.log.debug("'{0}' is loaded".format(abs_path))
- return statuses
-
- def create_jobs(self, job_statuses=None):
- paths = [
- DIRS.modules_user_config,
- DIRS.modules_stock_config,
- ]
-
- builder = JobsConfigsBuilder(paths)
- builder.job_defaults = JOB_BASE_CONF
- builder.min_update_every = self.min_update_every
-
- jobs = list()
- for mod_name in self.modules_to_run:
- if not self.config.is_module_enabled(mod_name):
- self.log.info("[{0}] is disabled in the configuration file, skipping it".format(mod_name))
- continue
-
- src = ModuleSrc(mod_name)
- try:
- src.load()
- except Exception as error:
- self.log.warning("[{0}] error on loading source : {1}, skipping it".format(mod_name, repr(error)))
- continue
-
- if not (src.service() and callable(src.service())):
- self.log.warning("[{0}] has no callable Service object, skipping it".format(mod_name))
- continue
-
- if src.is_disabled_by_default() and not self.config.is_module_explicitly_enabled(mod_name):
- self.log.info("[{0}] is disabled by default, skipping it".format(mod_name))
- continue
-
- builder.module_defaults = src.defaults()
- configs = builder.build(mod_name)
- if not configs:
- self.log.info("[{0}] has no job configs, skipping it".format(mod_name))
- continue
-
- for config in configs:
- config['job_name'] = re.sub(r'\s+', '_', config['job_name'])
- config['override_name'] = re.sub(r'\s+', '_', config.pop('name'))
-
- job = Job(src.service(), mod_name, config)
-
- was_previously_active = job_statuses and job_statuses.has(job.module_name, job.real_name)
- if was_previously_active and job.autodetection_retry == 0:
- self.log.debug('{0}[{1}] was previously active, applying recovering settings'.format(
- job.module_name, job.real_name))
- job.checks = 11
- job.autodetection_retry = 30
-
- jobs.append(job)
-
- return jobs
-
- def setup(self):
- if not self.load_config():
- return False
-
- if not self.config['enabled']:
- self.log.info('disabled in the configuration file')
- return False
-
- statuses = self.load_job_statuses()
-
- self.jobs = self.create_jobs(statuses)
- if not self.jobs:
- self.log.info('no jobs to run')
- return False
-
- if not IS_ATTY:
- abs_path = os.path.join(DIRS.var_lib, self.jobs_status_dump_name)
- self.saver = CachedFileSaver(abs_path)
- return True
-
- def start_jobs(self, *jobs):
- for job in jobs:
- if job.status not in (JOB_STATUS_INIT, JOB_STATUS_RECOVERING):
- continue
-
- if job.actual_name in self.started_jobs[job.module_name]:
- self.log.info('{0}[{1}] : already served by another job, skipping it'.format(
- job.module_name, job.real_name))
- job.status = JOB_STATUS_DROPPED
- continue
-
- if not job.is_inited():
- try:
- job.init()
- except Exception as error:
- self.log.warning("{0}[{1}] : unhandled exception on init : {2}, skipping the job".format(
- job.module_name, job.real_name, repr(error)))
- job.status = JOB_STATUS_DROPPED
- continue
-
- try:
- ok = job.check()
- except Exception as error:
- self.log.warning("{0}[{1}] : unhandled exception on check : {2}, skipping the job".format(
- job.module_name, job.real_name, repr(error)))
- job.status = JOB_STATUS_DROPPED
- continue
- if not ok:
- self.log.info('{0}[{1}] : check failed'.format(job.module_name, job.real_name))
- job.status = JOB_STATUS_RECOVERING if job.need_to_recheck() else JOB_STATUS_DROPPED
- continue
- self.log.info('{0}[{1}] : check success'.format(job.module_name, job.real_name))
-
- try:
- job.create()
- except Exception as error:
- self.log.warning("{0}[{1}] : unhandled exception on create : {2}, skipping the job".format(
- job.module_name, job.real_name, repr(error)))
- job.status = JOB_STATUS_DROPPED
- continue
-
- self.started_jobs[job.module_name] = job.actual_name
- job.status = JOB_STATUS_ACTIVE
- job.start()
-
- @staticmethod
- def keep_alive():
- if not IS_ATTY:
- safe_print('\n')
-
- def garbage_collection(self):
- if self.config['gc_run'] and self.runs % self.config['gc_interval'] == 0:
- v = gc.collect()
- self.log.debug('GC collection run result: {0}'.format(v))
-
- def restart_recovering_jobs(self):
- for job in self.jobs:
- if job.status != JOB_STATUS_RECOVERING:
- continue
- if self.runs % job.autodetection_retry != 0:
- continue
- self.start_jobs(job)
-
- def cleanup_jobs(self):
- self.jobs = [j for j in self.jobs if j.status != JOB_STATUS_DROPPED]
-
- def have_alive_jobs(self):
- return next(
- (True for job in self.jobs if job.status in (JOB_STATUS_RECOVERING, JOB_STATUS_ACTIVE)),
- False,
- )
-
- def save_job_statuses(self):
- if self.saver is None:
- return
- if self.runs % 10 != 0:
- return
- dump = JobsStatuses().from_jobs(self.jobs).dump()
- try:
- self.saver.save(dump)
- except Exception as error:
- self.log.error("error on saving jobs statuses dump : {0}".format(repr(error)))
-
- def serve_once(self):
- if not self.have_alive_jobs():
- self.log.info('no jobs to serve')
- return False
-
- time.sleep(1)
- self.runs += 1
-
- self.keep_alive()
- self.garbage_collection()
- self.cleanup_jobs()
- self.restart_recovering_jobs()
- self.save_job_statuses()
- return True
-
- def serve(self):
- while self.serve_once():
- pass
-
- def run(self):
- self.start_jobs(*self.jobs)
- self.serve()
-
-
-def parse_command_line():
- opts = sys.argv[:][1:]
-
- debug = False
- trace = False
- update_every = 1
- modules_to_run = list()
-
- def find_first_positive_int(values):
- return next((v for v in values if v.isdigit() and int(v) >= 1), None)
-
- u = find_first_positive_int(opts)
- if u is not None:
- update_every = int(u)
- opts.remove(u)
- if 'debug' in opts:
- debug = True
- opts.remove('debug')
- if 'trace' in opts:
- trace = True
- opts.remove('trace')
- if opts:
- modules_to_run = list(opts)
-
- cmd = collections.namedtuple(
- 'CMD',
- [
- 'update_every',
- 'debug',
- 'trace',
- 'modules_to_run',
- ])
- return cmd(
- update_every,
- debug,
- trace,
- modules_to_run
- )
-
-
-def guess_module(modules, *names):
- def guess(n):
- found = None
- for i, _ in enumerate(n):
- cur = [x for x in modules if x.startswith(name[:i + 1])]
- if not cur:
- return found
- found = cur
- return found
-
- guessed = list()
- for name in names:
- name = name.lower()
- m = guess(name)
- if m:
- guessed.extend(m)
- return sorted(set(guessed))
-
-
-def disable():
- if not IS_ATTY:
- safe_print('DISABLE')
- exit(0)
-
-
-def main():
- cmd = parse_command_line()
- log = PythonDLogger()
-
- if cmd.debug:
- log.logger.severity = 'DEBUG'
- if cmd.trace:
- log.log_traceback = True
-
- log.info('using python v{0}'.format(PY_VERSION[0]))
-
- unknown = set(cmd.modules_to_run) - set(AVAILABLE_MODULES)
- if unknown:
- log.error('unknown modules : {0}'.format(sorted(list(unknown))))
- guessed = guess_module(AVAILABLE_MODULES, *cmd.modules_to_run)
- if guessed:
- log.info('probably you meant : \n{0}'.format(pprint.pformat(guessed, width=1)))
- return
-
- p = Plugin(
- cmd.modules_to_run or AVAILABLE_MODULES,
- cmd.update_every,
- )
-
- try:
- if not p.setup():
- return
- p.run()
- except KeyboardInterrupt:
- pass
- log.info('exiting from main...')
-
-
-if __name__ == "__main__":
- main()
- disable()
diff --git a/collectors/python.d.plugin/python.d.plugin.in b/collectors/python.d.plugin/python.d.plugin.in
index 44b6671cb..9d575d86f 100644
--- a/collectors/python.d.plugin/python.d.plugin.in
+++ b/collectors/python.d.plugin/python.d.plugin.in
@@ -42,18 +42,17 @@ except ImportError:
PY_VERSION = sys.version_info[:2] # (major=3, minor=7, micro=3, releaselevel='final', serial=0)
-
if PY_VERSION > (3, 1):
from importlib.machinery import SourceFileLoader
else:
from imp import load_source as SourceFileLoader
-
ENV_NETDATA_USER_CONFIG_DIR = 'NETDATA_USER_CONFIG_DIR'
ENV_NETDATA_STOCK_CONFIG_DIR = 'NETDATA_STOCK_CONFIG_DIR'
ENV_NETDATA_PLUGINS_DIR = 'NETDATA_PLUGINS_DIR'
ENV_NETDATA_LIB_DIR = 'NETDATA_LIB_DIR'
ENV_NETDATA_UPDATE_EVERY = 'NETDATA_UPDATE_EVERY'
+ENV_NETDATA_LOCK_DIR = 'NETDATA_LOCK_DIR'
def add_pythond_packages():
@@ -65,10 +64,10 @@ def add_pythond_packages():
add_pythond_packages()
-
from bases.collection import safe_print
from bases.loggers import PythonDLogger
from bases.loaders import load_config
+from third_party import filelock
try:
from collections import OrderedDict
@@ -93,6 +92,10 @@ def dirs():
ENV_NETDATA_PLUGINS_DIR,
os.path.dirname(__file__),
)
+ locks = os.getenv(
+ ENV_NETDATA_LOCK_DIR,
+ os.path.join('@varlibdir_POST@', 'lock')
+ )
modules_user_config = os.path.join(plugin_user_config, 'python.d')
modules_stock_config = os.path.join(plugin_stock_config, 'python.d')
modules = os.path.abspath(pluginsd + '/../python.d')
@@ -106,6 +109,7 @@ def dirs():
'modules_stock_config',
'modules',
'var_lib',
+ 'locks',
]
)
return Dirs(
@@ -115,6 +119,7 @@ def dirs():
modules_stock_config,
modules,
var_lib,
+ locks,
)
@@ -173,7 +178,7 @@ def multi_path_find(name, *paths):
def load_module(name):
abs_path = os.path.join(DIRS.modules, '{0}{1}'.format(name, MODULE_SUFFIX))
- module = SourceFileLoader(name, abs_path)
+ module = SourceFileLoader('pythond_' + name, abs_path)
if isinstance(module, types.ModuleType):
return module
return module.load_module()
@@ -307,6 +312,9 @@ class Job(threading.Thread):
def init(self):
self.job = self.service(configuration=copy.deepcopy(self.config))
+ def full_name(self):
+ return self.job.name
+
def check(self):
ok = self.job.check()
self.checks -= self.checks != self.inf and not ok
@@ -448,15 +456,45 @@ class PluginConfig(dict):
return self['default_run']
+class FileLockRegistry:
+ def __init__(self, path):
+ self.path = path
+ self.locks = dict()
+
+ def register(self, name):
+ if name in self.locks:
+ return
+ file = os.path.join(self.path, '{0}.collector.lock'.format(name))
+ lock = filelock.FileLock(file)
+ lock.acquire(timeout=0)
+ self.locks[name] = lock
+
+ def unregister(self, name):
+ if name not in self.locks:
+ return
+ lock = self.locks[name]
+ lock.release()
+ del self.locks[name]
+
+
+class DummyRegistry:
+ def register(self, name):
+ pass
+
+ def unregister(self, name):
+ pass
+
+
class Plugin:
config_name = 'python.d.conf'
jobs_status_dump_name = 'pythond-jobs-statuses.json'
- def __init__(self, modules_to_run, min_update_every):
+ def __init__(self, modules_to_run, min_update_every, registry):
self.modules_to_run = modules_to_run
self.min_update_every = min_update_every
self.config = PluginConfig(PLUGIN_BASE_CONF)
self.log = PythonDLogger()
+ self.registry = registry
self.started_jobs = collections.defaultdict(dict)
self.jobs = list()
self.saver = None
@@ -590,7 +628,7 @@ class Plugin:
job.init()
except Exception as error:
self.log.warning("{0}[{1}] : unhandled exception on init : {2}, skipping the job".format(
- job.module_name, job.real_name, repr(error)))
+ job.module_name, job.real_name, repr(error)))
job.status = JOB_STATUS_DROPPED
continue
@@ -598,7 +636,7 @@ class Plugin:
ok = job.check()
except Exception as error:
self.log.warning("{0}[{1}] : unhandled exception on check : {2}, skipping the job".format(
- job.module_name, job.real_name, repr(error)))
+ job.module_name, job.real_name, repr(error)))
job.status = JOB_STATUS_DROPPED
continue
if not ok:
@@ -608,11 +646,29 @@ class Plugin:
self.log.info('{0}[{1}] : check success'.format(job.module_name, job.real_name))
try:
+ self.registry.register(job.full_name())
+ except filelock.Timeout as error:
+ self.log.info('{0}[{1}] : already registered by another process, skipping the job ({2})'.format(
+ job.module_name, job.real_name, error))
+ job.status = JOB_STATUS_DROPPED
+ continue
+ except Exception as error:
+ self.log.warning('{0}[{1}] : registration failed: {2}, skipping the job'.format(
+ job.module_name, job.real_name, error))
+ job.status = JOB_STATUS_DROPPED
+ continue
+
+ try:
job.create()
except Exception as error:
self.log.warning("{0}[{1}] : unhandled exception on create : {2}, skipping the job".format(
- job.module_name, job.real_name, repr(error)))
+ job.module_name, job.real_name, repr(error)))
job.status = JOB_STATUS_DROPPED
+ try:
+ self.registry.unregister(job.full_name())
+ except Exception as error:
+ self.log.warning('{0}[{1}] : deregistration failed: {2}'.format(
+ job.module_name, job.real_name, error))
continue
self.started_jobs[job.module_name] = job.actual_name
@@ -686,6 +742,7 @@ def parse_command_line():
debug = False
trace = False
+ nolock = False
update_every = 1
modules_to_run = list()
@@ -702,6 +759,9 @@ def parse_command_line():
if 'trace' in opts:
trace = True
opts.remove('trace')
+ if 'nolock' in opts:
+ nolock = True
+ opts.remove('nolock')
if opts:
modules_to_run = list(opts)
@@ -711,13 +771,15 @@ def parse_command_line():
'update_every',
'debug',
'trace',
+ 'nolock',
'modules_to_run',
])
return cmd(
update_every,
debug,
trace,
- modules_to_run
+ nolock,
+ modules_to_run,
)
@@ -765,9 +827,15 @@ def main():
log.info('probably you meant : \n{0}'.format(pprint.pformat(guessed, width=1)))
return
+ if DIRS.locks and not cmd.nolock:
+ registry = FileLockRegistry(DIRS.locks)
+ else:
+ registry = DummyRegistry()
+
p = Plugin(
cmd.modules_to_run or AVAILABLE_MODULES,
cmd.update_every,
+ registry,
)
try:
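For context, a minimal sketch of how the per-job lock registered above is meant to behave. The lock directory comes from `NETDATA_LOCK_DIR` (falling back to `<varlibdir>/lock`); the fallback path used here is illustrative, and the import assumes the plugin's `python_modules` directory is on `sys.path`.

```python
# Sketch only: fail-fast registration of a collector job, mirroring the hunk above.
import os

from third_party import filelock  # vendored module added in this patch

lock_dir = os.getenv('NETDATA_LOCK_DIR', '/var/lib/netdata/lock')  # illustrative fallback
lock = filelock.FileLock(os.path.join(lock_dir, 'example.collector.lock'))

try:
    lock.acquire(timeout=0)  # timeout=0: exactly one attempt, fail fast
    print('job registered, this process will collect it')
except filelock.Timeout:
    print('already registered by another process, skipping the job')
finally:
    if lock.is_locked:
        lock.release()
```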
diff --git a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/ExecutableService.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/ExecutableService.py
index f63cb7c2f..dea50eea0 100644
--- a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/ExecutableService.py
+++ b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/ExecutableService.py
@@ -22,12 +22,14 @@ class ExecutableService(SimpleService):
Get raw data from executed command
:return: <list>
"""
+ command = command or self.command
+ self.debug("Executing command '{0}'".format(' '.join(command)))
try:
- p = Popen(command if command else self.command, stdout=PIPE, stderr=PIPE)
+ p = Popen(command, stdout=PIPE, stderr=PIPE)
except Exception as error:
- self.error('Executing command {command} resulted in error: {error}'.format(command=command or self.command,
- error=error))
+ self.error('Executing command {0} resulted in error: {1}'.format(command, error))
return None
+
data = list()
std = p.stderr if stderr else p.stdout
for line in std:
diff --git a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/MySQLService.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/MySQLService.py
index 354d09ad8..7f5c7d221 100644
--- a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/MySQLService.py
+++ b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/MySQLService.py
@@ -51,11 +51,7 @@ class MySQLService(SimpleService):
properties['host'] = conf['host']
properties['port'] = int(conf.get('port', 3306))
elif conf.get('my.cnf'):
- if MySQLdb.__name__ == 'pymysql':
- # TODO: this is probablt wrong, it depends on version
- self.error('"my.cnf" parsing is not working for pymysql')
- else:
- properties['read_default_file'] = conf['my.cnf']
+ properties['read_default_file'] = conf['my.cnf']
if conf.get('ssl'):
properties['ssl'] = conf['ssl']
diff --git a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SimpleService.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SimpleService.py
index 4dfd226b0..c304ccec2 100644
--- a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SimpleService.py
+++ b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SimpleService.py
@@ -55,11 +55,18 @@ class RuntimeCounters:
self.penalty = round(min(self.retries * self.update_every / 2, MAX_PENALTY))
+def clean_module_name(name):
+ if name.startswith('pythond_'):
+ return name[8:]
+ return name
+
+
class SimpleService(PythonDLimitedLogger, object):
"""
Prototype of Service class.
Implemented basic functionality to run jobs by `python.d.plugin`
"""
+
def __init__(self, configuration, name=''):
"""
:param configuration: <dict>
@@ -70,7 +77,7 @@ class SimpleService(PythonDLimitedLogger, object):
self.order = list()
self.definitions = dict()
- self.module_name = self.__module__
+ self.module_name = clean_module_name(self.__module__)
self.job_name = configuration.pop('job_name')
self.override_name = configuration.pop('override_name')
self.fake_name = None
diff --git a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SocketService.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SocketService.py
index 337bf57d8..bef3792da 100644
--- a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SocketService.py
+++ b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SocketService.py
@@ -247,7 +247,7 @@ class SocketService(SimpleService):
if self._check_raw_data(data):
break
- self.debug('final response: {0}'.format(data))
+ self.debug(u'final response: {0}'.format(data))
return data
def _get_raw_data(self, raw=False, request=None):
diff --git a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/UrlService.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/UrlService.py
index cfc7899e5..1faf036a4 100644
--- a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/UrlService.py
+++ b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/UrlService.py
@@ -47,6 +47,7 @@ class UrlService(SimpleService):
self.proxy_url = self.configuration.get('proxy_url')
self.method = self.configuration.get('method', 'GET')
self.header = self.configuration.get('header')
+ self.body = self.configuration.get('body')
self.request_timeout = self.configuration.get('timeout', 1)
self.respect_retry_after_header = self.configuration.get('respect_retry_after_header')
self.tls_verify = self.configuration.get('tls_verify')
@@ -119,15 +120,17 @@ class UrlService(SimpleService):
:return: str
"""
try:
- status, data = self._get_raw_data_with_status(url, manager, **kwargs)
+ response = self._do_request(url, manager, **kwargs)
except Exception as error:
self.error('Url: {url}. Error: {error}'.format(url=url or self.url, error=error))
return None
- if status == 200:
- return data
+ if response.status == 200:
+ if isinstance(response.data, str):
+ return response.data
+ return response.data.decode(errors='ignore')
else:
- self.debug('Url: {url}. Http response status code: {code}'.format(url=url or self.url, code=status))
+ self.debug('Url: {url}. Http response status code: {code}'.format(url=url or self.url, code=response.status))
return None
def _get_raw_data_with_status(self, url=None, manager=None, retries=1, redirect=True, **kwargs):
@@ -135,12 +138,26 @@ class UrlService(SimpleService):
Get status and response body content from http request. Does not catch exceptions
:return: int, str
"""
+ response = self._do_request(url, manager, retries, redirect, **kwargs)
+
+ if isinstance(response.data, str):
+ return response.status, response.data
+ return response.status, response.data.decode(errors='ignore')
+
+ def _do_request(self, url=None, manager=None, retries=1, redirect=True, **kwargs):
+ """
+ Get response from http request. Does not catch exceptions
+ :return: HTTPResponse
+ """
url = url or self.url
manager = manager or self._manager
retry = urllib3.Retry(retries)
if hasattr(retry, 'respect_retry_after_header'):
retry.respect_retry_after_header = bool(self.respect_retry_after_header)
+ if self.body:
+ kwargs['body'] = self.body
+
response = manager.request(
method=self.method,
url=url,
@@ -150,9 +167,7 @@ class UrlService(SimpleService):
redirect=redirect,
**kwargs
)
- if isinstance(response.data, str):
- return response.status, response.data
- return response.status, response.data.decode()
+ return response
def check(self):
"""
diff --git a/collectors/python.d.plugin/python_modules/bases/charts.py b/collectors/python.d.plugin/python_modules/bases/charts.py
index 6e78ed6e7..93be43d14 100644
--- a/collectors/python.d.plugin/python_modules/bases/charts.py
+++ b/collectors/python.d.plugin/python_modules/bases/charts.py
@@ -16,8 +16,7 @@ CHART_BEGIN = 'BEGIN {type}.{id} {since_last}\n'
CHART_CREATE = "CHART {type}.{id} '{name}' '{title}' '{units}' '{family}' '{context}' " \
"{chart_type} {priority} {update_every} '{hidden}' 'python.d.plugin' '{module_name}'\n"
CHART_OBSOLETE = "CHART {type}.{id} '{name}' '{title}' '{units}' '{family}' '{context}' " \
- "{chart_type} {priority} {update_every} '{hidden} obsolete'\n"
-
+ "{chart_type} {priority} {update_every} '{hidden} obsolete'\n"
DIMENSION_CREATE = "DIMENSION '{id}' '{name}' {algorithm} {multiplier} {divisor} '{hidden} {obsolete}'\n"
DIMENSION_SET = "SET '{id}' = {value}\n"
@@ -40,13 +39,17 @@ def create_runtime_chart(func):
:param func: class method
:return:
"""
+
def wrapper(*args, **kwargs):
self = args[0]
+ chart = RUNTIME_CHART_CREATE.format(
+ job_name=self.name,
+ update_every=self._runtime_counters.update_every,
+ )
+ safe_print(chart)
ok = func(*args, **kwargs)
- if ok:
- safe_print(RUNTIME_CHART_CREATE.format(job_name=self.name,
- update_every=self._runtime_counters.update_every))
return ok
+
return wrapper
@@ -72,6 +75,7 @@ class Charts:
All charts stored in a dict.
Chart is a instance of Chart class.
Charts adding must be done using Charts.add_chart() method only"""
+
def __init__(self, job_name, priority, cleanup, get_update_every, module_name):
"""
:param job_name: <bound method>
@@ -138,6 +142,7 @@ class Charts:
class Chart:
"""Represent a chart"""
+
def __init__(self, params):
"""
:param params: <list>
@@ -281,6 +286,7 @@ class Chart:
class Dimension:
"""Represent a dimension"""
+
def __init__(self, params):
"""
:param params: <list>
@@ -346,6 +352,7 @@ class Dimension:
class ChartVariable:
"""Represent a chart variable"""
+
def __init__(self, params):
"""
:param params: <list>
diff --git a/collectors/python.d.plugin/python_modules/bases/collection.py b/collectors/python.d.plugin/python_modules/bases/collection.py
index 4c25aafd5..93bf8cf05 100644
--- a/collectors/python.d.plugin/python_modules/bases/collection.py
+++ b/collectors/python.d.plugin/python_modules/bases/collection.py
@@ -5,6 +5,8 @@
import os
+from threading import Lock
+
PATH = os.getenv('PATH', '/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin').split(':')
CHART_BEGIN = 'BEGIN {0} {1}\n'
@@ -12,6 +14,8 @@ CHART_CREATE = "CHART {0} '{1}' '{2}' '{3}' '{4}' '{5}' {6} {7} {8}\n"
DIMENSION_CREATE = "DIMENSION '{0}' '{1}' {2} {3} {4} '{5}'\n"
DIMENSION_SET = "SET '{0}' = {1}\n"
+print_lock = Lock()
+
def setdefault_values(config, base_dict):
for key, value in base_dict.items():
@@ -23,10 +27,11 @@ def run_and_exit(func):
def wrapper(*args, **kwargs):
func(*args, **kwargs)
exit(1)
+
return wrapper
-def on_try_except_finally(on_except=(None, ), on_finally=(None, )):
+def on_try_except_finally(on_except=(None,), on_finally=(None,)):
except_func = on_except[0]
finally_func = on_finally[0]
@@ -40,7 +45,9 @@ def on_try_except_finally(on_except=(None, ), on_finally=(None, )):
finally:
if finally_func:
finally_func(*on_finally[1:])
+
return wrapper
+
return decorator
@@ -49,6 +56,7 @@ def static_vars(**kwargs):
for k in kwargs:
setattr(func, k, kwargs[k])
return func
+
return decorate
@@ -58,7 +66,9 @@ def safe_print(*msg):
:param msg:
:return:
"""
+ print_lock.acquire()
print(''.join(msg))
+ print_lock.release()
def find_binary(binary):
@@ -67,7 +77,7 @@ def find_binary(binary):
:return:
"""
for directory in PATH:
- binary_name = '/'.join([directory, binary])
+ binary_name = os.path.join(directory, binary)
if os.path.isfile(binary_name) and os.access(binary_name, os.X_OK):
return binary_name
return None
@@ -82,3 +92,26 @@ def read_last_line(f):
break
result = opened.readline()
return result.decode()
+
+
+def unicode_str(arg):
+ """Return the argument as a unicode string.
+
+ The `unicode` function has been removed from Python3 and `str` takes its
+ place. This function is a helper which will try using Python 2's `unicode`
+ and if it doesn't exist, assume we're using Python 3 and use `str`.
+
+ :param arg:
+ :return: <str>
+ """
+ # TODO: fix
+ try:
+ # https://github.com/netdata/netdata/issues/7613
+ if isinstance(arg, unicode):
+ return arg
+ return unicode(arg, errors='ignore')
+ # https://github.com/netdata/netdata/issues/7642
+ except TypeError:
+ return unicode(arg)
+ except NameError:
+ return str(arg)
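A short illustration of what the new `unicode_str` helper is expected to return on either interpreter, assuming the plugin's `python_modules` directory is on `sys.path`:

```python
# Illustration only: unicode_str behaviour on Python 2 and Python 3.
from bases.collection import unicode_str

assert unicode_str('ascii') == u'ascii'       # byte/str input is decoded on Python 2
assert unicode_str(u'caf\xe9') == u'caf\xe9'  # unicode input is passed through unchanged
assert unicode_str(42) == u'42'               # non-string input falls back to unicode()/str()
```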
diff --git a/collectors/python.d.plugin/python_modules/bases/loggers.py b/collectors/python.d.plugin/python_modules/bases/loggers.py
index 9bf2e086b..47f196a6d 100644
--- a/collectors/python.d.plugin/python_modules/bases/loggers.py
+++ b/collectors/python.d.plugin/python_modules/bases/loggers.py
@@ -13,7 +13,7 @@ try:
except ImportError:
from time import time
-from bases.collection import on_try_except_finally
+from bases.collection import on_try_except_finally, unicode_str
LOGGING_LEVELS = {'CRITICAL': 50,
@@ -121,23 +121,23 @@ class BaseLogger(object):
self.logger.setLevel(LOGGING_LEVELS[level])
def debug(self, *msg, **kwargs):
- self.logger.debug(' '.join(map(str, msg)), **kwargs)
+ self.logger.debug(' '.join(map(unicode_str, msg)), **kwargs)
def info(self, *msg, **kwargs):
- self.logger.info(' '.join(map(str, msg)), **kwargs)
+ self.logger.info(' '.join(map(unicode_str, msg)), **kwargs)
def warning(self, *msg, **kwargs):
- self.logger.warning(' '.join(map(str, msg)), **kwargs)
+ self.logger.warning(' '.join(map(unicode_str, msg)), **kwargs)
def error(self, *msg, **kwargs):
- self.logger.error(' '.join(map(str, msg)), **kwargs)
+ self.logger.error(' '.join(map(unicode_str, msg)), **kwargs)
def alert(self, *msg, **kwargs):
- self.logger.critical(' '.join(map(str, msg)), **kwargs)
+ self.logger.critical(' '.join(map(unicode_str, msg)), **kwargs)
@on_try_except_finally(on_finally=(exit, 1))
def fatal(self, *msg, **kwargs):
- self.logger.critical(' '.join(map(str, msg)), **kwargs)
+ self.logger.critical(' '.join(map(unicode_str, msg)), **kwargs)
class PythonDLogger(object):
diff --git a/collectors/python.d.plugin/python_modules/third_party/filelock.py b/collectors/python.d.plugin/python_modules/third_party/filelock.py
new file mode 100644
index 000000000..4c981672b
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/third_party/filelock.py
@@ -0,0 +1,451 @@
+# This is free and unencumbered software released into the public domain.
+#
+# Anyone is free to copy, modify, publish, use, compile, sell, or
+# distribute this software, either in source code form or as a compiled
+# binary, for any purpose, commercial or non-commercial, and by any
+# means.
+#
+# In jurisdictions that recognize copyright laws, the author or authors
+# of this software dedicate any and all copyright interest in the
+# software to the public domain. We make this dedication for the benefit
+# of the public at large and to the detriment of our heirs and
+# successors. We intend this dedication to be an overt act of
+# relinquishment in perpetuity of all present and future rights to this
+# software under copyright law.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+# For more information, please refer to <http://unlicense.org>
+
+"""
+A platform independent file lock that supports the with-statement.
+"""
+
+
+# Modules
+# ------------------------------------------------
+import logging
+import os
+import threading
+import time
+try:
+ import warnings
+except ImportError:
+ warnings = None
+
+try:
+ import msvcrt
+except ImportError:
+ msvcrt = None
+
+try:
+ import fcntl
+except ImportError:
+ fcntl = None
+
+
+# Backward compatibility
+# ------------------------------------------------
+try:
+ TimeoutError
+except NameError:
+ TimeoutError = OSError
+
+
+# Data
+# ------------------------------------------------
+__all__ = [
+ "Timeout",
+ "BaseFileLock",
+ "WindowsFileLock",
+ "UnixFileLock",
+ "SoftFileLock",
+ "FileLock"
+]
+
+__version__ = "3.0.12"
+
+
+_logger = None
+def logger():
+ """Returns the logger instance used in this module."""
+ global _logger
+ _logger = _logger or logging.getLogger(__name__)
+ return _logger
+
+
+# Exceptions
+# ------------------------------------------------
+class Timeout(TimeoutError):
+ """
+ Raised when the lock could not be acquired in *timeout*
+ seconds.
+ """
+
+ def __init__(self, lock_file):
+ """
+ """
+ #: The path of the file lock.
+ self.lock_file = lock_file
+ return None
+
+ def __str__(self):
+ temp = "The file lock '{}' could not be acquired."\
+ .format(self.lock_file)
+ return temp
+
+
+# Classes
+# ------------------------------------------------
+
+# This is a helper class which is returned by :meth:`BaseFileLock.acquire`
+# and wraps the lock to make sure __enter__ is not called twice when entering
+# the with statement.
+# If we would simply return *self*, the lock would be acquired again
+# in the *__enter__* method of the BaseFileLock, but not released again
+# automatically.
+#
+# :seealso: issue #37 (memory leak)
+class _Acquire_ReturnProxy(object):
+
+ def __init__(self, lock):
+ self.lock = lock
+ return None
+
+ def __enter__(self):
+ return self.lock
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ self.lock.release()
+ return None
+
+
+class BaseFileLock(object):
+ """
+ Implements the base class of a file lock.
+ """
+
+ def __init__(self, lock_file, timeout = -1):
+ """
+ """
+ # The path to the lock file.
+ self._lock_file = lock_file
+
+ # The file descriptor for the *_lock_file* as it is returned by the
+ # os.open() function.
+ # This file lock is only NOT None, if the object currently holds the
+ # lock.
+ self._lock_file_fd = None
+
+ # The default timeout value.
+ self.timeout = timeout
+
+ # We use this lock primarily for the lock counter.
+ self._thread_lock = threading.Lock()
+
+ # The lock counter is used for implementing the nested locking
+ # mechanism. Whenever the lock is acquired, the counter is increased and
+ # the lock is only released, when this value is 0 again.
+ self._lock_counter = 0
+ return None
+
+ @property
+ def lock_file(self):
+ """
+ The path to the lock file.
+ """
+ return self._lock_file
+
+ @property
+ def timeout(self):
+ """
+ You can set a default timeout for the filelock. It will be used as
+ fallback value in the acquire method, if no timeout value (*None*) is
+ given.
+
+ If you want to disable the timeout, set it to a negative value.
+
+ A timeout of 0 means, that there is exactly one attempt to acquire the
+ file lock.
+
+ .. versionadded:: 2.0.0
+ """
+ return self._timeout
+
+ @timeout.setter
+ def timeout(self, value):
+ """
+ """
+ self._timeout = float(value)
+ return None
+
+ # Platform dependent locking
+ # --------------------------------------------
+
+ def _acquire(self):
+ """
+ Platform dependent. If the file lock could be
+ acquired, self._lock_file_fd holds the file descriptor
+ of the lock file.
+ """
+ raise NotImplementedError()
+
+ def _release(self):
+ """
+ Releases the lock and sets self._lock_file_fd to None.
+ """
+ raise NotImplementedError()
+
+ # Platform independent methods
+ # --------------------------------------------
+
+ @property
+ def is_locked(self):
+ """
+ True, if the object holds the file lock.
+
+ .. versionchanged:: 2.0.0
+
+ This was previously a method and is now a property.
+ """
+ return self._lock_file_fd is not None
+
+ def acquire(self, timeout=None, poll_intervall=0.05):
+ """
+ Acquires the file lock or fails with a :exc:`Timeout` error.
+
+ .. code-block:: python
+
+ # You can use this method in the context manager (recommended)
+ with lock.acquire():
+ pass
+
+ # Or use an equivalent try-finally construct:
+ lock.acquire()
+ try:
+ pass
+ finally:
+ lock.release()
+
+ :arg float timeout:
+ The maximum time waited for the file lock.
+ If ``timeout < 0``, there is no timeout and this method will
+ block until the lock could be acquired.
+ If ``timeout`` is None, the default :attr:`~timeout` is used.
+
+ :arg float poll_intervall:
+ We check once in *poll_intervall* seconds if we can acquire the
+ file lock.
+
+ :raises Timeout:
+ if the lock could not be acquired in *timeout* seconds.
+
+ .. versionchanged:: 2.0.0
+
+ This method returns now a *proxy* object instead of *self*,
+ so that it can be used in a with statement without side effects.
+ """
+ # Use the default timeout, if no timeout is provided.
+ if timeout is None:
+ timeout = self.timeout
+
+ # Increment the number right at the beginning.
+ # We can still undo it, if something fails.
+ with self._thread_lock:
+ self._lock_counter += 1
+
+ lock_id = id(self)
+ lock_filename = self._lock_file
+ start_time = time.time()
+ try:
+ while True:
+ with self._thread_lock:
+ if not self.is_locked:
+ logger().debug('Attempting to acquire lock %s on %s', lock_id, lock_filename)
+ self._acquire()
+
+ if self.is_locked:
+ logger().info('Lock %s acquired on %s', lock_id, lock_filename)
+ break
+ elif timeout >= 0 and time.time() - start_time > timeout:
+ logger().debug('Timeout on acquiring lock %s on %s', lock_id, lock_filename)
+ raise Timeout(self._lock_file)
+ else:
+ logger().debug(
+ 'Lock %s not acquired on %s, waiting %s seconds ...',
+ lock_id, lock_filename, poll_intervall
+ )
+ time.sleep(poll_intervall)
+ except:
+ # Something did go wrong, so decrement the counter.
+ with self._thread_lock:
+ self._lock_counter = max(0, self._lock_counter - 1)
+
+ raise
+ return _Acquire_ReturnProxy(lock = self)
+
+ def release(self, force = False):
+ """
+ Releases the file lock.
+
+ Please note, that the lock is only completely released, if the lock
+ counter is 0.
+
+ Also note, that the lock file itself is not automatically deleted.
+
+ :arg bool force:
+ If true, the lock counter is ignored and the lock is released in
+ every case.
+ """
+ with self._thread_lock:
+
+ if self.is_locked:
+ self._lock_counter -= 1
+
+ if self._lock_counter == 0 or force:
+ lock_id = id(self)
+ lock_filename = self._lock_file
+
+ logger().debug('Attempting to release lock %s on %s', lock_id, lock_filename)
+ self._release()
+ self._lock_counter = 0
+ logger().info('Lock %s released on %s', lock_id, lock_filename)
+
+ return None
+
+ def __enter__(self):
+ self.acquire()
+ return self
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ self.release()
+ return None
+
+ def __del__(self):
+ self.release(force = True)
+ return None
+
+
+# Windows locking mechanism
+# ~~~~~~~~~~~~~~~~~~~~~~~~~
+
+class WindowsFileLock(BaseFileLock):
+ """
+ Uses the :func:`msvcrt.locking` function to hard lock the lock file on
+ windows systems.
+ """
+
+ def _acquire(self):
+ open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
+
+ try:
+ fd = os.open(self._lock_file, open_mode)
+ except OSError:
+ pass
+ else:
+ try:
+ msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
+ except (IOError, OSError):
+ os.close(fd)
+ else:
+ self._lock_file_fd = fd
+ return None
+
+ def _release(self):
+ fd = self._lock_file_fd
+ self._lock_file_fd = None
+ msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
+ os.close(fd)
+
+ try:
+ os.remove(self._lock_file)
+ # Probably another instance of the application
+ # that acquired the file lock.
+ except OSError:
+ pass
+ return None
+
+# Unix locking mechanism
+# ~~~~~~~~~~~~~~~~~~~~~~
+
+class UnixFileLock(BaseFileLock):
+ """
+ Uses the :func:`fcntl.flock` to hard lock the lock file on unix systems.
+ """
+
+ def _acquire(self):
+ open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
+ fd = os.open(self._lock_file, open_mode)
+
+ try:
+ fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
+ except (IOError, OSError):
+ os.close(fd)
+ else:
+ self._lock_file_fd = fd
+ return None
+
+ def _release(self):
+ # Do not remove the lockfile:
+ #
+ # https://github.com/benediktschmitt/py-filelock/issues/31
+ # https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
+ fd = self._lock_file_fd
+ self._lock_file_fd = None
+ fcntl.flock(fd, fcntl.LOCK_UN)
+ os.close(fd)
+ return None
+
+# Soft lock
+# ~~~~~~~~~
+
+class SoftFileLock(BaseFileLock):
+ """
+ Simply watches the existence of the lock file.
+ """
+
+ def _acquire(self):
+ open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
+ try:
+ fd = os.open(self._lock_file, open_mode)
+ except (IOError, OSError):
+ pass
+ else:
+ self._lock_file_fd = fd
+ return None
+
+ def _release(self):
+ os.close(self._lock_file_fd)
+ self._lock_file_fd = None
+
+ try:
+ os.remove(self._lock_file)
+ # The file is already deleted and that's what we want.
+ except OSError:
+ pass
+ return None
+
+
+# Platform filelock
+# ~~~~~~~~~~~~~~~~~
+
+#: Alias for the lock, which should be used for the current platform. On
+#: Windows, this is an alias for :class:`WindowsFileLock`, on Unix for
+#: :class:`UnixFileLock` and otherwise for :class:`SoftFileLock`.
+FileLock = None
+
+if msvcrt:
+ FileLock = WindowsFileLock
+elif fcntl:
+ FileLock = UnixFileLock
+else:
+ FileLock = SoftFileLock
+
+ if warnings is not None:
+ warnings.warn("only soft file lock is available")
diff --git a/collectors/python.d.plugin/python_modules/third_party/monotonic.py b/collectors/python.d.plugin/python_modules/third_party/monotonic.py
index da04bb857..4ebd556c3 100644
--- a/collectors/python.d.plugin/python_modules/third_party/monotonic.py
+++ b/collectors/python.d.plugin/python_modules/third_party/monotonic.py
@@ -54,6 +54,41 @@ except AttributeError:
import os
import sys
import threading
+
+
+ def clock_clock_gettime_c_library():
+ return ctypes.CDLL(ctypes.util.find_library('c'), use_errno=True).clock_gettime
+
+
+ def clock_clock_gettime_rt_library():
+ return ctypes.CDLL(ctypes.util.find_library('rt'), use_errno=True).clock_gettime
+
+
+ def clock_clock_gettime_c_library_synology6():
+ return ctypes.CDLL('/usr/lib/libc.so.6', use_errno=True).clock_gettime
+
+
+ def clock_clock_gettime_rt_library_synology6():
+ return ctypes.CDLL('/usr/lib/librt.so.1', use_errno=True).clock_gettime
+
+
+ def clock_gettime_linux():
+ # see https://github.com/netdata/netdata/issues/7976
+ order = [
+ clock_clock_gettime_c_library,
+ clock_clock_gettime_rt_library,
+ clock_clock_gettime_c_library_synology6,
+ clock_clock_gettime_rt_library_synology6,
+ ]
+
+ for gettime in order:
+ try:
+ return gettime()
+ except (RuntimeError, AttributeError, OSError):
+ continue
+ raise RuntimeError('can not find c and rt libraries')
+
+
try:
if sys.platform == 'darwin': # OS X, iOS
# See Technical Q&A QA1398 of the Mac Developer Library:
@@ -132,12 +167,7 @@ except AttributeError:
return final_milliseconds / 1000.0
else:
- try:
- clock_gettime = ctypes.CDLL(ctypes.util.find_library('c'),
- use_errno=True).clock_gettime
- except Exception:
- clock_gettime = ctypes.CDLL(ctypes.util.find_library('rt'),
- use_errno=True).clock_gettime
+ clock_gettime = clock_gettime_linux()
class timespec(ctypes.Structure):
"""Time specification, as described in clock_gettime(3)."""
diff --git a/collectors/python.d.plugin/rabbitmq/README.md b/collectors/python.d.plugin/rabbitmq/README.md
index 1d7ad956d..2130a7b3a 100644
--- a/collectors/python.d.plugin/rabbitmq/README.md
+++ b/collectors/python.d.plugin/rabbitmq/README.md
@@ -1,6 +1,13 @@
-# rabbitmq
+<!--
+title: "RabbitMQ monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/rabbitmq/README.md
+sidebar_label: "RabbitMQ"
+-->
+
+# RabbitMQ monitoring with Netdata
+
+Collects message broker global and per virtual host metrics.
-This module monitors [RabbitMQ](https://www.rabbitmq.com/) performance and health metrics.
Following charts are drawn:
@@ -62,7 +69,38 @@ Per Vhost charts:
- redeliver
- return_unroutable
-## configuration
+2. Per Queue charts:
+
+ 1. **Queued Messages**
+
+ - messages
+ - paged_out
+ - persistent
+ - ready
+ - unacknowledged
+
+ 2. **Queue Messages stats**
+
+ - ack
+ - confirm
+ - deliver
+ - get
+ - get_no_ack
+ - publish
+ - redeliver
+ - return_unroutable
+
+## Configuration
+
+Edit the `python.d/rabbitmq.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/rabbitmq.conf
+```
+
+When no configuration file is found, the module tries to connect to `localhost:15672`.
```yaml
socket:
@@ -73,8 +111,6 @@ socket:
pass : 'guest'
```
-When no configuration file is found, module tries to connect to: `localhost:15672`.
-
---
[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Frabbitmq%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/rabbitmq/rabbitmq.chart.py b/collectors/python.d.plugin/rabbitmq/rabbitmq.chart.py
index d581c14e7..866b777f7 100644
--- a/collectors/python.d.plugin/rabbitmq/rabbitmq.chart.py
+++ b/collectors/python.d.plugin/rabbitmq/rabbitmq.chart.py
@@ -9,6 +9,7 @@ from bases.FrameworkServices.UrlService import UrlService
API_NODE = 'api/nodes'
API_OVERVIEW = 'api/overview'
+API_QUEUES = 'api/queues'
API_VHOSTS = 'api/vhosts'
NODE_STATS = [
@@ -31,7 +32,30 @@ OVERVIEW_STATS = [
'message_stats.ack',
'message_stats.redeliver',
'message_stats.deliver',
- 'message_stats.publish'
+ 'message_stats.publish',
+ 'churn_rates.connection_created_details.rate',
+ 'churn_rates.connection_closed_details.rate',
+ 'churn_rates.channel_created_details.rate',
+ 'churn_rates.channel_closed_details.rate',
+ 'churn_rates.queue_created_details.rate',
+ 'churn_rates.queue_declared_details.rate',
+ 'churn_rates.queue_deleted_details.rate'
+]
+
+QUEUE_STATS = [
+ 'messages',
+ 'messages_paged_out',
+ 'messages_persistent',
+ 'messages_ready',
+ 'messages_unacknowledged',
+ 'message_stats.ack',
+ 'message_stats.confirm',
+ 'message_stats.deliver',
+ 'message_stats.get',
+ 'message_stats.get_no_ack',
+ 'message_stats.publish',
+ 'message_stats.redeliver',
+ 'message_stats.return_unroutable',
]
VHOST_MESSAGE_STATS = [
@@ -47,6 +71,9 @@ VHOST_MESSAGE_STATS = [
ORDER = [
'queued_messages',
+ 'connection_churn_rates',
+ 'channel_churn_rates',
+ 'queue_churn_rates',
'message_rates',
'global_counts',
'file_descriptors',
@@ -104,6 +131,28 @@ CHARTS = {
['object_totals_exchanges', 'exchanges', 'absolute']
]
},
+ 'connection_churn_rates': {
+ 'options': [None, 'Connection Churn Rates', 'operations/s', 'overview', 'rabbitmq.connection_churn_rates', 'line'],
+ 'lines': [
+ ['churn_rates_connection_created_details_rate', 'created', 'absolute'],
+ ['churn_rates_connection_closed_details_rate', 'closed', 'absolute']
+ ]
+ },
+ 'channel_churn_rates': {
+ 'options': [None, 'Channel Churn Rates', 'operations/s', 'overview', 'rabbitmq.channel_churn_rates', 'line'],
+ 'lines': [
+ ['churn_rates_channel_created_details_rate', 'created', 'absolute'],
+ ['churn_rates_channel_closed_details_rate', 'closed', 'absolute']
+ ]
+ },
+ 'queue_churn_rates': {
+ 'options': [None, 'Queue Churn Rates', 'operations/s', 'overview', 'rabbitmq.queue_churn_rates', 'line'],
+ 'lines': [
+ ['churn_rates_queue_created_details_rate', 'created', 'absolute'],
+ ['churn_rates_queue_declared_details_rate', 'declared', 'absolute'],
+ ['churn_rates_queue_deleted_details_rate', 'deleted', 'absolute']
+ ]
+ },
'queued_messages': {
'options': [None, 'Queued Messages', 'messages', 'overview', 'rabbitmq.queued_messages', 'stacked'],
'lines': [
@@ -148,6 +197,44 @@ def vhost_chart_template(name):
return order, charts
+def queue_chart_template(queue_id):
+ vhost, name = queue_id
+ order = [
+ 'vhost_{0}_queue_{1}_queued_message'.format(vhost, name),
+ 'vhost_{0}_queue_{1}_messages_stats'.format(vhost, name),
+ ]
+ family = 'vhost {0}'.format(vhost)
+
+ charts = {
+ order[0]: {
+ 'options': [
+ None, 'Queue "{0}" in "{1}" queued messages'.format(name, vhost), 'messages', family, 'rabbitmq.queue_messages', 'line'],
+ 'lines': [
+ ['vhost_{0}_queue_{1}_messages'.format(vhost, name), 'messages', 'absolute'],
+ ['vhost_{0}_queue_{1}_messages_paged_out'.format(vhost, name), 'paged_out', 'absolute'],
+ ['vhost_{0}_queue_{1}_messages_persistent'.format(vhost, name), 'persistent', 'absolute'],
+ ['vhost_{0}_queue_{1}_messages_ready'.format(vhost, name), 'ready', 'absolute'],
+ ['vhost_{0}_queue_{1}_messages_unacknowledged'.format(vhost, name), 'unack', 'absolute'],
+ ]
+ },
+ order[1]: {
+ 'options': [
+ None, 'Queue "{0}" in "{1}" messages stats'.format(name, vhost), 'messages/s', family, 'rabbitmq.queue_messages_stats', 'line'],
+ 'lines': [
+ ['vhost_{0}_queue_{1}_message_stats_ack'.format(vhost, name), 'ack', 'incremental'],
+ ['vhost_{0}_queue_{1}_message_stats_confirm'.format(vhost, name), 'confirm', 'incremental'],
+ ['vhost_{0}_queue_{1}_message_stats_deliver'.format(vhost, name), 'deliver', 'incremental'],
+ ['vhost_{0}_queue_{1}_message_stats_get'.format(vhost, name), 'get', 'incremental'],
+ ['vhost_{0}_queue_{1}_message_stats_get_no_ack'.format(vhost, name), 'get_no_ack', 'incremental'],
+ ['vhost_{0}_queue_{1}_message_stats_publish'.format(vhost, name), 'publish', 'incremental'],
+ ['vhost_{0}_queue_{1}_message_stats_redeliver'.format(vhost, name), 'redeliver', 'incremental'],
+ ['vhost_{0}_queue_{1}_message_stats_return_unroutable'.format(vhost, name), 'return_unroutable', 'incremental'],
+ ]
+ },
+ }
+
+ return order, charts
+
class VhostStatsBuilder:
def __init__(self):
@@ -167,6 +254,21 @@ class VhostStatsBuilder:
stats = fetch_data(raw_data=self.stats, metrics=VHOST_MESSAGE_STATS)
return dict(('vhost_{0}_{1}'.format(name, k), v) for k, v in stats.items())
+class QueueStatsBuilder:
+ def __init__(self):
+ self.stats = None
+
+ def set(self, raw_stats):
+ self.stats = raw_stats
+
+ def id(self):
+ return self.stats['vhost'], self.stats['name']
+
+ def queue_stats(self):
+ vhost, name = self.id()
+ stats = fetch_data(raw_data=self.stats, metrics=QUEUE_STATS)
+ return dict(('vhost_{0}_queue_{1}_{2}'.format(vhost, name, k), v) for k, v in stats.items())
+
class Service(UrlService):
def __init__(self, configuration=None, name=None):
@@ -181,6 +283,11 @@ class Service(UrlService):
self.node_name = str()
self.vhost = VhostStatsBuilder()
self.collected_vhosts = set()
+ self.collect_queues_metrics = configuration.get('collect_queues_metrics', False)
+ self.debug("collect_queues_metrics is {0}".format("enabled" if self.collect_queues_metrics else "disabled"))
+ if self.collect_queues_metrics:
+ self.queue = QueueStatsBuilder()
+ self.collected_queues = set()
def _get_data(self):
data = dict()
@@ -201,6 +308,11 @@ class Service(UrlService):
if stats:
data.update(stats)
+ if self.collect_queues_metrics:
+ stats = self.get_queues_stats()
+ if stats:
+ data.update(stats)
+
return data or None
def get_overview_stats(self):
@@ -260,6 +372,31 @@ class Service(UrlService):
self.debug("number of vhosts: {0}, metrics: {1}".format(len(vhosts), len(data)))
return data
+ def get_queues_stats(self):
+ url = '{0}/{1}'.format(self.url, API_QUEUES)
+ self.debug("doing http request to '{0}'".format(url))
+ raw = self._get_raw_data(url)
+ if not raw:
+ return None
+
+ data = dict()
+ queues = loads(raw)
+ charts_initialized = len(self.charts) > 0
+
+ for queue in queues:
+ self.queue.set(queue)
+ if self.queue.id()[0] not in self.collected_vhosts:
+ continue
+
+ if charts_initialized and self.queue.id() not in self.collected_queues:
+ self.collected_queues.add(self.queue.id())
+ self.add_queue_charts(self.queue.id())
+
+ data.update(self.queue.queue_stats())
+
+ self.debug("number of queues: {0}, metrics: {1}".format(len(queues), len(data)))
+ return data
+
def add_vhost_charts(self, vhost_name):
order, charts = vhost_chart_template(vhost_name)
@@ -271,6 +408,17 @@ class Service(UrlService):
for dimension in dimensions:
new_chart.add_dimension(dimension)
+ def add_queue_charts(self, queue_id):
+ order, charts = queue_chart_template(queue_id)
+
+ for chart_name in order:
+ params = [chart_name] + charts[chart_name]['options']
+ dimensions = charts[chart_name]['lines']
+
+ new_chart = self.charts.add_chart(params)
+ for dimension in dimensions:
+ new_chart.add_dimension(dimension)
+
def fetch_data(raw_data, metrics):
data = dict()
@@ -291,5 +439,5 @@ def handle_disabled_disk_monitoring(node_stats):
# https://github.com/netdata/netdata/issues/7218
# can be "disk_free": "disk_free_monitoring_disabled"
v = node_stats.get('disk_free')
- if v and isinstance(v, str):
+ if v and not isinstance(v, int):
del node_stats['disk_free']
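The per-queue charts added above use dimension ids such as `vhost_<vhost>_queue_<name>_message_stats_ack`, which implies that the dotted paths in `QUEUE_STATS` are flattened with underscores. A hedged sketch of that flattening follows; it assumes `fetch_data()` walks nested dicts by the dotted path, which is consistent with the generated ids but is not shown in this hunk.

```python
# Sketch only: how dotted metric paths could map to the flat dimension ids used above.
def fetch_data(raw_data, metrics):
    data = dict()
    for metric in metrics:
        value = raw_data
        parts = metric.split('.')
        try:
            for part in parts:
                value = value[part]      # descend into nested dicts
        except (KeyError, TypeError):
            continue
        data['_'.join(parts)] = value    # 'message_stats.ack' -> 'message_stats_ack'
    return data


queue = {'vhost': '/', 'name': 'tasks', 'messages': 5, 'message_stats': {'ack': 12}}
stats = fetch_data(raw_data=queue, metrics=['messages', 'message_stats.ack'])
print(stats)  # {'messages': 5, 'message_stats_ack': 12}; QueueStatsBuilder then
              # prefixes each key with 'vhost_{vhost}_queue_{name}_'.
```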
diff --git a/collectors/python.d.plugin/rabbitmq/rabbitmq.conf b/collectors/python.d.plugin/rabbitmq/rabbitmq.conf
index ae0dbdb75..47d47a1bf 100644
--- a/collectors/python.d.plugin/rabbitmq/rabbitmq.conf
+++ b/collectors/python.d.plugin/rabbitmq/rabbitmq.conf
@@ -70,6 +70,12 @@
# user: 'username'
# pass: 'password'
#
+# The RabbitMQ plugin can also collect stats per vhost per queue, which is disabled
+# by default. Please note that enabling this can induce a serious overhead on
+# both netdata and rabbitmq if a lot of queues are configured and used.
+#
+# collect_queues_metrics: 'yes/no'
+#
# ----------------------------------------------------------------------
# AUTO-DETECTION JOBS
# only one of them will run (they have the same name)
diff --git a/collectors/python.d.plugin/redis/README.md b/collectors/python.d.plugin/redis/README.md
index e7ddd382c..9fab56c33 100644
--- a/collectors/python.d.plugin/redis/README.md
+++ b/collectors/python.d.plugin/redis/README.md
@@ -1,6 +1,12 @@
-# redis
+<!--
+title: "Redis monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/redis/README.md
+sidebar_label: "Redis"
+-->
-Get INFO data from redis instance.
+# Redis monitoring with Netdata
+
+Monitors database status. It reads the server's response to the `INFO` command.
Following charts are drawn:
@@ -30,7 +36,15 @@ Following charts are drawn:
- connected
-## configuration
+## Configuration
+
+Edit the `python.d/redis.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/redis.conf
+```
```yaml
socket:
diff --git a/collectors/python.d.plugin/redis/redis.chart.py b/collectors/python.d.plugin/redis/redis.chart.py
index 40ccb5274..e09916d86 100644
--- a/collectors/python.d.plugin/redis/redis.chart.py
+++ b/collectors/python.d.plugin/redis/redis.chart.py
@@ -5,7 +5,6 @@
# SPDX-License-Identifier: GPL-3.0-or-later
import re
-
from copy import deepcopy
from bases.FrameworkServices.SocketService import SocketService
@@ -37,7 +36,6 @@ PIKA_ORDER = [
'uptime',
]
-
CHARTS = {
'operations': {
'options': [None, 'Operations', 'operations/s', 'operations', 'redis.operations', 'line'],
@@ -53,8 +51,9 @@ CHARTS = {
]
},
'memory': {
- 'options': [None, 'Memory utilization', 'KiB', 'memory', 'redis.memory', 'line'],
+ 'options': [None, 'Memory utilization', 'KiB', 'memory', 'redis.memory', 'area'],
'lines': [
+ ['maxmemory', 'max', 'absolute', 1, 1024],
['used_memory', 'total', 'absolute', 1, 1024],
['used_memory_lua', 'lua', 'absolute', 1, 1024]
]
@@ -156,6 +155,7 @@ class Service(SocketService):
self.auth_request = 'AUTH {0} \r\n'.format(p).encode() if p else None
self.request = 'INFO\r\n'.encode()
self.bgsave_time = 0
+ self.keyspace_dbs = set()
def do_auth(self):
resp = self._get_raw_data(request=self.auth_request)
@@ -189,23 +189,38 @@ class Service(SocketService):
:return: dict
"""
data = self.get_raw_and_parse()
-
if not data:
return None
+ self.calc_hit_rate(data)
+ self.calc_redis_keys(data)
+ self.calc_redis_rdb_save_operations(data)
+ return data
+
+ @staticmethod
+ def calc_hit_rate(data):
try:
- data['hit_rate'] = (
- (int(data['keyspace_hits']) * 100) / (int(data['keyspace_hits']) + int(data['keyspace_misses']))
- )
+ hits = int(data['keyspace_hits'])
+ misses = int(data['keyspace_misses'])
+ data['hit_rate'] = hits * 100 / (hits + misses)
except (KeyError, ZeroDivisionError):
data['hit_rate'] = 0
- if data.get('redis_version') and data.get('rdb_bgsave_in_progress'):
- self.get_data_redis_specific(data)
-
- return data
-
- def get_data_redis_specific(self, data):
+ def calc_redis_keys(self, data):
+ if not data.get('redis_version'):
+ return
+ # db0:keys=2,expires=0,avg_ttl=0
+ new_keyspace_dbs = [k for k in data if k.startswith('db') and k not in self.keyspace_dbs]
+ for db in new_keyspace_dbs:
+ self.keyspace_dbs.add(db)
+ self.charts['keys_redis'].add_dimension([db, None, 'absolute'])
+ for db in self.keyspace_dbs:
+ if db not in data:
+ data[db] = 0
+
+ def calc_redis_rdb_save_operations(self, data):
+ if not (data.get('redis_version') and data.get('rdb_bgsave_in_progress')):
+ return
if data['rdb_bgsave_in_progress'] != '0':
self.bgsave_time += self.update_every
else:
@@ -229,11 +244,6 @@ class Service(SocketService):
for n in self.order:
self.definitions.update(copy_chart(n))
- if data.get('redis_version'):
- for k in data:
- if k.startswith('db'):
- self.definitions['keys_redis']['lines'].append([k, None, 'absolute'])
-
return True
def _check_raw_data(self, data):
diff --git a/collectors/python.d.plugin/rethinkdbs/README.md b/collectors/python.d.plugin/rethinkdbs/README.md
index 277154336..85cebd96a 100644
--- a/collectors/python.d.plugin/rethinkdbs/README.md
+++ b/collectors/python.d.plugin/rethinkdbs/README.md
@@ -1,6 +1,12 @@
-# rethinkdbs
+<!--
+title: "RethinkDB monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/rethinkdbs/README.md
+sidebar_label: "RethinkDB"
+-->
-Module monitor rethinkdb health metrics.
+# RethinkDB monitoring with Netdata
+
+Collects database server and cluster statistics.
Following charts are drawn:
@@ -21,7 +27,15 @@ Following charts are drawn:
- documents
-## configuration
+## Configuration
+
+Edit the `python.d/rethinkdbs.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/rethinkdbs.conf
+```
```yaml
localhost:
diff --git a/collectors/python.d.plugin/rethinkdbs/rethinkdbs.chart.py b/collectors/python.d.plugin/rethinkdbs/rethinkdbs.chart.py
index 80cc1cf18..e3fbc3632 100644
--- a/collectors/python.d.plugin/rethinkdbs/rethinkdbs.chart.py
+++ b/collectors/python.d.plugin/rethinkdbs/rethinkdbs.chart.py
@@ -5,6 +5,7 @@
try:
import rethinkdb as rdb
+
HAS_RETHINKDB = True
except ImportError:
HAS_RETHINKDB = False
diff --git a/collectors/python.d.plugin/retroshare/README.md b/collectors/python.d.plugin/retroshare/README.md
index 9a82f2ff7..d8bd3a914 100644
--- a/collectors/python.d.plugin/retroshare/README.md
+++ b/collectors/python.d.plugin/retroshare/README.md
@@ -1,3 +1,47 @@
-# retroshare
+<!--
+title: "RetroShare monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/retroshare/README.md
+sidebar_label: "RetroShare"
+-->
+
+# RetroShare monitoring with Netdata
+
+Monitors application bandwidth, peers and DHT metrics.
+
+This module will monitor one or more `RetroShare` applications, depending on your configuration.
+
+## Charts
+
+This module produces the following charts:
+
+- Bandwidth in `kilobits/s`
+- Peers in `peers`
+- DHT in `peers`
+
+
+## Configuration
+
+Edit the `python.d/retroshare.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/retroshare.conf
+```
+
+Here is an example for 2 servers:
+
+```yaml
+localhost:
+ url : 'http://localhost:9090'
+ user : "user"
+ password : "pass"
+
+remote:
+ url : 'http://203.0.113.1:9090'
+ user : "user"
+ password : "pass"
+```
+---
[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fretroshare%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/retroshare/retroshare.chart.py b/collectors/python.d.plugin/retroshare/retroshare.chart.py
index feb871fbd..3f9593e94 100644
--- a/collectors/python.d.plugin/retroshare/retroshare.chart.py
+++ b/collectors/python.d.plugin/retroshare/retroshare.chart.py
@@ -7,7 +7,6 @@ import json
from bases.FrameworkServices.UrlService import UrlService
-
ORDER = [
'bandwidth',
'peers',
diff --git a/collectors/python.d.plugin/riakkv/README.md b/collectors/python.d.plugin/riakkv/README.md
index 04343dd99..d0ea9a137 100644
--- a/collectors/python.d.plugin/riakkv/README.md
+++ b/collectors/python.d.plugin/riakkv/README.md
@@ -1,8 +1,14 @@
-# riakkv
+<!--
+title: "Riak KV monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/riakkv/README.md
+sidebar_label: "Riak KV"
+-->
-Monitors one or more Riak KV servers.
+# Riak KV monitoring with Netdata
-**Requirements:**
+Collects database stats from the `/stats` endpoint.
+
+## Requirements
- An accessible `/stats` endpoint. See [the Riak KV configuration reference documentation](https://docs.riak.com/riak/kv/2.2.3/configuring/reference/#client-interfaces)
for how to enable this.
@@ -94,7 +100,15 @@ listed
- bad_entry
- extract_fail
-## configuration
+## Configuration
+
+Edit the `python.d/riakkv.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/riakkv.conf
+```
The module needs to be passed the full URL to Riak's stats endpoint.
For example:
@@ -110,3 +124,5 @@ With no explicit configuration given, the module will attempt to connect to
The default update frequency for the plugin is set to 2 seconds as Riak
internally updates the metrics every second. If we were to update the metrics
every second, the resulting graph would contain odd jitter.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Friakkv%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/riakkv/riakkv.chart.py b/collectors/python.d.plugin/riakkv/riakkv.chart.py
index f81e177a5..c390c8bc0 100644
--- a/collectors/python.d.plugin/riakkv/riakkv.chart.py
+++ b/collectors/python.d.plugin/riakkv/riakkv.chart.py
@@ -67,14 +67,16 @@ ORDER = [
CHARTS = {
# Throughput metrics
"kv.node_operations": {
- "options": [None, "Reads & writes coordinated by this node", "operations/s", "throughput", "riak.kv.throughput", "line"],
+ "options": [None, "Reads & writes coordinated by this node", "operations/s", "throughput", "riak.kv.throughput",
+ "line"],
"lines": [
["node_gets_total", "gets", "incremental"],
["node_puts_total", "puts", "incremental"]
]
},
"dt.vnode_updates": {
- "options": [None, "Update operations coordinated by local vnodes by data type", "operations/s", "throughput", "riak.dt.vnode_updates", "line"],
+ "options": [None, "Update operations coordinated by local vnodes by data type", "operations/s", "throughput",
+ "riak.dt.vnode_updates", "line"],
"lines": [
["vnode_counter_update_total", "counters", "incremental"],
["vnode_set_update_total", "sets", "incremental"],
@@ -94,7 +96,8 @@ CHARTS = {
]
},
"consistent.operations": {
- "options": [None, "Consistent node operations", "operations/s", "throughput", "riak.consistent.operations", "line"],
+ "options": [None, "Consistent node operations", "operations/s", "throughput", "riak.consistent.operations",
+ "line"],
"lines": [
["consistent_gets_total", "gets", "incremental"],
["consistent_puts_total", "puts", "incremental"],
@@ -103,7 +106,8 @@ CHARTS = {
# Latency metrics
"kv.latency.get": {
- "options": [None, "Time between reception of a client GET request and subsequent response to client", "ms", "latency", "riak.kv.latency.get", "line"],
+ "options": [None, "Time between reception of a client GET request and subsequent response to client", "ms",
+ "latency", "riak.kv.latency.get", "line"],
"lines": [
["node_get_fsm_time_mean", "mean", "absolute", 1, 1000],
["node_get_fsm_time_median", "median", "absolute", 1, 1000],
@@ -113,7 +117,8 @@ CHARTS = {
]
},
"kv.latency.put": {
- "options": [None, "Time between reception of a client PUT request and subsequent response to client", "ms", "latency", "riak.kv.latency.put", "line"],
+ "options": [None, "Time between reception of a client PUT request and subsequent response to client", "ms",
+ "latency", "riak.kv.latency.put", "line"],
"lines": [
["node_put_fsm_time_mean", "mean", "absolute", 1, 1000],
["node_put_fsm_time_median", "median", "absolute", 1, 1000],
@@ -123,7 +128,8 @@ CHARTS = {
]
},
"dt.latency.counter": {
- "options": [None, "Time it takes to perform an Update Counter operation", "ms", "latency", "riak.dt.latency.counter_merge", "line"],
+ "options": [None, "Time it takes to perform an Update Counter operation", "ms", "latency",
+ "riak.dt.latency.counter_merge", "line"],
"lines": [
["object_counter_merge_time_mean", "mean", "absolute", 1, 1000],
["object_counter_merge_time_median", "median", "absolute", 1, 1000],
@@ -133,7 +139,8 @@ CHARTS = {
]
},
"dt.latency.set": {
- "options": [None, "Time it takes to perform an Update Set operation", "ms", "latency", "riak.dt.latency.set_merge", "line"],
+ "options": [None, "Time it takes to perform an Update Set operation", "ms", "latency",
+ "riak.dt.latency.set_merge", "line"],
"lines": [
["object_set_merge_time_mean", "mean", "absolute", 1, 1000],
["object_set_merge_time_median", "median", "absolute", 1, 1000],
@@ -143,7 +150,8 @@ CHARTS = {
]
},
"dt.latency.map": {
- "options": [None, "Time it takes to perform an Update Map operation", "ms", "latency", "riak.dt.latency.map_merge", "line"],
+ "options": [None, "Time it takes to perform an Update Map operation", "ms", "latency",
+ "riak.dt.latency.map_merge", "line"],
"lines": [
["object_map_merge_time_mean", "mean", "absolute", 1, 1000],
["object_map_merge_time_median", "median", "absolute", 1, 1000],
@@ -164,7 +172,8 @@ CHARTS = {
]
},
"search.latency.index": {
- "options": [None, "Time it takes Search to index a new document", "ms", "latency", "riak.search.latency.index", "line"],
+ "options": [None, "Time it takes Search to index a new document", "ms", "latency", "riak.search.latency.index",
+ "line"],
"lines": [
["search_index_latency_median", "median", "absolute", 1, 1000],
["search_index_latency_min", "min", "absolute", 1, 1000],
@@ -205,7 +214,8 @@ CHARTS = {
]
},
"vm.memory.processes": {
- "options": [None, "Memory allocated & used by Erlang processes", "MB", "vm", "riak.vm.memory.processes", "line"],
+ "options": [None, "Memory allocated & used by Erlang processes", "MB", "vm", "riak.vm.memory.processes",
+ "line"],
"lines": [
["memory_processes", "allocated", "absolute", 1, 1024 * 1024],
["memory_processes_used", "used", "absolute", 1, 1024 * 1024]
@@ -214,7 +224,8 @@ CHARTS = {
# General Riak Load/Health metrics
"kv.siblings_encountered.get": {
- "options": [None, "Number of siblings encountered during GET operations by this node during the past minute", "siblings", "load", "riak.kv.siblings_encountered.get", "line"],
+ "options": [None, "Number of siblings encountered during GET operations by this node during the past minute",
+ "siblings", "load", "riak.kv.siblings_encountered.get", "line"],
"lines": [
["node_get_fsm_siblings_mean", "mean", "absolute"],
["node_get_fsm_siblings_median", "median", "absolute"],
@@ -224,7 +235,8 @@ CHARTS = {
]
},
"kv.objsize.get": {
- "options": [None, "Object size encountered by this node during the past minute", "KB", "load", "riak.kv.objsize.get", "line"],
+ "options": [None, "Object size encountered by this node during the past minute", "KB", "load",
+ "riak.kv.objsize.get", "line"],
"lines": [
["node_get_fsm_objsize_mean", "mean", "absolute", 1, 1024],
["node_get_fsm_objsize_median", "median", "absolute", 1, 1024],
@@ -234,7 +246,9 @@ CHARTS = {
]
},
"search.vnodeq_size": {
- "options": [None, "Number of unprocessed messages in the vnode message queues of Search on this node in the past minute", "messages", "load", "riak.search.vnodeq_size", "line"],
+ "options": [None,
+ "Number of unprocessed messages in the vnode message queues of Search on this node in the past minute",
+ "messages", "load", "riak.search.vnodeq_size", "line"],
"lines": [
["riak_search_vnodeq_mean", "mean", "absolute"],
["riak_search_vnodeq_median", "median", "absolute"],
@@ -244,20 +258,23 @@ CHARTS = {
]
},
"search.index_errors": {
- "options": [None, "Number of document index errors encountered by Search", "errors", "load", "riak.search.index", "line"],
+ "options": [None, "Number of document index errors encountered by Search", "errors", "load",
+ "riak.search.index", "line"],
"lines": [
["search_index_fail_count", "errors", "absolute"]
]
},
"core.pbc": {
- "options": [None, "Protocol buffer connections by status", "connections", "load", "riak.core.protobuf_connections", "line"],
+ "options": [None, "Protocol buffer connections by status", "connections", "load",
+ "riak.core.protobuf_connections", "line"],
"lines": [
["pbc_active", "active", "absolute"],
# ["pbc_connects", "established_pastmin", "absolute"]
]
},
"core.repairs": {
- "options": [None, "Number of repair operations this node has coordinated", "repairs", "load", "riak.core.repairs", "line"],
+ "options": [None, "Number of repair operations this node has coordinated", "repairs", "load",
+ "riak.core.repairs", "line"],
"lines": [
["read_repairs", "read", "absolute"]
]
@@ -275,7 +292,8 @@ CHARTS = {
# Writing "Sidejob's" here seems to cause some weird issues: it results in this chart being rendered in
# its own context and additionally, moves the entire Riak graph all the way up to the top of the Netdata
# dashboard for some reason.
- "options": [None, "Finite state machines being rejected by Sidejobs overload protection", "fsms", "load", "riak.core.fsm_rejected", "line"],
+ "options": [None, "Finite state machines being rejected by Sidejobs overload protection", "fsms", "load",
+ "riak.core.fsm_rejected", "line"],
"lines": [
["node_get_fsm_rejected", "get", "absolute"],
["node_put_fsm_rejected", "put", "absolute"]
@@ -284,7 +302,8 @@ CHARTS = {
# General Riak Search Load / Health metrics
"search.errors": {
- "options": [None, "Number of writes to Search failed due to bad data format by reason", "writes", "load", "riak.search.index", "line"],
+ "options": [None, "Number of writes to Search failed due to bad data format by reason", "writes", "load",
+ "riak.search.index", "line"],
"lines": [
["search_index_bad_entry_count", "bad_entry", "absolute"],
["search_index_extract_fail_count", "extract_fail", "absolute"],
diff --git a/collectors/python.d.plugin/samba/README.md b/collectors/python.d.plugin/samba/README.md
index ad99deade..ed26d2871 100644
--- a/collectors/python.d.plugin/samba/README.md
+++ b/collectors/python.d.plugin/samba/README.md
@@ -1,8 +1,14 @@
-# samba
+<!--
+title: "Samba monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/samba/README.md
+sidebar_label: "Samba"
+-->
-Performance metrics of Samba file sharing.
+# Samba monitoring with Netdata
-**Requirements:**
+Monitors the performance metrics of Samba file sharing.
+
+## Requirements
- `smbstatus` program
- `sudo` program
@@ -15,7 +21,7 @@ It produces the following charts:
1. **Syscall R/Ws** in kilobytes/s
- sendfile
- - recvfle
+ - recvfile
2. **Smb2 R/Ws** in kilobytes/s
@@ -67,14 +73,22 @@ Add to `sudoers`:
netdata ALL=(root) NOPASSWD: /path/to/smbstatus
```
-## configuration
+## Configuration
- **samba** is disabled by default. Should be explicitly enabled in `python.d.conf`.
+**samba** is disabled by default. It should be explicitly enabled in `python.d.conf`.
```yaml
samba: yes
```
+Edit the `python.d/samba.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/samba.conf
+```
+
---
[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fsamba%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/samba/samba.chart.py b/collectors/python.d.plugin/samba/samba.chart.py
index ac89c29b0..8eebcd60c 100644
--- a/collectors/python.d.plugin/samba/samba.chart.py
+++ b/collectors/python.d.plugin/samba/samba.chart.py
@@ -17,10 +17,10 @@
# (like find and notify... good examples).
import re
+import os
-from bases.collection import find_binary
from bases.FrameworkServices.ExecutableService import ExecutableService
-
+from bases.collection import find_binary
disabled_by_default = True
@@ -96,6 +96,9 @@ CHARTS = {
}
}
+SUDO = 'sudo'
+SMBSTATUS = 'smbstatus'
+
class Service(ExecutableService):
def __init__(self, configuration=None, name=None):
@@ -105,20 +108,26 @@ class Service(ExecutableService):
self.rgx_smb2 = re.compile(r'(smb2_[^:]+|syscall_.*file_bytes):\s+(\d+)')
def check(self):
- sudo_binary, smbstatus_binary = find_binary('sudo'), find_binary('smbstatus')
-
- if not (sudo_binary and smbstatus_binary):
- self.error("Can\'t locate 'sudo' or 'smbstatus' binary")
+ smbstatus_binary = find_binary(SMBSTATUS)
+ if not smbstatus_binary:
+ self.error("can't locate '{0}' binary".format(SMBSTATUS))
return False
- self.command = [sudo_binary, '-v']
- err = self._get_raw_data(stderr=True)
- if err:
- self.error(''.join(err))
+ if os.getuid() == 0:
+ self.command = ' '.join([smbstatus_binary, '-P'])
+ return ExecutableService.check(self)
+
+ sudo_binary = find_binary(SUDO)
+ if not sudo_binary:
+ self.error("can't locate '{0}' binary".format(SUDO))
+ return False
+ command = [sudo_binary, '-n', '-l', smbstatus_binary, '-P']
+ smbstatus = '{0} -P'.format(smbstatus_binary)
+ allowed = self._get_raw_data(command=command)
+ if not (allowed and allowed[0].strip() == smbstatus):
+ self.error("not allowed to run sudo for command '{0}'".format(smbstatus))
return False
-
self.command = ' '.join([sudo_binary, '-n', smbstatus_binary, '-P'])
-
return ExecutableService.check(self)
def _get_data(self):
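The reworked `check()` above no longer probes `sudo -v`; when running as root it executes `smbstatus -P` directly, and otherwise it accepts the job only if `sudo -n -l` reports that exactly `smbstatus -P` may be run without a password. A minimal, self-contained sketch of that permission test follows; the binary paths are assumptions for illustration only, since the real module resolves them with `find_binary`.

```python
# Illustrative sketch of the non-interactive sudo permission test performed by
# the new check(); the paths below are assumed, the module locates the binaries.
import os
import subprocess

SUDO = '/usr/bin/sudo'            # assumed location
SMBSTATUS = '/usr/bin/smbstatus'  # assumed location


def can_collect_smbstatus():
    if os.getuid() == 0:
        return True  # root runs smbstatus directly, no sudo involved
    # 'sudo -n -l <command>' prints the command itself when the sudoers entry
    # allows it without a password; anything else means collection would fail.
    result = subprocess.run([SUDO, '-n', '-l', SMBSTATUS, '-P'],
                            capture_output=True, text=True)
    lines = result.stdout.splitlines()
    return bool(lines) and lines[0].strip() == '{0} -P'.format(SMBSTATUS)


if __name__ == '__main__':
    print(can_collect_smbstatus())
```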
diff --git a/collectors/python.d.plugin/sensors/README.md b/collectors/python.d.plugin/sensors/README.md
index 1c0613c72..5d2934844 100644
--- a/collectors/python.d.plugin/sensors/README.md
+++ b/collectors/python.d.plugin/sensors/README.md
@@ -1,12 +1,24 @@
-# sensors
+<!--
+title: "Linux machine sensors monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/sensors/README.md
+sidebar_label: "Linux machine sensors"
+-->
-System sensors information.
+# Linux machine sensors monitoring with Netdata
+
+Reads system sensors information (temperature, voltage, electric current, power, etc.).
Charts are created dynamically.
-## configuration
+## Configuration
+
+Edit the `python.d/sensors.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
-For detailed configuration information please read [`sensors.conf`](sensors.conf) file.
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/sensors.conf
+```
### possible issues
diff --git a/collectors/python.d.plugin/sensors/sensors.chart.py b/collectors/python.d.plugin/sensors/sensors.chart.py
index 6b54ea601..8c0cde6bb 100644
--- a/collectors/python.d.plugin/sensors/sensors.chart.py
+++ b/collectors/python.d.plugin/sensors/sensors.chart.py
@@ -3,10 +3,8 @@
# Author: Pawel Krupa (paulfantom)
# SPDX-License-Identifier: GPL-3.0-or-later
-from third_party import lm_sensors as sensors
-
from bases.FrameworkServices.SimpleService import SimpleService
-
+from third_party import lm_sensors as sensors
ORDER = [
'temperature',
@@ -162,4 +160,4 @@ class Service(SimpleService):
self.create_definitions()
- return True
+ return bool(self.get_data())
diff --git a/collectors/python.d.plugin/smartd_log/README.md b/collectors/python.d.plugin/smartd_log/README.md
index 6f4dda50c..a1b41f408 100644
--- a/collectors/python.d.plugin/smartd_log/README.md
+++ b/collectors/python.d.plugin/smartd_log/README.md
@@ -1,8 +1,14 @@
-# smartd_log
+<!--
+title: "Storage devices monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/smartd_log/README.md
+sidebar_label: "S.M.A.R.T. attributes"
+-->
-Module monitor `smartd` log files to collect HDD/SSD S.M.A.R.T attributes.
+# Storage devices monitoring with Netdata
-**Requirements:**
+Monitors `smartd` log files to collect HDD/SSD S.M.A.R.T. attributes.
+
+## Requirements
- `smartmontools`
@@ -97,7 +103,15 @@ Otherwise, all the smartd `.csv` files may get written to `/var/lib/smartmontool
`smartd` appends logs at every run. It's strongly recommended to use `logrotate` for smartd files.
-## configuration
+## Configuration
+
+Edit the `python.d/smartd_log.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/smartd_log.conf
+```
```yaml
local:
diff --git a/collectors/python.d.plugin/smartd_log/smartd_log.chart.py b/collectors/python.d.plugin/smartd_log/smartd_log.chart.py
index f121ab2e0..8f10a5351 100644
--- a/collectors/python.d.plugin/smartd_log/smartd_log.chart.py
+++ b/collectors/python.d.plugin/smartd_log/smartd_log.chart.py
@@ -5,13 +5,11 @@
import os
import re
-
from copy import deepcopy
from time import time
-from bases.collection import read_last_line
from bases.FrameworkServices.SimpleService import SimpleService
-
+from bases.collection import read_last_line
INCREMENTAL = 'incremental'
ABSOLUTE = 'absolute'
@@ -59,7 +57,6 @@ ATTR_VERIFY_ERR_COR = 'verify-total-err-corrected'
ATTR_VERIFY_ERR_UNC = 'verify-total-unc-errors'
ATTR_TEMPERATURE = 'temperature'
-
RE_ATA = re.compile(
'(\d+);' # attribute
'(\d+);' # normalized value
@@ -265,7 +262,7 @@ CHARTS = {
'line'],
'lines': [],
'attrs': [ATTR5],
- 'algo': INCREMENTAL,
+ 'algo': ABSOLUTE,
},
'reserved_block_count': {
'options': [None, 'Reserved Block Count', 'percentage', 'wear', 'smartd_log.reserved_block_count', 'line'],
@@ -533,7 +530,9 @@ def handle_error(*errors):
return method(*args)
except errors:
return None
+
return on_call
+
return on_method
@@ -653,10 +652,10 @@ class Service(SimpleService):
current_time = time()
for disk in self.disks[:]:
if any(
- [
- not disk.alive,
- not disk.log_file.is_active(current_time, self.age),
- ]
+ [
+ not disk.alive,
+ not disk.log_file.is_active(current_time, self.age),
+ ]
):
self.disks.remove(disk.raw_name)
self.remove_disk_from_charts(disk)
@@ -673,7 +672,7 @@ class Service(SimpleService):
return len(self.disks)
- def create_disk_from_file(self, full_name, current_time):
+ def create_disk_from_file(self, full_name, current_time):
if not full_name.endswith(CSV):
self.debug('skipping {0}: not a csv file'.format(full_name))
return None
diff --git a/collectors/python.d.plugin/spigotmc/README.md b/collectors/python.d.plugin/spigotmc/README.md
index 8b74913de..9b297f639 100644
--- a/collectors/python.d.plugin/spigotmc/README.md
+++ b/collectors/python.d.plugin/spigotmc/README.md
@@ -1,6 +1,12 @@
-# spigotmc
+<!--
+title: "SpigotMC monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/spigotmc/README.md
+sidebar_label: "SpigotMC"
+-->
-This module does some really basic monitoring for Spigot Minecraft servers.
+# SpigotMC monitoring with Netdata
+
+Performs basic monitoring for Spigot Minecraft servers.
It provides two charts, one tracking server-side ticks-per-second in
1, 5 and 15 minute averages, and one tracking the number of currently
@@ -9,7 +15,15 @@ active users.
This is not compatible with Spigot plugins which change the format of
the data returned by the `tps` or `list` console commands.
-## configuration
+## Configuration
+
+Edit the `python.d/spigotmc.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/spigotmc.conf
+```
```yaml
host: localhost
diff --git a/collectors/python.d.plugin/spigotmc/spigotmc.chart.py b/collectors/python.d.plugin/spigotmc/spigotmc.chart.py
index 79d17058c..f334113e4 100644
--- a/collectors/python.d.plugin/spigotmc/spigotmc.chart.py
+++ b/collectors/python.d.plugin/spigotmc/spigotmc.chart.py
@@ -3,12 +3,11 @@
# Author: Austin S. Hemmelgarn (Ferroin)
# SPDX-License-Identifier: GPL-3.0-or-later
-import socket
import platform
import re
+import socket
from bases.FrameworkServices.SimpleService import SimpleService
-
from third_party import mcrcon
# Update only every 5 seconds because collection takes in excess of
@@ -43,9 +42,8 @@ CHARTS = {
}
}
-
_TPS_REGEX = re.compile(
- r'^.*: .*?' # Message lead-in
+ r'^.*: .*?' # Message lead-in
r'(\d{1,2}.\d+), .*?' # 1-minute TPS value
r'(\d{1,2}.\d+), .*?' # 5-minute TPS value
r'(\d{1,2}\.\d+).*$', # 15-minute TPS value
@@ -107,10 +105,10 @@ class Service(SimpleService):
def is_alive(self):
if any(
- [
- not self.alive,
- self.console.socket.getsockopt(socket.IPPROTO_TCP, socket.TCP_INFO, 0) != 1
- ]
+ [
+ not self.alive,
+ self.console.socket.getsockopt(socket.IPPROTO_TCP, socket.TCP_INFO, 0) != 1
+ ]
):
return self.reconnect()
return True
@@ -131,7 +129,8 @@ class Service(SimpleService):
else:
self.error('Unable to process TPS values.')
if not raw:
- self.error("'{0}' command returned no value, make sure you set correct password".format(COMMAND_TPS))
+ self.error(
+ "'{0}' command returned no value, make sure you set correct password".format(COMMAND_TPS))
except mcrcon.MCRconException:
self.error('Unable to fetch TPS values.')
except socket.error:
diff --git a/collectors/python.d.plugin/springboot/README.md b/collectors/python.d.plugin/springboot/README.md
index 37b4dd7cb..f38e8bf05 100644
--- a/collectors/python.d.plugin/springboot/README.md
+++ b/collectors/python.d.plugin/springboot/README.md
@@ -1,6 +1,12 @@
-# springboot
+<!--
+title: "Java Spring Boot 2 application monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/springboot/README.md
+sidebar_label: "Java Spring Boot 2 applications"
+-->
-This module will monitor one or more Java Spring-boot applications depending on configuration.
+# Java Spring Boot 2 application monitoring with Netdata
+
+Monitors one or more Java Spring-boot applications depending on configuration.
Netdata can be used to monitor running Java [Spring Boot](https://spring.io/) applications that expose their metrics with the use of the **Spring Boot Actuator** included in Spring Boot library.
## Configuration
@@ -87,14 +93,20 @@ Please refer [Spring Boot Actuator: Production-ready Features](https://docs.spri
- MarkSweep
- ...
-4. **Heap Mmeory Usage** in KB
+4. **Heap Memory Usage** in KB
- used
- committed
## Usage
-The springboot module is enabled by default. It looks up `http://localhost:8080/metrics` and `http://127.0.0.1:8080/metrics` to detect Spring Boot application by default. You can change it by editing `/etc/netdata/python.d/springboot.conf` (to edit it on your system run `/etc/netdata/edit-config python.d/springboot.conf`).
+Edit the `python.d/springboot.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/springboot.conf
+```
 This module defines some common charts, and you can add custom charts by changing the configuration.
@@ -126,6 +138,8 @@ You can disable the default charts by set `defaults.<chart-id>: false`.
The dimension name of extras charts should replace `.` to `_`.
-Please check [springboot.conf](springboot.conf) for more examples.
+Please check
+[springboot.conf](https://raw.githubusercontent.com/netdata/netdata/master/collectors/python.d.plugin/springboot/springboot.conf)
+for more examples.
[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fspringboot%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/springboot/springboot.chart.py b/collectors/python.d.plugin/springboot/springboot.chart.py
index eec870ebf..dbe11d6b8 100644
--- a/collectors/python.d.plugin/springboot/springboot.chart.py
+++ b/collectors/python.d.plugin/springboot/springboot.chart.py
@@ -4,8 +4,8 @@
# SPDX-License-Identifier: GPL-3.0-or-later
import json
-from bases.FrameworkServices.UrlService import UrlService
+from bases.FrameworkServices.UrlService import UrlService
DEFAULT_ORDER = [
'response_code',
@@ -92,7 +92,7 @@ class Service(UrlService):
try:
data = json.loads(raw_data)
except ValueError:
- self.debug('%s is not a vaild JSON page' % self.url)
+ self.debug('%s is not a valid JSON page' % self.url)
return None
result = {
@@ -146,7 +146,7 @@ class Service(UrlService):
}
for line in lines:
- dimension = line.get('dimension', None) or self.die('dimension is missing: %s' % chart_id)
+ dimension = line.get('dimension', None) or self.die('dimension is missing: %s' % chart_id)
name = line.get('name', dimension)
algorithm = line.get('algorithm', 'absolute')
multiplier = line.get('multiplier', 1)
diff --git a/collectors/python.d.plugin/springboot/springboot.conf b/collectors/python.d.plugin/springboot/springboot.conf
index 13a398955..0cb369cd8 100644
--- a/collectors/python.d.plugin/springboot/springboot.conf
+++ b/collectors/python.d.plugin/springboot/springboot.conf
@@ -75,7 +75,7 @@
#
# Configuration example
# ---------------------
-# expample:
+# example:
# name: 'example'
# url: 'http://localhost:8080/metrics'
# defaults:
@@ -96,17 +96,17 @@
# options: { title: 'Eden Memory Usage', units: 'KB', family: 'heap memory', context: 'springboot.heap_eden', charttype: 'area' }
# lines:
# - { dimension: 'mempool_eden_used', name: 'used'}
-# - { dimension: 'mempool_eden_committed', name: 'commited'}
+# - { dimension: 'mempool_eden_committed', name: 'committed'}
# - id: 'heap_survivor'
# options: { title: 'Survivor Memory Usage', units: 'KB', family: 'heap memory', context: 'springboot.heap_survivor', charttype: 'area' }
# lines:
# - { dimension: 'mempool_survivor_used', name: 'used'}
-# - { dimension: 'mempool_survivor_committed', name: 'commited'}
+# - { dimension: 'mempool_survivor_committed', name: 'committed'}
# - id: 'heap_tenured'
# options: { title: 'Tenured Memory Usage', units: 'KB', family: 'heap memory', context: 'springboot.heap_tenured', charttype: 'area' }
# lines:
# - { dimension: 'mempool_tenured_used', name: 'used'}
-# - { dimension: 'mempool_tenured_committed', name: 'commited'}
+# - { dimension: 'mempool_tenured_committed', name: 'committed'}
local:
diff --git a/collectors/python.d.plugin/squid/README.md b/collectors/python.d.plugin/squid/README.md
index e1e3d0741..e3ed4e0df 100644
--- a/collectors/python.d.plugin/squid/README.md
+++ b/collectors/python.d.plugin/squid/README.md
@@ -1,6 +1,12 @@
-# squid
+<!--
+title: "Squid monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/squid/README.md
+sidebar_label: "Squid"
+-->
-This module will monitor one or more squid instances depending on configuration.
+# Squid monitoring with Netdata
+
+Monitors one or more squid instances depending on configuration.
It produces following charts:
@@ -26,7 +32,15 @@ It produces following charts:
- requests
- errors
-## configuration
+## Configuration
+
+Edit the `python.d/squid.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/squid.conf
+```
```yaml
priority : 50000
diff --git a/collectors/python.d.plugin/squid/squid.chart.py b/collectors/python.d.plugin/squid/squid.chart.py
index c00556b56..bcae2d892 100644
--- a/collectors/python.d.plugin/squid/squid.chart.py
+++ b/collectors/python.d.plugin/squid/squid.chart.py
@@ -5,7 +5,6 @@
from bases.FrameworkServices.SocketService import SocketService
-
ORDER = [
'clients_net',
'clients_requests',
diff --git a/collectors/python.d.plugin/tomcat/README.md b/collectors/python.d.plugin/tomcat/README.md
index 4d492c2d0..f9f2ffe31 100644
--- a/collectors/python.d.plugin/tomcat/README.md
+++ b/collectors/python.d.plugin/tomcat/README.md
@@ -1,6 +1,12 @@
-# tomcat
+<!--
+title: "Apache Tomcat monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/tomcat/README.md
+sidebar_label: "Tomcat"
+-->
-Present tomcat containers memory utilization.
+# Apache Tomcat monitoring with Netdata
+
+Presents memory utilization of tomcat containers.
Charts:
@@ -21,7 +27,15 @@ Charts:
- jvm
-## configuration
+## Configuration
+
+Edit the `python.d/tomcat.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/tomcat.conf
+```
```yaml
localhost:
diff --git a/collectors/python.d.plugin/tomcat/tomcat.chart.py b/collectors/python.d.plugin/tomcat/tomcat.chart.py
index ab3003304..90315f8c7 100644
--- a/collectors/python.d.plugin/tomcat/tomcat.chart.py
+++ b/collectors/python.d.plugin/tomcat/tomcat.chart.py
@@ -4,8 +4,8 @@
# Author: Wei He (Wing924)
# SPDX-License-Identifier: GPL-3.0-or-later
-import xml.etree.ElementTree as ET
import re
+import xml.etree.ElementTree as ET
from bases.FrameworkServices.UrlService import UrlService
diff --git a/collectors/python.d.plugin/tor/README.md b/collectors/python.d.plugin/tor/README.md
index 40905a958..192a86a37 100644
--- a/collectors/python.d.plugin/tor/README.md
+++ b/collectors/python.d.plugin/tor/README.md
@@ -1,8 +1,14 @@
-# tor
+<!--
+title: "Tor monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/tor/README.md
+sidebar_label: "Tor"
+-->
-Module connects to tor control port to collect traffic statistics.
+# Tor monitoring with Netdata
-**Requirements:**
+Connects to the Tor control port to collect traffic statistics.
+
+## Requirements
- `tor` program
- `stem` python package
@@ -14,9 +20,17 @@ It produces only one chart:
- read
- write
-## configuration
+## Configuration
+
+Edit the `python.d/tor.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/tor.conf
+```
-Needs only `control_port`
+Needs only `control_port`.
Here is an example for local server:
diff --git a/collectors/python.d.plugin/tor/tor.chart.py b/collectors/python.d.plugin/tor/tor.chart.py
index c6378ba5c..8dc021a63 100644
--- a/collectors/python.d.plugin/tor/tor.chart.py
+++ b/collectors/python.d.plugin/tor/tor.chart.py
@@ -11,11 +11,11 @@ try:
import stem
import stem.connection
import stem.control
+
STEM_AVAILABLE = True
except ImportError:
STEM_AVAILABLE = False
-
DEF_PORT = 'default'
ORDER = [
@@ -35,6 +35,7 @@ CHARTS = {
class Service(SimpleService):
"""Provide netdata service for Tor"""
+
def __init__(self, configuration=None, name=None):
super(Service, self).__init__(configuration=configuration, name=name)
self.order = ORDER
diff --git a/collectors/python.d.plugin/traefik/README.md b/collectors/python.d.plugin/traefik/README.md
index 9ced6060f..2a1dd77aa 100644
--- a/collectors/python.d.plugin/traefik/README.md
+++ b/collectors/python.d.plugin/traefik/README.md
@@ -1,6 +1,12 @@
-# traefik
+<!--
+title: "Traefik monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/traefik/README.md
+sidebar_label: "Traefik"
+-->
-Module uses the `health` API to provide statistics.
+# Traefik monitoring with Netdata
+
+Uses the `health` API to provide statistics.
It produces:
@@ -39,7 +45,15 @@ It produces:
- Traefik server uptime
-## configuration
+## Configuration
+
+Edit the `python.d/traefik.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/traefik.conf
+```
Needs only `url` to server's `health`
diff --git a/collectors/python.d.plugin/traefik/traefik.chart.py b/collectors/python.d.plugin/traefik/traefik.chart.py
index 570339d0a..5a498467f 100644
--- a/collectors/python.d.plugin/traefik/traefik.chart.py
+++ b/collectors/python.d.plugin/traefik/traefik.chart.py
@@ -4,12 +4,10 @@
# SPDX-License-Identifier: GPL-3.0-or-later
from collections import defaultdict
-
from json import loads
from bases.FrameworkServices.UrlService import UrlService
-
ORDER = [
'response_statuses',
'response_codes',
diff --git a/collectors/python.d.plugin/unbound/README.md b/collectors/python.d.plugin/unbound/README.md
deleted file mode 100644
index 4a3076100..000000000
--- a/collectors/python.d.plugin/unbound/README.md
+++ /dev/null
@@ -1,114 +0,0 @@
-# unbound
-
-## Deprecation Notes
-
-This module is deprecated. Please use [new version](https://github.com/netdata/go.d.plugin/tree/master/modules/unbound) instead.
-
-___
-
-Monitoring uses the remote control interface to fetch statistics.
-
-Provides the following charts:
-
-1. **Queries Processed**
-
- - Ratelimited
- - Cache Misses
- - Cache Hits
- - Expired
- - Prefetched
- - Recursive
-
-2. **Request List**
-
- - Average Size
- - Max Size
- - Overwritten Requests
- - Overruns
- - Current Size
- - User Requests
-
-3. **Recursion Timings**
-
-- Average recursion processing time
-- Median recursion processing time
-
-If extended stats are enabled, also provides:
-
-4. **Cache Sizes**
-
- - Message Cache
- - RRset Cache
- - Infra Cache
- - DNSSEC Key Cache
- - DNSCrypt Shared Secret Cache
- - DNSCrypt Nonce Cache
-
-## Configuration
-
-Unbound must be manually configured to enable the remote-control protocol.
-Check the Unbound documentation for info on how to do this. Additionally,
-if you want to take advantage of the autodetection this plugin offers,
-you will need to make sure your `unbound.conf` file only uses spaces for
-indentation (the default config shipped by most distributions uses tabs
-instead of spaces).
-
-Once you have the Unbound control protocol enabled, you need to make sure
-that either the certificate and key are readable by Netdata (if you're
-using the regular control interface), or that the socket is accessible
-to Netdata (if you're using a UNIX socket for the contorl interface).
-
-By default, for the local system, everything can be auto-detected
-assuming Unbound is configured correctly and has been told to listen
-on the loopback interface or a UNIX socket. This is done by looking
-up info in the Unbound config file specified by the `ubconf` key.
-
-To enable extended stats for a given job, add `extended: yes` to the
-definition.
-
-You can also enable per-thread charts for a given job by adding
-`per_thread: yes` to the definition. Note that the numbe rof threads
-is only checked on startup.
-
-A basic local configuration with extended statistics and per-thread
-charts looks like this:
-
-```yaml
-local:
- ubconf: /etc/unbound/unbound.conf
- extended: yes
- per_thread: yes
-```
-
-While it's a bit more complicated to set up correctly, it is recommended
-that you use a UNIX socket as it provides far better performance.
-
-### Troubleshooting
-
-If you've configured the module and can't get it to work, make sure and
-check all of the following:
-
-- If you're using autodetection, double check that your `unbound.conf`
- file is actually using spaces instead of tabs, and that appropriate
- indentation is present. Most Linux distributions ship a default config
- for Unbound that uses tabs, and the plugin can't read such a config file
- correctly. Also, make sure this file is actually readable by Netdata.
-- Ensure that the control protocol is actually configured correctly.
- You can check this quickly by running `unbound-control stats_noreset`
- as root, which should print out a bunch of info about the internal
- statistics of the server. If this returns an error, you don't have
- the control protocol set up correctly.
-- If using the regular control interface, make sure that the certificate
- and key file you have configured in `unbound.conf` are readable by
- Netdata. In general, it's preferred to use ACL's on the files to
- provide the required permissions.
-- If using a UNIX socket, make sure that the socket is both readable
- _and_ writable by Netdata. Just like with the regular control
- interface, it's preferred to use ACL's to provide these permissions.
-- Make sure that SELinux, Apparmor, or any other mandatory access control
- system isn't interfering with the access requirements mentioned above.
- In some cases, you may have to add a local rule to allow this access.
-
----
-
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Funbound%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/unbound/unbound.chart.py b/collectors/python.d.plugin/unbound/unbound.chart.py
deleted file mode 100644
index 590de4c98..000000000
--- a/collectors/python.d.plugin/unbound/unbound.chart.py
+++ /dev/null
@@ -1,318 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: unbound netdata python.d module
-# Author: Austin S. Hemmelgarn (Ferroin)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-import os
-import sys
-
-from copy import deepcopy
-
-from bases.FrameworkServices.SocketService import SocketService
-from bases.loaders import load_config
-
-PRECISION = 1000
-
-ORDER = [
- 'queries',
- 'recursion',
- 'reqlist',
-]
-
-CHARTS = {
- 'queries': {
- 'options': [None, 'Queries Processed', 'queries', 'Unbound', 'unbound.queries', 'line'],
- 'lines': [
- ['ratelimit', 'ratelimited', 'absolute', 1, 1],
- ['cachemiss', 'cache_miss', 'absolute', 1, 1],
- ['cachehit', 'cache_hit', 'absolute', 1, 1],
- ['expired', 'expired', 'absolute', 1, 1],
- ['prefetch', 'prefetched', 'absolute', 1, 1],
- ['recursive', 'recursive', 'absolute', 1, 1]
- ]
- },
- 'recursion': {
- 'options': [None, 'Recursion Timings', 'milliseconds', 'Unbound', 'unbound.recursion', 'line'],
- 'lines': [
- ['recursive_avg', 'average', 'absolute', 1, 1],
- ['recursive_med', 'median', 'absolute', 1, 1]
- ]
- },
- 'reqlist': {
- 'options': [None, 'Request List', 'items', 'Unbound', 'unbound.reqlist', 'line'],
- 'lines': [
- ['reqlist_avg', 'average_size', 'absolute', 1, 1],
- ['reqlist_max', 'maximum_size', 'absolute', 1, 1],
- ['reqlist_overwritten', 'overwritten_requests', 'absolute', 1, 1],
- ['reqlist_exceeded', 'overruns', 'absolute', 1, 1],
- ['reqlist_current', 'current_size', 'absolute', 1, 1],
- ['reqlist_user', 'user_requests', 'absolute', 1, 1]
- ]
- }
-}
-
-# These get added too if we are told to use extended stats.
-EXTENDED_ORDER = ['cache']
-
-EXTENDED_CHARTS = {
- 'cache': {
- 'options': [None, 'Cache Sizes', 'items', 'Unbound', 'unbound.cache', 'stacked'],
- 'lines': [
- ['cache_message', 'message_cache', 'absolute', 1, 1],
- ['cache_rrset', 'rrset_cache', 'absolute', 1, 1],
- ['cache_infra', 'infra_cache', 'absolute', 1, 1],
- ['cache_key', 'dnssec_key_cache', 'absolute', 1, 1],
- ['cache_dnscss', 'dnscrypt_Shared_Secret_cache', 'absolute', 1, 1],
- ['cache_dnscn', 'dnscrypt_Nonce_cache', 'absolute', 1, 1]
- ]
- }
-}
-
-# This is used as a templates for the per-thread charts.
-PER_THREAD_CHARTS = {
- '_queries': {
- 'options': [None, '{longname} Queries Processed', 'queries', 'Queries Processed',
- 'unbound.threads.queries', 'line'],
- 'lines': [
- ['{shortname}_ratelimit', 'ratelimited', 'absolute', 1, 1],
- ['{shortname}_cachemiss', 'cache_miss', 'absolute', 1, 1],
- ['{shortname}_cachehit', 'cache_hit', 'absolute', 1, 1],
- ['{shortname}_expired', 'expired', 'absolute', 1, 1],
- ['{shortname}_prefetch', 'prefetched', 'absolute', 1, 1],
- ['{shortname}_recursive', 'recursive', 'absolute', 1, 1]
- ]
- },
- '_recursion': {
- 'options': [None, '{longname} Recursion Timings', 'milliseconds', 'Recursive Timings',
- 'unbound.threads.recursion', 'line'],
- 'lines': [
- ['{shortname}_recursive_avg', 'average', 'absolute', 1, 1],
- ['{shortname}_recursive_med', 'median', 'absolute', 1, 1]
- ]
- },
- '_reqlist': {
- 'options': [None, '{longname} Request List', 'items', 'Request List', 'unbound.threads.reqlist', 'line'],
- 'lines': [
- ['{shortname}_reqlist_avg', 'average_size', 'absolute', 1, 1],
- ['{shortname}_reqlist_max', 'maximum_size', 'absolute', 1, 1],
- ['{shortname}_reqlist_overwritten', 'overwritten_requests', 'absolute', 1, 1],
- ['{shortname}_reqlist_exceeded', 'overruns', 'absolute', 1, 1],
- ['{shortname}_reqlist_current', 'current_size', 'absolute', 1, 1],
- ['{shortname}_reqlist_user', 'user_requests', 'absolute', 1, 1]
- ]
- }
-}
-
-# This maps the Unbound stat names to our names and precision requiremnets.
-STAT_MAP = {
- 'total.num.queries_ip_ratelimited': ('ratelimit', 1),
- 'total.num.cachehits': ('cachehit', 1),
- 'total.num.cachemiss': ('cachemiss', 1),
- 'total.num.zero_ttl': ('expired', 1),
- 'total.num.prefetch': ('prefetch', 1),
- 'total.num.recursivereplies': ('recursive', 1),
- 'total.requestlist.avg': ('reqlist_avg', 1),
- 'total.requestlist.max': ('reqlist_max', 1),
- 'total.requestlist.overwritten': ('reqlist_overwritten', 1),
- 'total.requestlist.exceeded': ('reqlist_exceeded', 1),
- 'total.requestlist.current.all': ('reqlist_current', 1),
- 'total.requestlist.current.user': ('reqlist_user', 1),
- # Unbound reports recursion timings as fractional seconds, but we want to show them as milliseconds.
- 'total.recursion.time.avg': ('recursive_avg', PRECISION),
- 'total.recursion.time.median': ('recursive_med', PRECISION),
- 'msg.cache.count': ('cache_message', 1),
- 'rrset.cache.count': ('cache_rrset', 1),
- 'infra.cache.count': ('cache_infra', 1),
- 'key.cache.count': ('cache_key', 1),
- 'dnscrypt_shared_secret.cache.count': ('cache_dnscss', 1),
- 'dnscrypt_nonce.cache.count': ('cache_dnscn', 1)
-}
-
-# Same as above, but for per-thread stats.
-PER_THREAD_STAT_MAP = {
- '{shortname}.num.queries_ip_ratelimited': ('{shortname}_ratelimit', 1),
- '{shortname}.num.cachehits': ('{shortname}_cachehit', 1),
- '{shortname}.num.cachemiss': ('{shortname}_cachemiss', 1),
- '{shortname}.num.zero_ttl': ('{shortname}_expired', 1),
- '{shortname}.num.prefetch': ('{shortname}_prefetch', 1),
- '{shortname}.num.recursivereplies': ('{shortname}_recursive', 1),
- '{shortname}.requestlist.avg': ('{shortname}_reqlist_avg', 1),
- '{shortname}.requestlist.max': ('{shortname}_reqlist_max', 1),
- '{shortname}.requestlist.overwritten': ('{shortname}_reqlist_overwritten', 1),
- '{shortname}.requestlist.exceeded': ('{shortname}_reqlist_exceeded', 1),
- '{shortname}.requestlist.current.all': ('{shortname}_reqlist_current', 1),
- '{shortname}.requestlist.current.user': ('{shortname}_reqlist_user', 1),
- # Unbound reports recursion timings as fractional seconds, but we want to show them as milliseconds.
- '{shortname}.recursion.time.avg': ('{shortname}_recursive_avg', PRECISION),
- '{shortname}.recursion.time.median': ('{shortname}_recursive_med', PRECISION)
-}
-
-
-def is_readable(name):
- return os.access(name, os.R_OK)
-
-
-# Used to actually generate per-thread charts.
-def _get_perthread_info(thread):
- sname = 'thread{0}'.format(thread)
- lname = 'Thread {0}'.format(thread)
- charts = dict()
- order = []
- statmap = dict()
-
- for item in PER_THREAD_CHARTS:
- cname = '{0}{1}'.format(sname, item)
- chart = deepcopy(PER_THREAD_CHARTS[item])
- chart['options'][1] = chart['options'][1].format(longname=lname)
-
- for index, line in enumerate(chart['lines']):
- chart['lines'][index][0] = line[0].format(shortname=sname)
-
- order.append(cname)
- charts[cname] = chart
-
- for key, value in PER_THREAD_STAT_MAP.items():
- statmap[key.format(shortname=sname)] = (value[0].format(shortname=sname), value[1])
-
- return charts, order, statmap
-
-
-class Service(SocketService):
- def __init__(self, configuration=None, name=None):
- # The unbound control protocol is always TLS encapsulated
- # unless it's used over a UNIX socket, so enable TLS _before_
- # doing the normal SocketService initialization.
- configuration['tls'] = True
- self.port = 8935
- SocketService.__init__(self, configuration, name)
- self.ext = self.configuration.get('extended', None)
- self.ubconf = self.configuration.get('ubconf', None)
- self.perthread = self.configuration.get('per_thread', False)
- self.threads = None
- self.order = deepcopy(ORDER)
- self.definitions = deepcopy(CHARTS)
- self.request = 'UBCT1 stats\n'
- self.statmap = deepcopy(STAT_MAP)
- self._parse_config()
- self._auto_config()
- self.debug('Extended stats: {0}'.format(self.ext))
- self.debug('Per-thread stats: {0}'.format(self.perthread))
- if self.ext:
- self.order = self.order + EXTENDED_ORDER
- self.definitions.update(EXTENDED_CHARTS)
- if self.unix_socket:
- self.debug('Using unix socket: {0}'.format(self.unix_socket))
- else:
- self.debug('Connecting to: {0}:{1}'.format(self.host, self.port))
- self.debug('Using key: {0}'.format(self.key))
- self.debug('Using certificate: {0}'.format(self.cert))
-
- def _auto_config(self):
- self.load_unbound_config()
-
- if not self.key:
- self.key = '/etc/unbound/unbound_control.key'
- if not self.cert:
- self.cert = '/etc/unbound/unbound_control.pem'
- if not self.port:
- self.port = 8953
-
- def load_unbound_config(self):
- if not (self.ubconf and is_readable(self.ubconf)):
- self.debug('Unbound configuration not found.')
- return
-
- self.debug('Loading Unbound config: {0}'.format(self.ubconf))
-
- try:
- conf = load_config(self.ubconf)
- except Exception as error:
- self.error("error on loading '{0}' : {1}".format(self.ubconf, error))
- return
-
- srv = conf.get('server')
- if self.ext is None:
- if srv and 'extended-statistics' in srv:
- self.ext = srv['extended-statistics']
-
- rc = conf.get('remote-control')
- if not (rc and isinstance(rc, dict)):
- return
-
- if rc.get('control-use-cert', False):
- self.key = self.key or rc.get('control-key-file')
- self.cert = self.cert or rc.get('control-cert-file')
- self.port = self.port or rc.get('control-port')
- else:
- ci = rc.get('control-interface', str())
- is_socket = '/' in ci
- if is_socket:
- self.unix_socket = ci
-
- def _generate_perthread_charts(self):
- tmporder = list()
- for thread in range(0, self.threads):
- charts, order, statmap = _get_perthread_info(thread)
- tmporder.extend(order)
- self.definitions.update(charts)
- self.statmap.update(statmap)
- self.order.extend(sorted(tmporder))
-
- def check(self):
- if not is_readable(self.key):
- self.error("ssl key '{0}' is not readable".format(self.key))
- return False
-
- if not is_readable(self.cert):
- self.error("ssl certificate '{0}' is not readable".format(self.certificate))
- return False
-
- # Check if authentication is working.
- self._connect()
- result = bool(self._sock)
- self._disconnect()
- # If auth works, and we need per-thread charts, query the server
- # to see how many threads it's using. This somewhat abuses the
- # SocketService API to get the data we need.
- if result and self.perthread:
- tmp = self.request
- if sys.version_info[0] < 3:
- self.request = 'UBCT1 status\n'
- else:
- self.request = b'UBCT1 status\n'
- raw = self._get_raw_data()
- if raw is None:
- result = False
- self.warning('Received no data from socket.')
- else:
- for line in raw.splitlines():
- if line.startswith('threads'):
- self.threads = int(line.split()[1])
- self._generate_perthread_charts()
- break
- if self.threads is None:
- self.info('Unable to auto-detect thread counts, disabling per-thread stats.')
- self.perthread = False
- self.request = tmp
- return result
-
- def _get_data(self):
- raw = self._get_raw_data()
- data = dict()
- tmp = dict()
- if raw is not None:
- for line in raw.splitlines():
- stat = line.split('=')
- tmp[stat[0]] = stat[1]
- for item in self.statmap:
- if item in tmp:
- data[self.statmap[item][0]] = float(tmp[item]) * self.statmap[item][1]
- else:
- self.warning('Received no data from socket.')
- return data
-
- @staticmethod
- def _check_raw_data(data):
- # The server will close the connection when it's done sending
- # data, so just keep looping until that happens.
- return False
diff --git a/collectors/python.d.plugin/uwsgi/README.md b/collectors/python.d.plugin/uwsgi/README.md
index a8111965d..f564821a1 100644
--- a/collectors/python.d.plugin/uwsgi/README.md
+++ b/collectors/python.d.plugin/uwsgi/README.md
@@ -1,10 +1,13 @@
-# uwsgi
+<!--
+title: "uWSGI monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/uwsgi/README.md
+sidebar_label: "uWSGI"
+-->
-Module monitor uwsgi performance metrics.
+# uWSGI monitoring with Netdata
-<https://uwsgi-docs.readthedocs.io/en/latest/StatsServer.html>
+Monitors performance metrics exposed by [`Stats Server`](https://uwsgi-docs.readthedocs.io/en/latest/StatsServer.html).
-lines are creates dynamically based on how many workers are there
Following charts are drawn:
@@ -23,7 +26,15 @@ Following charts are drawn:
4. **Harakiris**
5. **Respawns**
-## configuration
+## Configuration
+
+Edit the `python.d/uwsgi.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/uwsgi.conf
+```
```yaml
socket:
diff --git a/collectors/python.d.plugin/uwsgi/uwsgi.chart.py b/collectors/python.d.plugin/uwsgi/uwsgi.chart.py
index 511b770cf..e4d900005 100644
--- a/collectors/python.d.plugin/uwsgi/uwsgi.chart.py
+++ b/collectors/python.d.plugin/uwsgi/uwsgi.chart.py
@@ -5,8 +5,8 @@
import json
from copy import deepcopy
-from bases.FrameworkServices.SocketService import SocketService
+from bases.FrameworkServices.SocketService import SocketService
ORDER = [
'requests',
diff --git a/collectors/python.d.plugin/varnish/README.md b/collectors/python.d.plugin/varnish/README.md
index 4de883d31..cb29738f5 100644
--- a/collectors/python.d.plugin/varnish/README.md
+++ b/collectors/python.d.plugin/varnish/README.md
@@ -1,80 +1,56 @@
-# varnish
+<!--
+title: "Varnish Cache monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/varnish/README.md
+sidebar_label: "Varnish Cache"
+-->
-Module uses the `varnishstat` command to provide varnish cache statistics.
+# Varnish Cache monitoring with Netdata
-It produces:
+Provides global, backend (VBE), and storage (SMF, SMA, MSE) statistics for the HTTP accelerator using the `varnishstat` tool.
-1. **Connections Statistics** in connections/s
+Note that both Varnish-Cache (free and open source) and Varnish-Plus (the commercial/enterprise version) are supported.
- - accepted
- - dropped
+## Requirements
-2. **Client Requests** in requests/s
+- `netdata` user must be a member of the `varnish` group
- - received
+## Charts
-3. **All History Hit Rate Ratio** in percent
+This module produces the following charts:
- - hit
- - miss
- - hitpass
+- Connections Statistics in `connections/s`
+- Client Requests in `requests/s`
+- All History Hit Rate Ratio in `percent`
+- Current Poll Hit Rate Ratio in `percent`
+- Expired Objects in `expired/s`
+- Least Recently Used Nuked Objects in `nuked/s`
+- Number Of Threads In All Pools in `pools`
+- Threads Statistics in `threads/s`
+- Current Queue Length in `requests`
+- Backend Connections Statistics in `connections/s`
+- Requests To The Backend in `requests/s`
+- ESI Statistics in `problems/s`
+- Memory Usage in `MiB`
+- Uptime in `seconds`
-4. **Current Poll Hit Rate Ratio** in percent
+For every backend (VBE):
- - hit
- - miss
- - hitpass
+- Backend Response Statistics in `kilobits/s`
-5. **Expired Objects** in expired/s
+For every storage (SMF, SMA, or MSE):
- - objects
+- Storage Usage in `KiB`
+- Storage Allocated Objects
-6. **Least Recently Used Nuked Objects** in nuked/s
+## Configuration
- - objects
+Edit the `python.d/varnish.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
-7. **Number Of Threads In All Pools** in threads
-
- - threads
-
-8. **Threads Statistics** in threads/s
-
- - created
- - failed
- - limited
-
-9. **Current Queue Length** in requests
-
- - in queue
-
-10. **Backend Connections Statistics** in connections/s
-
- - successful
- - unhealthy
- - reused
- - closed
- - resycled
- - failed
-
-11. **Requests To The Backend** in requests/s
-
- - received
-
-12. **ESI Statistics** in problems/s
-
- - errors
- - warnings
-
-13. **Memory Usage** in MB
-
- - free
- - allocated
-
-14. **Uptime** in seconds
-
- - uptime
-
-## configuration
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/varnish.conf
+```
Only one parameter is supported:
@@ -82,7 +58,7 @@ Only one parameter is supported:
instance_name: 'name'
```
-The name of the varnishd instance to get logs from. If not specified, the host name is used.
+The name of the `varnishd` instance to get logs from. If not specified, the host name is used.
---
diff --git a/collectors/python.d.plugin/varnish/varnish.chart.py b/collectors/python.d.plugin/varnish/varnish.chart.py
index 58745e24d..534d70926 100644
--- a/collectors/python.d.plugin/varnish/varnish.chart.py
+++ b/collectors/python.d.plugin/varnish/varnish.chart.py
@@ -103,7 +103,7 @@ CHARTS = {
['backend_unhealthy', 'unhealthy', 'incremental'],
['backend_reuse', 'reused', 'incremental'],
['backend_toolate', 'closed', 'incremental'],
- ['backend_recycle', 'resycled', 'incremental'],
+ ['backend_recycle', 'recycled', 'incremental'],
['backend_fail', 'failed', 'incremental']
]
},
@@ -135,9 +135,54 @@ CHARTS = {
}
}
+
+def backend_charts_template(name):
+ order = [
+ '{0}_response_statistics'.format(name),
+ ]
+
+ charts = {
+ order[0]: {
+ 'options': [None, 'Backend "{0}"'.format(name), 'kilobits/s', 'backend response statistics',
+ 'varnish.backend', 'area'],
+ 'lines': [
+ ['{0}_beresp_hdrbytes'.format(name), 'header', 'incremental', 8, 1000],
+ ['{0}_beresp_bodybytes'.format(name), 'body', 'incremental', -8, 1000]
+ ]
+ },
+ }
+
+ return order, charts
+
+
+def storage_charts_template(name):
+ order = [
+ 'storage_{0}_usage'.format(name),
+ 'storage_{0}_alloc_objs'.format(name)
+ ]
+
+ charts = {
+ order[0]: {
+ 'options': [None, 'Storage "{0}" Usage'.format(name), 'KiB', 'storage usage', 'varnish.storage_usage', 'stacked'],
+ 'lines': [
+ ['{0}.g_space'.format(name), 'free', 'absolute', 1, 1 << 10],
+ ['{0}.g_bytes'.format(name), 'allocated', 'absolute', 1, 1 << 10]
+ ]
+ },
+ order[1]: {
+ 'options': [None, 'Storage "{0}" Allocated Objects'.format(name), 'objects', 'storage usage', 'varnish.storage_alloc_objs', 'line'],
+ 'lines': [
+ ['{0}.g_alloc'.format(name), 'allocated', 'absolute']
+ ]
+ }
+ }
+
+ return order, charts
+
+
VARNISHSTAT = 'varnishstat'
-re_version = re.compile(r'varnish-(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)')
+re_version = re.compile(r'varnish-(?:plus-)?(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)')
class VarnishVersion:
@@ -188,6 +233,8 @@ class Service(ExecutableService):
self.instance_name = configuration.get('instance_name')
self.parser = Parser()
self.command = None
+ self.collected_vbe = set()
+ self.collected_storages = set()
def create_command(self):
varnishstat = find_binary(VARNISHSTAT)
@@ -206,10 +253,7 @@ class Service(ExecutableService):
ver = parse_varnish_version(reply)
if not ver:
self.error("failed to parse reply from '{0}', used regex :'{1}', reply : {2}".format(
- ' '.join(command),
- re_version.pattern,
- reply,
- ))
+ ' '.join(command), re_version.pattern, reply))
return False
if self.instance_name:
@@ -241,9 +285,6 @@ class Service(ExecutableService):
self.error('cant parse the output...')
return False
- if self.parser.re_backend:
- backends = [b[0] for b in self.parser.backend_stats(reply)[::2]]
- self.create_backends_charts(backends)
return True
def get_data(self):
@@ -260,11 +301,11 @@ class Service(ExecutableService):
if not server_stats:
return None
- if self.parser.re_backend:
- backend_stats = self.parser.backend_stats(raw)
- data.update(dict(('_'.join([name, param]), value) for name, param, value in backend_stats))
+ stats = dict((param, value) for _, param, value in server_stats)
+ data.update(stats)
- data.update(dict((param, value) for _, param, value in server_stats))
+ self.get_vbe_backends(data, raw)
+ self.get_storages(server_stats)
# varnish 5 uses default.g_bytes and default.g_space
data['memory_allocated'] = data.get('s0.g_bytes') or data.get('default.g_bytes')
@@ -272,27 +313,63 @@ class Service(ExecutableService):
return data
- def create_backends_charts(self, backends):
- for backend in backends:
- chart_name = ''.join([backend, '_response_statistics'])
- title = 'Backend "{0}"'.format(backend.capitalize())
- hdr_bytes = ''.join([backend, '_beresp_hdrbytes'])
- body_bytes = ''.join([backend, '_beresp_bodybytes'])
-
- chart = {
- chart_name:
- {
- 'options': [None, title, 'kilobits/s', 'backend response statistics',
- 'varnish.backend', 'area'],
- 'lines': [
- [hdr_bytes, 'header', 'incremental', 8, 1000],
- [body_bytes, 'body', 'incremental', -8, 1000]
- ]
- }
- }
-
- self.order.insert(0, chart_name)
- self.definitions.update(chart)
+ def get_vbe_backends(self, data, raw):
+ if not self.parser.re_backend:
+ return
+ stats = self.parser.backend_stats(raw)
+ if not stats:
+ return
+
+ for (name, param, value) in stats:
+ data['_'.join([name, param])] = value
+ if name in self.collected_vbe:
+ continue
+ self.collected_vbe.add(name)
+ self.add_backend_charts(name)
+
+ def get_storages(self, server_stats):
+ # Storage types:
+ # - SMF: File Storage
+ # - SMA: Malloc Storage
+ # - MSE: Massive Storage Engine (Varnish-Plus only)
+ #
+ # Stats example:
+ # [('SMF.', 'ssdStorage.c_req', '47686'),
+ # ('SMF.', 'ssdStorage.c_fail', '0'),
+ # ('SMF.', 'ssdStorage.c_bytes', '668102656'),
+ # ('SMF.', 'ssdStorage.c_freed', '140980224'),
+ # ('SMF.', 'ssdStorage.g_alloc', '39753'),
+ # ('SMF.', 'ssdStorage.g_bytes', '527122432'),
+ # ('SMF.', 'ssdStorage.g_space', '53159968768'),
+ # ('SMF.', 'ssdStorage.g_smf', '40130'),
+ # ('SMF.', 'ssdStorage.g_smf_frag', '311'),
+ # ('SMF.', 'ssdStorage.g_smf_large', '66')]
+ storages = [name for typ, name, _ in server_stats if typ.startswith(('SMF', 'SMA', 'MSE')) and name.endswith('g_space')]
+ if not storages:
+ return
+ for storage in storages:
+ storage = storage.split('.')[0]
+ if storage in self.collected_storages:
+ continue
+ self.collected_storages.add(storage)
+ self.add_storage_charts(storage)
+
+ def add_backend_charts(self, backend_name):
+ self.add_charts(backend_name, backend_charts_template)
+
+ def add_storage_charts(self, storage_name):
+ self.add_charts(storage_name, storage_charts_template)
+
+ def add_charts(self, name, charts_template):
+ order, charts = charts_template(name)
+
+ for chart_name in order:
+ params = [chart_name] + charts[chart_name]['options']
+ dimensions = charts[chart_name]['lines']
+
+ new_chart = self.charts.add_chart(params)
+ for dimension in dimensions:
+ new_chart.add_dimension(dimension)
def parse_varnish_version(lines):
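Two behavioural points in the varnish changes above are easy to sanity-check in isolation: the version regex now tolerates Varnish-Plus banners, and storage charts are derived from `g_space` counters whose type prefix is SMF, SMA, or MSE. A small self-contained sketch, using made-up banner strings and stat tuples:

```python
# Illustrative only: reproduces the relaxed version regex and the storage
# detection rule from the change above; all sample data here is made up.
import re

re_version = re.compile(r'varnish-(?:plus-)?(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)')

for banner in ('varnishstat (varnish-6.0.6 revision 1234)',
               'varnishstat (varnish-plus-6.0.6r2 revision 5678)'):
    m = re_version.search(banner)
    print(banner, '->', m.groupdict() if m else 'no match')

# Storage detection: one usage/objects chart pair per SMF/SMA/MSE storage.
server_stats = [
    ('SMA.', 's0.g_bytes', '52428800'),
    ('SMA.', 's0.g_space', '1073741824'),
    ('MAIN.', 'uptime', '12345'),
]
storages = [name.split('.')[0]
            for typ, name, _ in server_stats
            if typ.startswith(('SMF', 'SMA', 'MSE')) and name.endswith('g_space')]
print(storages)  # ['s0'] -> charts 'storage_s0_usage' and 'storage_s0_alloc_objs'
```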
diff --git a/collectors/python.d.plugin/w1sensor/README.md b/collectors/python.d.plugin/w1sensor/README.md
index 74edcc0a8..31facef77 100644
--- a/collectors/python.d.plugin/w1sensor/README.md
+++ b/collectors/python.d.plugin/w1sensor/README.md
@@ -1,14 +1,27 @@
-# w1sensor
+<!--
+title: "1-Wire Sensors monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/w1sensor/README.md
+sidebar_label: "1-Wire sensors"
+-->
+
+# 1-Wire Sensors monitoring with Netdata
+
+Monitors sensor temperature.
-Data from 1-Wire sensors.
On Linux these are supported by the wire, w1_gpio, and w1_therm modules.
Currently temperature sensors are supported and automatically detected.
Charts are created dynamically based on the number of detected sensors.
-## configuration
+## Configuration
+
+Edit the `python.d/w1sensor.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
-For detailed configuration information please read [`w1sensor.conf`](w1sensor.conf) file.
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/w1sensor.conf
+```
---
diff --git a/collectors/python.d.plugin/w1sensor/w1sensor.chart.py b/collectors/python.d.plugin/w1sensor/w1sensor.chart.py
index e50312fc5..c4f847bf0 100644
--- a/collectors/python.d.plugin/w1sensor/w1sensor.chart.py
+++ b/collectors/python.d.plugin/w1sensor/w1sensor.chart.py
@@ -5,6 +5,7 @@
import os
import re
+
from bases.FrameworkServices.SimpleService import SimpleService
# default module values (can be overridden per job in `config`)
@@ -40,6 +41,7 @@ THERM_FAMILY = {
class Service(SimpleService):
"""Provide netdata service for 1-Wire sensors"""
+
def __init__(self, configuration=None, name=None):
SimpleService.__init__(self, configuration=configuration, name=name)
self.order = ORDER
diff --git a/collectors/python.d.plugin/web_log/README.md b/collectors/python.d.plugin/web_log/README.md
index 33dfd696a..2cf60ed9e 100644
--- a/collectors/python.d.plugin/web_log/README.md
+++ b/collectors/python.d.plugin/web_log/README.md
@@ -1,4 +1,12 @@
-# web_log
+<!--
+title: "Web server log (Apache, NGINX, Squid) monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/web_log/README.md
+sidebar_label: "Web server logs (Apache, NGINX, Squid)"
+-->
+
+# Web server log (Apache, NGINX, Squid) monitoring with Netdata
+
+Tails the access log file and collects web server/caching proxy metrics.
## Motivation
@@ -27,7 +35,15 @@ If Netdata is installed on a system running a web server, it will detect it and
## Configuration
-[**netdata**](https://my-netdata.io/) has a powerful `web_log` plugin, capable of incrementally parsing any number of web server log files. This plugin is automatically started with [**netdata**](https://my-netdata.io/) and comes, pre-configured, for finding web server log files on popular distributions. Its configuration is at [`/etc/netdata/python.d/web_log.conf`](web_log.conf), like this:
+Edit the `python.d/web_log.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/web_log.conf
+```
+
+[**netdata**](https://my-netdata.io/) has a powerful `web_log` plugin, capable of incrementally parsing any number of web server log files. This plugin is automatically started with [**netdata**](https://my-netdata.io/) and comes, pre-configured, for finding web server log files on popular distributions. Its configuration is at `/etc/netdata/python.d/web_log.conf`, like this:
```yaml
nginx_log:
@@ -42,8 +58,8 @@ apache_log:
observium : 'observium'
```
-Theodule has preconfigured jobs for nginx, apache and gunicorn on various distros.
-You can add one such section, for each of your web server log files.
+The module has preconfigured jobs for nginx, apache and gunicorn on various distros.
+You can add one such section for each of your web server log files.
> **Important**<br/>Keep in mind [**netdata**](https://my-netdata.io/) runs as user `netdata`. So, make sure user `netdata` has access to the logs directory and can read the log file.
@@ -51,7 +67,7 @@ You can add one such section, for each of your web server log files.
Once you have all log files configured and [**netdata**](https://my-netdata.io/) restarted, **for each log file** you will get a section at the [**netdata**](https://my-netdata.io/) dashboard, with the following charts.
-### responses by status
+### Responses by status
In this chart we tried to provide a meaningful status for all responses. So:
@@ -98,13 +114,13 @@ Here we show all the response codes in detail.
Number of responses for each response code family individually (requests/s)
-### bandwidth
+### Bandwidth
This is a nice view of the traffic the web server is receiving and is sending.
 What is important to know for this chart is that the bandwidth used for each request and response is accounted at the time the log is written. Since [**netdata**](https://my-netdata.io/) refreshes this chart every single second, you may have unrealistic spikes if the size of the requests or responses is too big. The reason is simple: a response may have needed 1 minute to be completed, but all the bandwidth used during that minute for the specific response will be accounted at the second the log line is written.
-As the legend on the chart suggests, you can use FireQoS to setup QoS on the web server ports and IPs to accurately measure the bandwidth the web server is using. Actually, [there may be a few more reasons to install QoS on your servers](../../tc.plugin/#tcplugin)...
+As the legend on the chart suggests, you can use FireQoS to setup QoS on the web server ports and IPs to accurately measure the bandwidth the web server is using. Actually, [there may be a few more reasons to install QoS on your servers](/collectors/tc.plugin/README.md#tcplugin)...
**Bandwidth** KB/s
@@ -115,7 +131,7 @@ As the legend on the chart suggests, you can use FireQoS to setup QoS on the web
> **Important**<br/>Most web servers do not log the request size by default.<br/>So, [unless you have configured your web server to log the size of requests](https://github.com/netdata/netdata/blob/419cd0a237275e5eeef3f92dcded84e735ee6c58/conf.d/python.d/web_log.conf#L76-L89), the `received` dimension will always be zero.
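If your web server does log the request size, the job can declare a custom log format whose named `resp_length` group is assumed to feed the `received` dimension. The pattern below is only an illustrative sketch, not a drop-in configuration; the exact regular expression depends on the log format you configured:

```yaml
# hypothetical custom format: the resp_length group is assumed to capture the request size in bytes
nginx_log:
  path: '/var/log/nginx/access.log'
  custom_log_format:
    pattern: '(?P<address>[\da-f.:]+) .+ "(?P<method>[A-Z]+) (?P<url>\S+) \S+" (?P<code>\d+) (?P<bytes_sent>\d+) (?P<resp_length>\d+)'
```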
-### timings
+### Timings
[**netdata**](https://my-netdata.io/) will also render the `minimum`, `average` and `maximum` time the web server needed to respond to requests.
@@ -185,7 +201,7 @@ The last charts are about the unique IPs accessing your web server.
## Alarms
-The magic of [**netdata**](https://my-netdata.io/) is that all metrics are collected per second, and all metrics can be used or correlated to provide real-time alarms. Out of the box, [**netdata**](https://my-netdata.io/) automatically attaches the [following alarms](../../../health/health.d/web_log.conf) to all `web_log` charts (i.e. to all log files configured, individually):
+The magic of [**netdata**](https://my-netdata.io/) is that all metrics are collected per second, and all metrics can be used or correlated to provide real-time alarms. Out of the box, [**netdata**](https://my-netdata.io/) automatically attaches the following alarms to all `web_log` charts (i.e. to all log files configured, individually):
| alarm|description|minimum<br/>requests|warning|critical|
|:----|-----------|:------------------:|:-----:|:------:|
@@ -198,6 +214,6 @@ The magic of [**netdata**](https://my-netdata.io/) is that all metrics are colle
The column `minimum requests` states the minimum number of requests required for the alarm to be evaluated. We found that when the site is receiving requests above this rate, these alarms are pretty accurate (i.e. no false-positives).
-[**netdata**](https://my-netdata.io/) alarms are user configurable. Sample config files can be found under directory `health/health.d` of the [Netdata GitHub repository](https://github.com/netdata/netdata/). So, even [`web_log` alarms can be adapted to your needs](../../../health/health.d/web_log.conf).
+Netdata alarms are user-configurable. Sample config files can be found in the `health/health.d` directory of the [Netdata GitHub repository](https://github.com/netdata/netdata/).
[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fweb_log%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/web_log/web_log.chart.py b/collectors/python.d.plugin/web_log/web_log.chart.py
index c1e1dcfbb..04ecadec8 100644
--- a/collectors/python.d.plugin/web_log/web_log.chart.py
+++ b/collectors/python.d.plugin/web_log/web_log.chart.py
@@ -23,7 +23,6 @@ except ImportError:
from bases.collection import read_last_line
from bases.FrameworkServices.LogService import LogService
-
ORDER_APACHE_CACHE = [
'apache_cache',
]
@@ -821,8 +820,8 @@ class Web:
dim_id = match_dict['vhost'].replace('.', '_')
if dim_id not in self.data:
self.charts['vhost'].add_dimension([dim_id,
- match_dict['vhost'],
- 'incremental'])
+ match_dict['vhost'],
+ 'incremental'])
self.data[dim_id] = 0
self.data[dim_id] += 1
@@ -961,9 +960,9 @@ class Squid:
return False
self.storage['dynamic'] = {
'http_code': {
- 'chart': 'squid_detailed_response_codes',
- 'func_dim_id': None,
- 'func_dim': None
+ 'chart': 'squid_detailed_response_codes',
+ 'func_dim_id': None,
+ 'func_dim': None
},
'hier_code': {
'chart': 'squid_hier_code',
@@ -1105,7 +1104,7 @@ def get_hist(index, buckets, time):
:param time: time
:return: None
"""
- for i in range(len(index)-1, -1, -1):
+ for i in range(len(index) - 1, -1, -1):
if time <= index[i]:
buckets[i] += 1
else: