Diffstat (limited to 'collectors/python.d.plugin')
-rw-r--r--  collectors/python.d.plugin/.keep  0
-rw-r--r--  collectors/python.d.plugin/Makefile.am  3
-rw-r--r--  collectors/python.d.plugin/Makefile.in  2025
-rw-r--r--  collectors/python.d.plugin/README.md  55
-rw-r--r--  collectors/python.d.plugin/adaptec_raid/README.md  2
-rw-r--r--  collectors/python.d.plugin/adaptec_raid/adaptec_raid.conf  10
-rw-r--r--  collectors/python.d.plugin/apache/README.md  4
-rw-r--r--  collectors/python.d.plugin/apache/apache.chart.py  114
-rw-r--r--  collectors/python.d.plugin/apache/apache.conf  10
-rw-r--r--  collectors/python.d.plugin/beanstalk/README.md  2
-rw-r--r--  collectors/python.d.plugin/beanstalk/beanstalk.chart.py  17
-rw-r--r--  collectors/python.d.plugin/beanstalk/beanstalk.conf  10
-rw-r--r--  collectors/python.d.plugin/bind_rndc/README.md  2
-rw-r--r--  collectors/python.d.plugin/bind_rndc/bind_rndc.chart.py  30
-rw-r--r--  collectors/python.d.plugin/bind_rndc/bind_rndc.conf  10
-rw-r--r--  collectors/python.d.plugin/boinc/README.md  2
-rw-r--r--  collectors/python.d.plugin/boinc/boinc.chart.py  14
-rw-r--r--  collectors/python.d.plugin/boinc/boinc.conf  10
-rw-r--r--  collectors/python.d.plugin/ceph/README.md  2
-rw-r--r--  collectors/python.d.plugin/ceph/ceph.chart.py  19
-rw-r--r--  collectors/python.d.plugin/ceph/ceph.conf  10
-rw-r--r--  collectors/python.d.plugin/chrony/README.md  2
-rw-r--r--  collectors/python.d.plugin/chrony/chrony.chart.py  16
-rw-r--r--  collectors/python.d.plugin/chrony/chrony.conf  10
-rw-r--r--  collectors/python.d.plugin/couchdb/README.md  2
-rw-r--r--  collectors/python.d.plugin/couchdb/couchdb.chart.py  47
-rw-r--r--  collectors/python.d.plugin/couchdb/couchdb.conf  10
-rw-r--r--  collectors/python.d.plugin/cpufreq/README.md  7
-rw-r--r--  collectors/python.d.plugin/cpufreq/cpufreq.conf  8
-rw-r--r--  collectors/python.d.plugin/cpuidle/README.md  2
-rw-r--r--  collectors/python.d.plugin/cpuidle/cpuidle.conf  8
-rw-r--r--  collectors/python.d.plugin/dns_query_time/README.md  2
-rw-r--r--  collectors/python.d.plugin/dns_query_time/dns_query_time.chart.py  21
-rw-r--r--  collectors/python.d.plugin/dns_query_time/dns_query_time.conf  10
-rw-r--r--  collectors/python.d.plugin/dnsdist/README.md  2
-rw-r--r--  collectors/python.d.plugin/dnsdist/dnsdist.chart.py  4
-rw-r--r--  collectors/python.d.plugin/dnsdist/dnsdist.conf  10
-rw-r--r--  collectors/python.d.plugin/dockerd/README.md  4
-rw-r--r--  collectors/python.d.plugin/dockerd/dockerd.chart.py  26
-rw-r--r--  collectors/python.d.plugin/dockerd/dockerd.conf  10
-rw-r--r--  collectors/python.d.plugin/dovecot/README.md  6
-rw-r--r--  collectors/python.d.plugin/dovecot/dovecot.chart.py  25
-rw-r--r--  collectors/python.d.plugin/dovecot/dovecot.conf  14
-rw-r--r--  collectors/python.d.plugin/elasticsearch/README.md  2
-rw-r--r--  collectors/python.d.plugin/elasticsearch/elasticsearch.chart.py  83
-rw-r--r--  collectors/python.d.plugin/elasticsearch/elasticsearch.conf  10
-rw-r--r--  collectors/python.d.plugin/example/README.md  6
-rw-r--r--  collectors/python.d.plugin/example/example.chart.py  9
-rw-r--r--  collectors/python.d.plugin/example/example.conf  10
-rw-r--r--  collectors/python.d.plugin/exim/README.md  2
-rw-r--r--  collectors/python.d.plugin/exim/exim.chart.py  13
-rw-r--r--  collectors/python.d.plugin/exim/exim.conf  10
-rw-r--r--  collectors/python.d.plugin/fail2ban/README.md  2
-rw-r--r--  collectors/python.d.plugin/fail2ban/fail2ban.chart.py  24
-rw-r--r--  collectors/python.d.plugin/fail2ban/fail2ban.conf  10
-rw-r--r--  collectors/python.d.plugin/freeradius/README.md  2
-rw-r--r--  collectors/python.d.plugin/freeradius/freeradius.chart.py  108
-rw-r--r--  collectors/python.d.plugin/freeradius/freeradius.conf  10
-rw-r--r--  collectors/python.d.plugin/go_expvar/README.md  3
-rw-r--r--  collectors/python.d.plugin/go_expvar/go_expvar.chart.py  69
-rw-r--r--  collectors/python.d.plugin/go_expvar/go_expvar.conf  10
-rw-r--r--  collectors/python.d.plugin/haproxy/README.md  2
-rw-r--r--  collectors/python.d.plugin/haproxy/haproxy.chart.py  33
-rw-r--r--  collectors/python.d.plugin/haproxy/haproxy.conf  10
-rw-r--r--  collectors/python.d.plugin/hddtemp/README.md  2
-rw-r--r--  collectors/python.d.plugin/hddtemp/hddtemp.chart.py  9
-rw-r--r--  collectors/python.d.plugin/hddtemp/hddtemp.conf  10
-rw-r--r--  collectors/python.d.plugin/httpcheck/README.md  2
-rw-r--r--  collectors/python.d.plugin/httpcheck/httpcheck.chart.py  13
-rw-r--r--  collectors/python.d.plugin/httpcheck/httpcheck.conf  6
-rw-r--r--  collectors/python.d.plugin/icecast/README.md  2
-rw-r--r--  collectors/python.d.plugin/icecast/icecast.chart.py  8
-rw-r--r--  collectors/python.d.plugin/icecast/icecast.conf  10
-rw-r--r--  collectors/python.d.plugin/ipfs/README.md  2
-rw-r--r--  collectors/python.d.plugin/ipfs/ipfs.chart.py  36
-rw-r--r--  collectors/python.d.plugin/ipfs/ipfs.conf  10
-rw-r--r--  collectors/python.d.plugin/isc_dhcpd/README.md  2
-rw-r--r--  collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.chart.py  30
-rw-r--r--  collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.conf  10
-rw-r--r--  collectors/python.d.plugin/linux_power_supply/README.md  9
-rw-r--r--  collectors/python.d.plugin/linux_power_supply/linux_power_supply.conf  10
-rw-r--r--  collectors/python.d.plugin/litespeed/README.md  2
-rw-r--r--  collectors/python.d.plugin/litespeed/litespeed.chart.py  14
-rw-r--r--  collectors/python.d.plugin/litespeed/litespeed.conf  10
-rw-r--r--  collectors/python.d.plugin/logind/README.md  2
-rw-r--r--  collectors/python.d.plugin/logind/logind.chart.py  10
-rw-r--r--  collectors/python.d.plugin/logind/logind.conf  10
-rw-r--r--  collectors/python.d.plugin/mdstat/README.md  7
-rw-r--r--  collectors/python.d.plugin/mdstat/mdstat.conf  8
-rw-r--r--  collectors/python.d.plugin/megacli/README.md  2
-rw-r--r--  collectors/python.d.plugin/megacli/megacli.chart.py  4
-rw-r--r--  collectors/python.d.plugin/megacli/megacli.conf  10
-rw-r--r--  collectors/python.d.plugin/memcached/README.md  2
-rw-r--r--  collectors/python.d.plugin/memcached/memcached.chart.py  48
-rw-r--r--  collectors/python.d.plugin/memcached/memcached.conf  10
-rw-r--r--  collectors/python.d.plugin/mongodb/README.md  29
-rw-r--r--  collectors/python.d.plugin/mongodb/mongodb.chart.py  22
-rw-r--r--  collectors/python.d.plugin/mongodb/mongodb.conf  10
-rw-r--r--  collectors/python.d.plugin/monit/README.md  2
-rw-r--r--  collectors/python.d.plugin/monit/monit.chart.py  26
-rw-r--r--  collectors/python.d.plugin/monit/monit.conf  10
-rw-r--r--  collectors/python.d.plugin/mysql/README.md  4
-rw-r--r--  collectors/python.d.plugin/mysql/mysql.chart.py  121
-rw-r--r--  collectors/python.d.plugin/mysql/mysql.conf  11
-rw-r--r--  collectors/python.d.plugin/nginx/README.md  3
-rw-r--r--  collectors/python.d.plugin/nginx/nginx.chart.py  30
-rw-r--r--  collectors/python.d.plugin/nginx/nginx.conf  10
-rw-r--r--  collectors/python.d.plugin/nginx_plus/README.md  2
-rw-r--r--  collectors/python.d.plugin/nginx_plus/nginx_plus.chart.py  18
-rw-r--r--  collectors/python.d.plugin/nginx_plus/nginx_plus.conf  10
-rw-r--r--  collectors/python.d.plugin/nsd/README.md  2
-rw-r--r--  collectors/python.d.plugin/nsd/nsd.chart.py  30
-rw-r--r--  collectors/python.d.plugin/nsd/nsd.conf  10
-rw-r--r--  collectors/python.d.plugin/ntpd/README.md  2
-rw-r--r--  collectors/python.d.plugin/ntpd/ntpd.chart.py  28
-rw-r--r--  collectors/python.d.plugin/ntpd/ntpd.conf  10
-rw-r--r--  collectors/python.d.plugin/nvidia_smi/README.md  3
-rw-r--r--  collectors/python.d.plugin/nvidia_smi/nvidia_smi.chart.py  29
-rw-r--r--  collectors/python.d.plugin/nvidia_smi/nvidia_smi.conf  10
-rw-r--r--  collectors/python.d.plugin/openldap/README.md  2
-rw-r--r--  collectors/python.d.plugin/openldap/openldap.chart.py  6
-rw-r--r--  collectors/python.d.plugin/openldap/openldap.conf  10
-rw-r--r--  collectors/python.d.plugin/ovpn_status_log/README.md  2
-rw-r--r--  collectors/python.d.plugin/ovpn_status_log/ovpn_status_log.chart.py  24
-rw-r--r--  collectors/python.d.plugin/ovpn_status_log/ovpn_status_log.conf  10
-rw-r--r--  collectors/python.d.plugin/phpfpm/README.md  3
-rw-r--r--  collectors/python.d.plugin/phpfpm/phpfpm.chart.py  41
-rw-r--r--  collectors/python.d.plugin/phpfpm/phpfpm.conf  10
-rw-r--r--  collectors/python.d.plugin/portcheck/README.md  2
-rw-r--r--  collectors/python.d.plugin/portcheck/portcheck.chart.py  7
-rw-r--r--  collectors/python.d.plugin/portcheck/portcheck.conf  6
-rw-r--r--  collectors/python.d.plugin/postfix/README.md  2
-rw-r--r--  collectors/python.d.plugin/postfix/postfix.chart.py  15
-rw-r--r--  collectors/python.d.plugin/postfix/postfix.conf  10
-rw-r--r--  collectors/python.d.plugin/postgres/README.md  2
-rw-r--r--  collectors/python.d.plugin/postgres/postgres.chart.py  699
-rw-r--r--  collectors/python.d.plugin/postgres/postgres.conf  22
-rw-r--r--  collectors/python.d.plugin/powerdns/README.md  2
-rw-r--r--  collectors/python.d.plugin/powerdns/powerdns.chart.py  11
-rw-r--r--  collectors/python.d.plugin/powerdns/powerdns.conf  10
-rw-r--r--  collectors/python.d.plugin/proxysql/README.md  2
-rw-r--r--  collectors/python.d.plugin/proxysql/proxysql.chart.py  13
-rw-r--r--  collectors/python.d.plugin/proxysql/proxysql.conf  10
-rw-r--r--  collectors/python.d.plugin/puppet/README.md  7
-rw-r--r--  collectors/python.d.plugin/puppet/puppet.chart.py  28
-rw-r--r--  collectors/python.d.plugin/puppet/puppet.conf  12
-rw-r--r--  collectors/python.d.plugin/python.d.conf  6
-rw-r--r--  collectors/python.d.plugin/python.d.plugin  427
-rw-r--r-- [-rwxr-xr-x]  collectors/python.d.plugin/python.d.plugin.in  4
-rw-r--r--  collectors/python.d.plugin/python_modules/bases/FrameworkServices/MySQLService.py  30
-rw-r--r--  collectors/python.d.plugin/python_modules/bases/FrameworkServices/SimpleService.py  109
-rw-r--r--  collectors/python.d.plugin/python_modules/bases/FrameworkServices/SocketService.py  10
-rw-r--r--  collectors/python.d.plugin/python_modules/bases/FrameworkServices/UrlService.py  19
-rw-r--r--  collectors/python.d.plugin/python_modules/bases/charts.py  2
-rw-r--r--  collectors/python.d.plugin/python_modules/bases/loggers.py  2
-rw-r--r--  collectors/python.d.plugin/rabbitmq/README.md  2
-rw-r--r--  collectors/python.d.plugin/rabbitmq/rabbitmq.chart.py  122
-rw-r--r--  collectors/python.d.plugin/rabbitmq/rabbitmq.conf  10
-rw-r--r--  collectors/python.d.plugin/redis/README.md  2
-rw-r--r--  collectors/python.d.plugin/redis/redis.chart.py  13
-rw-r--r--  collectors/python.d.plugin/redis/redis.conf  10
-rw-r--r--  collectors/python.d.plugin/rethinkdbs/README.md  2
-rw-r--r--  collectors/python.d.plugin/rethinkdbs/rethinkdbs.chart.py  2
-rw-r--r--  collectors/python.d.plugin/rethinkdbs/rethinkdbs.conf  10
-rw-r--r--  collectors/python.d.plugin/retroshare/README.md  2
-rw-r--r--  collectors/python.d.plugin/retroshare/retroshare.chart.py  21
-rw-r--r--  collectors/python.d.plugin/retroshare/retroshare.conf  10
-rw-r--r--  collectors/python.d.plugin/samba/README.md  2
-rw-r--r--  collectors/python.d.plugin/samba/samba.chart.py  7
-rw-r--r--  collectors/python.d.plugin/samba/samba.conf  10
-rw-r--r--  collectors/python.d.plugin/sensors/README.md  2
-rw-r--r--  collectors/python.d.plugin/sensors/sensors.chart.py  4
-rw-r--r--  collectors/python.d.plugin/sensors/sensors.conf  8
-rw-r--r--  collectors/python.d.plugin/smartd_log/README.md  2
-rw-r--r--  collectors/python.d.plugin/smartd_log/smartd_log.chart.py  22
-rw-r--r--  collectors/python.d.plugin/smartd_log/smartd_log.conf  10
-rw-r--r--  collectors/python.d.plugin/spigotmc/README.md  2
-rw-r--r--  collectors/python.d.plugin/spigotmc/spigotmc.chart.py  5
-rw-r--r--  collectors/python.d.plugin/spigotmc/spigotmc.conf  10
-rw-r--r--  collectors/python.d.plugin/springboot/README.md  2
-rw-r--r--  collectors/python.d.plugin/springboot/springboot.chart.py  15
-rw-r--r--  collectors/python.d.plugin/springboot/springboot.conf  10
-rw-r--r--  collectors/python.d.plugin/squid/README.md  2
-rw-r--r--  collectors/python.d.plugin/squid/squid.chart.py  13
-rw-r--r--  collectors/python.d.plugin/squid/squid.conf  10
-rw-r--r--  collectors/python.d.plugin/tomcat/README.md  2
-rw-r--r--  collectors/python.d.plugin/tomcat/tomcat.chart.py  63
-rw-r--r--  collectors/python.d.plugin/tomcat/tomcat.conf  10
-rw-r--r--  collectors/python.d.plugin/tor/README.md  2
-rw-r--r--  collectors/python.d.plugin/tor/tor.chart.py  4
-rw-r--r--  collectors/python.d.plugin/tor/tor.conf  10
-rw-r--r--  collectors/python.d.plugin/traefik/README.md  3
-rw-r--r--  collectors/python.d.plugin/traefik/traefik.chart.py  29
-rw-r--r--  collectors/python.d.plugin/traefik/traefik.conf  10
-rw-r--r--  collectors/python.d.plugin/unbound/README.md  2
-rw-r--r--  collectors/python.d.plugin/unbound/unbound.chart.py  6
-rw-r--r--  collectors/python.d.plugin/unbound/unbound.conf  10
-rw-r--r--  collectors/python.d.plugin/uwsgi/README.md  2
-rw-r--r--  collectors/python.d.plugin/uwsgi/uwsgi.chart.py  22
-rw-r--r--  collectors/python.d.plugin/uwsgi/uwsgi.conf  10
-rw-r--r--  collectors/python.d.plugin/varnish/README.md  10
-rw-r--r--  collectors/python.d.plugin/varnish/varnish.chart.py  34
-rw-r--r--  collectors/python.d.plugin/varnish/varnish.conf  14
-rw-r--r--  collectors/python.d.plugin/w1sensor/README.md  2
-rw-r--r--  collectors/python.d.plugin/w1sensor/w1sensor.chart.py  4
-rw-r--r--  collectors/python.d.plugin/w1sensor/w1sensor.conf  10
-rw-r--r--  collectors/python.d.plugin/web_log/README.md  4
-rw-r--r--  collectors/python.d.plugin/web_log/web_log.chart.py  6
-rw-r--r--  collectors/python.d.plugin/web_log/web_log.conf  10
209 files changed, 2024 insertions, 3941 deletions
diff --git a/collectors/python.d.plugin/.keep b/collectors/python.d.plugin/.keep
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/collectors/python.d.plugin/.keep
diff --git a/collectors/python.d.plugin/Makefile.am b/collectors/python.d.plugin/Makefile.am
index 984050c42..3599d9c9f 100644
--- a/collectors/python.d.plugin/Makefile.am
+++ b/collectors/python.d.plugin/Makefile.am
@@ -29,12 +29,11 @@ dist_python_DATA = \
 
 userpythonconfigdir=$(configdir)/python.d
 dist_userpythonconfig_DATA = \
-	$(top_srcdir)/installer/.keep \
+	.keep \
 	$(NULL)
 
 pythonconfigdir=$(libconfigdir)/python.d
 dist_pythonconfig_DATA = \
-	$(top_srcdir)/installer/.keep \
 	$(NULL)
 
 include adaptec_raid/Makefile.inc
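Makefile.am keeps per-module build rules out of the top-level file: each module contributes a Makefile.inc fragment, pulled in by an include line like the one above, and automake inlines every fragment when it generates Makefile.in. A minimal sketch of what such a fragment might contain, modeled on the example module's files seen in this diff; the += form and this exact file list are illustrative assumptions, not contents taken from the commit:

    # SPDX-License-Identifier: GPL-3.0-or-later
    # sketch of a per-module fragment (file names follow the example module)
    # THIS IS NOT A COMPLETE Makefile; it is included by the parent
    # Makefile.am, so all paths are relative to the parent directory

    # install these files
    dist_python_DATA += example/example.chart.py
    dist_pythonconfig_DATA += example/example.conf

    # do not install these files, but include them in the distribution
    dist_noinst_DATA += example/README.md example/Makefile.inc

Automake inlines one such fragment per module, which is why the same header comments and aggregated dist_* variables appear, fully expanded, in the generated Makefile.in that this commit deletes below.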
diff --git a/collectors/python.d.plugin/Makefile.in b/collectors/python.d.plugin/Makefile.in
deleted file mode 100644
index 495606896..000000000
--- a/collectors/python.d.plugin/Makefile.in
+++ /dev/null
@@ -1,2025 +0,0 @@
-# Makefile.in generated by automake 1.14.1 from Makefile.am.
-# @configure_input@
-
-# Copyright (C) 1994-2013 Free Software Foundation, Inc.
-
-# This Makefile.in is free software; the Free Software Foundation
-# gives unlimited permission to copy and/or distribute it,
-# with or without modifications, as long as this notice is preserved.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
-# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
-# PARTICULAR PURPOSE.
-
-@SET_MAKE@
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-VPATH = @srcdir@
-am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
-am__make_running_with_option = \
- case $${target_option-} in \
- ?) ;; \
- *) echo "am__make_running_with_option: internal error: invalid" \
- "target option '$${target_option-}' specified" >&2; \
- exit 1;; \
- esac; \
- has_opt=no; \
- sane_makeflags=$$MAKEFLAGS; \
- if $(am__is_gnu_make); then \
- sane_makeflags=$$MFLAGS; \
- else \
- case $$MAKEFLAGS in \
- *\\[\ \ ]*) \
- bs=\\; \
- sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
- | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
- esac; \
- fi; \
- skip_next=no; \
- strip_trailopt () \
- { \
- flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
- }; \
- for flg in $$sane_makeflags; do \
- test $$skip_next = yes && { skip_next=no; continue; }; \
- case $$flg in \
- *=*|--*) continue;; \
- -*I) strip_trailopt 'I'; skip_next=yes;; \
- -*I?*) strip_trailopt 'I';; \
- -*O) strip_trailopt 'O'; skip_next=yes;; \
- -*O?*) strip_trailopt 'O';; \
- -*l) strip_trailopt 'l'; skip_next=yes;; \
- -*l?*) strip_trailopt 'l';; \
- -[dEDm]) skip_next=yes;; \
- -[JT]) skip_next=yes;; \
- esac; \
- case $$flg in \
- *$$target_option*) has_opt=yes; break;; \
- esac; \
- done; \
- test $$has_opt = yes
-am__make_dryrun = (target_option=n; $(am__make_running_with_option))
-am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
-pkgdatadir = $(datadir)/@PACKAGE@
-pkgincludedir = $(includedir)/@PACKAGE@
-pkglibdir = $(libdir)/@PACKAGE@
-pkglibexecdir = $(libexecdir)/@PACKAGE@
-am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
-install_sh_DATA = $(install_sh) -c -m 644
-install_sh_PROGRAM = $(install_sh) -c
-install_sh_SCRIPT = $(install_sh) -c
-INSTALL_HEADER = $(INSTALL_DATA)
-transform = $(program_transform_name)
-NORMAL_INSTALL = :
-PRE_INSTALL = :
-POST_INSTALL = :
-NORMAL_UNINSTALL = :
-PRE_UNINSTALL = :
-POST_UNINSTALL = :
-build_triplet = @build@
-host_triplet = @host@
-DIST_COMMON = $(top_srcdir)/build/subst.inc \
- $(srcdir)/adaptec_raid/Makefile.inc \
- $(srcdir)/apache/Makefile.inc $(srcdir)/beanstalk/Makefile.inc \
- $(srcdir)/bind_rndc/Makefile.inc $(srcdir)/boinc/Makefile.inc \
- $(srcdir)/ceph/Makefile.inc $(srcdir)/chrony/Makefile.inc \
- $(srcdir)/couchdb/Makefile.inc $(srcdir)/cpufreq/Makefile.inc \
- $(srcdir)/cpuidle/Makefile.inc $(srcdir)/dnsdist/Makefile.inc \
- $(srcdir)/dns_query_time/Makefile.inc \
- $(srcdir)/dockerd/Makefile.inc $(srcdir)/dovecot/Makefile.inc \
- $(srcdir)/elasticsearch/Makefile.inc \
- $(srcdir)/example/Makefile.inc $(srcdir)/exim/Makefile.inc \
- $(srcdir)/fail2ban/Makefile.inc \
- $(srcdir)/freeradius/Makefile.inc \
- $(srcdir)/go_expvar/Makefile.inc \
- $(srcdir)/haproxy/Makefile.inc $(srcdir)/hddtemp/Makefile.inc \
- $(srcdir)/httpcheck/Makefile.inc \
- $(srcdir)/icecast/Makefile.inc $(srcdir)/ipfs/Makefile.inc \
- $(srcdir)/isc_dhcpd/Makefile.inc \
- $(srcdir)/linux_power_supply/Makefile.inc \
- $(srcdir)/litespeed/Makefile.inc $(srcdir)/logind/Makefile.inc \
- $(srcdir)/mdstat/Makefile.inc $(srcdir)/megacli/Makefile.inc \
- $(srcdir)/memcached/Makefile.inc \
- $(srcdir)/mongodb/Makefile.inc $(srcdir)/monit/Makefile.inc \
- $(srcdir)/mysql/Makefile.inc $(srcdir)/nginx/Makefile.inc \
- $(srcdir)/nginx_plus/Makefile.inc \
- $(srcdir)/nvidia_smi/Makefile.inc $(srcdir)/nsd/Makefile.inc \
- $(srcdir)/ntpd/Makefile.inc \
- $(srcdir)/ovpn_status_log/Makefile.inc \
- $(srcdir)/openldap/Makefile.inc $(srcdir)/phpfpm/Makefile.inc \
- $(srcdir)/portcheck/Makefile.inc \
- $(srcdir)/postfix/Makefile.inc $(srcdir)/postgres/Makefile.inc \
- $(srcdir)/powerdns/Makefile.inc \
- $(srcdir)/proxysql/Makefile.inc $(srcdir)/puppet/Makefile.inc \
- $(srcdir)/rabbitmq/Makefile.inc $(srcdir)/redis/Makefile.inc \
- $(srcdir)/rethinkdbs/Makefile.inc \
- $(srcdir)/retroshare/Makefile.inc $(srcdir)/samba/Makefile.inc \
- $(srcdir)/sensors/Makefile.inc \
- $(srcdir)/smartd_log/Makefile.inc \
- $(srcdir)/spigotmc/Makefile.inc \
- $(srcdir)/springboot/Makefile.inc $(srcdir)/squid/Makefile.inc \
- $(srcdir)/tomcat/Makefile.inc $(srcdir)/tor/Makefile.inc \
- $(srcdir)/traefik/Makefile.inc $(srcdir)/unbound/Makefile.inc \
- $(srcdir)/uwsgi/Makefile.inc $(srcdir)/varnish/Makefile.inc \
- $(srcdir)/w1sensor/Makefile.inc $(srcdir)/web_log/Makefile.inc \
- $(srcdir)/Makefile.in $(srcdir)/Makefile.am \
- $(dist_plugins_SCRIPTS) $(dist_python_SCRIPTS) \
- $(dist_bases_DATA) $(dist_bases_framework_services_DATA) \
- $(dist_libconfig_DATA) $(dist_noinst_DATA) $(dist_python_DATA) \
- $(dist_python_urllib3_DATA) \
- $(dist_python_urllib3_backports_DATA) \
- $(dist_python_urllib3_contrib_DATA) \
- $(dist_python_urllib3_packages_DATA) \
- $(dist_python_urllib3_securetransport_DATA) \
- $(dist_python_urllib3_ssl_match_hostname_DATA) \
- $(dist_python_urllib3_util_DATA) $(dist_pythonconfig_DATA) \
- $(dist_pythonmodules_DATA) $(dist_pythonyaml2_DATA) \
- $(dist_pythonyaml3_DATA) $(dist_third_party_DATA) \
- $(dist_userpythonconfig_DATA)
-subdir = collectors/python.d.plugin
-ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
-am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
- $(top_srcdir)/build/m4/ax_c__generic.m4 \
- $(top_srcdir)/build/m4/ax_c_lto.m4 \
- $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
- $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
- $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
- $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
- $(top_srcdir)/build/m4/ax_pthread.m4 \
- $(top_srcdir)/build/m4/jemalloc.m4 \
- $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
-am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
- $(ACLOCAL_M4)
-mkinstalldirs = $(install_sh) -d
-CONFIG_HEADER = $(top_builddir)/config.h
-CONFIG_CLEAN_FILES =
-CONFIG_CLEAN_VPATH_FILES =
-am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
-am__vpath_adj = case $$p in \
- $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
- *) f=$$p;; \
- esac;
-am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
-am__install_max = 40
-am__nobase_strip_setup = \
- srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
-am__nobase_strip = \
- for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
-am__nobase_list = $(am__nobase_strip_setup); \
- for p in $$list; do echo "$$p $$p"; done | \
- sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
- $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
- if (++n[$$2] == $(am__install_max)) \
- { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
- END { for (dir in files) print dir, files[dir] }'
-am__base_list = \
- sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
- sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
-am__uninstall_files_from_dir = { \
- test -z "$$files" \
- || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \
- || { echo " ( cd '$$dir' && rm -f" $$files ")"; \
- $(am__cd) "$$dir" && rm -f $$files; }; \
- }
-am__installdirs = "$(DESTDIR)$(pluginsdir)" "$(DESTDIR)$(pythondir)" \
- "$(DESTDIR)$(basesdir)" \
- "$(DESTDIR)$(bases_framework_servicesdir)" \
- "$(DESTDIR)$(libconfigdir)" "$(DESTDIR)$(pythondir)" \
- "$(DESTDIR)$(python_urllib3dir)" \
- "$(DESTDIR)$(python_urllib3_backportsdir)" \
- "$(DESTDIR)$(python_urllib3_contribdir)" \
- "$(DESTDIR)$(python_urllib3_packagesdir)" \
- "$(DESTDIR)$(python_urllib3_securetransportdir)" \
- "$(DESTDIR)$(python_urllib3_ssl_match_hostnamedir)" \
- "$(DESTDIR)$(python_urllib3_utildir)" \
- "$(DESTDIR)$(pythonconfigdir)" "$(DESTDIR)$(pythonmodulesdir)" \
- "$(DESTDIR)$(pythonyaml2dir)" "$(DESTDIR)$(pythonyaml3dir)" \
- "$(DESTDIR)$(third_partydir)" \
- "$(DESTDIR)$(userpythonconfigdir)"
-SCRIPTS = $(dist_plugins_SCRIPTS) $(dist_python_SCRIPTS)
-AM_V_P = $(am__v_P_@AM_V@)
-am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
-am__v_P_0 = false
-am__v_P_1 = :
-AM_V_GEN = $(am__v_GEN_@AM_V@)
-am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
-am__v_GEN_0 = @echo " GEN " $@;
-am__v_GEN_1 =
-AM_V_at = $(am__v_at_@AM_V@)
-am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
-am__v_at_0 = @
-am__v_at_1 =
-SOURCES =
-DIST_SOURCES =
-am__can_run_installinfo = \
- case $$AM_UPDATE_INFO_DIR in \
- n|no|NO) false;; \
- *) (install-info --version) >/dev/null 2>&1;; \
- esac
-DATA = $(dist_bases_DATA) $(dist_bases_framework_services_DATA) \
- $(dist_libconfig_DATA) $(dist_noinst_DATA) $(dist_python_DATA) \
- $(dist_python_urllib3_DATA) \
- $(dist_python_urllib3_backports_DATA) \
- $(dist_python_urllib3_contrib_DATA) \
- $(dist_python_urllib3_packages_DATA) \
- $(dist_python_urllib3_securetransport_DATA) \
- $(dist_python_urllib3_ssl_match_hostname_DATA) \
- $(dist_python_urllib3_util_DATA) $(dist_pythonconfig_DATA) \
- $(dist_pythonmodules_DATA) $(dist_pythonyaml2_DATA) \
- $(dist_pythonyaml3_DATA) $(dist_third_party_DATA) \
- $(dist_userpythonconfig_DATA)
-am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
-DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
-ACLOCAL = @ACLOCAL@
-AMTAR = @AMTAR@
-AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
-AUTOCONF = @AUTOCONF@
-AUTOHEADER = @AUTOHEADER@
-AUTOMAKE = @AUTOMAKE@
-AWK = @AWK@
-CC = @CC@
-CCDEPMODE = @CCDEPMODE@
-CFLAGS = @CFLAGS@
-CPP = @CPP@
-CPPFLAGS = @CPPFLAGS@
-CYGPATH_W = @CYGPATH_W@
-DEFS = @DEFS@
-DEPDIR = @DEPDIR@
-ECHO_C = @ECHO_C@
-ECHO_N = @ECHO_N@
-ECHO_T = @ECHO_T@
-EGREP = @EGREP@
-EXEEXT = @EXEEXT@
-GREP = @GREP@
-INSTALL = @INSTALL@
-INSTALL_DATA = @INSTALL_DATA@
-INSTALL_PROGRAM = @INSTALL_PROGRAM@
-INSTALL_SCRIPT = @INSTALL_SCRIPT@
-INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
-IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
-IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
-LDFLAGS = @LDFLAGS@
-LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
-LIBCAP_LIBS = @LIBCAP_LIBS@
-LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
-LIBMNL_LIBS = @LIBMNL_LIBS@
-LIBOBJS = @LIBOBJS@
-LIBS = @LIBS@
-LTLIBOBJS = @LTLIBOBJS@
-MAINT = @MAINT@
-MAKEINFO = @MAKEINFO@
-MATH_CFLAGS = @MATH_CFLAGS@
-MATH_LIBS = @MATH_LIBS@
-MKDIR_P = @MKDIR_P@
-NFACCT_CFLAGS = @NFACCT_CFLAGS@
-NFACCT_LIBS = @NFACCT_LIBS@
-OBJEXT = @OBJEXT@
-OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
-OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
-OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
-OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
-OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@
-OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
-OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@
-OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
-OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@
-OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
-OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@
-OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
-PACKAGE = @PACKAGE@
-PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
-PACKAGE_NAME = @PACKAGE_NAME@
-PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@
-PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
-PACKAGE_STRING = @PACKAGE_STRING@
-PACKAGE_TARNAME = @PACKAGE_TARNAME@
-PACKAGE_URL = @PACKAGE_URL@
-PACKAGE_VERSION = @PACKAGE_VERSION@
-PATH_SEPARATOR = @PATH_SEPARATOR@
-PKG_CONFIG = @PKG_CONFIG@
-PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
-PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
-PTHREAD_CC = @PTHREAD_CC@
-PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
-PTHREAD_LIBS = @PTHREAD_LIBS@
-SET_MAKE = @SET_MAKE@
-SHELL = @SHELL@
-SSE_CANDIDATE = @SSE_CANDIDATE@
-STRIP = @STRIP@
-UUID_CFLAGS = @UUID_CFLAGS@
-UUID_LIBS = @UUID_LIBS@
-VERSION = @VERSION@
-ZLIB_CFLAGS = @ZLIB_CFLAGS@
-ZLIB_LIBS = @ZLIB_LIBS@
-abs_builddir = @abs_builddir@
-abs_srcdir = @abs_srcdir@
-abs_top_builddir = @abs_top_builddir@
-abs_top_srcdir = @abs_top_srcdir@
-ac_ct_CC = @ac_ct_CC@
-am__include = @am__include@
-am__leading_dot = @am__leading_dot@
-am__quote = @am__quote@
-am__tar = @am__tar@
-am__untar = @am__untar@
-ax_pthread_config = @ax_pthread_config@
-bindir = @bindir@
-build = @build@
-build_alias = @build_alias@
-build_cpu = @build_cpu@
-build_os = @build_os@
-build_target = @build_target@
-build_vendor = @build_vendor@
-builddir = @builddir@
-cachedir = @cachedir@
-chartsdir = @chartsdir@
-configdir = @configdir@
-datadir = @datadir@
-datarootdir = @datarootdir@
-docdir = @docdir@
-dvidir = @dvidir@
-exec_prefix = @exec_prefix@
-has_jemalloc = @has_jemalloc@
-has_tcmalloc = @has_tcmalloc@
-host = @host@
-host_alias = @host_alias@
-host_cpu = @host_cpu@
-host_os = @host_os@
-host_vendor = @host_vendor@
-htmldir = @htmldir@
-includedir = @includedir@
-infodir = @infodir@
-install_sh = @install_sh@
-libconfigdir = @libconfigdir@
-libdir = @libdir@
-libexecdir = @libexecdir@
-localedir = @localedir@
-localstatedir = @localstatedir@
-logdir = @logdir@
-mandir = @mandir@
-mkdir_p = @mkdir_p@
-nodedir = @nodedir@
-oldincludedir = @oldincludedir@
-pdfdir = @pdfdir@
-pluginsdir = @pluginsdir@
-prefix = @prefix@
-program_transform_name = @program_transform_name@
-psdir = @psdir@
-pythondir = @pythondir@
-registrydir = @registrydir@
-sbindir = @sbindir@
-sharedstatedir = @sharedstatedir@
-srcdir = @srcdir@
-sysconfdir = @sysconfdir@
-target_alias = @target_alias@
-top_build_prefix = @top_build_prefix@
-top_builddir = @top_builddir@
-top_srcdir = @top_srcdir@
-varlibdir = @varlibdir@
-webdir = @webdir@
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-CLEANFILES = \
- python.d.plugin \
- $(NULL)
-
-SUFFIXES = .in
-dist_libconfig_DATA = \
- python.d.conf \
- $(NULL)
-
-dist_plugins_SCRIPTS = \
- python.d.plugin \
- $(NULL)
-
-
-# do not install these files, but include them in the distribution
-dist_noinst_DATA = python.d.plugin.in README.md $(NULL) \
- adaptec_raid/README.md adaptec_raid/Makefile.inc \
- apache/README.md apache/Makefile.inc beanstalk/README.md \
- beanstalk/Makefile.inc bind_rndc/README.md \
- bind_rndc/Makefile.inc boinc/README.md boinc/Makefile.inc \
- ceph/README.md ceph/Makefile.inc chrony/README.md \
- chrony/Makefile.inc couchdb/README.md couchdb/Makefile.inc \
- cpufreq/README.md cpufreq/Makefile.inc cpuidle/README.md \
- cpuidle/Makefile.inc dnsdist/README.md dnsdist/Makefile.inc \
- dns_query_time/README.md dns_query_time/Makefile.inc \
- dockerd/README.md dockerd/Makefile.inc dovecot/README.md \
- dovecot/Makefile.inc elasticsearch/README.md \
- elasticsearch/Makefile.inc example/README.md \
- example/Makefile.inc exim/README.md exim/Makefile.inc \
- fail2ban/README.md fail2ban/Makefile.inc freeradius/README.md \
- freeradius/Makefile.inc go_expvar/README.md \
- go_expvar/Makefile.inc haproxy/README.md haproxy/Makefile.inc \
- hddtemp/README.md hddtemp/Makefile.inc httpcheck/README.md \
- httpcheck/Makefile.inc icecast/README.md icecast/Makefile.inc \
- ipfs/README.md ipfs/Makefile.inc isc_dhcpd/README.md \
- isc_dhcpd/Makefile.inc linux_power_supply/README.md \
- linux_power_supply/Makefile.inc litespeed/README.md \
- litespeed/Makefile.inc logind/README.md logind/Makefile.inc \
- mdstat/README.md mdstat/Makefile.inc megacli/README.md \
- megacli/Makefile.inc memcached/README.md \
- memcached/Makefile.inc mongodb/README.md mongodb/Makefile.inc \
- monit/README.md monit/Makefile.inc mysql/README.md \
- mysql/Makefile.inc nginx/README.md nginx/Makefile.inc \
- nginx_plus/README.md nginx_plus/Makefile.inc \
- nvidia_smi/README.md nvidia_smi/Makefile.inc nsd/README.md \
- nsd/Makefile.inc ntpd/README.md ntpd/Makefile.inc \
- ovpn_status_log/README.md ovpn_status_log/Makefile.inc \
- openldap/README.md openldap/Makefile.inc phpfpm/README.md \
- phpfpm/Makefile.inc portcheck/README.md portcheck/Makefile.inc \
- postfix/README.md postfix/Makefile.inc postgres/README.md \
- postgres/Makefile.inc powerdns/README.md powerdns/Makefile.inc \
- proxysql/README.md proxysql/Makefile.inc puppet/README.md \
- puppet/Makefile.inc rabbitmq/README.md rabbitmq/Makefile.inc \
- redis/README.md redis/Makefile.inc rethinkdbs/README.md \
- rethinkdbs/Makefile.inc retroshare/README.md \
- retroshare/Makefile.inc samba/README.md samba/Makefile.inc \
- sensors/README.md sensors/Makefile.inc smartd_log/README.md \
- smartd_log/Makefile.inc spigotmc/README.md \
- spigotmc/Makefile.inc springboot/README.md \
- springboot/Makefile.inc squid/README.md squid/Makefile.inc \
- tomcat/README.md tomcat/Makefile.inc tor/README.md \
- tor/Makefile.inc traefik/README.md traefik/Makefile.inc \
- unbound/README.md unbound/Makefile.inc uwsgi/README.md \
- uwsgi/Makefile.inc varnish/README.md varnish/Makefile.inc \
- w1sensor/README.md w1sensor/Makefile.inc web_log/README.md \
- web_log/Makefile.inc
-dist_python_SCRIPTS = \
- $(NULL)
-
-
-# install these files
-dist_python_DATA = $(NULL) adaptec_raid/adaptec_raid.chart.py \
- apache/apache.chart.py beanstalk/beanstalk.chart.py \
- bind_rndc/bind_rndc.chart.py boinc/boinc.chart.py \
- ceph/ceph.chart.py chrony/chrony.chart.py \
- couchdb/couchdb.chart.py cpufreq/cpufreq.chart.py \
- cpuidle/cpuidle.chart.py dnsdist/dnsdist.chart.py \
- dns_query_time/dns_query_time.chart.py \
- dockerd/dockerd.chart.py dovecot/dovecot.chart.py \
- elasticsearch/elasticsearch.chart.py example/example.chart.py \
- exim/exim.chart.py fail2ban/fail2ban.chart.py \
- freeradius/freeradius.chart.py go_expvar/go_expvar.chart.py \
- haproxy/haproxy.chart.py hddtemp/hddtemp.chart.py \
- httpcheck/httpcheck.chart.py icecast/icecast.chart.py \
- ipfs/ipfs.chart.py isc_dhcpd/isc_dhcpd.chart.py \
- linux_power_supply/linux_power_supply.chart.py \
- litespeed/litespeed.chart.py logind/logind.chart.py \
- mdstat/mdstat.chart.py megacli/megacli.chart.py \
- memcached/memcached.chart.py mongodb/mongodb.chart.py \
- monit/monit.chart.py mysql/mysql.chart.py nginx/nginx.chart.py \
- nginx_plus/nginx_plus.chart.py nvidia_smi/nvidia_smi.chart.py \
- nsd/nsd.chart.py ntpd/ntpd.chart.py \
- ovpn_status_log/ovpn_status_log.chart.py \
- openldap/openldap.chart.py phpfpm/phpfpm.chart.py \
- portcheck/portcheck.chart.py postfix/postfix.chart.py \
- postgres/postgres.chart.py powerdns/powerdns.chart.py \
- proxysql/proxysql.chart.py puppet/puppet.chart.py \
- rabbitmq/rabbitmq.chart.py redis/redis.chart.py \
- rethinkdbs/rethinkdbs.chart.py retroshare/retroshare.chart.py \
- samba/samba.chart.py sensors/sensors.chart.py \
- smartd_log/smartd_log.chart.py spigotmc/spigotmc.chart.py \
- springboot/springboot.chart.py squid/squid.chart.py \
- tomcat/tomcat.chart.py tor/tor.chart.py \
- traefik/traefik.chart.py unbound/unbound.chart.py \
- uwsgi/uwsgi.chart.py varnish/varnish.chart.py \
- w1sensor/w1sensor.chart.py web_log/web_log.chart.py
-userpythonconfigdir = $(configdir)/python.d
-dist_userpythonconfig_DATA = \
- $(top_srcdir)/installer/.keep \
- $(NULL)
-
-pythonconfigdir = $(libconfigdir)/python.d
-dist_pythonconfig_DATA = $(top_srcdir)/installer/.keep $(NULL) \
- adaptec_raid/adaptec_raid.conf apache/apache.conf \
- beanstalk/beanstalk.conf bind_rndc/bind_rndc.conf \
- boinc/boinc.conf ceph/ceph.conf chrony/chrony.conf \
- couchdb/couchdb.conf cpufreq/cpufreq.conf cpuidle/cpuidle.conf \
- dnsdist/dnsdist.conf dns_query_time/dns_query_time.conf \
- dockerd/dockerd.conf dovecot/dovecot.conf \
- elasticsearch/elasticsearch.conf example/example.conf \
- exim/exim.conf fail2ban/fail2ban.conf \
- freeradius/freeradius.conf go_expvar/go_expvar.conf \
- haproxy/haproxy.conf hddtemp/hddtemp.conf \
- httpcheck/httpcheck.conf icecast/icecast.conf ipfs/ipfs.conf \
- isc_dhcpd/isc_dhcpd.conf \
- linux_power_supply/linux_power_supply.conf \
- litespeed/litespeed.conf logind/logind.conf mdstat/mdstat.conf \
- megacli/megacli.conf memcached/memcached.conf \
- mongodb/mongodb.conf monit/monit.conf mysql/mysql.conf \
- nginx/nginx.conf nginx_plus/nginx_plus.conf \
- nvidia_smi/nvidia_smi.conf nsd/nsd.conf ntpd/ntpd.conf \
- ovpn_status_log/ovpn_status_log.conf openldap/openldap.conf \
- phpfpm/phpfpm.conf portcheck/portcheck.conf \
- postfix/postfix.conf postgres/postgres.conf \
- powerdns/powerdns.conf proxysql/proxysql.conf \
- puppet/puppet.conf rabbitmq/rabbitmq.conf redis/redis.conf \
- rethinkdbs/rethinkdbs.conf retroshare/retroshare.conf \
- samba/samba.conf sensors/sensors.conf \
- smartd_log/smartd_log.conf spigotmc/spigotmc.conf \
- springboot/springboot.conf squid/squid.conf tomcat/tomcat.conf \
- tor/tor.conf traefik/traefik.conf unbound/unbound.conf \
- uwsgi/uwsgi.conf varnish/varnish.conf w1sensor/w1sensor.conf \
- web_log/web_log.conf
-pythonmodulesdir = $(pythondir)/python_modules
-dist_pythonmodules_DATA = \
- python_modules/__init__.py \
- $(NULL)
-
-basesdir = $(pythonmodulesdir)/bases
-dist_bases_DATA = \
- python_modules/bases/__init__.py \
- python_modules/bases/charts.py \
- python_modules/bases/collection.py \
- python_modules/bases/loaders.py \
- python_modules/bases/loggers.py \
- $(NULL)
-
-bases_framework_servicesdir = $(basesdir)/FrameworkServices
-dist_bases_framework_services_DATA = \
- python_modules/bases/FrameworkServices/__init__.py \
- python_modules/bases/FrameworkServices/ExecutableService.py \
- python_modules/bases/FrameworkServices/LogService.py \
- python_modules/bases/FrameworkServices/MySQLService.py \
- python_modules/bases/FrameworkServices/SimpleService.py \
- python_modules/bases/FrameworkServices/SocketService.py \
- python_modules/bases/FrameworkServices/UrlService.py \
- $(NULL)
-
-third_partydir = $(pythonmodulesdir)/third_party
-dist_third_party_DATA = \
- python_modules/third_party/__init__.py \
- python_modules/third_party/ordereddict.py \
- python_modules/third_party/lm_sensors.py \
- python_modules/third_party/mcrcon.py \
- python_modules/third_party/boinc_client.py \
- python_modules/third_party/monotonic.py \
- $(NULL)
-
-pythonyaml2dir = $(pythonmodulesdir)/pyyaml2
-dist_pythonyaml2_DATA = \
- python_modules/pyyaml2/__init__.py \
- python_modules/pyyaml2/composer.py \
- python_modules/pyyaml2/constructor.py \
- python_modules/pyyaml2/cyaml.py \
- python_modules/pyyaml2/dumper.py \
- python_modules/pyyaml2/emitter.py \
- python_modules/pyyaml2/error.py \
- python_modules/pyyaml2/events.py \
- python_modules/pyyaml2/loader.py \
- python_modules/pyyaml2/nodes.py \
- python_modules/pyyaml2/parser.py \
- python_modules/pyyaml2/reader.py \
- python_modules/pyyaml2/representer.py \
- python_modules/pyyaml2/resolver.py \
- python_modules/pyyaml2/scanner.py \
- python_modules/pyyaml2/serializer.py \
- python_modules/pyyaml2/tokens.py \
- $(NULL)
-
-pythonyaml3dir = $(pythonmodulesdir)/pyyaml3
-dist_pythonyaml3_DATA = \
- python_modules/pyyaml3/__init__.py \
- python_modules/pyyaml3/composer.py \
- python_modules/pyyaml3/constructor.py \
- python_modules/pyyaml3/cyaml.py \
- python_modules/pyyaml3/dumper.py \
- python_modules/pyyaml3/emitter.py \
- python_modules/pyyaml3/error.py \
- python_modules/pyyaml3/events.py \
- python_modules/pyyaml3/loader.py \
- python_modules/pyyaml3/nodes.py \
- python_modules/pyyaml3/parser.py \
- python_modules/pyyaml3/reader.py \
- python_modules/pyyaml3/representer.py \
- python_modules/pyyaml3/resolver.py \
- python_modules/pyyaml3/scanner.py \
- python_modules/pyyaml3/serializer.py \
- python_modules/pyyaml3/tokens.py \
- $(NULL)
-
-python_urllib3dir = $(pythonmodulesdir)/urllib3
-dist_python_urllib3_DATA = \
- python_modules/urllib3/__init__.py \
- python_modules/urllib3/_collections.py \
- python_modules/urllib3/connection.py \
- python_modules/urllib3/connectionpool.py \
- python_modules/urllib3/exceptions.py \
- python_modules/urllib3/fields.py \
- python_modules/urllib3/filepost.py \
- python_modules/urllib3/response.py \
- python_modules/urllib3/poolmanager.py \
- python_modules/urllib3/request.py \
- $(NULL)
-
-python_urllib3_utildir = $(python_urllib3dir)/util
-dist_python_urllib3_util_DATA = \
- python_modules/urllib3/util/__init__.py \
- python_modules/urllib3/util/connection.py \
- python_modules/urllib3/util/request.py \
- python_modules/urllib3/util/response.py \
- python_modules/urllib3/util/retry.py \
- python_modules/urllib3/util/selectors.py \
- python_modules/urllib3/util/ssl_.py \
- python_modules/urllib3/util/timeout.py \
- python_modules/urllib3/util/url.py \
- python_modules/urllib3/util/wait.py \
- $(NULL)
-
-python_urllib3_packagesdir = $(python_urllib3dir)/packages
-dist_python_urllib3_packages_DATA = \
- python_modules/urllib3/packages/__init__.py \
- python_modules/urllib3/packages/ordered_dict.py \
- python_modules/urllib3/packages/six.py \
- $(NULL)
-
-python_urllib3_backportsdir = $(python_urllib3_packagesdir)/backports
-dist_python_urllib3_backports_DATA = \
- python_modules/urllib3/packages/backports/__init__.py \
- python_modules/urllib3/packages/backports/makefile.py \
- $(NULL)
-
-python_urllib3_ssl_match_hostnamedir = $(python_urllib3_packagesdir)/ssl_match_hostname
-dist_python_urllib3_ssl_match_hostname_DATA = \
- python_modules/urllib3/packages/ssl_match_hostname/__init__.py \
- python_modules/urllib3/packages/ssl_match_hostname/_implementation.py \
- $(NULL)
-
-python_urllib3_contribdir = $(python_urllib3dir)/contrib
-dist_python_urllib3_contrib_DATA = \
- python_modules/urllib3/contrib/__init__.py \
- python_modules/urllib3/contrib/appengine.py \
- python_modules/urllib3/contrib/ntlmpool.py \
- python_modules/urllib3/contrib/pyopenssl.py \
- python_modules/urllib3/contrib/securetransport.py \
- python_modules/urllib3/contrib/socks.py \
- $(NULL)
-
-python_urllib3_securetransportdir = $(python_urllib3_contribdir)/_securetransport
-dist_python_urllib3_securetransport_DATA = \
- python_modules/urllib3/contrib/_securetransport/__init__.py \
- python_modules/urllib3/contrib/_securetransport/bindings.py \
- python_modules/urllib3/contrib/_securetransport/low_level.py \
- $(NULL)
-
-all: all-am
-
-.SUFFIXES:
-.SUFFIXES: .in
-$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(top_srcdir)/build/subst.inc $(srcdir)/adaptec_raid/Makefile.inc $(srcdir)/apache/Makefile.inc $(srcdir)/beanstalk/Makefile.inc $(srcdir)/bind_rndc/Makefile.inc $(srcdir)/boinc/Makefile.inc $(srcdir)/ceph/Makefile.inc $(srcdir)/chrony/Makefile.inc $(srcdir)/couchdb/Makefile.inc $(srcdir)/cpufreq/Makefile.inc $(srcdir)/cpuidle/Makefile.inc $(srcdir)/dnsdist/Makefile.inc $(srcdir)/dns_query_time/Makefile.inc $(srcdir)/dockerd/Makefile.inc $(srcdir)/dovecot/Makefile.inc $(srcdir)/elasticsearch/Makefile.inc $(srcdir)/example/Makefile.inc $(srcdir)/exim/Makefile.inc $(srcdir)/fail2ban/Makefile.inc $(srcdir)/freeradius/Makefile.inc $(srcdir)/go_expvar/Makefile.inc $(srcdir)/haproxy/Makefile.inc $(srcdir)/hddtemp/Makefile.inc $(srcdir)/httpcheck/Makefile.inc $(srcdir)/icecast/Makefile.inc $(srcdir)/ipfs/Makefile.inc $(srcdir)/isc_dhcpd/Makefile.inc $(srcdir)/linux_power_supply/Makefile.inc $(srcdir)/litespeed/Makefile.inc $(srcdir)/logind/Makefile.inc $(srcdir)/mdstat/Makefile.inc $(srcdir)/megacli/Makefile.inc $(srcdir)/memcached/Makefile.inc $(srcdir)/mongodb/Makefile.inc $(srcdir)/monit/Makefile.inc $(srcdir)/mysql/Makefile.inc $(srcdir)/nginx/Makefile.inc $(srcdir)/nginx_plus/Makefile.inc $(srcdir)/nvidia_smi/Makefile.inc $(srcdir)/nsd/Makefile.inc $(srcdir)/ntpd/Makefile.inc $(srcdir)/ovpn_status_log/Makefile.inc $(srcdir)/openldap/Makefile.inc $(srcdir)/phpfpm/Makefile.inc $(srcdir)/portcheck/Makefile.inc $(srcdir)/postfix/Makefile.inc $(srcdir)/postgres/Makefile.inc $(srcdir)/powerdns/Makefile.inc $(srcdir)/proxysql/Makefile.inc $(srcdir)/puppet/Makefile.inc $(srcdir)/rabbitmq/Makefile.inc $(srcdir)/redis/Makefile.inc $(srcdir)/rethinkdbs/Makefile.inc $(srcdir)/retroshare/Makefile.inc $(srcdir)/samba/Makefile.inc $(srcdir)/sensors/Makefile.inc $(srcdir)/smartd_log/Makefile.inc $(srcdir)/spigotmc/Makefile.inc $(srcdir)/springboot/Makefile.inc $(srcdir)/squid/Makefile.inc $(srcdir)/tomcat/Makefile.inc $(srcdir)/tor/Makefile.inc $(srcdir)/traefik/Makefile.inc $(srcdir)/unbound/Makefile.inc $(srcdir)/uwsgi/Makefile.inc $(srcdir)/varnish/Makefile.inc $(srcdir)/w1sensor/Makefile.inc $(srcdir)/web_log/Makefile.inc $(am__configure_deps)
- @for dep in $?; do \
- case '$(am__configure_deps)' in \
- *$$dep*) \
- ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
- && { if test -f $@; then exit 0; else break; fi; }; \
- exit 1;; \
- esac; \
- done; \
- echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/python.d.plugin/Makefile'; \
- $(am__cd) $(top_srcdir) && \
- $(AUTOMAKE) --gnu collectors/python.d.plugin/Makefile
-.PRECIOUS: Makefile
-Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
- @case '$?' in \
- *config.status*) \
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
- *) \
- echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
- cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
- esac;
-$(top_srcdir)/build/subst.inc $(srcdir)/adaptec_raid/Makefile.inc $(srcdir)/apache/Makefile.inc $(srcdir)/beanstalk/Makefile.inc $(srcdir)/bind_rndc/Makefile.inc $(srcdir)/boinc/Makefile.inc $(srcdir)/ceph/Makefile.inc $(srcdir)/chrony/Makefile.inc $(srcdir)/couchdb/Makefile.inc $(srcdir)/cpufreq/Makefile.inc $(srcdir)/cpuidle/Makefile.inc $(srcdir)/dnsdist/Makefile.inc $(srcdir)/dns_query_time/Makefile.inc $(srcdir)/dockerd/Makefile.inc $(srcdir)/dovecot/Makefile.inc $(srcdir)/elasticsearch/Makefile.inc $(srcdir)/example/Makefile.inc $(srcdir)/exim/Makefile.inc $(srcdir)/fail2ban/Makefile.inc $(srcdir)/freeradius/Makefile.inc $(srcdir)/go_expvar/Makefile.inc $(srcdir)/haproxy/Makefile.inc $(srcdir)/hddtemp/Makefile.inc $(srcdir)/httpcheck/Makefile.inc $(srcdir)/icecast/Makefile.inc $(srcdir)/ipfs/Makefile.inc $(srcdir)/isc_dhcpd/Makefile.inc $(srcdir)/linux_power_supply/Makefile.inc $(srcdir)/litespeed/Makefile.inc $(srcdir)/logind/Makefile.inc $(srcdir)/mdstat/Makefile.inc $(srcdir)/megacli/Makefile.inc $(srcdir)/memcached/Makefile.inc $(srcdir)/mongodb/Makefile.inc $(srcdir)/monit/Makefile.inc $(srcdir)/mysql/Makefile.inc $(srcdir)/nginx/Makefile.inc $(srcdir)/nginx_plus/Makefile.inc $(srcdir)/nvidia_smi/Makefile.inc $(srcdir)/nsd/Makefile.inc $(srcdir)/ntpd/Makefile.inc $(srcdir)/ovpn_status_log/Makefile.inc $(srcdir)/openldap/Makefile.inc $(srcdir)/phpfpm/Makefile.inc $(srcdir)/portcheck/Makefile.inc $(srcdir)/postfix/Makefile.inc $(srcdir)/postgres/Makefile.inc $(srcdir)/powerdns/Makefile.inc $(srcdir)/proxysql/Makefile.inc $(srcdir)/puppet/Makefile.inc $(srcdir)/rabbitmq/Makefile.inc $(srcdir)/redis/Makefile.inc $(srcdir)/rethinkdbs/Makefile.inc $(srcdir)/retroshare/Makefile.inc $(srcdir)/samba/Makefile.inc $(srcdir)/sensors/Makefile.inc $(srcdir)/smartd_log/Makefile.inc $(srcdir)/spigotmc/Makefile.inc $(srcdir)/springboot/Makefile.inc $(srcdir)/squid/Makefile.inc $(srcdir)/tomcat/Makefile.inc $(srcdir)/tor/Makefile.inc $(srcdir)/traefik/Makefile.inc $(srcdir)/unbound/Makefile.inc $(srcdir)/uwsgi/Makefile.inc $(srcdir)/varnish/Makefile.inc $(srcdir)/w1sensor/Makefile.inc $(srcdir)/web_log/Makefile.inc:
-
-$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-
-$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(am__aclocal_m4_deps):
-install-dist_pluginsSCRIPTS: $(dist_plugins_SCRIPTS)
- @$(NORMAL_INSTALL)
- @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(pluginsdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(pluginsdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \
- done | \
- sed -e 'p;s,.*/,,;n' \
- -e 'h;s|.*|.|' \
- -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \
- $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \
- { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \
- if ($$2 == $$4) { files[d] = files[d] " " $$1; \
- if (++n[d] == $(am__install_max)) { \
- print "f", d, files[d]; n[d] = 0; files[d] = "" } } \
- else { print "f", d "/" $$4, $$1 } } \
- END { for (d in files) print "f", d, files[d] }' | \
- while read type dir files; do \
- if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \
- test -z "$$files" || { \
- echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pluginsdir)$$dir'"; \
- $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pluginsdir)$$dir" || exit $$?; \
- } \
- ; done
-
-uninstall-dist_pluginsSCRIPTS:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || exit 0; \
- files=`for p in $$list; do echo "$$p"; done | \
- sed -e 's,.*/,,;$(transform)'`; \
- dir='$(DESTDIR)$(pluginsdir)'; $(am__uninstall_files_from_dir)
-install-dist_pythonSCRIPTS: $(dist_python_SCRIPTS)
- @$(NORMAL_INSTALL)
- @list='$(dist_python_SCRIPTS)'; test -n "$(pythondir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(pythondir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(pythondir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \
- done | \
- sed -e 'p;s,.*/,,;n' \
- -e 'h;s|.*|.|' \
- -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \
- $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \
- { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \
- if ($$2 == $$4) { files[d] = files[d] " " $$1; \
- if (++n[d] == $(am__install_max)) { \
- print "f", d, files[d]; n[d] = 0; files[d] = "" } } \
- else { print "f", d "/" $$4, $$1 } } \
- END { for (d in files) print "f", d, files[d] }' | \
- while read type dir files; do \
- if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \
- test -z "$$files" || { \
- echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pythondir)$$dir'"; \
- $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pythondir)$$dir" || exit $$?; \
- } \
- ; done
-
-uninstall-dist_pythonSCRIPTS:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_python_SCRIPTS)'; test -n "$(pythondir)" || exit 0; \
- files=`for p in $$list; do echo "$$p"; done | \
- sed -e 's,.*/,,;$(transform)'`; \
- dir='$(DESTDIR)$(pythondir)'; $(am__uninstall_files_from_dir)
-install-dist_basesDATA: $(dist_bases_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_bases_DATA)'; test -n "$(basesdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(basesdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(basesdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(basesdir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(basesdir)" || exit $$?; \
- done
-
-uninstall-dist_basesDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_bases_DATA)'; test -n "$(basesdir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(basesdir)'; $(am__uninstall_files_from_dir)
-install-dist_bases_framework_servicesDATA: $(dist_bases_framework_services_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_bases_framework_services_DATA)'; test -n "$(bases_framework_servicesdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(bases_framework_servicesdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(bases_framework_servicesdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(bases_framework_servicesdir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(bases_framework_servicesdir)" || exit $$?; \
- done
-
-uninstall-dist_bases_framework_servicesDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_bases_framework_services_DATA)'; test -n "$(bases_framework_servicesdir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(bases_framework_servicesdir)'; $(am__uninstall_files_from_dir)
-install-dist_libconfigDATA: $(dist_libconfig_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_libconfig_DATA)'; test -n "$(libconfigdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(libconfigdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(libconfigdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(libconfigdir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(libconfigdir)" || exit $$?; \
- done
-
-uninstall-dist_libconfigDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_libconfig_DATA)'; test -n "$(libconfigdir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(libconfigdir)'; $(am__uninstall_files_from_dir)
-install-dist_pythonDATA: $(dist_python_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_python_DATA)'; test -n "$(pythondir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(pythondir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(pythondir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pythondir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(pythondir)" || exit $$?; \
- done
-
-uninstall-dist_pythonDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_python_DATA)'; test -n "$(pythondir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(pythondir)'; $(am__uninstall_files_from_dir)
-install-dist_python_urllib3DATA: $(dist_python_urllib3_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_python_urllib3_DATA)'; test -n "$(python_urllib3dir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(python_urllib3dir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(python_urllib3dir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(python_urllib3dir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(python_urllib3dir)" || exit $$?; \
- done
-
-uninstall-dist_python_urllib3DATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_python_urllib3_DATA)'; test -n "$(python_urllib3dir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(python_urllib3dir)'; $(am__uninstall_files_from_dir)
-install-dist_python_urllib3_backportsDATA: $(dist_python_urllib3_backports_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_python_urllib3_backports_DATA)'; test -n "$(python_urllib3_backportsdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(python_urllib3_backportsdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(python_urllib3_backportsdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(python_urllib3_backportsdir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(python_urllib3_backportsdir)" || exit $$?; \
- done
-
-uninstall-dist_python_urllib3_backportsDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_python_urllib3_backports_DATA)'; test -n "$(python_urllib3_backportsdir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(python_urllib3_backportsdir)'; $(am__uninstall_files_from_dir)
-install-dist_python_urllib3_contribDATA: $(dist_python_urllib3_contrib_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_python_urllib3_contrib_DATA)'; test -n "$(python_urllib3_contribdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(python_urllib3_contribdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(python_urllib3_contribdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(python_urllib3_contribdir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(python_urllib3_contribdir)" || exit $$?; \
- done
-
-uninstall-dist_python_urllib3_contribDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_python_urllib3_contrib_DATA)'; test -n "$(python_urllib3_contribdir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(python_urllib3_contribdir)'; $(am__uninstall_files_from_dir)
-install-dist_python_urllib3_packagesDATA: $(dist_python_urllib3_packages_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_python_urllib3_packages_DATA)'; test -n "$(python_urllib3_packagesdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(python_urllib3_packagesdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(python_urllib3_packagesdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(python_urllib3_packagesdir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(python_urllib3_packagesdir)" || exit $$?; \
- done
-
-uninstall-dist_python_urllib3_packagesDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_python_urllib3_packages_DATA)'; test -n "$(python_urllib3_packagesdir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(python_urllib3_packagesdir)'; $(am__uninstall_files_from_dir)
-install-dist_python_urllib3_securetransportDATA: $(dist_python_urllib3_securetransport_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_python_urllib3_securetransport_DATA)'; test -n "$(python_urllib3_securetransportdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(python_urllib3_securetransportdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(python_urllib3_securetransportdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(python_urllib3_securetransportdir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(python_urllib3_securetransportdir)" || exit $$?; \
- done
-
-uninstall-dist_python_urllib3_securetransportDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_python_urllib3_securetransport_DATA)'; test -n "$(python_urllib3_securetransportdir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(python_urllib3_securetransportdir)'; $(am__uninstall_files_from_dir)
-install-dist_python_urllib3_ssl_match_hostnameDATA: $(dist_python_urllib3_ssl_match_hostname_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_python_urllib3_ssl_match_hostname_DATA)'; test -n "$(python_urllib3_ssl_match_hostnamedir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(python_urllib3_ssl_match_hostnamedir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(python_urllib3_ssl_match_hostnamedir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(python_urllib3_ssl_match_hostnamedir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(python_urllib3_ssl_match_hostnamedir)" || exit $$?; \
- done
-
-uninstall-dist_python_urllib3_ssl_match_hostnameDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_python_urllib3_ssl_match_hostname_DATA)'; test -n "$(python_urllib3_ssl_match_hostnamedir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(python_urllib3_ssl_match_hostnamedir)'; $(am__uninstall_files_from_dir)
-install-dist_python_urllib3_utilDATA: $(dist_python_urllib3_util_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_python_urllib3_util_DATA)'; test -n "$(python_urllib3_utildir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(python_urllib3_utildir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(python_urllib3_utildir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(python_urllib3_utildir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(python_urllib3_utildir)" || exit $$?; \
- done
-
-uninstall-dist_python_urllib3_utilDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_python_urllib3_util_DATA)'; test -n "$(python_urllib3_utildir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(python_urllib3_utildir)'; $(am__uninstall_files_from_dir)
-install-dist_pythonconfigDATA: $(dist_pythonconfig_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_pythonconfig_DATA)'; test -n "$(pythonconfigdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(pythonconfigdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(pythonconfigdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pythonconfigdir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(pythonconfigdir)" || exit $$?; \
- done
-
-uninstall-dist_pythonconfigDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_pythonconfig_DATA)'; test -n "$(pythonconfigdir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(pythonconfigdir)'; $(am__uninstall_files_from_dir)
-install-dist_pythonmodulesDATA: $(dist_pythonmodules_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_pythonmodules_DATA)'; test -n "$(pythonmodulesdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(pythonmodulesdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(pythonmodulesdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pythonmodulesdir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(pythonmodulesdir)" || exit $$?; \
- done
-
-uninstall-dist_pythonmodulesDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_pythonmodules_DATA)'; test -n "$(pythonmodulesdir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(pythonmodulesdir)'; $(am__uninstall_files_from_dir)
-install-dist_pythonyaml2DATA: $(dist_pythonyaml2_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_pythonyaml2_DATA)'; test -n "$(pythonyaml2dir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(pythonyaml2dir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(pythonyaml2dir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pythonyaml2dir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(pythonyaml2dir)" || exit $$?; \
- done
-
-uninstall-dist_pythonyaml2DATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_pythonyaml2_DATA)'; test -n "$(pythonyaml2dir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(pythonyaml2dir)'; $(am__uninstall_files_from_dir)
-install-dist_pythonyaml3DATA: $(dist_pythonyaml3_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_pythonyaml3_DATA)'; test -n "$(pythonyaml3dir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(pythonyaml3dir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(pythonyaml3dir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pythonyaml3dir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(pythonyaml3dir)" || exit $$?; \
- done
-
-uninstall-dist_pythonyaml3DATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_pythonyaml3_DATA)'; test -n "$(pythonyaml3dir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(pythonyaml3dir)'; $(am__uninstall_files_from_dir)
-install-dist_third_partyDATA: $(dist_third_party_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_third_party_DATA)'; test -n "$(third_partydir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(third_partydir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(third_partydir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(third_partydir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(third_partydir)" || exit $$?; \
- done
-
-uninstall-dist_third_partyDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_third_party_DATA)'; test -n "$(third_partydir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(third_partydir)'; $(am__uninstall_files_from_dir)
-install-dist_userpythonconfigDATA: $(dist_userpythonconfig_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_userpythonconfig_DATA)'; test -n "$(userpythonconfigdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(userpythonconfigdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(userpythonconfigdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(userpythonconfigdir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(userpythonconfigdir)" || exit $$?; \
- done
-
-uninstall-dist_userpythonconfigDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_userpythonconfig_DATA)'; test -n "$(userpythonconfigdir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(userpythonconfigdir)'; $(am__uninstall_files_from_dir)
-tags TAGS:
-
-ctags CTAGS:
-
-cscope cscopelist:
-
-
-distdir: $(DISTFILES)
- @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- list='$(DISTFILES)'; \
- dist_files=`for file in $$list; do echo $$file; done | \
- sed -e "s|^$$srcdirstrip/||;t" \
- -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
- case $$dist_files in \
- */*) $(MKDIR_P) `echo "$$dist_files" | \
- sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
- sort -u` ;; \
- esac; \
- for file in $$dist_files; do \
- if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
- if test -d $$d/$$file; then \
- dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
- if test -d "$(distdir)/$$file"; then \
- find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
- fi; \
- if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
- cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
- find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
- fi; \
- cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
- else \
- test -f "$(distdir)/$$file" \
- || cp -p $$d/$$file "$(distdir)/$$file" \
- || exit 1; \
- fi; \
- done
-check-am: all-am
-check: check-am
-all-am: Makefile $(SCRIPTS) $(DATA)
-installdirs:
- for dir in "$(DESTDIR)$(pluginsdir)" "$(DESTDIR)$(pythondir)" "$(DESTDIR)$(basesdir)" "$(DESTDIR)$(bases_framework_servicesdir)" "$(DESTDIR)$(libconfigdir)" "$(DESTDIR)$(pythondir)" "$(DESTDIR)$(python_urllib3dir)" "$(DESTDIR)$(python_urllib3_backportsdir)" "$(DESTDIR)$(python_urllib3_contribdir)" "$(DESTDIR)$(python_urllib3_packagesdir)" "$(DESTDIR)$(python_urllib3_securetransportdir)" "$(DESTDIR)$(python_urllib3_ssl_match_hostnamedir)" "$(DESTDIR)$(python_urllib3_utildir)" "$(DESTDIR)$(pythonconfigdir)" "$(DESTDIR)$(pythonmodulesdir)" "$(DESTDIR)$(pythonyaml2dir)" "$(DESTDIR)$(pythonyaml3dir)" "$(DESTDIR)$(third_partydir)" "$(DESTDIR)$(userpythonconfigdir)"; do \
- test -z "$$dir" || $(MKDIR_P) "$$dir"; \
- done
-install: install-am
-install-exec: install-exec-am
-install-data: install-data-am
-uninstall: uninstall-am
-
-install-am: all-am
- @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
-
-installcheck: installcheck-am
-install-strip:
- if test -z '$(STRIP)'; then \
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- install; \
- else \
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
- fi
-mostlyclean-generic:
-
-clean-generic:
- -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES)
-
-distclean-generic:
- -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
- -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
-
-maintainer-clean-generic:
- @echo "This command is intended for maintainers to use"
- @echo "it deletes files that may require special tools to rebuild."
- -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
-clean: clean-am
-
-clean-am: clean-generic mostlyclean-am
-
-distclean: distclean-am
- -rm -f Makefile
-distclean-am: clean-am distclean-generic
-
-dvi: dvi-am
-
-dvi-am:
-
-html: html-am
-
-html-am:
-
-info: info-am
-
-info-am:
-
-install-data-am: install-dist_basesDATA \
- install-dist_bases_framework_servicesDATA \
- install-dist_libconfigDATA install-dist_pluginsSCRIPTS \
- install-dist_pythonDATA install-dist_pythonSCRIPTS \
- install-dist_python_urllib3DATA \
- install-dist_python_urllib3_backportsDATA \
- install-dist_python_urllib3_contribDATA \
- install-dist_python_urllib3_packagesDATA \
- install-dist_python_urllib3_securetransportDATA \
- install-dist_python_urllib3_ssl_match_hostnameDATA \
- install-dist_python_urllib3_utilDATA \
- install-dist_pythonconfigDATA install-dist_pythonmodulesDATA \
- install-dist_pythonyaml2DATA install-dist_pythonyaml3DATA \
- install-dist_third_partyDATA install-dist_userpythonconfigDATA
-
-install-dvi: install-dvi-am
-
-install-dvi-am:
-
-install-exec-am:
-
-install-html: install-html-am
-
-install-html-am:
-
-install-info: install-info-am
-
-install-info-am:
-
-install-man:
-
-install-pdf: install-pdf-am
-
-install-pdf-am:
-
-install-ps: install-ps-am
-
-install-ps-am:
-
-installcheck-am:
-
-maintainer-clean: maintainer-clean-am
- -rm -f Makefile
-maintainer-clean-am: distclean-am maintainer-clean-generic
-
-mostlyclean: mostlyclean-am
-
-mostlyclean-am: mostlyclean-generic
-
-pdf: pdf-am
-
-pdf-am:
-
-ps: ps-am
-
-ps-am:
-
-uninstall-am: uninstall-dist_basesDATA \
- uninstall-dist_bases_framework_servicesDATA \
- uninstall-dist_libconfigDATA uninstall-dist_pluginsSCRIPTS \
- uninstall-dist_pythonDATA uninstall-dist_pythonSCRIPTS \
- uninstall-dist_python_urllib3DATA \
- uninstall-dist_python_urllib3_backportsDATA \
- uninstall-dist_python_urllib3_contribDATA \
- uninstall-dist_python_urllib3_packagesDATA \
- uninstall-dist_python_urllib3_securetransportDATA \
- uninstall-dist_python_urllib3_ssl_match_hostnameDATA \
- uninstall-dist_python_urllib3_utilDATA \
- uninstall-dist_pythonconfigDATA \
- uninstall-dist_pythonmodulesDATA \
- uninstall-dist_pythonyaml2DATA uninstall-dist_pythonyaml3DATA \
- uninstall-dist_third_partyDATA \
- uninstall-dist_userpythonconfigDATA
-
-.MAKE: install-am install-strip
-
-.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
- ctags-am distclean distclean-generic distdir dvi dvi-am html \
- html-am info info-am install install-am install-data \
- install-data-am install-dist_basesDATA \
- install-dist_bases_framework_servicesDATA \
- install-dist_libconfigDATA install-dist_pluginsSCRIPTS \
- install-dist_pythonDATA install-dist_pythonSCRIPTS \
- install-dist_python_urllib3DATA \
- install-dist_python_urllib3_backportsDATA \
- install-dist_python_urllib3_contribDATA \
- install-dist_python_urllib3_packagesDATA \
- install-dist_python_urllib3_securetransportDATA \
- install-dist_python_urllib3_ssl_match_hostnameDATA \
- install-dist_python_urllib3_utilDATA \
- install-dist_pythonconfigDATA install-dist_pythonmodulesDATA \
- install-dist_pythonyaml2DATA install-dist_pythonyaml3DATA \
- install-dist_third_partyDATA install-dist_userpythonconfigDATA \
- install-dvi install-dvi-am install-exec install-exec-am \
- install-html install-html-am install-info install-info-am \
- install-man install-pdf install-pdf-am install-ps \
- install-ps-am install-strip installcheck installcheck-am \
- installdirs maintainer-clean maintainer-clean-generic \
- mostlyclean mostlyclean-generic pdf pdf-am ps ps-am tags-am \
- uninstall uninstall-am uninstall-dist_basesDATA \
- uninstall-dist_bases_framework_servicesDATA \
- uninstall-dist_libconfigDATA uninstall-dist_pluginsSCRIPTS \
- uninstall-dist_pythonDATA uninstall-dist_pythonSCRIPTS \
- uninstall-dist_python_urllib3DATA \
- uninstall-dist_python_urllib3_backportsDATA \
- uninstall-dist_python_urllib3_contribDATA \
- uninstall-dist_python_urllib3_packagesDATA \
- uninstall-dist_python_urllib3_securetransportDATA \
- uninstall-dist_python_urllib3_ssl_match_hostnameDATA \
- uninstall-dist_python_urllib3_utilDATA \
- uninstall-dist_pythonconfigDATA \
- uninstall-dist_pythonmodulesDATA \
- uninstall-dist_pythonyaml2DATA uninstall-dist_pythonyaml3DATA \
- uninstall-dist_third_partyDATA \
- uninstall-dist_userpythonconfigDATA
-
-.in:
- if sed \
- -e 's#[@]localstatedir_POST@#$(localstatedir)#g' \
- -e 's#[@]sbindir_POST@#$(sbindir)#g' \
- -e 's#[@]sysconfdir_POST@#$(sysconfdir)#g' \
- -e 's#[@]pythondir_POST@#$(pythondir)#g' \
- -e 's#[@]configdir_POST@#$(configdir)#g' \
- -e 's#[@]libconfigdir_POST@#$(libconfigdir)#g' \
- -e 's#[@]cachedir_POST@#$(cachedir)#g' \
- $< > $@.tmp; then \
- mv "$@.tmp" "$@"; \
- else \
- rm -f "$@.tmp"; \
- false; \
- fi
-
-# Tell versions [3.59,3.63) of GNU make to not export all variables.
-# Otherwise a system limit (for SysV at least) may be exceeded.
-.NOEXPORT:
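
For reference, the `.in` suffix rule deleted above only performs textual substitution: each `@name_POST@` placeholder in a `.in` file is replaced with the corresponding configured install path. A minimal Python sketch of the same idea (the path values below are hypothetical examples, not the project's defaults):

```python
# Illustrative only: mirrors the sed expressions of the removed ".in" rule.
SUBSTITUTIONS = {
    'localstatedir_POST': '/var/lib/netdata',            # hypothetical value
    'sbindir_POST': '/usr/sbin',                         # hypothetical value
    'sysconfdir_POST': '/etc/netdata',                   # hypothetical value
    'pythondir_POST': '/usr/libexec/netdata/python.d',   # hypothetical value
    'configdir_POST': '/etc/netdata',                    # hypothetical value
    'libconfigdir_POST': '/usr/lib/netdata/conf.d',      # hypothetical value
    'cachedir_POST': '/var/cache/netdata',               # hypothetical value
}

def substitute(text):
    """Replace every @name_POST@ placeholder with its configured path."""
    for name, value in SUBSTITUTIONS.items():
        text = text.replace('@{0}@'.format(name), value)
    return text

print(substitute('pythondir = @pythondir_POST@'))
```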
diff --git a/collectors/python.d.plugin/README.md b/collectors/python.d.plugin/README.md
index 673fc2c99..8955197a7 100644
--- a/collectors/python.d.plugin/README.md
+++ b/collectors/python.d.plugin/README.md
@@ -9,21 +9,6 @@
5. Allows each **module** to have one or more data collection **jobs**
6. Each **job** is collecting one or more metrics from a single data source
-## Pull Request Checklist for Python Plugins
-
-This is a generic checklist for submitting a new Python plugin for Netdata. It is by no means comprehensive.
-
-At minimum, to be buildable and testable, the PR needs to include:
-
-* The module itself, following proper naming conventions: `python.d/<module_dir>/<module_name>.chart.py`
-* A README.md file for the plugin under `python.d/<module_dir>`.
-* The configuration file for the module: `conf.d/python.d/<module_name>.conf`. Python config files are in YAML format, and should include comments describing what options are present. The instructions are also needed in the configuration section of the README.md
-* A basic configuration for the plugin in the appropriate global config file: `conf.d/python.d.conf`, which is also in YAML format. Either add a line that reads `# <module_name>: yes` if the module is to be enabled by default, or one that reads `<module_name>: no` if it is to be disabled by default.
-* A line for the plugin in `python.d/Makefile.am` under `dist_python_DATA`.
-* A line for the plugin configuration file in `conf.d/Makefile.am`, under `dist_pythonconfig_DATA`
-* Optionally, chart information in `web/dashboard_info.js`. This generally involves specifying a name and icon for the section, and may include descriptions for the section or individual charts.
-
-
## Disclaimer
Every module should be compatible with python2 and python3.
@@ -36,7 +21,6 @@ Every configuration file must have one of two formats:
```yaml
update_every : 2 # update frequency
-retries : 1 # how many failures in update() is tolerated
priority : 20000 # where it is shown on dashboard
other_var1 : bla # variables passed to module
@@ -48,7 +32,6 @@ other_var2 : alb
```yaml
# module defaults:
update_every : 2
-retries : 1
priority : 20000
local: # job name
@@ -57,13 +40,25 @@ local: # job name
other_job:
priority : 5 # job position on dashboard
- retries : 20 # job retries
other_var2 : val # module specific variable
```
-`update_every`, `retries`, and `priority` are always optional.
+`update_every` and `priority` are always optional.
----
+## How to debug a python module
+
+```
+# become user netdata
+sudo su -s /bin/bash netdata
+```
+Depending on where Netdata was installed, execute one of the following commands to trace the execution of a python module:
+
+```
+# execute the plugin in debug mode, for a specific module
+/opt/netdata/usr/libexec/netdata/plugins.d/python.d.plugin <module> debug trace
+/usr/libexec/netdata/plugins.d/python.d.plugin <module> debug trace
+```
+Where `<module>` is the directory name under https://github.com/netdata/netdata/tree/master/collectors/python.d.plugin
## How to write a new module
@@ -74,7 +69,9 @@ Writing new python module is simple. You just need to remember to include 5 majo
- **_get_data** method
- all code needs to be compatible with Python 2 (**≥ 2.7**) *and* 3 (**≥ 3.1**)
-If you plan to submit the module in a PR, make sure and go through the [PR checklist for new modules](#pull-request-checklist-for-python-plugins) beforehand to make sure you have updated all the files you need to.
+If you plan to submit the module in a PR, make sure to go through the [PR checklist for new modules](#pull-request-checklist-for-python-plugins) beforehand, to confirm you have updated all the files you need to.
+
+For a quick start, you can look at the [example plugin](example/example.chart.py).
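
Roughly, a new module's skeleton looks like the following condensed sketch, modeled on the example plugin (the chart and dimension names are placeholders):

```python
# -*- coding: utf-8 -*-
# Condensed module sketch: ORDER, CHARTS, a Service class and _get_data.
from random import randint

from bases.FrameworkServices.SimpleService import SimpleService

ORDER = [
    'random',
]

CHARTS = {
    'random': {
        'options': [None, 'A random number', 'value', 'random', 'example.random', 'line'],
        'lines': [
            ['random1']
        ]
    }
}


class Service(SimpleService):
    def __init__(self, configuration=None, name=None):
        SimpleService.__init__(self, configuration=configuration, name=name)
        self.order = ORDER
        self.definitions = CHARTS

    def _get_data(self):
        # Return a dict of dimension id -> value, or None on failure.
        return {'random1': randint(0, 100)}
```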
### Global variables `ORDER` and `CHART`
@@ -210,3 +207,19 @@ Sockets are accessed in non-blocking mode with 15 second timeout.
After every execution of `_get_raw_data` socket is closed, to prevent this module needs to set `_keep_alive` variable to `True` and implement custom `_check_raw_data` method.
`_check_raw_data` should take raw data and return `True` if all data is received otherwise it should return `False`. Also it should do it in fast and efficient way.
+
+## Pull Request Checklist for Python Plugins
+
+This is a generic checklist for submitting a new Python plugin for Netdata. It is by no means comprehensive.
+
+At minimum, to be buildable and testable, the PR needs to include:
+
+* The module itself, following proper naming conventions: `python.d/<module_dir>/<module_name>.chart.py`
+* A README.md file for the plugin under `python.d/<module_dir>`.
+* The configuration file for the module: `conf.d/python.d/<module_name>.conf`. Python config files are in YAML format, and should include comments describing the available options. These instructions are also needed in the configuration section of the README.md.
+* A basic configuration for the plugin in the appropriate global config file: `conf.d/python.d.conf`, which is also in YAML format. Either add a line that reads `# <module_name>: yes` if the module is to be enabled by default, or one that reads `<module_name>: no` if it is to be disabled by default.
+* A line for the plugin in `python.d/Makefile.am` under `dist_python_DATA`.
+* A line for the plugin configuration file in `conf.d/Makefile.am`, under `dist_pythonconfig_DATA`
+* Optionally, chart information in `web/dashboard_info.js`. This generally involves specifying a name and icon for the section, and may include descriptions for the section or individual charts.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/adaptec_raid/README.md b/collectors/python.d.plugin/adaptec_raid/README.md
index 499dc9190..682280f2e 100644
--- a/collectors/python.d.plugin/adaptec_raid/README.md
+++ b/collectors/python.d.plugin/adaptec_raid/README.md
@@ -44,3 +44,5 @@ adaptec_raid: yes
![image](https://user-images.githubusercontent.com/22274335/47278133-6d306680-d601-11e8-87c2-cc9c0f42d686.png)
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fadaptec_raid%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/adaptec_raid/adaptec_raid.conf b/collectors/python.d.plugin/adaptec_raid/adaptec_raid.conf
index 253cbf5a9..fa462ec83 100644
--- a/collectors/python.d.plugin/adaptec_raid/adaptec_raid.conf
+++ b/collectors/python.d.plugin/adaptec_raid/adaptec_raid.conf
@@ -19,11 +19,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -50,6 +48,6 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
# ----------------------------------------------------------------------
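
The configuration changes above replace the fixed `retries` counter with a `penalty` flag; per the new comments, the effective collection interval grows after every 5 consecutive failed updates, up to a 10-minute cap. A hypothetical sketch of such a backoff (the function name and formula are illustrative, not the plugin's actual implementation):

```python
MAX_PENALTY = 600  # 10 minutes, per the comment above

def penalized_interval(update_every, consecutive_failures, penalty=True):
    # Hypothetical: add one base interval per 5 consecutive failures,
    # never exceeding MAX_PENALTY seconds.
    if not penalty or consecutive_failures < 5:
        return update_every
    steps = consecutive_failures // 5
    return min(update_every * (1 + steps), MAX_PENALTY)

# update_every=1: 4 failures -> 1s, 5 -> 2s, 10 -> 3s, ..., capped at 600s.
```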
diff --git a/collectors/python.d.plugin/apache/README.md b/collectors/python.d.plugin/apache/README.md
index c6d1d126a..090feb070 100644
--- a/collectors/python.d.plugin/apache/README.md
+++ b/collectors/python.d.plugin/apache/README.md
@@ -46,14 +46,14 @@ priority : 90100
local:
url : 'http://localhost/server-status?auto'
- retries : 20
remote:
url : 'http://www.apache.org/server-status?auto'
update_every : 5
- retries : 4
```
Without configuration, module attempts to connect to `http://localhost/server-status?auto`
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fapache%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/apache/apache.chart.py b/collectors/python.d.plugin/apache/apache.chart.py
index d136274d0..655616d07 100644
--- a/collectors/python.d.plugin/apache/apache.chart.py
+++ b/collectors/python.d.plugin/apache/apache.chart.py
@@ -5,64 +5,60 @@
from bases.FrameworkServices.UrlService import UrlService
-# default module values (can be overridden per job in `config`)
-# update_every = 2
-priority = 60000
-retries = 60
-
-# default job configuration (overridden by python.d.plugin)
-# config = {'local': {
-# 'update_every': update_every,
-# 'retries': retries,
-# 'priority': priority,
-# 'url': 'http://www.apache.org/server-status?auto'
-# }}
-
-# charts order (can be overridden if you want less charts, or different order)
-ORDER = ['requests', 'connections', 'conns_async', 'net', 'workers', 'reqpersec', 'bytespersec', 'bytesperreq']
+
+ORDER = [
+ 'requests',
+ 'connections',
+ 'conns_async',
+ 'net',
+ 'workers',
+ 'reqpersec',
+ 'bytespersec',
+ 'bytesperreq',
+]
CHARTS = {
'bytesperreq': {
- 'options': [None, 'apache Lifetime Avg. Response Size', 'bytes/request',
+ 'options': [None, 'Lifetime Avg. Request Size', 'KiB',
'statistics', 'apache.bytesperreq', 'area'],
'lines': [
- ['size_req']
+ ['size_req', 'size', 'absolute', 1, 1024 * 100000]
]},
'workers': {
- 'options': [None, 'apache Workers', 'workers', 'workers', 'apache.workers', 'stacked'],
+ 'options': [None, 'Workers', 'workers', 'workers', 'apache.workers', 'stacked'],
'lines': [
['idle'],
['busy'],
]},
'reqpersec': {
- 'options': [None, 'apache Lifetime Avg. Requests/s', 'requests/s', 'statistics',
+ 'options': [None, 'Lifetime Avg. Requests/s', 'requests/s', 'statistics',
'apache.reqpersec', 'area'],
'lines': [
- ['requests_sec']
+ ['requests_sec', 'requests', 'absolute', 1, 100000]
]},
'bytespersec': {
- 'options': [None, 'apache Lifetime Avg. Bandwidth/s', 'kilobits/s', 'statistics',
+ 'options': [None, 'Lifetime Avg. Bandwidth/s', 'kilobits/s', 'statistics',
'apache.bytesperreq', 'area'],
'lines': [
- ['size_sec', None, 'absolute', 8, 1000]
+ ['size_sec', None, 'absolute', 8, 1000 * 100000]
]},
'requests': {
- 'options': [None, 'apache Requests', 'requests/s', 'requests', 'apache.requests', 'line'],
+ 'options': [None, 'Requests', 'requests/s', 'requests', 'apache.requests', 'line'],
'lines': [
['requests', None, 'incremental']
]},
'net': {
- 'options': [None, 'apache Bandwidth', 'kilobits/s', 'bandwidth', 'apache.net', 'area'],
+ 'options': [None, 'Bandwidth', 'kilobits/s', 'bandwidth', 'apache.net', 'area'],
'lines': [
['sent', None, 'incremental', 8, 1]
]},
'connections': {
- 'options': [None, 'apache Connections', 'connections', 'connections', 'apache.connections', 'line'],
+ 'options': [None, 'Connections', 'connections', 'connections', 'apache.connections', 'line'],
'lines': [
['connections']
]},
'conns_async': {
- 'options': [None, 'apache Async Connections', 'connections', 'connections', 'apache.conns_async', 'stacked'],
+ 'options': [None, 'Async Connections', 'connections', 'connections', 'apache.conns_async', 'stacked'],
'lines': [
['keepalive'],
['closing'],
@@ -86,6 +82,14 @@ ASSIGNMENT = {
'ConnsAsyncWriting': 'writing'
}
+FLOAT_VALUES = [
+ 'BytesPerReq',
+ 'ReqPerSec',
+ 'BytesPerSec',
+]
+
+LIGHTTPD_MARKER = 'idle_servers'
+
class Service(UrlService):
def __init__(self, configuration=None, name=None):
@@ -96,20 +100,15 @@ class Service(UrlService):
def check(self):
self._manager = self._build_manager()
+
data = self._get_data()
+
if not data:
return None
- if 'idle_servers' in data:
- self.module_name = 'lighttpd'
- for chart in self.definitions:
- if chart == 'workers':
- lines = self.definitions[chart]['lines']
- lines[0] = ['idle_servers', 'idle']
- lines[1] = ['busy_servers', 'busy']
- opts = self.definitions[chart]['options']
- opts[1] = opts[1].replace('apache', 'lighttpd')
- opts[4] = opts[4].replace('apache', 'lighttpd')
+ if LIGHTTPD_MARKER in data:
+ self.turn_into_lighttpd()
+
return True
def _get_data(self):
@@ -118,15 +117,44 @@ class Service(UrlService):
:return: dict
"""
raw_data = self._get_raw_data()
+
if not raw_data:
return None
+
data = dict()
- for row in raw_data.split('\n'):
- tmp = row.split(':')
- if tmp[0] in ASSIGNMENT:
- try:
- data[ASSIGNMENT[tmp[0]]] = int(float(tmp[1]))
- except (IndexError, ValueError):
- continue
+ for line in raw_data.split('\n'):
+ try:
+ parse_line(line, data)
+ except ValueError:
+ continue
+
return data or None
+
+ def turn_into_lighttpd(self):
+ self.module_name = 'lighttpd'
+ for chart in self.definitions:
+ if chart == 'workers':
+ lines = self.definitions[chart]['lines']
+ lines[0] = ['idle_servers', 'idle']
+ lines[1] = ['busy_servers', 'busy']
+ opts = self.definitions[chart]['options']
+ opts[1] = opts[1].replace('apache', 'lighttpd')
+ opts[4] = opts[4].replace('apache', 'lighttpd')
+
+
+def parse_line(line, data):
+ parts = line.split(':')
+
+ if len(parts) != 2:
+ return
+
+ key, value = parts[0], parts[1]
+
+ if key not in ASSIGNMENT:
+ return
+
+ if key in FLOAT_VALUES:
+ data[ASSIGNMENT[key]] = int((float(value) * 100000))
+ else:
+ data[ASSIGNMENT[key]] = int(value)
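
The new `FLOAT_VALUES` handling stores fractional `mod_status` values as fixed-point integers scaled by 100000; the matching chart divisors (`100000`, `1000 * 100000`, `1024 * 100000`) undo the scaling at render time. A self-contained demonstration, with `ASSIGNMENT` trimmed to one entry and assuming the module's usual `ReqPerSec` -> `requests_sec` mapping:

```python
ASSIGNMENT = {'ReqPerSec': 'requests_sec'}  # trimmed for the demo
FLOAT_VALUES = ['ReqPerSec']
SCALE = 100000

def parse_line(line, data):
    parts = line.split(':')
    if len(parts) != 2:
        return
    key, value = parts[0], parts[1]
    if key not in ASSIGNMENT:
        return
    if key in FLOAT_VALUES:
        data[ASSIGNMENT[key]] = int(float(value) * SCALE)  # fixed-point
    else:
        data[ASSIGNMENT[key]] = int(value)

data = {}
parse_line('ReqPerSec: 4.375', data)
print(data)                          # {'requests_sec': 437500}
print(data['requests_sec'] / SCALE)  # 4.375, recovered by the chart divisor
```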
diff --git a/collectors/python.d.plugin/apache/apache.conf b/collectors/python.d.plugin/apache/apache.conf
index 8b606f7e0..84e12a57c 100644
--- a/collectors/python.d.plugin/apache/apache.conf
+++ b/collectors/python.d.plugin/apache/apache.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,7 +56,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, apache also supports the following:
diff --git a/collectors/python.d.plugin/beanstalk/README.md b/collectors/python.d.plugin/beanstalk/README.md
index c2d7d5787..8daa36604 100644
--- a/collectors/python.d.plugin/beanstalk/README.md
+++ b/collectors/python.d.plugin/beanstalk/README.md
@@ -101,3 +101,5 @@ port : 11300
If no configuration is given, module will attempt to connect to beanstalkd on `127.0.0.1:11300` address
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fbeanstalk%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/beanstalk/beanstalk.chart.py b/collectors/python.d.plugin/beanstalk/beanstalk.chart.py
index 1472b4e1a..ed945a781 100644
--- a/collectors/python.d.plugin/beanstalk/beanstalk.chart.py
+++ b/collectors/python.d.plugin/beanstalk/beanstalk.chart.py
@@ -12,13 +12,18 @@ except ImportError:
from bases.FrameworkServices.SimpleService import SimpleService
from bases.loaders import safe_load
-# default module values (can be overridden per job in `config`)
-# update_every = 2
-priority = 60000
-retries = 60
-ORDER = ['cpu_usage', 'jobs_rate', 'connections_rate', 'commands_rate', 'current_tubes', 'current_jobs',
- 'current_connections', 'binlog', 'uptime']
+ORDER = [
+ 'cpu_usage',
+ 'jobs_rate',
+ 'connections_rate',
+ 'commands_rate',
+ 'current_tubes',
+ 'current_jobs',
+ 'current_connections',
+ 'binlog',
+ 'uptime',
+]
CHARTS = {
'cpu_usage': {
diff --git a/collectors/python.d.plugin/beanstalk/beanstalk.conf b/collectors/python.d.plugin/beanstalk/beanstalk.conf
index 3b11d9192..7586ad26b 100644
--- a/collectors/python.d.plugin/beanstalk/beanstalk.conf
+++ b/collectors/python.d.plugin/beanstalk/beanstalk.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -68,7 +66,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
# chart_cleanup: 10 # the JOB's chart cleanup interval in iterations
#
diff --git a/collectors/python.d.plugin/bind_rndc/README.md b/collectors/python.d.plugin/bind_rndc/README.md
index 688297ab3..fefe74931 100644
--- a/collectors/python.d.plugin/bind_rndc/README.md
+++ b/collectors/python.d.plugin/bind_rndc/README.md
@@ -58,3 +58,5 @@ local:
If no configuration is given, module will attempt to read named.stats file at `/var/log/bind/named.stats`
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fbind_rndc%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/bind_rndc/bind_rndc.chart.py b/collectors/python.d.plugin/bind_rndc/bind_rndc.chart.py
index 423232f65..7ac1bc3dc 100644
--- a/collectors/python.d.plugin/bind_rndc/bind_rndc.chart.py
+++ b/collectors/python.d.plugin/bind_rndc/bind_rndc.chart.py
@@ -11,11 +11,15 @@ from subprocess import Popen
from bases.collection import find_binary
from bases.FrameworkServices.SimpleService import SimpleService
-priority = 60000
-retries = 60
+
update_every = 30
-ORDER = ['name_server_statistics', 'incoming_queries', 'outgoing_queries', 'named_stats_size']
+ORDER = [
+ 'name_server_statistics',
+ 'incoming_queries',
+ 'outgoing_queries',
+ 'named_stats_size',
+]
CHARTS = {
'name_server_statistics': {
@@ -44,7 +48,7 @@ CHARTS = {
'lines': [
]},
'named_stats_size': {
- 'options': [None, 'Named Stats File Size', 'MB', 'file size', 'bind_rndc.stats_size', 'line'],
+ 'options': [None, 'Named Stats File Size', 'MiB', 'file size', 'bind_rndc.stats_size', 'line'],
'lines': [
['stats_size', None, 'absolute', 1, 1 << 20]
]
@@ -92,10 +96,20 @@ class Service(SimpleService):
self.definitions = CHARTS
self.named_stats_path = self.configuration.get('named_stats_path', '/var/log/bind/named.stats')
self.rndc = find_binary('rndc')
- self.data = dict(nms_requests=0, nms_responses=0, nms_failure=0, nms_auth=0,
- nms_non_auth=0, nms_nxrrset=0, nms_success=0, nms_nxdomain=0,
- nms_recursion=0, nms_duplicate=0, nms_rejected_queries=0,
- nms_dropped_queries=0)
+ self.data = dict(
+ nms_requests=0,
+ nms_responses=0,
+ nms_failure=0,
+ nms_auth=0,
+ nms_non_auth=0,
+ nms_nxrrset=0,
+ nms_success=0,
+ nms_nxdomain=0,
+ nms_recursion=0,
+ nms_duplicate=0,
+ nms_rejected_queries=0,
+ nms_dropped_queries=0,
+ )
def check(self):
if not self.rndc:
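
The unit fix above (`'MB'` -> `'MiB'`) pairs with the existing divisor `1 << 20`, i.e. 1,048,576, so the byte count of the stats file is rendered in mebibytes:

```python
stats_size = 5 * 1024 * 1024   # a 5 MiB stats file, in bytes
print(stats_size / (1 << 20))  # 5.0 -> the value shown on the dashboard
```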
diff --git a/collectors/python.d.plugin/bind_rndc/bind_rndc.conf b/collectors/python.d.plugin/bind_rndc/bind_rndc.conf
index 71958ff98..3b7e9a216 100644
--- a/collectors/python.d.plugin/bind_rndc/bind_rndc.conf
+++ b/collectors/python.d.plugin/bind_rndc/bind_rndc.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,7 +56,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, bind_rndc also supports the following:
diff --git a/collectors/python.d.plugin/boinc/README.md b/collectors/python.d.plugin/boinc/README.md
index 595bcd3c0..0f0aa1c6e 100644
--- a/collectors/python.d.plugin/boinc/README.md
+++ b/collectors/python.d.plugin/boinc/README.md
@@ -26,3 +26,5 @@ remote:
```
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fboinc%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/boinc/boinc.chart.py b/collectors/python.d.plugin/boinc/boinc.chart.py
index d14754c4b..e10b28cea 100644
--- a/collectors/python.d.plugin/boinc/boinc.chart.py
+++ b/collectors/python.d.plugin/boinc/boinc.chart.py
@@ -10,7 +10,12 @@ from bases.FrameworkServices.SimpleService import SimpleService
from third_party import boinc_client
-ORDER = ['tasks', 'states', 'sched_states', 'process_states']
+ORDER = [
+ 'tasks',
+ 'states',
+ 'sched_states',
+ 'process_states',
+]
CHARTS = {
'tasks': {
@@ -141,14 +146,16 @@ class Service(SimpleService):
def _get_data(self):
if not self.is_alive():
return None
+
data = dict(_DATA_TEMPLATE)
- results = []
+
try:
results = self.client.get_tasks()
except socket.error:
self.error('Connection is dead')
self.alive = False
return None
+
for task in results:
data['total'] += 1
data[_TASK_MAP[task.state]] += 1
@@ -159,4 +166,5 @@ class Service(SimpleService):
data[_PROC_MAP[task.active_task_state]] += 1
except AttributeError:
pass
- return data
+
+ return data or None
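
The last hunk turns `return data` into `return data or None`. The point of the idiom, in two lines: a falsy result collapses to `None`, which python.d treats as a failed collection rather than a valid empty sample.

```python
# `data or None`: an empty dict is falsy, so it becomes None, which the
# framework reads as "this cycle collected nothing".
print({} or None)            # -> None
print({'total': 0} or None)  # -> {'total': 0}
```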
diff --git a/collectors/python.d.plugin/boinc/boinc.conf b/collectors/python.d.plugin/boinc/boinc.conf
index e59d2509d..16edf55c4 100644
--- a/collectors/python.d.plugin/boinc/boinc.conf
+++ b/collectors/python.d.plugin/boinc/boinc.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply a penalty to update_every in case of failures.
+# The penalty increases after every 5 consecutive failed updates; the maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,7 +56,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# In addition to the above, boinc also supports the following:
diff --git a/collectors/python.d.plugin/ceph/README.md b/collectors/python.d.plugin/ceph/README.md
index 29dfe5d1d..1f067c61c 100644
--- a/collectors/python.d.plugin/ceph/README.md
+++ b/collectors/python.d.plugin/ceph/README.md
@@ -30,3 +30,5 @@ local:
```
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fceph%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/ceph/ceph.chart.py b/collectors/python.d.plugin/ceph/ceph.chart.py
index 31c764d0f..45b52620f 100644
--- a/collectors/python.d.plugin/ceph/ceph.chart.py
+++ b/collectors/python.d.plugin/ceph/ceph.chart.py
@@ -9,14 +9,13 @@ try:
except ImportError:
CEPH = False
-import os
import json
+import os
+
from bases.FrameworkServices.SimpleService import SimpleService
# default module values (can be overridden per job in `config`)
update_every = 10
-priority = 60000
-retries = 60
ORDER = [
'general_usage',
@@ -37,7 +36,7 @@ ORDER = [
CHARTS = {
'general_usage': {
- 'options': [None, 'Ceph General Space', 'KB', 'general', 'ceph.general_usage', 'stacked'],
+ 'options': [None, 'Ceph General Space', 'KiB', 'general', 'ceph.general_usage', 'stacked'],
'lines': [
['general_available', 'avail', 'absolute'],
['general_usage', 'used', 'absolute']
@@ -50,7 +49,7 @@ CHARTS = {
]
},
'general_bytes': {
- 'options': [None, 'Ceph General Read/Write Data/s', 'KB', 'general', 'ceph.general_bytes',
+ 'options': [None, 'Ceph General Read/Write Data/s', 'KiB/s', 'general', 'ceph.general_bytes',
'area'],
'lines': [
['general_read_bytes', 'read', 'absolute', 1, 1024],
@@ -74,7 +73,7 @@ CHARTS = {
]
},
'pool_usage': {
- 'options': [None, 'Ceph Pools', 'KB', 'pool', 'ceph.pool_usage', 'line'],
+ 'options': [None, 'Ceph Pools', 'KiB', 'pool', 'ceph.pool_usage', 'line'],
'lines': []
},
'pool_objects': {
@@ -82,11 +81,11 @@ CHARTS = {
'lines': []
},
'pool_read_bytes': {
- 'options': [None, 'Ceph Read Pool Data/s', 'KB', 'pool', 'ceph.pool_read_bytes', 'area'],
+ 'options': [None, 'Ceph Read Pool Data/s', 'KiB/s', 'pool', 'ceph.pool_read_bytes', 'area'],
'lines': []
},
'pool_write_bytes': {
- 'options': [None, 'Ceph Write Pool Data/s', 'KB', 'pool', 'ceph.pool_write_bytes', 'area'],
+ 'options': [None, 'Ceph Write Pool Data/s', 'KiB/s', 'pool', 'ceph.pool_write_bytes', 'area'],
'lines': []
},
'pool_read_operations': {
@@ -98,7 +97,7 @@ CHARTS = {
'lines': []
},
'osd_usage': {
- 'options': [None, 'Ceph OSDs', 'KB', 'osd', 'ceph.osd_usage', 'line'],
+ 'options': [None, 'Ceph OSDs', 'KiB', 'osd', 'ceph.osd_usage', 'line'],
'lines': []
},
'osd_apply_latency': {
@@ -320,7 +319,7 @@ class Service(SimpleService):
return json.loads(self.cluster.mon_command(json.dumps({
'prefix': 'osd df',
'format': 'json'
- }), '')[1])
+ }), '')[1].replace('-nan', '"-nan"'))
def _get_osd_perf(self):
"""
diff --git a/collectors/python.d.plugin/ceph/ceph.conf b/collectors/python.d.plugin/ceph/ceph.conf
index 78ac1e251..4caabbf6d 100644
--- a/collectors/python.d.plugin/ceph/ceph.conf
+++ b/collectors/python.d.plugin/ceph/ceph.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply a penalty to update_every in case of failures.
+# The penalty increases after every 5 consecutive failed updates; the maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,7 +56,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 10 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# In addition to the above, the ceph plugin also supports the following:
diff --git a/collectors/python.d.plugin/chrony/README.md b/collectors/python.d.plugin/chrony/README.md
index 30636fe77..67ed1a059 100644
--- a/collectors/python.d.plugin/chrony/README.md
+++ b/collectors/python.d.plugin/chrony/README.md
@@ -29,3 +29,5 @@ local:
```
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fchrony%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/chrony/chrony.chart.py b/collectors/python.d.plugin/chrony/chrony.chart.py
index fd01d4e85..91f725001 100644
--- a/collectors/python.d.plugin/chrony/chrony.chart.py
+++ b/collectors/python.d.plugin/chrony/chrony.chart.py
@@ -7,11 +7,19 @@ from bases.FrameworkServices.ExecutableService import ExecutableService
# default module values (can be overridden per job in `config`)
update_every = 5
-priority = 60000
-retries = 10
+
+CHRONY_COMMAND = 'chronyc -n tracking'
# charts order (can be overridden if you want less charts, or different order)
-ORDER = ['system', 'offsets', 'stratum', 'root', 'frequency', 'residualfreq', 'skew']
+ORDER = [
+ 'system',
+ 'offsets',
+ 'stratum',
+ 'root',
+ 'frequency',
+ 'residualfreq',
+ 'skew',
+]
CHARTS = {
'system': {
@@ -77,9 +85,9 @@ class Service(ExecutableService):
def __init__(self, configuration=None, name=None):
ExecutableService.__init__(
self, configuration=configuration, name=name)
- self.command = 'chronyc -n tracking'
self.order = ORDER
self.definitions = CHARTS
+ self.command = CHRONY_COMMAND
def _get_data(self):
"""
diff --git a/collectors/python.d.plugin/chrony/chrony.conf b/collectors/python.d.plugin/chrony/chrony.conf
index 9ac906b5f..fd95519b5 100644
--- a/collectors/python.d.plugin/chrony/chrony.conf
+++ b/collectors/python.d.plugin/chrony/chrony.conf
@@ -27,11 +27,9 @@ update_every: 5
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply a penalty to update_every in case of failures.
+# The penalty increases after every 5 consecutive failed updates; the maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,7 +56,7 @@ update_every: 5
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# In addition to the above, chrony also supports the following:
diff --git a/collectors/python.d.plugin/couchdb/README.md b/collectors/python.d.plugin/couchdb/README.md
index eff8c0810..2cc353edb 100644
--- a/collectors/python.d.plugin/couchdb/README.md
+++ b/collectors/python.d.plugin/couchdb/README.md
@@ -33,3 +33,5 @@ local:
```
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fcouchdb%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/couchdb/couchdb.chart.py b/collectors/python.d.plugin/couchdb/couchdb.chart.py
index 5d6b9916f..a58694d70 100644
--- a/collectors/python.d.plugin/couchdb/couchdb.chart.py
+++ b/collectors/python.d.plugin/couchdb/couchdb.chart.py
@@ -8,6 +8,7 @@ from collections import namedtuple, defaultdict
from json import loads
from threading import Thread
from socket import gethostbyname, gaierror
+
try:
from queue import Queue
except ImportError:
@@ -15,10 +16,9 @@ except ImportError:
from bases.FrameworkServices.UrlService import UrlService
-# default module values (can be overridden per job in `config`)
+
update_every = 1
-priority = 60000
-retries = 60
+
METHODS = namedtuple('METHODS', ['get_data', 'url', 'stats'])
@@ -109,7 +109,7 @@ ORDER = [
CHARTS = {
'activity': {
- 'options': [None, 'Overall Activity', 'req/s',
+ 'options': [None, 'Overall Activity', 'requests/s',
'dbactivity', 'couchdb.activity', 'stacked'],
'lines': [
['couchdb_database_reads', 'DB reads', 'incremental'],
@@ -118,7 +118,7 @@ CHARTS = {
]
},
'request_methods': {
- 'options': [None, 'HTTP request methods', 'req/s',
+ 'options': [None, 'HTTP request methods', 'requests/s',
'httptraffic', 'couchdb.request_methods',
'stacked'],
'lines': [
@@ -133,7 +133,7 @@ CHARTS = {
]
},
'response_codes': {
- 'options': [None, 'HTTP response status codes', 'resp/s',
+ 'options': [None, 'HTTP response status codes', 'responses/s',
'httptraffic', 'couchdb.response_codes',
'stacked'],
'lines': [
@@ -151,15 +151,13 @@ CHARTS = {
]
},
'open_files': {
- 'options': [None, 'Open files', 'files',
- 'ops', 'couchdb.open_files', 'line'],
+ 'options': [None, 'Open files', 'files', 'ops', 'couchdb.open_files', 'line'],
'lines': [
['couchdb_open_os_files', '# files', 'absolute']
]
},
'active_tasks': {
- 'options': [None, 'Active task breakdown', 'tasks',
- 'ops', 'couchdb.active_tasks', 'stacked'],
+ 'options': [None, 'Active task breakdown', 'tasks', 'ops', 'couchdb.active_tasks', 'stacked'],
'lines': [
['activetasks_indexer', 'Indexer', 'absolute'],
['activetasks_database_compaction', 'DB Compaction', 'absolute'],
@@ -168,8 +166,7 @@ CHARTS = {
]
},
'replicator_jobs': {
- 'options': [None, 'Replicator job breakdown', 'jobs',
- 'ops', 'couchdb.replicator_jobs', 'stacked'],
+ 'options': [None, 'Replicator job breakdown', 'jobs', 'ops', 'couchdb.replicator_jobs', 'stacked'],
'lines': [
['couch_replicator_jobs_running', 'Running', 'absolute'],
['couch_replicator_jobs_pending', 'Pending', 'absolute'],
@@ -179,8 +176,7 @@ CHARTS = {
]
},
'erlang_memory': {
- 'options': [None, 'Erlang VM memory usage', 'bytes',
- 'erlang', 'couchdb.erlang_vm_memory', 'stacked'],
+ 'options': [None, 'Erlang VM memory usage', 'B', 'erlang', 'couchdb.erlang_vm_memory', 'stacked'],
'lines': [
['memory_atom', 'atom', 'absolute'],
['memory_binary', 'binaries', 'absolute'],
@@ -191,23 +187,20 @@ CHARTS = {
]
},
'erlang_reductions': {
- 'options': [None, 'Erlang reductions', 'count',
- 'erlang', 'couchdb.reductions', 'line'],
+ 'options': [None, 'Erlang reductions', 'count', 'erlang', 'couchdb.reductions', 'line'],
'lines': [
['reductions', 'reductions', 'incremental']
]
},
'erlang_proc_counts': {
- 'options': [None, 'Process counts', 'count',
- 'erlang', 'couchdb.proccounts', 'line'],
+ 'options': [None, 'Process counts', 'count', 'erlang', 'couchdb.proccounts', 'line'],
'lines': [
['os_proc_count', 'OS procs', 'absolute'],
['process_count', 'erl procs', 'absolute']
]
},
'erlang_peak_msg_queue': {
- 'options': [None, 'Peak message queue size', 'count',
- 'erlang', 'couchdb.peakmsgqueue',
+ 'options': [None, 'Peak message queue size', 'count', 'erlang', 'couchdb.peakmsgqueue',
'line'],
'lines': [
['peak_msg_queue', 'peak size', 'absolute']
@@ -215,18 +208,15 @@ CHARTS = {
},
# Lines for the following are added as part of check()
'db_sizes_file': {
- 'options': [None, 'Database sizes (file)', 'KB',
- 'perdbstats', 'couchdb.db_sizes_file', 'line'],
+ 'options': [None, 'Database sizes (file)', 'KiB', 'perdbstats', 'couchdb.db_sizes_file', 'line'],
'lines': []
},
'db_sizes_external': {
- 'options': [None, 'Database sizes (external)', 'KB',
- 'perdbstats', 'couchdb.db_sizes_external', 'line'],
+ 'options': [None, 'Database sizes (external)', 'KiB', 'perdbstats', 'couchdb.db_sizes_external', 'line'],
'lines': []
},
'db_sizes_active': {
- 'options': [None, 'Database sizes (active)', 'KB',
- 'perdbstats', 'couchdb.db_sizes_active', 'line'],
+ 'options': [None, 'Database sizes (active)', 'KiB', 'perdbstats', 'couchdb.db_sizes_active', 'line'],
'lines': []
},
'db_doc_counts': {
@@ -235,8 +225,7 @@ CHARTS = {
'lines': []
},
'db_doc_del_counts': {
- 'options': [None, 'Database # of deleted docs', 'docs',
- 'perdbstats', 'couchdb_db_doc_del_count', 'line'],
+ 'options': [None, 'Database # of deleted docs', 'docs', 'perdbstats', 'couchdb_db_doc_del_count', 'line'],
'lines': []
}
}
@@ -256,7 +245,7 @@ class Service(UrlService):
try:
self.dbs = self.configuration.get('databases').split(' ')
except (KeyError, AttributeError):
- self.dbs = []
+ self.dbs = list()
def check(self):
if not (self.host and self.port):
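
The constructor hunk above swaps `[]` for `list()` in the `databases` fallback; the surrounding try/except is what makes a missing or malformed option safe. A compact illustration (the config dicts are hypothetical):

```python
# A missing key makes .get() return None, so .split raises
# AttributeError and the job falls back to an empty list.
for conf in ({'databases': 'db1 db2'}, {}, {'databases': None}):
    try:
        dbs = conf.get('databases').split(' ')
    except (KeyError, AttributeError):
        dbs = list()
    print(dbs)  # -> ['db1', 'db2'], then [] twice
```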
diff --git a/collectors/python.d.plugin/couchdb/couchdb.conf b/collectors/python.d.plugin/couchdb/couchdb.conf
index 5f6e75cff..9c68be777 100644
--- a/collectors/python.d.plugin/couchdb/couchdb.conf
+++ b/collectors/python.d.plugin/couchdb/couchdb.conf
@@ -28,11 +28,9 @@ update_every: 10
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply a penalty to update_every in case of failures.
+# The penalty increases after every 5 consecutive failed updates; the maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -59,7 +57,7 @@ update_every: 10
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# In addition to the above, the couchdb plugin also supports the following:
diff --git a/collectors/python.d.plugin/cpufreq/README.md b/collectors/python.d.plugin/cpufreq/README.md
index 33891d59d..f1fc1e8f2 100644
--- a/collectors/python.d.plugin/cpufreq/README.md
+++ b/collectors/python.d.plugin/cpufreq/README.md
@@ -1,5 +1,10 @@
# cpufreq
+> THIS MODULE IS OBSOLETE.
+> USE THE [PROC PLUGIN](../../proc.plugin) - IT IS MORE EFFICIENT
+
+---
+
This module shows the current CPU frequency as set by the cpufreq kernel
module.
@@ -28,3 +33,5 @@ If no configuration is given, module will search for cpufreq files in `/sys/devi
The directory is also prefixed with `NETDATA_HOST_PREFIX` if specified.
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fcpufreq%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/cpufreq/cpufreq.conf b/collectors/python.d.plugin/cpufreq/cpufreq.conf
index 0890245d9..96c0884c6 100644
--- a/collectors/python.d.plugin/cpufreq/cpufreq.conf
+++ b/collectors/python.d.plugin/cpufreq/cpufreq.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply a penalty to update_every in case of failures.
+# The penalty increases after every 5 consecutive failed updates; the maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
diff --git a/collectors/python.d.plugin/cpuidle/README.md b/collectors/python.d.plugin/cpuidle/README.md
index 495169638..bb6722a11 100644
--- a/collectors/python.d.plugin/cpuidle/README.md
+++ b/collectors/python.d.plugin/cpuidle/README.md
@@ -9,3 +9,5 @@ It produces one stacked chart per CPU, showing the percentage of time spent in
each state.
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fcpuidle%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/cpuidle/cpuidle.conf b/collectors/python.d.plugin/cpuidle/cpuidle.conf
index bc276fcd2..25f5fed64 100644
--- a/collectors/python.d.plugin/cpuidle/cpuidle.conf
+++ b/collectors/python.d.plugin/cpuidle/cpuidle.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply a penalty to update_every in case of failures.
+# The penalty increases after every 5 consecutive failed updates; the maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
diff --git a/collectors/python.d.plugin/dns_query_time/README.md b/collectors/python.d.plugin/dns_query_time/README.md
index 3703e8aaf..73d70d3a2 100644
--- a/collectors/python.d.plugin/dns_query_time/README.md
+++ b/collectors/python.d.plugin/dns_query_time/README.md
@@ -8,3 +8,5 @@ This module provides DNS query time statistics.
It produces one aggregate chart or one chart per DNS server, showing the query time.
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fdns_query_time%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/dns_query_time/dns_query_time.chart.py b/collectors/python.d.plugin/dns_query_time/dns_query_time.chart.py
index d3c3db788..4a5e0e108 100644
--- a/collectors/python.d.plugin/dns_query_time/dns_query_time.chart.py
+++ b/collectors/python.d.plugin/dns_query_time/dns_query_time.chart.py
@@ -28,10 +28,7 @@ except ImportError:
from bases.FrameworkServices.SimpleService import SimpleService
-# default module values (can be overridden per job in `config`)
update_every = 5
-priority = 60000
-retries = 60
class Service(SimpleService):
@@ -46,14 +43,14 @@ class Service(SimpleService):
def check(self):
if not DNS_PYTHON:
- self.error('\'python-dnspython\' package is needed to use dns_query_time.chart.py')
+ self.error("'python-dnspython' package is needed to use dns_query_time.chart.py")
return False
self.timeout = self.timeout if isinstance(self.timeout, int) else 4
if not all([self.domains, self.server_list,
isinstance(self.server_list, str), isinstance(self.domains, str)]):
- self.error('server_list and domain_list can\'t be empty')
+ self.error("server_list and domain_list can't be empty")
return False
else:
self.domains, self.server_list = self.domains.split(), self.server_list.split()
@@ -129,17 +126,27 @@ def create_charts(aggregate, server_list):
}
}
for ns in server_list:
- definitions['dns_group']['lines'].append(['_'.join(['ns', ns.replace('.', '_')]), ns, 'absolute'])
+ dim = [
+ '_'.join(['ns', ns.replace('.', '_')]),
+ ns,
+ 'absolute',
+ ]
+ definitions['dns_group']['lines'].append(dim)
return order, definitions
else:
order = [''.join(['dns_', ns.replace('.', '_')]) for ns in server_list]
definitions = dict()
+
for ns in server_list:
definitions[''.join(['dns_', ns.replace('.', '_')])] = {
'options': [None, 'DNS Response Time', 'ms', ns, 'dns_query_time.response_time', 'area'],
'lines': [
- ['_'.join(['ns', ns.replace('.', '_')]), ns, 'absolute']
+ [
+ '_'.join(['ns', ns.replace('.', '_')]),
+ ns,
+ 'absolute',
+ ]
]
}
return order, definitions
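
Both branches above derive chart and dimension ids from the server address by swapping dots for underscores; a tiny demo with a hypothetical server:

```python
ns = '8.8.8.8'  # hypothetical entry from `server_list`
print('_'.join(['ns', ns.replace('.', '_')]))   # -> ns_8_8_8_8 (dimension id)
print(''.join(['dns_', ns.replace('.', '_')]))  # -> dns_8_8_8_8 (per-server chart id)
```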
diff --git a/collectors/python.d.plugin/dns_query_time/dns_query_time.conf b/collectors/python.d.plugin/dns_query_time/dns_query_time.conf
index d32c6db83..9c0838ee2 100644
--- a/collectors/python.d.plugin/dns_query_time/dns_query_time.conf
+++ b/collectors/python.d.plugin/dns_query_time/dns_query_time.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply a penalty to update_every in case of failures.
+# The penalty increases after every 5 consecutive failed updates; the maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,7 +56,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# In addition to the above, dns_query_time also supports the following:
diff --git a/collectors/python.d.plugin/dnsdist/README.md b/collectors/python.d.plugin/dnsdist/README.md
index b646ae27c..c7647a116 100644
--- a/collectors/python.d.plugin/dnsdist/README.md
+++ b/collectors/python.d.plugin/dnsdist/README.md
@@ -52,3 +52,5 @@ localhost:
```
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fdnsdist%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/dnsdist/dnsdist.chart.py b/collectors/python.d.plugin/dnsdist/dnsdist.chart.py
index 1aff3f803..d60858659 100644
--- a/collectors/python.d.plugin/dnsdist/dnsdist.chart.py
+++ b/collectors/python.d.plugin/dnsdist/dnsdist.chart.py
@@ -90,9 +90,9 @@ CHARTS = {
]
},
'servermem': {
- 'options': [None, 'DNSDIST server memory utilization', 'MB', 'server', 'dnsdist.servermem', 'area'],
+ 'options': [None, 'DNSDIST server memory utilization', 'MiB', 'server', 'dnsdist.servermem', 'area'],
'lines': [
- ['real-memory-usage', 'memory usage', 'absolute', 1, 1048576]
+ ['real-memory-usage', 'memory usage', 'absolute', 1, 1 << 20]
]
},
'query_latency': {
diff --git a/collectors/python.d.plugin/dnsdist/dnsdist.conf b/collectors/python.d.plugin/dnsdist/dnsdist.conf
index aec58b8e1..324d65aaf 100644
--- a/collectors/python.d.plugin/dnsdist/dnsdist.conf
+++ b/collectors/python.d.plugin/dnsdist/dnsdist.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-#retries: 600000
+# penalty indicates whether to apply a penalty to update_every in case of failures.
+# The penalty increases after every 5 consecutive failed updates; the maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,7 +56,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
#
diff --git a/collectors/python.d.plugin/dockerd/README.md b/collectors/python.d.plugin/dockerd/README.md
index d3f603808..b09a5d59f 100644
--- a/collectors/python.d.plugin/dockerd/README.md
+++ b/collectors/python.d.plugin/dockerd/README.md
@@ -3,7 +3,7 @@
This module monitors Docker health metrics.
**Requirement:**
-* `docker` package
+* `docker` package, version 3.2.0 or newer
The following charts are drawn:
@@ -24,3 +24,5 @@ Following charts are drawn:
```
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fdockerd%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/dockerd/dockerd.chart.py b/collectors/python.d.plugin/dockerd/dockerd.chart.py
index a0d3d7e65..8bd45df9e 100644
--- a/collectors/python.d.plugin/dockerd/dockerd.chart.py
+++ b/collectors/python.d.plugin/dockerd/dockerd.chart.py
@@ -10,10 +10,8 @@ except ImportError:
from bases.FrameworkServices.SimpleService import SimpleService
-# default module values (can be overridden per job in `config`)
-# update_every = 1
-priority = 60000
-retries = 60
+from distutils.version import StrictVersion
+
# charts order (can be overridden if you want less charts, or different order)
ORDER = [
@@ -24,21 +22,21 @@ ORDER = [
CHARTS = {
'running_containers': {
- 'options': [None, 'Number of running containers', 'running containers', 'running containers',
+ 'options': [None, 'Number of running containers', 'containers', 'running containers',
'docker.running_containers', 'line'],
'lines': [
['running_containers', 'running']
]
},
'healthy_containers': {
- 'options': [None, 'Number of healthy containers', 'healthy containers', 'healthy containers',
+ 'options': [None, 'Number of healthy containers', 'containers', 'healthy containers',
'docker.healthy_containers', 'line'],
'lines': [
['healthy_containers', 'healthy']
]
},
'unhealthy_containers': {
- 'options': [None, 'Number of unhealthy containers', 'unhealthy containers', 'unhealthy containers',
+ 'options': [None, 'Number of unhealthy containers', 'containers', 'unhealthy containers',
'docker.unhealthy_containers', 'line'],
'lines': [
['unhealthy_containers', 'unhealthy']
@@ -47,15 +45,26 @@ CHARTS = {
}
+MIN_REQUIRED_VERSION = '3.2.0'
+
+
class Service(SimpleService):
def __init__(self, configuration=None, name=None):
SimpleService.__init__(self, configuration=configuration, name=name)
self.order = ORDER
self.definitions = CHARTS
+ self.client = None
def check(self):
if not HAS_DOCKER:
- self.error('\'docker\' package is needed to use docker.chart.py')
+ self.error("'docker' package is needed to use dockerd module")
+ return False
+
+ if StrictVersion(docker.__version__) < StrictVersion(MIN_REQUIRED_VERSION):
+ self.error("installed 'docker' package version {0}, minimum required version {1}, please upgrade".format(
+ docker.__version__,
+ MIN_REQUIRED_VERSION,
+ ))
return False
self.client = docker.DockerClient(base_url=self.configuration.get('url', 'unix://var/run/docker.sock'))
@@ -70,6 +79,7 @@ class Service(SimpleService):
def get_data(self):
data = dict()
+
data['running_containers'] = len(self.client.containers.list(sparse=True))
data['healthy_containers'] = len(self.client.containers.list(filters={'health': 'healthy'}, sparse=True))
data['unhealthy_containers'] = len(self.client.containers.list(filters={'health': 'unhealthy'}, sparse=True))
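
The new version gate in `check()` relies on `distutils.version.StrictVersion`, which compares release components numerically instead of lexically; a short demo:

```python
from distutils.version import StrictVersion

MIN_REQUIRED_VERSION = '3.2.0'  # the constant introduced above

print(StrictVersion('3.1.4') < StrictVersion(MIN_REQUIRED_VERSION))   # True: too old
print(StrictVersion('3.10.0') < StrictVersion(MIN_REQUIRED_VERSION))  # False: 10 > 2
print('3.10.0' < '3.2.0')  # True: plain string comparison gets this wrong
```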
diff --git a/collectors/python.d.plugin/dockerd/dockerd.conf b/collectors/python.d.plugin/dockerd/dockerd.conf
index 5ef17a1f5..96c8ee0d8 100644
--- a/collectors/python.d.plugin/dockerd/dockerd.conf
+++ b/collectors/python.d.plugin/dockerd/dockerd.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply a penalty to update_every in case of failures.
+# The penalty increases after every 5 consecutive failed updates; the maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,7 +56,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 10 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# In addition to the above, the dockerd plugin also supports the following:
diff --git a/collectors/python.d.plugin/dovecot/README.md b/collectors/python.d.plugin/dovecot/README.md
index 50950ecc1..de8788b36 100644
--- a/collectors/python.d.plugin/dovecot/README.md
+++ b/collectors/python.d.plugin/dovecot/README.md
@@ -1,9 +1,13 @@
# dovecot
This module provides statistics from the Dovecot server.
+
Statistics are taken from the Dovecot socket by executing the `EXPORT global` command.
More information about Dovecot stats can be found on the [project wiki page](http://wiki2.dovecot.org/Statistics).
+The module isn't compatible with the new statistics API (v2.3), but you can still use it with Dovecot v2.3
+by following the [upgrading steps](https://wiki2.dovecot.org/Upgrading/2.3).
+
**Requirement:**
Dovecot UNIX socket with R/W permissions for user netdata or Dovecot with configured TCP/IP socket.
@@ -71,3 +75,5 @@ localsocket:
If no configuration is given, the module will attempt to connect to Dovecot using the UNIX socket located at `/var/run/dovecot/stats`.
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fdovecot%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/dovecot/dovecot.chart.py b/collectors/python.d.plugin/dovecot/dovecot.chart.py
index 7fee3bfac..be1fa53d5 100644
--- a/collectors/python.d.plugin/dovecot/dovecot.chart.py
+++ b/collectors/python.d.plugin/dovecot/dovecot.chart.py
@@ -5,12 +5,10 @@
from bases.FrameworkServices.SocketService import SocketService
-# default module values (can be overridden per job in `config`)
-# update_every = 2
-priority = 60000
-retries = 60
-# charts order (can be overridden if you want less charts, or different order)
+UNIX_SOCKET = '/var/run/dovecot/stats'
+
+
ORDER = [
'sessions',
'logins',
@@ -53,14 +51,14 @@ CHARTS = {
]
},
'context_switches': {
- 'options': [None, 'Dovecot Context Switches', '', 'context switches', 'dovecot.context_switches', 'line'],
+ 'options': [None, 'Dovecot Context Switches', 'switches', 'context switches', 'dovecot.context_switches', 'line'],
'lines': [
['vol_cs', 'voluntary', 'absolute'],
['invol_cs', 'involuntary', 'absolute']
]
},
'io': {
- 'options': [None, 'Dovecot Disk I/O', 'kilobytes/s', 'disk', 'dovecot.io', 'area'],
+ 'options': [None, 'Dovecot Disk I/O', 'KiB/s', 'disk', 'dovecot.io', 'area'],
'lines': [
['disk_input', 'read', 'incremental', 1, 1024],
['disk_output', 'write', 'incremental', -1, 1024]
@@ -69,8 +67,8 @@ CHARTS = {
'net': {
'options': [None, 'Dovecot Network Bandwidth', 'kilobits/s', 'network', 'dovecot.net', 'area'],
'lines': [
- ['read_bytes', 'read', 'incremental', 8, 1024],
- ['write_bytes', 'write', 'incremental', -8, 1024]
+ ['read_bytes', 'read', 'incremental', 8, 1000],
+ ['write_bytes', 'write', 'incremental', -8, 1000]
]
},
'syscalls': {
@@ -113,13 +111,12 @@ CHARTS = {
class Service(SocketService):
def __init__(self, configuration=None, name=None):
SocketService.__init__(self, configuration=configuration, name=name)
- self.request = 'EXPORT\tglobal\r\n'
- self.host = None # localhost
- self.port = None # 24242
- # self._keep_alive = True
- self.unix_socket = '/var/run/dovecot/stats'
self.order = ORDER
self.definitions = CHARTS
+ self.host = None # localhost
+ self.port = None # 24242
+ self.unix_socket = UNIX_SOCKET
+ self.request = 'EXPORT\tglobal\r\n'
def _get_data(self):
"""
diff --git a/collectors/python.d.plugin/dovecot/dovecot.conf b/collectors/python.d.plugin/dovecot/dovecot.conf
index 56c394991..451dbc9ac 100644
--- a/collectors/python.d.plugin/dovecot/dovecot.conf
+++ b/collectors/python.d.plugin/dovecot/dovecot.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply a penalty to update_every in case of failures.
+# The penalty increases after every 5 consecutive failed updates; the maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,7 +56,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# In addition to the above, dovecot also supports the following:
@@ -94,3 +92,7 @@ localsocket:
name : 'local'
socket : '/var/run/dovecot/stats'
+localsocket_old:
+ name : 'local'
+ socket : '/var/run/dovecot/old-stats'
+
diff --git a/collectors/python.d.plugin/elasticsearch/README.md b/collectors/python.d.plugin/elasticsearch/README.md
index 7ce6c0b74..6d25b02d1 100644
--- a/collectors/python.d.plugin/elasticsearch/README.md
+++ b/collectors/python.d.plugin/elasticsearch/README.md
@@ -58,3 +58,5 @@ local:
If no configuration is given, the module will fail to run.
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Felasticsearch%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/elasticsearch/elasticsearch.chart.py b/collectors/python.d.plugin/elasticsearch/elasticsearch.chart.py
index 3f431f6e0..f1ea03fe8 100644
--- a/collectors/python.d.plugin/elasticsearch/elasticsearch.chart.py
+++ b/collectors/python.d.plugin/elasticsearch/elasticsearch.chart.py
@@ -159,17 +159,20 @@ ORDER = [
'fielddata_evictions_tripped',
'cluster_health_status',
'cluster_health_nodes',
+ 'cluster_health_pending_tasks',
+ 'cluster_health_flight_fetch',
'cluster_health_shards',
'cluster_stats_nodes',
'cluster_stats_query_cache',
'cluster_stats_docs',
'cluster_stats_store',
- 'cluster_stats_indices_shards',
+ 'cluster_stats_indices',
+ 'cluster_stats_shards_total',
]
CHARTS = {
'search_performance_total': {
- 'options': [None, 'Queries And Fetches', 'number of', 'search performance',
+ 'options': [None, 'Queries And Fetches', 'events/s', 'search performance',
'elastic.search_performance_total', 'stacked'],
'lines': [
['indices_search_query_total', 'queries', 'incremental'],
@@ -177,7 +180,7 @@ CHARTS = {
]
},
'search_performance_current': {
- 'options': [None, 'Queries and Fetches In Progress', 'number of', 'search performance',
+ 'options': [None, 'Queries and Fetches In Progress', 'events', 'search performance',
'elastic.search_performance_current', 'stacked'],
'lines': [
['indices_search_query_current', 'queries', 'absolute'],
@@ -193,14 +196,14 @@ CHARTS = {
]
},
'search_latency': {
- 'options': [None, 'Query And Fetch Latency', 'ms', 'search performance', 'elastic.search_latency', 'stacked'],
+ 'options': [None, 'Query And Fetch Latency', 'milliseconds', 'search performance', 'elastic.search_latency', 'stacked'],
'lines': [
['query_latency', 'query', 'absolute', 1, 1000],
['fetch_latency', 'fetch', 'absolute', 1, 1000]
]
},
'index_performance_total': {
- 'options': [None, 'Indexed Documents, Index Refreshes, Index Flushes To Disk', 'number of',
+ 'options': [None, 'Indexed Documents, Index Refreshes, Index Flushes To Disk', 'events/s',
'indexing performance', 'elastic.index_performance_total', 'stacked'],
'lines': [
['indices_indexing_index_total', 'indexed', 'incremental'],
@@ -225,7 +228,7 @@ CHARTS = {
]
},
'index_latency': {
- 'options': [None, 'Indexing And Flushing Latency', 'ms', 'indexing performance',
+ 'options': [None, 'Indexing And Flushing Latency', 'milliseconds', 'indexing performance',
'elastic.index_latency', 'stacked'],
'lines': [
['indexing_latency', 'indexing', 'absolute', 1, 1000],
@@ -233,7 +236,7 @@ CHARTS = {
]
},
'index_translog_operations': {
- 'options': [None, 'Translog Operations', 'count', 'translog',
+ 'options': [None, 'Translog Operations', 'operations', 'translog',
'elastic.index_translog_operations', 'area'],
'lines': [
['indices_translog_operations', 'total', 'absolute'],
@@ -241,7 +244,7 @@ CHARTS = {
]
},
'index_translog_size': {
- 'options': [None, 'Translog Size', 'MB', 'translog',
+ 'options': [None, 'Translog Size', 'MiB', 'translog',
'elastic.index_translog_size', 'area'],
'lines': [
['indices_translog_size_in_bytes', 'total', 'absolute', 1, 1048576],
@@ -249,21 +252,21 @@ CHARTS = {
]
},
'index_segments_count': {
- 'options': [None, 'Total Number Of Indices Segments', 'count', 'indices segments',
+ 'options': [None, 'Total Number Of Indices Segments', 'segments', 'indices segments',
'elastic.index_segments_count', 'line'],
'lines': [
['indices_segments_count', 'segments', 'absolute']
]
},
'index_segments_memory_writer': {
- 'options': [None, 'Index Writer Memory Usage', 'MB', 'indices segments',
+ 'options': [None, 'Index Writer Memory Usage', 'MiB', 'indices segments',
'elastic.index_segments_memory_writer', 'area'],
'lines': [
['indices_segments_index_writer_memory_in_bytes', 'total', 'absolute', 1, 1048576]
]
},
'index_segments_memory': {
- 'options': [None, 'Indices Segments Memory Usage', 'MB', 'indices segments',
+ 'options': [None, 'Indices Segments Memory Usage', 'MiB', 'indices segments',
'elastic.index_segments_memory', 'stacked'],
'lines': [
['indices_segments_terms_memory_in_bytes', 'terms', 'absolute', 1, 1048576],
@@ -277,14 +280,14 @@ CHARTS = {
]
},
'jvm_mem_heap': {
- 'options': [None, 'JVM Heap Percentage Currently in Use', 'percent', 'memory usage and gc',
+ 'options': [None, 'JVM Heap Percentage Currently in Use', 'percentage', 'memory usage and gc',
'elastic.jvm_heap', 'area'],
'lines': [
['jvm_mem_heap_used_percent', 'inuse', 'absolute']
]
},
'jvm_mem_heap_bytes': {
- 'options': [None, 'JVM Heap Commit And Usage', 'MB', 'memory usage and gc',
+ 'options': [None, 'JVM Heap Commit And Usage', 'MiB', 'memory usage and gc',
'elastic.jvm_heap_bytes', 'area'],
'lines': [
['jvm_mem_heap_committed_in_bytes', 'committed', 'absolute', 1, 1048576],
@@ -292,7 +295,7 @@ CHARTS = {
]
},
'jvm_buffer_pool_count': {
- 'options': [None, 'JVM Buffers', 'count', 'memory usage and gc',
+ 'options': [None, 'JVM Buffers', 'pools', 'memory usage and gc',
'elastic.jvm_buffer_pool_count', 'line'],
'lines': [
['jvm_buffer_pools_direct_count', 'direct', 'absolute'],
@@ -300,7 +303,7 @@ CHARTS = {
]
},
'jvm_direct_buffers_memory': {
- 'options': [None, 'JVM Direct Buffers Memory', 'MB', 'memory usage and gc',
+ 'options': [None, 'JVM Direct Buffers Memory', 'MiB', 'memory usage and gc',
'elastic.jvm_direct_buffers_memory', 'area'],
'lines': [
['jvm_buffer_pools_direct_used_in_bytes', 'used', 'absolute', 1, 1048576],
@@ -308,7 +311,7 @@ CHARTS = {
]
},
'jvm_mapped_buffers_memory': {
- 'options': [None, 'JVM Mapped Buffers Memory', 'MB', 'memory usage and gc',
+ 'options': [None, 'JVM Mapped Buffers Memory', 'MiB', 'memory usage and gc',
'elastic.jvm_mapped_buffers_memory', 'area'],
'lines': [
['jvm_buffer_pools_mapped_used_in_bytes', 'used', 'absolute', 1, 1048576],
@@ -316,14 +319,14 @@ CHARTS = {
]
},
'jvm_gc_count': {
- 'options': [None, 'Garbage Collections', 'counts', 'memory usage and gc', 'elastic.gc_count', 'stacked'],
+ 'options': [None, 'Garbage Collections', 'events/s', 'memory usage and gc', 'elastic.gc_count', 'stacked'],
'lines': [
['jvm_gc_collectors_young_collection_count', 'young', 'incremental'],
['jvm_gc_collectors_old_collection_count', 'old', 'incremental']
]
},
'jvm_gc_time': {
- 'options': [None, 'Time Spent On Garbage Collections', 'ms', 'memory usage and gc',
+ 'options': [None, 'Time Spent On Garbage Collections', 'milliseconds', 'memory usage and gc',
'elastic.gc_time', 'stacked'],
'lines': [
['jvm_gc_collectors_young_collection_time_in_millis', 'young', 'incremental'],
@@ -353,13 +356,13 @@ CHARTS = {
]
},
'fielddata_cache': {
- 'options': [None, 'Fielddata Cache', 'MB', 'fielddata cache', 'elastic.fielddata_cache', 'line'],
+ 'options': [None, 'Fielddata Cache', 'MiB', 'fielddata cache', 'elastic.fielddata_cache', 'line'],
'lines': [
['indices_fielddata_memory_size_in_bytes', 'cache', 'absolute', 1, 1048576]
]
},
'fielddata_evictions_tripped': {
- 'options': [None, 'Fielddata Evictions And Circuit Breaker Tripped Count', 'number of events',
+ 'options': [None, 'Fielddata Evictions And Circuit Breaker Tripped Count', 'events/s',
'fielddata cache', 'elastic.fielddata_evictions_tripped', 'line'],
'lines': [
['indices_fielddata_evictions', 'evictions', 'incremental'],
@@ -367,12 +370,24 @@ CHARTS = {
]
},
'cluster_health_nodes': {
- 'options': [None, 'Nodes And Tasks Statistics', 'units', 'cluster health API',
+ 'options': [None, 'Nodes Statistics', 'nodes', 'cluster health API',
'elastic.cluster_health_nodes', 'stacked'],
'lines': [
['number_of_nodes', 'nodes', 'absolute'],
['number_of_data_nodes', 'data_nodes', 'absolute'],
+ ]
+ },
+ 'cluster_health_pending_tasks': {
+ 'options': [None, 'Tasks Statistics', 'tasks', 'cluster health API',
+ 'elastic.cluster_health_pending_tasks', 'line'],
+ 'lines': [
['number_of_pending_tasks', 'pending_tasks', 'absolute'],
+ ]
+ },
+ 'cluster_health_flight_fetch': {
+ 'options': [None, 'In Flight Fetches Statistics', 'fetches', 'cluster health API',
+ 'elastic.cluster_health_flight_fetch', 'line'],
+ 'lines': [
['number_of_in_flight_fetch', 'in_flight_fetch', 'absolute']
]
},
@@ -420,24 +435,30 @@ CHARTS = {
]
},
'cluster_stats_docs': {
- 'options': [None, 'Docs Statistics', 'count', 'cluster stats API',
+ 'options': [None, 'Docs Statistics', 'docs', 'cluster stats API',
'elastic.cluster_docs', 'line'],
'lines': [
['indices_docs_count', 'docs', 'absolute']
]
},
'cluster_stats_store': {
- 'options': [None, 'Store Statistics', 'MB', 'cluster stats API',
+ 'options': [None, 'Store Statistics', 'MiB', 'cluster stats API',
'elastic.cluster_store', 'line'],
'lines': [
['indices_store_size_in_bytes', 'size', 'absolute', 1, 1048576]
]
},
- 'cluster_stats_indices_shards': {
- 'options': [None, 'Indices And Shards Statistics', 'count', 'cluster stats API',
- 'elastic.cluster_indices_shards', 'stacked'],
+ 'cluster_stats_indices': {
+ 'options': [None, 'Indices Statistics', 'indices', 'cluster stats API',
+ 'elastic.cluster_indices', 'line'],
'lines': [
['indices_count', 'indices', 'absolute'],
+ ]
+ },
+ 'cluster_stats_shards_total': {
+ 'options': [None, 'Total Shards Statistics', 'shards', 'cluster stats API',
+ 'elastic.cluster_shards_total', 'line'],
+ 'lines': [
['indices_shards_total', 'shards', 'absolute']
]
},
@@ -450,7 +471,7 @@ CHARTS = {
]
},
'host_metrics_file_descriptors': {
- 'options': [None, 'Available File Descriptors In Percent', 'percent', 'host metrics',
+ 'options': [None, 'Available File Descriptors In Percent', 'percentage', 'host metrics',
'elastic.host_descriptors', 'area'],
'lines': [
['file_descriptors_used', 'used', 'absolute', 1, 10]
@@ -473,9 +494,11 @@ class Service(UrlService):
self.definitions = CHARTS
self.host = self.configuration.get('host')
self.port = self.configuration.get('port', 9200)
- self.url = '{scheme}://{host}:{port}'.format(scheme=self.configuration.get('scheme', 'http'),
- host=self.host,
- port=self.port)
+ self.url = '{scheme}://{host}:{port}'.format(
+ scheme=self.configuration.get('scheme', 'http'),
+ host=self.host,
+ port=self.port,
+ )
self.latency = dict()
self.methods = list()
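
The reflowed constructor builds the base URL from the job config with `http` and `9200` as fallbacks; the same expression standalone (the host value is hypothetical):

```python
configuration = {'host': '198.51.100.7'}  # hypothetical job config
url = '{scheme}://{host}:{port}'.format(
    scheme=configuration.get('scheme', 'http'),
    host=configuration.get('host'),
    port=configuration.get('port', 9200),
)
print(url)  # -> http://198.51.100.7:9200
```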
diff --git a/collectors/python.d.plugin/elasticsearch/elasticsearch.conf b/collectors/python.d.plugin/elasticsearch/elasticsearch.conf
index 213843bf9..e5c97e7ef 100644
--- a/collectors/python.d.plugin/elasticsearch/elasticsearch.conf
+++ b/collectors/python.d.plugin/elasticsearch/elasticsearch.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply a penalty to update_every in case of failures.
+# The penalty increases after every 5 consecutive failed updates; the maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,7 +56,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# In addition to the above, the elasticsearch plugin also supports the following:
diff --git a/collectors/python.d.plugin/example/README.md b/collectors/python.d.plugin/example/README.md
index f9f314ac4..d65f8cf90 100644
--- a/collectors/python.d.plugin/example/README.md
+++ b/collectors/python.d.plugin/example/README.md
@@ -1 +1,5 @@
-An example python data collection module.
\ No newline at end of file
+# example
+
+An example python data collection module.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fexample%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/example/example.chart.py b/collectors/python.d.plugin/example/example.chart.py
index 85defa4d1..cc8c18759 100644
--- a/collectors/python.d.plugin/example/example.chart.py
+++ b/collectors/python.d.plugin/example/example.chart.py
@@ -7,12 +7,13 @@ from random import SystemRandom
from bases.FrameworkServices.SimpleService import SimpleService
-# default module values
-# update_every = 4
+
priority = 90000
-retries = 60
-ORDER = ['random']
+ORDER = [
+ 'random',
+]
+
CHARTS = {
'random': {
'options': [None, 'A random number', 'random number', 'random', 'random', 'line'],
diff --git a/collectors/python.d.plugin/example/example.conf b/collectors/python.d.plugin/example/example.conf
index e7fed9b50..3d8435173 100644
--- a/collectors/python.d.plugin/example/example.conf
+++ b/collectors/python.d.plugin/example/example.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply a penalty to update_every in case of failures.
+# The penalty increases after every 5 consecutive failed updates; the maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,7 +56,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# In addition to the above, example also supports the following:
diff --git a/collectors/python.d.plugin/exim/README.md b/collectors/python.d.plugin/exim/README.md
index b9a62cad9..1cebb27ff 100644
--- a/collectors/python.d.plugin/exim/README.md
+++ b/collectors/python.d.plugin/exim/README.md
@@ -11,3 +11,5 @@ It produces only one chart:
Configuration is not needed.
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fexim%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/exim/exim.chart.py b/collectors/python.d.plugin/exim/exim.chart.py
index 5431dd46b..68b7b5cfb 100644
--- a/collectors/python.d.plugin/exim/exim.chart.py
+++ b/collectors/python.d.plugin/exim/exim.chart.py
@@ -5,13 +5,12 @@
from bases.FrameworkServices.ExecutableService import ExecutableService
-# default module values (can be overridden per job in `config`)
-# update_every = 2
-priority = 60000
-retries = 60
-# charts order (can be overridden if you want less charts, or different order)
-ORDER = ['qemails']
+EXIM_COMMAND = 'exim -bpc'
+
+ORDER = [
+ 'qemails',
+]
CHARTS = {
'qemails': {
@@ -26,9 +25,9 @@ CHARTS = {
class Service(ExecutableService):
def __init__(self, configuration=None, name=None):
ExecutableService.__init__(self, configuration=configuration, name=name)
- self.command = 'exim -bpc'
self.order = ORDER
self.definitions = CHARTS
+ self.command = EXIM_COMMAND
def _get_data(self):
"""
diff --git a/collectors/python.d.plugin/exim/exim.conf b/collectors/python.d.plugin/exim/exim.conf
index 2add7b2cb..3b7e65922 100644
--- a/collectors/python.d.plugin/exim/exim.conf
+++ b/collectors/python.d.plugin/exim/exim.conf
@@ -28,11 +28,9 @@ update_every: 10
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply a penalty to update_every in case of failures.
+# The penalty increases after every 5 consecutive failed updates; the maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -59,7 +57,7 @@ update_every: 10
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# In addition to the above, exim also supports the following:
diff --git a/collectors/python.d.plugin/fail2ban/README.md b/collectors/python.d.plugin/fail2ban/README.md
index 2ab021965..26511986a 100644
--- a/collectors/python.d.plugin/fail2ban/README.md
+++ b/collectors/python.d.plugin/fail2ban/README.md
@@ -21,3 +21,5 @@ If no configuration is given, module will attempt to read log file at `/var/log/
If the conf file is not found, the default jail is `ssh`.
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Ffail2ban%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/fail2ban/fail2ban.chart.py b/collectors/python.d.plugin/fail2ban/fail2ban.chart.py
index 954689008..dfd2feab7 100644
--- a/collectors/python.d.plugin/fail2ban/fail2ban.chart.py
+++ b/collectors/python.d.plugin/fail2ban/fail2ban.chart.py
@@ -35,8 +35,19 @@ def charts(jails):
},
}
for jail in jails:
- ch[ORDER[0]]['lines'].append([jail, jail, 'incremental'])
- ch[ORDER[1]]['lines'].append(['{0}_in_jail'.format(jail), jail, 'absolute'])
+ dim = [
+ jail,
+ jail,
+ 'incremental',
+ ]
+ ch[ORDER[0]]['lines'].append(dim)
+
+ dim = [
+ '{0}_in_jail'.format(jail),
+ jail,
+ 'absolute',
+ ]
+ ch[ORDER[1]]['lines'].append(dim)
return ch
@@ -46,7 +57,8 @@ RE_JAILS = re.compile(r'\[([a-zA-Z0-9_-]+)\][^\[\]]+?enabled\s+= (true|false)')
# Example:
# 2018-09-12 11:45:53,715 fail2ban.actions[25029]: WARNING [ssh] Unban 195.201.88.33
# 2018-09-12 11:45:58,727 fail2ban.actions[25029]: WARNING [ssh] Ban 217.59.246.27
-RE_DATA = re.compile(r'\[(?P<jail>[A-Za-z-_0-9]+)\] (?P<action>Unban|Ban) (?P<ip>[a-f0-9.:]+)')
+# 2018-09-12 11:45:58,727 fail2ban.actions[25029]: WARNING [ssh] Restore Ban 217.59.246.27
+RE_DATA = re.compile(r'\[(?P<jail>[A-Za-z-_0-9]+)\] (?P<action>Unban|Ban|Restore Ban) (?P<ip>[a-f0-9.:]+)')
DEFAULT_JAILS = [
'ssh',
@@ -58,12 +70,10 @@ class Service(LogService):
LogService.__init__(self, configuration=configuration, name=name)
self.order = ORDER
self.definitions = dict()
-
self.log_path = self.configuration.get('log_path', '/var/log/fail2ban.log')
self.conf_path = self.configuration.get('conf_path', '/etc/fail2ban/jail.local')
self.conf_dir = self.configuration.get('conf_dir', '/etc/fail2ban/jail.d/')
self.exclude = self.configuration.get('exclude', str())
-
self.monitoring_jails = list()
self.banned_ips = defaultdict(set)
self.data = dict()
@@ -116,7 +126,7 @@ class Service(LogService):
jail, action, ip = match['jail'], match['action'], match['ip']
- if action == 'Ban':
+ if action == 'Ban' or action == 'Restore Ban':
self.data[jail] += 1
if ip not in self.banned_ips[jail]:
self.banned_ips[jail].add(ip)
@@ -126,7 +136,7 @@ class Service(LogService):
self.banned_ips[jail].remove(ip)
self.data['{0}_in_jail'.format(jail)] -= 1
- return self.data
+ return self.data
def get_files_from_dir(self, dir_path, suffix):
"""
diff --git a/collectors/python.d.plugin/fail2ban/fail2ban.conf b/collectors/python.d.plugin/fail2ban/fail2ban.conf
index 60ca87231..a36436b51 100644
--- a/collectors/python.d.plugin/fail2ban/fail2ban.conf
+++ b/collectors/python.d.plugin/fail2ban/fail2ban.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,7 +56,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, fail2ban also supports the following:
diff --git a/collectors/python.d.plugin/freeradius/README.md b/collectors/python.d.plugin/freeradius/README.md
index e5fe88ec3..00eb50dff 100644
--- a/collectors/python.d.plugin/freeradius/README.md
+++ b/collectors/python.d.plugin/freeradius/README.md
@@ -68,3 +68,5 @@ To do this, create a link from the sites-enabled directory to the status file in
and restart/reload your FREERADIUS server.
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Ffreeradius%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/freeradius/freeradius.chart.py b/collectors/python.d.plugin/freeradius/freeradius.chart.py
index 3126831b7..8563660cc 100644
--- a/collectors/python.d.plugin/freeradius/freeradius.chart.py
+++ b/collectors/python.d.plugin/freeradius/freeradius.chart.py
@@ -3,25 +3,37 @@
# Author: l2isbad
# SPDX-License-Identifier: GPL-3.0-or-later
-from re import findall
+import re
from subprocess import Popen, PIPE
from bases.collection import find_binary
from bases.FrameworkServices.SimpleService import SimpleService
-# default module values (can be overridden per job in `config`)
-priority = 60000
-retries = 60
update_every = 15
+PARSER = re.compile(r'((?<=-)[AP][a-zA-Z-]+) = (\d+)')
+
RADIUS_MSG = 'Message-Authenticator = 0x00, FreeRADIUS-Statistics-Type = 15, Response-Packet-Type = Access-Accept'
-# charts order (can be overridden if you want less charts, or different order)
-ORDER = ['authentication', 'accounting', 'proxy-auth', 'proxy-acct']
+RADCLIENT_RETRIES = 1
+RADCLIENT_TIMEOUT = 1
+
+DEFAULT_HOST = 'localhost'
+DEFAULT_PORT = 18121
+DEFAULT_DO_ACCT = False
+DEFAULT_DO_PROXY_AUTH = False
+DEFAULT_DO_PROXY_ACCT = False
+
+ORDER = [
+ 'authentication',
+ 'accounting',
+ 'proxy-auth',
+ 'proxy-acct',
+]
CHARTS = {
'authentication': {
- 'options': [None, 'Authentication', 'packets/s', 'Authentication', 'freerad.auth', 'line'],
+ 'options': [None, 'Authentication', 'packets/s', 'authentication', 'freerad.auth', 'line'],
'lines': [
['access-accepts', None, 'incremental'],
['access-rejects', None, 'incremental'],
@@ -33,7 +45,7 @@ CHARTS = {
]
},
'accounting': {
- 'options': [None, 'Accounting', 'packets/s', 'Accounting', 'freerad.acct', 'line'],
+ 'options': [None, 'Accounting', 'packets/s', 'accounting', 'freerad.acct', 'line'],
'lines': [
['accounting-requests', 'requests', 'incremental'],
['accounting-responses', 'responses', 'incremental'],
@@ -45,7 +57,7 @@ CHARTS = {
]
},
'proxy-auth': {
- 'options': [None, 'Proxy Authentication', 'packets/s', 'Authentication', 'freerad.proxy.auth', 'line'],
+ 'options': [None, 'Proxy Authentication', 'packets/s', 'authentication', 'freerad.proxy.auth', 'line'],
'lines': [
['proxy-access-accepts', 'access-accepts', 'incremental'],
['proxy-access-rejects', 'access-rejects', 'incremental'],
@@ -57,7 +69,7 @@ CHARTS = {
]
},
'proxy-acct': {
- 'options': [None, 'Proxy Accounting', 'packets/s', 'Accounting', 'freerad.proxy.acct', 'line'],
+ 'options': [None, 'Proxy Accounting', 'packets/s', 'accounting', 'freerad.proxy.acct', 'line'],
'lines': [
['proxy-accounting-requests', 'requests', 'incremental'],
['proxy-accounting-responses', 'responses', 'incremental'],
@@ -71,46 +83,80 @@ CHARTS = {
}
+def radclient_status(radclient, retries, timeout, host, port, secret):
+ # radclient -r 1 -t 1 -x 127.0.0.1:18121 status secret
+
+ return '{radclient} -r {num_retries} -t {timeout} -x {host}:{port} status {secret}'.format(
+ radclient=radclient,
+ num_retries=retries,
+ timeout=timeout,
+ host=host,
+ port=port,
+ secret=secret,
+ ).split()
+
+
class Service(SimpleService):
def __init__(self, configuration=None, name=None):
SimpleService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
self.definitions = CHARTS
- self.host = self.configuration.get('host', 'localhost')
- self.port = self.configuration.get('port', '18121')
+ self.host = self.configuration.get('host', DEFAULT_HOST)
+ self.port = self.configuration.get('port', DEFAULT_PORT)
self.secret = self.configuration.get('secret')
- self.acct = self.configuration.get('acct', False)
- self.proxy_auth = self.configuration.get('proxy_auth', False)
- self.proxy_acct = self.configuration.get('proxy_acct', False)
- chart_choice = [True, bool(self.acct), bool(self.proxy_auth), bool(self.proxy_acct)]
- self.order = [chart for chart, choice in zip(ORDER, chart_choice) if choice]
+ self.do_acct = self.configuration.get('acct', DEFAULT_DO_ACCT)
+ self.do_proxy_auth = self.configuration.get('proxy_auth', DEFAULT_DO_PROXY_AUTH)
+ self.do_proxy_acct = self.configuration.get('proxy_acct', DEFAULT_DO_PROXY_ACCT)
self.echo = find_binary('echo')
self.radclient = find_binary('radclient')
self.sub_echo = [self.echo, RADIUS_MSG]
- self.sub_radclient = [self.radclient, '-r', '1', '-t', '1', '-x',
- ':'.join([self.host, self.port]), 'status', self.secret]
+ self.sub_radclient = radclient_status(
+ self.radclient, RADCLIENT_RETRIES, RADCLIENT_TIMEOUT, self.host, self.port, self.secret,
+ )
def check(self):
- if not all([self.echo, self.radclient]):
- self.error('Can\'t locate "radclient" binary or binary is not executable by netdata')
+ if not self.radclient:
+ self.error("Can't locate 'radclient' binary or binary is not executable by netdata user")
return False
+
+ if not self.echo:
+ self.error("Can't locate 'echo' binary or binary is not executable by netdata user")
+ return None
+
if not self.secret:
- self.error('"secret" not set')
+ self.error("'secret' isn't set")
return None
- if self._get_raw_data():
- return True
- self.error('Request returned no data. Is server alive?')
- return False
+ if not self.get_raw_data():
+ self.error('Request returned no data. Is server alive?')
+ return False
- def _get_data(self):
+ if not self.do_acct:
+ self.order.remove('accounting')
+
+ if not self.do_proxy_auth:
+ self.order.remove('proxy-auth')
+
+ if not self.do_proxy_acct:
+ self.order.remove('proxy-acct')
+
+ return True
+
+ def get_data(self):
"""
Format data received from shell command
:return: dict
"""
- result = self._get_raw_data()
- return dict([(elem[0].lower(), int(elem[1])) for elem in findall(r'((?<=-)[AP][a-zA-Z-]+) = (\d+)', result)])
+ result = self.get_raw_data()
- def _get_raw_data(self):
+ if not result:
+ return None
+
+ return dict(
+ (key.lower(), int(value)) for key, value in PARSER.findall(result)
+ )
+
+ def get_raw_data(self):
"""
The following code is equivalent to
'echo "Message-Authenticator = 0x00, FreeRADIUS-Statistics-Type = 15, Response-Packet-Type = Access-Accept"
@@ -124,6 +170,8 @@ class Service(SimpleService):
raw_result = process_rad.communicate()[0]
except OSError:
return None
+
if process_rad.returncode == 0:
return raw_result.decode()
+
return None
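
The refactor above splits the radclient invocation (`radclient -r 1 -t 1 -x host:port status secret`) from the parsing. How `PARSER` turns a reply into the module's dict, with assumed attribute lines (real names depend on the server build):

```python
import re

PARSER = re.compile(r'((?<=-)[AP][a-zA-Z-]+) = (\d+)')

# assumed radclient reply fragment
reply = '''
FreeRADIUS-Total-Access-Requests = 3
FreeRADIUS-Total-Access-Accepts = 2
FreeRADIUS-Total-Proxy-Access-Requests = 0
'''

data = dict((key.lower(), int(value)) for key, value in PARSER.findall(reply))
print(data)
# {'access-requests': 3, 'access-accepts': 2, 'proxy-access-requests': 0}
```

The lookbehind keeps only the `A...`/`P...` tail of each attribute name, which is what the chart dimensions reference.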
diff --git a/collectors/python.d.plugin/freeradius/freeradius.conf b/collectors/python.d.plugin/freeradius/freeradius.conf
index 3336d4c49..74b273776 100644
--- a/collectors/python.d.plugin/freeradius/freeradius.conf
+++ b/collectors/python.d.plugin/freeradius/freeradius.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,7 +56,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, freeradius also supports the following:
diff --git a/collectors/python.d.plugin/go_expvar/README.md b/collectors/python.d.plugin/go_expvar/README.md
index e3356e1f1..3942a7be8 100644
--- a/collectors/python.d.plugin/go_expvar/README.md
+++ b/collectors/python.d.plugin/go_expvar/README.md
@@ -169,7 +169,6 @@ and its base `UrlService` class. These are:
update_every: 1 # the job's data collection frequency
priority: 60000 # the job's order on the dashboard
- retries: 60 # the job's number of restoration attempts
user: admin # use when the expvar endpoint is protected by HTTP Basic Auth
password: sekret # use when the expvar endpoint is protected by HTTP Basic Auth
@@ -274,3 +273,5 @@ The images below show how do the final charts in netdata look.
![Custom charts](https://cloud.githubusercontent.com/assets/15180106/26762051/62ae915e-493b-11e7-8518-bd25a3886650.png)
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fgo_expvar%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/go_expvar/go_expvar.chart.py b/collectors/python.d.plugin/go_expvar/go_expvar.chart.py
index 76e8b72ec..e82a87761 100644
--- a/collectors/python.d.plugin/go_expvar/go_expvar.chart.py
+++ b/collectors/python.d.plugin/go_expvar/go_expvar.chart.py
@@ -6,17 +6,24 @@
from __future__ import division
import json
+from collections import namedtuple
+
from bases.FrameworkServices.UrlService import UrlService
-# default module values (can be overridden per job in `config`)
-# update_every = 2
-priority = 60000
-retries = 60
+MEMSTATS_ORDER = [
+ 'memstats_heap',
+ 'memstats_stack',
+ 'memstats_mspan',
+ 'memstats_mcache',
+ 'memstats_sys',
+ 'memstats_live_objects',
+ 'memstats_gc_pauses',
+]
MEMSTATS_CHARTS = {
'memstats_heap': {
- 'options': ['heap', 'memory: size of heap memory structures', 'kB', 'memstats',
+ 'options': ['heap', 'memory: size of heap memory structures', 'KiB', 'memstats',
'expvar.memstats.heap', 'line'],
'lines': [
['memstats_heap_alloc', 'alloc', 'absolute', 1, 1024],
@@ -24,21 +31,21 @@ MEMSTATS_CHARTS = {
]
},
'memstats_stack': {
- 'options': ['stack', 'memory: size of stack memory structures', 'kB', 'memstats',
+ 'options': ['stack', 'memory: size of stack memory structures', 'KiB', 'memstats',
'expvar.memstats.stack', 'line'],
'lines': [
['memstats_stack_inuse', 'inuse', 'absolute', 1, 1024]
]
},
'memstats_mspan': {
- 'options': ['mspan', 'memory: size of mspan memory structures', 'kB', 'memstats',
+ 'options': ['mspan', 'memory: size of mspan memory structures', 'KiB', 'memstats',
'expvar.memstats.mspan', 'line'],
'lines': [
['memstats_mspan_inuse', 'inuse', 'absolute', 1, 1024]
]
},
'memstats_mcache': {
- 'options': ['mcache', 'memory: size of mcache memory structures', 'kB', 'memstats',
+ 'options': ['mcache', 'memory: size of mcache memory structures', 'KiB', 'memstats',
'expvar.memstats.mcache', 'line'],
'lines': [
['memstats_mcache_inuse', 'inuse', 'absolute', 1, 1024]
@@ -52,7 +59,7 @@ MEMSTATS_CHARTS = {
]
},
'memstats_sys': {
- 'options': ['sys', 'memory: size of reserved virtual address space', 'kB', 'memstats',
+ 'options': ['sys', 'memory: size of reserved virtual address space', 'KiB', 'memstats',
'expvar.memstats.sys', 'line'],
'lines': [
['memstats_sys', 'sys', 'absolute', 1, 1024]
@@ -67,8 +74,14 @@ MEMSTATS_CHARTS = {
}
}
-MEMSTATS_ORDER = ['memstats_heap', 'memstats_stack', 'memstats_mspan', 'memstats_mcache',
- 'memstats_sys', 'memstats_live_objects', 'memstats_gc_pauses']
+EXPVAR = namedtuple(
+ "EXPVAR",
+ [
+ "key",
+ "type",
+ "id",
+ ]
+)
def flatten(d, top='', sep='.'):
@@ -85,7 +98,6 @@ def flatten(d, top='', sep='.'):
class Service(UrlService):
def __init__(self, configuration=None, name=None):
UrlService.__init__(self, configuration=configuration, name=name)
-
# if memstats collection is enabled, add the charts and their order
if self.configuration.get('collect_memstats'):
self.definitions = dict(MEMSTATS_CHARTS)
@@ -118,7 +130,7 @@ class Service(UrlService):
def _parse_extra_charts_config(self, extra_charts_config):
# a place to store the expvar keys and their types
- self.expvars = dict()
+ self.expvars = list()
for chart in extra_charts_config:
@@ -156,11 +168,8 @@ class Service(UrlService):
self.info('Unsupported expvar_type "{0}". Must be "int" or "float"'.format(ev_type))
continue
- if ev_key in self.expvars:
- self.info('Duplicate expvar key {0}: skipping line.'.format(ev_key))
- continue
-
- self.expvars[ev_key] = (ev_type, line_id)
+ self.expvars.append(EXPVAR(ev_key, ev_type, line_id))
chart_dict['lines'].append(
[
@@ -197,21 +206,21 @@ class Service(UrlService):
# the rest of the data, thus avoiding needless iterating over the multiply nested memstats dict.
del (data['memstats'])
flattened = flatten(data)
- for k, v in flattened.items():
- ev = self.expvars.get(k)
- if not ev:
- # expvar is not defined in config, skip it
+
+ for ev in self.expvars:
+ v = flattened.get(ev.key)
+
+ if v is None:
continue
+
try:
- key_type, line_id = ev
- if key_type == 'int':
- expvars[line_id] = int(v)
- elif key_type == 'float':
- # if the value type is float, multiply it by 1000 and set line divisor to 1000
- expvars[line_id] = float(v) * 100
+ if ev.type == 'int':
+ expvars[ev.id] = int(v)
+ elif ev.type == 'float':
+ # scale floats by 100 so two decimal places survive integer collection
+ expvars[ev.id] = float(v) * 100
except ValueError:
- self.info('Failed to parse value for key {0} as {1}, ignoring key.'.format(k, key_type))
- del self.expvars[k]
+ self.info('Failed to parse value for key {0} as {1}, ignoring key.'.format(ev.key, ev.type))
+ return None
return expvars
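
The lookup rewrite above iterates the configured `EXPVAR` entries against the flattened JSON instead of scanning every flattened key. A self-contained sketch; `flatten()` is re-implemented here to match the module's signature:

```python
from collections import namedtuple

EXPVAR = namedtuple("EXPVAR", ["key", "type", "id"])

def flatten(d, top='', sep='.'):
    # turn nested dicts into dotted keys, e.g. {'a': {'b': 1}} -> {'a.b': 1}
    items = {}
    for key, value in d.items():
        nkey = top + sep + key if top else key
        if isinstance(value, dict):
            items.update(flatten(value, nkey, sep))
        else:
            items[nkey] = value
    return items

expvars_cfg = [EXPVAR(key='counters.requests', type='int', id='requests')]
flattened = flatten({'counters': {'requests': 42, 'errors': 1}})

data = {}
for ev in expvars_cfg:
    v = flattened.get(ev.key)
    if v is not None:
        data[ev.id] = int(v) if ev.type == 'int' else float(v) * 100
print(data)  # {'requests': 42}
```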
diff --git a/collectors/python.d.plugin/go_expvar/go_expvar.conf b/collectors/python.d.plugin/go_expvar/go_expvar.conf
index af89158aa..4b821cde9 100644
--- a/collectors/python.d.plugin/go_expvar/go_expvar.conf
+++ b/collectors/python.d.plugin/go_expvar/go_expvar.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -53,7 +51,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, this plugin also supports the following:
diff --git a/collectors/python.d.plugin/haproxy/README.md b/collectors/python.d.plugin/haproxy/README.md
index 4bff25670..4bd80a23d 100644
--- a/collectors/python.d.plugin/haproxy/README.md
+++ b/collectors/python.d.plugin/haproxy/README.md
@@ -47,3 +47,5 @@ via_socket:
If no configuration is given, module will fail to run.
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fhaproxy%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/haproxy/haproxy.chart.py b/collectors/python.d.plugin/haproxy/haproxy.chart.py
index a46689f50..d97d28d2b 100644
--- a/collectors/python.d.plugin/haproxy/haproxy.chart.py
+++ b/collectors/python.d.plugin/haproxy/haproxy.chart.py
@@ -14,12 +14,6 @@ except ImportError:
from bases.FrameworkServices.SocketService import SocketService
from bases.FrameworkServices.UrlService import UrlService
-
-# default module values (can be overridden per job in `config`)
-# update_every = 2
-priority = 60000
-retries = 60
-
# charts order (can be overridden if you want fewer charts, or different order)
ORDER = [
'fbin',
@@ -56,11 +50,11 @@ ORDER = [
CHARTS = {
'fbin': {
- 'options': [None, 'Kilobytes In', 'KB/s', 'frontend', 'haproxy_f.bin', 'line'],
+ 'options': [None, 'Kilobytes In', 'KiB/s', 'frontend', 'haproxy_f.bin', 'line'],
'lines': []
},
'fbout': {
- 'options': [None, 'Kilobytes Out', 'KB/s', 'frontend', 'haproxy_f.bout', 'line'],
+ 'options': [None, 'Kilobytes Out', 'KiB/s', 'frontend', 'haproxy_f.bout', 'line'],
'lines': []
},
'fscur': {
@@ -101,11 +95,11 @@ CHARTS = {
'lines': []
},
'bbin': {
- 'options': [None, 'Kilobytes In', 'KB/s', 'backend', 'haproxy_b.bin', 'line'],
+ 'options': [None, 'Kilobytes In', 'KiB/s', 'backend', 'haproxy_b.bin', 'line'],
'lines': []
},
'bbout': {
- 'options': [None, 'Kilobytes Out', 'KB/s', 'backend', 'haproxy_b.bout', 'line'],
+ 'options': [None, 'Kilobytes Out', 'KiB/s', 'backend', 'haproxy_b.bout', 'line'],
'lines': []
},
'bscur': {
@@ -146,41 +140,39 @@ CHARTS = {
'lines': []
},
'bqtime': {
- 'options': [None, 'The average queue time over the 1024 last requests', 'ms', 'backend',
+ 'options': [None, 'The average queue time over the 1024 last requests', 'milliseconds', 'backend',
'haproxy_b.qtime', 'line'],
'lines': []
},
'bctime': {
- 'options': [None, 'The average connect time over the 1024 last requests', 'ms', 'backend',
+ 'options': [None, 'The average connect time over the 1024 last requests', 'milliseconds', 'backend',
'haproxy_b.ctime', 'line'],
'lines': []
},
'brtime': {
- 'options': [None, 'The average response time over the 1024 last requests', 'ms', 'backend',
+ 'options': [None, 'The average response time over the 1024 last requests', 'milliseconds', 'backend',
'haproxy_b.rtime', 'line'],
'lines': []
},
'bttime': {
- 'options': [None, 'The average total session time over the 1024 last requests', 'ms', 'backend',
+ 'options': [None, 'The average total session time over the 1024 last requests', 'milliseconds', 'backend',
'haproxy_b.ttime', 'line'],
'lines': []
},
'health_sdown': {
- 'options': [None, 'Backend Servers In DOWN State', 'failed servers', 'health',
- 'haproxy_hs.down', 'line'],
+ 'options': [None, 'Backend Servers In DOWN State', 'failed servers', 'health', 'haproxy_hs.down', 'line'],
'lines': []
},
'health_sup': {
- 'options': [None, 'Backend Servers In UP State', 'health servers', 'health',
- 'haproxy_hs.up', 'line'],
+ 'options': [None, 'Backend Servers In UP State', 'health servers', 'health', 'haproxy_hs.up', 'line'],
'lines': []
},
'health_bdown': {
- 'options': [None, 'Is Backend Alive? 1 = DOWN', 'failed backend', 'health', 'haproxy_hb.down', 'line'],
+ 'options': [None, 'Is Backend Failed?', 'boolean', 'health', 'haproxy_hb.down', 'line'],
'lines': []
},
'health_idle': {
- 'options': [None, 'The Ratio Of Polling Time Vs Total Time', 'percent', 'health', 'haproxy.idle', 'line'],
+ 'options': [None, 'The Ratio Of Polling Time Vs Total Time', 'percentage', 'health', 'haproxy.idle', 'line'],
'lines': [
['idle', None, 'absolute']
]
@@ -214,6 +206,7 @@ REGEX = dict(url=re_compile(r'idle = (?P<idle>[0-9]+)'),
socket=re_compile(r'Idle_pct: (?P<idle>[0-9]+)'))
+# TODO: the code is unreadable
class Service(UrlService, SocketService):
def __init__(self, configuration=None, name=None):
if 'socket' in configuration:
diff --git a/collectors/python.d.plugin/haproxy/haproxy.conf b/collectors/python.d.plugin/haproxy/haproxy.conf
index a40dd76a5..10a0df3c3 100644
--- a/collectors/python.d.plugin/haproxy/haproxy.conf
+++ b/collectors/python.d.plugin/haproxy/haproxy.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,7 +56,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, haproxy also supports the following:
diff --git a/collectors/python.d.plugin/hddtemp/README.md b/collectors/python.d.plugin/hddtemp/README.md
index 1236186a5..d9f254d51 100644
--- a/collectors/python.d.plugin/hddtemp/README.md
+++ b/collectors/python.d.plugin/hddtemp/README.md
@@ -20,3 +20,5 @@ port: 7634
If no configuration is given, module will attempt to connect to hddtemp daemon on `127.0.0.1:7634` address
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fhddtemp%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/hddtemp/hddtemp.chart.py b/collectors/python.d.plugin/hddtemp/hddtemp.chart.py
index dea701171..810aaacc9 100644
--- a/collectors/python.d.plugin/hddtemp/hddtemp.chart.py
+++ b/collectors/python.d.plugin/hddtemp/hddtemp.chart.py
@@ -12,7 +12,9 @@ from copy import deepcopy
from bases.FrameworkServices.SocketService import SocketService
-ORDER = ['temperatures']
+ORDER = [
+ 'temperatures',
+]
CHARTS = {
'temperatures': {
@@ -39,11 +41,11 @@ class Service(SocketService):
SocketService.__init__(self, configuration=configuration, name=name)
self.order = ORDER
self.definitions = deepcopy(CHARTS)
+ self.do_only = self.configuration.get('devices')
self._keep_alive = False
self.request = ""
self.host = "127.0.0.1"
self.port = 7634
- self.do_only = self.configuration.get('devices')
def get_disks(self):
r = self._get_raw_data()
@@ -89,8 +91,7 @@ class Service(SocketService):
return False
for d in disks:
- n = d.id if d.id.startswith('sd') else d.name
- dim = [d.id, n]
+ dim = [d.id]
self.definitions['temperatures']['lines'].append(dim)
return True
diff --git a/collectors/python.d.plugin/hddtemp/hddtemp.conf b/collectors/python.d.plugin/hddtemp/hddtemp.conf
index 9165798a2..b2d7aef63 100644
--- a/collectors/python.d.plugin/hddtemp/hddtemp.conf
+++ b/collectors/python.d.plugin/hddtemp/hddtemp.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,7 +56,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, hddtemp also supports the following:
diff --git a/collectors/python.d.plugin/httpcheck/README.md b/collectors/python.d.plugin/httpcheck/README.md
index 759107663..4cd024d12 100644
--- a/collectors/python.d.plugin/httpcheck/README.md
+++ b/collectors/python.d.plugin/httpcheck/README.md
@@ -39,3 +39,5 @@ server:
response time is low and should be used as reference only.
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fhttpcheck%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/httpcheck/httpcheck.chart.py b/collectors/python.d.plugin/httpcheck/httpcheck.chart.py
index f046f33c0..fd51370da 100644
--- a/collectors/python.d.plugin/httpcheck/httpcheck.chart.py
+++ b/collectors/python.d.plugin/httpcheck/httpcheck.chart.py
@@ -16,7 +16,6 @@ from bases.FrameworkServices.UrlService import UrlService
# default module values (can be overridden per job in `config`)
update_every = 3
priority = 60000
-retries = 60
# Response
HTTP_RESPONSE_TIME = 'time'
@@ -29,11 +28,15 @@ HTTP_BAD_STATUS = 'bad_status'
HTTP_TIMEOUT = 'timeout'
HTTP_NO_CONNECTION = 'no_connection'
-ORDER = ['response_time', 'response_length', 'status']
+ORDER = [
+ 'response_time',
+ 'response_length',
+ 'status',
+]
CHARTS = {
'response_time': {
- 'options': [None, 'HTTP response time', 'ms', 'response', 'httpcheck.responsetime', 'line'],
+ 'options': [None, 'HTTP response time', 'milliseconds', 'response', 'httpcheck.responsetime', 'line'],
'lines': [
[HTTP_RESPONSE_TIME, 'time', 'absolute', 100, 1000]
]
@@ -60,12 +63,12 @@ CHARTS = {
class Service(UrlService):
def __init__(self, configuration=None, name=None):
UrlService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
pattern = self.configuration.get('regex')
self.regex = re.compile(pattern) if pattern else None
self.status_codes_accepted = self.configuration.get('status_accepted', [200])
self.follow_redirect = self.configuration.get('redirect', True)
- self.order = ORDER
- self.definitions = CHARTS
def _get_data(self):
"""
diff --git a/collectors/python.d.plugin/httpcheck/httpcheck.conf b/collectors/python.d.plugin/httpcheck/httpcheck.conf
index bd21b5af8..1e1dd0205 100644
--- a/collectors/python.d.plugin/httpcheck/httpcheck.conf
+++ b/collectors/python.d.plugin/httpcheck/httpcheck.conf
@@ -27,6 +27,10 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
# chart_cleanup sets the default chart cleanup interval in iterations.
# A chart is marked as obsolete if it has not been updated
# 'chart_cleanup' iterations in a row.
@@ -61,7 +65,7 @@ chart_cleanup: 0
# # JOBs sharing a name are mutually exclusive
# update_every: 3 # [optional] the JOB's data collection frequency
# priority: 60000 # [optional] the JOB's order on the dashboard
-# retries: 60 # [optional] the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# timeout: 1 # [optional] the timeout when connecting, supports decimals (e.g. 0.5s)
# url: 'http[s]://host-ip-or-dns[:port][path]'
# # [required] the remote host url to connect to. If [:port] is missing, it defaults to 80
diff --git a/collectors/python.d.plugin/icecast/README.md b/collectors/python.d.plugin/icecast/README.md
index a28a6c398..068da6a06 100644
--- a/collectors/python.d.plugin/icecast/README.md
+++ b/collectors/python.d.plugin/icecast/README.md
@@ -24,3 +24,5 @@ remote:
Without configuration, module attempts to connect to `http://localhost:8443/status-json.xsl`
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Ficecast%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/icecast/icecast.chart.py b/collectors/python.d.plugin/icecast/icecast.chart.py
index d8813f9ba..40eaf89b9 100644
--- a/collectors/python.d.plugin/icecast/icecast.chart.py
+++ b/collectors/python.d.plugin/icecast/icecast.chart.py
@@ -8,11 +8,9 @@ import json
from bases.FrameworkServices.UrlService import UrlService
-priority = 60000
-retries = 60
-
-# charts order (can be overridden if you want less charts, or different order)
-ORDER = ['listeners']
+ORDER = [
+ 'listeners',
+]
CHARTS = {
'listeners': {
diff --git a/collectors/python.d.plugin/icecast/icecast.conf b/collectors/python.d.plugin/icecast/icecast.conf
index a900d06d3..a33074aef 100644
--- a/collectors/python.d.plugin/icecast/icecast.conf
+++ b/collectors/python.d.plugin/icecast/icecast.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,7 +56,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, icecast also supports the following:
diff --git a/collectors/python.d.plugin/ipfs/README.md b/collectors/python.d.plugin/ipfs/README.md
index a30649a5f..a83920370 100644
--- a/collectors/python.d.plugin/ipfs/README.md
+++ b/collectors/python.d.plugin/ipfs/README.md
@@ -23,3 +23,5 @@ localhost:
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fipfs%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/ipfs/ipfs.chart.py b/collectors/python.d.plugin/ipfs/ipfs.chart.py
index 3f6794e48..8c89b4be1 100644
--- a/collectors/python.d.plugin/ipfs/ipfs.chart.py
+++ b/collectors/python.d.plugin/ipfs/ipfs.chart.py
@@ -7,25 +7,17 @@ import json
from bases.FrameworkServices.UrlService import UrlService
-# default module values (can be overridden per job in `config`)
-# update_every = 2
-priority = 60000
-retries = 60
-# default job configuration (overridden by python.d.plugin)
-# config = {'local': {
-# 'update_every': update_every,
-# 'retries': retries,
-# 'priority': priority,
-# 'url': 'http://localhost:5001'
-# }}
-
-# charts order (can be overridden if you want less charts, or different order)
-ORDER = ['bandwidth', 'peers', 'repo_size', 'repo_objects']
+ORDER = [
+ 'bandwidth',
+ 'peers',
+ 'repo_size',
+ 'repo_objects',
+]
CHARTS = {
'bandwidth': {
- 'options': [None, 'IPFS Bandwidth', 'kbits/s', 'Bandwidth', 'ipfs.bandwidth', 'line'],
+ 'options': [None, 'IPFS Bandwidth', 'kilobits/s', 'Bandwidth', 'ipfs.bandwidth', 'line'],
'lines': [
['in', None, 'absolute', 8, 1000],
['out', None, 'absolute', -8, 1000]
@@ -38,10 +30,10 @@ CHARTS = {
]
},
'repo_size': {
- 'options': [None, 'IPFS Repo Size', 'GB', 'Size', 'ipfs.repo_size', 'area'],
+ 'options': [None, 'IPFS Repo Size', 'GiB', 'Size', 'ipfs.repo_size', 'area'],
'lines': [
- ['avail', None, 'absolute', 1, 1e9],
- ['size', None, 'absolute', 1, 1e9],
+ ['avail', None, 'absolute', 1, 1 << 30],
+ ['size', None, 'absolute', 1, 1 << 30],
]
},
'repo_objects': {
@@ -69,11 +61,11 @@ SI_zeroes = {
class Service(UrlService):
def __init__(self, configuration=None, name=None):
UrlService.__init__(self, configuration=configuration, name=name)
- self.baseurl = self.configuration.get('url', 'http://localhost:5001')
self.order = ORDER
self.definitions = CHARTS
- self.__storage_max = None
+ self.baseurl = self.configuration.get('url', 'http://localhost:5001')
self.do_pinapi = self.configuration.get('pinapi')
+ self.__storage_max = None
def _get_json(self, sub_url):
"""
@@ -135,6 +127,6 @@ class Service(UrlService):
for new_key, orig_key, xmute in cfg[suburl]:
try:
r[new_key] = xmute(in_json[orig_key])
- except Exception:
- continue
+ except Exception as error:
+ self.debug(error)
return r or None
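
The `repo_size` divisor change swaps decimal gigabytes (`1e9`) for binary gibibytes (`1 << 30`), matching the new `GiB` unit:

```python
size = 5 * 10**9          # bytes reported by the IPFS API
print(size / 1e9)         # 5.0   -> old chart, GB
print(size / (1 << 30))   # ~4.66 -> new chart, GiB
print(1 << 30)            # 1073741824
```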
diff --git a/collectors/python.d.plugin/ipfs/ipfs.conf b/collectors/python.d.plugin/ipfs/ipfs.conf
index e3df0f6bb..c7e186487 100644
--- a/collectors/python.d.plugin/ipfs/ipfs.conf
+++ b/collectors/python.d.plugin/ipfs/ipfs.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,7 +56,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, ipfs also supports the following:
diff --git a/collectors/python.d.plugin/isc_dhcpd/README.md b/collectors/python.d.plugin/isc_dhcpd/README.md
index 334d86e33..67547e2f6 100644
--- a/collectors/python.d.plugin/isc_dhcpd/README.md
+++ b/collectors/python.d.plugin/isc_dhcpd/README.md
@@ -32,3 +32,5 @@ In case of python2 you need to install `py2-ipaddress` to make plugin work.
The module will not work if no configuration is given.
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fisc_dhcpd%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.chart.py b/collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.chart.py
index a9f274949..bbe7a9369 100644
--- a/collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.chart.py
+++ b/collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.chart.py
@@ -19,14 +19,16 @@ from copy import deepcopy
from bases.FrameworkServices.SimpleService import SimpleService
-priority = 60000
-retries = 60
-ORDER = ['pools_utilization', 'pools_active_leases', 'leases_total']
+ORDER = [
+ 'pools_utilization',
+ 'pools_active_leases',
+ 'leases_total',
+]
CHARTS = {
'pools_utilization': {
- 'options': [None, 'Pools Utilization', '%', 'utilization', 'isc_dhcpd.utilization', 'line'],
+ 'options': [None, 'Pools Utilization', 'percentage', 'utilization', 'isc_dhcpd.utilization', 'line'],
'lines': []
},
'pools_active_leases': {
@@ -120,7 +122,6 @@ class Service(SimpleService):
SimpleService.__init__(self, configuration=configuration, name=name)
self.order = ORDER
self.definitions = deepcopy(CHARTS)
-
lease_path = self.configuration.get('leases_path', '/var/lib/dhcp/dhcpd.leases')
self.dhcpd_leases = DhcpdLeasesFile(path=lease_path)
self.pools = list()
@@ -131,7 +132,7 @@ class Service(SimpleService):
def check(self):
if not HAVE_IP_ADDRESS:
- self.error("'python-ipaddress' module is needed")
+ self.error("'python-ipaddress' package is needed")
return False
if not self.dhcpd_leases.is_valid():
@@ -190,6 +191,17 @@ class Service(SimpleService):
def create_charts(self):
for pool in self.pools:
- self.definitions['pools_utilization']['lines'].append([pool.id + '_utilization', pool.name,
- 'absolute', 1, 100])
- self.definitions['pools_active_leases']['lines'].append([pool.id + '_active_leases', pool.name])
+ dim = [
+ pool.id + '_utilization',
+ pool.name,
+ 'absolute',
+ 1,
+ 100,
+ ]
+ self.definitions['pools_utilization']['lines'].append(dim)
+
+ dim = [
+ pool.id + '_active_leases',
+ pool.name,
+ ]
+ self.definitions['pools_active_leases']['lines'].append(dim)
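
The `'absolute', 1, 100` dimension built above implies utilization is collected as integer hundredths of a percent and divided back by 100 when charted; a sketch of that round trip (the exact collection arithmetic is the module's, not shown here):

```python
utilization = 73.25              # percent, hypothetical pool state
stored = int(utilization * 100)  # dimension value handed to netdata: 7325
print(stored / 100)              # 73.25 rendered on the chart
```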
diff --git a/collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.conf b/collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.conf
index 4a4c4a5e3..8dcb5082f 100644
--- a/collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.conf
+++ b/collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,7 +56,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, isc_dhcpd supports the following:
diff --git a/collectors/python.d.plugin/linux_power_supply/README.md b/collectors/python.d.plugin/linux_power_supply/README.md
index 5cfbe41ce..f5b05d199 100644
--- a/collectors/python.d.plugin/linux_power_supply/README.md
+++ b/collectors/python.d.plugin/linux_power_supply/README.md
@@ -1,4 +1,9 @@
-# linux\_power\_supply
+# Linux power supply
+
+> THIS MODULE IS OBSOLETE.
+> USE THE [PROC PLUGIN](../../proc.plugin) - IT IS MORE EFFICIENT
+
+---
This module monitors various metrics reported by power supply drivers
on Linux. This allows tracking and alerting on things like remaining
@@ -65,3 +70,5 @@ the corresponding `min` or `empty`, which will then always read as zero.
This way, alerts which match on these will still work.
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Flinux_power_supply%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/linux_power_supply/linux_power_supply.conf b/collectors/python.d.plugin/linux_power_supply/linux_power_supply.conf
index 3cb610f7f..96eeef44f 100644
--- a/collectors/python.d.plugin/linux_power_supply/linux_power_supply.conf
+++ b/collectors/python.d.plugin/linux_power_supply/linux_power_supply.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_everye
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,7 +56,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# In addition to the above parameters, linux_power_supply also supports
diff --git a/collectors/python.d.plugin/litespeed/README.md b/collectors/python.d.plugin/litespeed/README.md
index d1482f33c..88b672533 100644
--- a/collectors/python.d.plugin/litespeed/README.md
+++ b/collectors/python.d.plugin/litespeed/README.md
@@ -45,3 +45,5 @@ local:
If no configuration is given, module will use "/tmp/lshttpd/".
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Flitespeed%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/litespeed/litespeed.chart.py b/collectors/python.d.plugin/litespeed/litespeed.chart.py
index efdc6869c..9da94213e 100644
--- a/collectors/python.d.plugin/litespeed/litespeed.chart.py
+++ b/collectors/python.d.plugin/litespeed/litespeed.chart.py
@@ -16,11 +16,15 @@ update_every = 10
# charts order (can be overridden if you want fewer charts, or different order)
ORDER = [
- 'net_throughput_http', 'net_throughput_https', # net throughput
- 'connections_http', 'connections_https', # connections
- 'requests', 'requests_processing', # requests
- 'pub_cache_hits', 'private_cache_hits', # cache
- 'static_hits' # static
+ 'net_throughput_http', # net throughput
+ 'net_throughput_https', # net throughput
+ 'connections_http', # connections
+ 'connections_https', # connections
+ 'requests', # requests
+ 'requests_processing', # requests
+ 'pub_cache_hits', # cache
+ 'private_cache_hits', # cache
+ 'static_hits', # static
]
CHARTS = {
diff --git a/collectors/python.d.plugin/litespeed/litespeed.conf b/collectors/python.d.plugin/litespeed/litespeed.conf
index 17d0f690e..a326e184e 100644
--- a/collectors/python.d.plugin/litespeed/litespeed.conf
+++ b/collectors/python.d.plugin/litespeed/litespeed.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,7 +56,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, litespeed also supports the following:
diff --git a/collectors/python.d.plugin/logind/README.md b/collectors/python.d.plugin/logind/README.md
index 8f8670d4a..c35630c8f 100644
--- a/collectors/python.d.plugin/logind/README.md
+++ b/collectors/python.d.plugin/logind/README.md
@@ -52,3 +52,5 @@ is currently disabled by default, and needs to be explicitly enabled in
`/etc/netdata/python.d.conf` before it will run.
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Flogind%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/logind/logind.chart.py b/collectors/python.d.plugin/logind/logind.chart.py
index bfc486c7f..708668649 100644
--- a/collectors/python.d.plugin/logind/logind.chart.py
+++ b/collectors/python.d.plugin/logind/logind.chart.py
@@ -8,7 +8,13 @@ from bases.FrameworkServices.ExecutableService import ExecutableService
priority = 59999
disabled_by_default = True
-ORDER = ['sessions', 'users', 'seats']
+LOGINCTL_COMMAND = 'loginctl list-sessions --no-legend'
+
+ORDER = [
+ 'sessions',
+ 'users',
+ 'seats',
+]
CHARTS = {
'sessions': {
@@ -39,9 +45,9 @@ CHARTS = {
class Service(ExecutableService):
def __init__(self, configuration=None, name=None):
ExecutableService.__init__(self, configuration=configuration, name=name)
- self.command = 'loginctl list-sessions --no-legend'
self.order = ORDER
self.definitions = CHARTS
+ self.command = LOGINCTL_COMMAND
def _get_data(self):
ret = {
diff --git a/collectors/python.d.plugin/logind/logind.conf b/collectors/python.d.plugin/logind/logind.conf
index 0623493de..01a859d21 100644
--- a/collectors/python.d.plugin/logind/logind.conf
+++ b/collectors/python.d.plugin/logind/logind.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,5 +56,5 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
diff --git a/collectors/python.d.plugin/mdstat/README.md b/collectors/python.d.plugin/mdstat/README.md
index 1ff8f7dab..f88346eec 100644
--- a/collectors/python.d.plugin/mdstat/README.md
+++ b/collectors/python.d.plugin/mdstat/README.md
@@ -1,5 +1,10 @@
# mdstat
+> THIS MODULE IS OBSOLETE.
+> USE THE [PROC PLUGIN](../../proc.plugin) - IT IS MORE EFFICIENT
+
+---
+
Module monitors /proc/mdstat
It produces:
@@ -24,3 +29,5 @@ It produces:
No configuration is needed.
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fmdstat%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/mdstat/mdstat.conf b/collectors/python.d.plugin/mdstat/mdstat.conf
index 66a2f153c..c72b63835 100644
--- a/collectors/python.d.plugin/mdstat/mdstat.conf
+++ b/collectors/python.d.plugin/mdstat/mdstat.conf
@@ -19,11 +19,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
diff --git a/collectors/python.d.plugin/megacli/README.md b/collectors/python.d.plugin/megacli/README.md
index d288a6353..e96015ddb 100644
--- a/collectors/python.d.plugin/megacli/README.md
+++ b/collectors/python.d.plugin/megacli/README.md
@@ -46,3 +46,5 @@ do_battery: yes
```
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fmegacli%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/megacli/megacli.chart.py b/collectors/python.d.plugin/megacli/megacli.chart.py
index 41a1079f6..e1a05e416 100644
--- a/collectors/python.d.plugin/megacli/megacli.chart.py
+++ b/collectors/python.d.plugin/megacli/megacli.chart.py
@@ -66,7 +66,7 @@ def battery_charts(bats):
charts.update(
{
'bbu_{0}_relative_charge'.format(b.id): {
- 'options': [None, 'Relative State of Charge', '%', 'battery',
+ 'options': [None, 'Relative State of Charge', 'percentage', 'battery',
'megacli.bbu_relative_charge', 'line'],
'lines': [
['bbu_{0}_relative_charge'.format(b.id), 'adapter {0}'.format(b.id)],
@@ -180,8 +180,8 @@ class Service(ExecutableService):
ExecutableService.__init__(self, configuration=configuration, name=name)
self.order = list()
self.definitions = dict()
- self.megacli = Megacli()
self.do_battery = self.configuration.get('do_battery')
+ self.megacli = Megacli()
def check_sudo(self):
err = self._get_raw_data(command=self.megacli.sudo_check, stderr=True)
diff --git a/collectors/python.d.plugin/megacli/megacli.conf b/collectors/python.d.plugin/megacli/megacli.conf
index 73afb2f7f..1af4292d9 100644
--- a/collectors/python.d.plugin/megacli/megacli.conf
+++ b/collectors/python.d.plugin/megacli/megacli.conf
@@ -19,11 +19,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -50,7 +48,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, megacli also supports the following:
diff --git a/collectors/python.d.plugin/memcached/README.md b/collectors/python.d.plugin/memcached/README.md
index 3521c109d..98627c4a3 100644
--- a/collectors/python.d.plugin/memcached/README.md
+++ b/collectors/python.d.plugin/memcached/README.md
@@ -67,3 +67,5 @@ localtcpip:
If no configuration is given, module will attempt to connect to memcached instance on `127.0.0.1:11211` address.
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fmemcached%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/memcached/memcached.chart.py b/collectors/python.d.plugin/memcached/memcached.chart.py
index 3c310ec69..9803dbb09 100644
--- a/collectors/python.d.plugin/memcached/memcached.chart.py
+++ b/collectors/python.d.plugin/memcached/memcached.chart.py
@@ -5,37 +5,37 @@
from bases.FrameworkServices.SocketService import SocketService
-# default module values (can be overridden per job in `config`)
-# update_every = 2
-priority = 60000
-retries = 60
-
-# default job configuration (overridden by python.d.plugin)
-# config = {'local': {
-# 'update_every': update_every,
-# 'retries': retries,
-# 'priority': priority,
-# 'host': 'localhost',
-# 'port': 11211,
-# 'unix_socket': None
-# }}
-
-ORDER = ['cache', 'net', 'connections', 'items', 'evicted_reclaimed',
- 'get', 'get_rate', 'set_rate', 'cas', 'delete', 'increment', 'decrement', 'touch', 'touch_rate']
+
+ORDER = [
+ 'cache',
+ 'net',
+ 'connections',
+ 'items',
+ 'evicted_reclaimed',
+ 'get',
+ 'get_rate',
+ 'set_rate',
+ 'cas',
+ 'delete',
+ 'increment',
+ 'decrement',
+ 'touch',
+ 'touch_rate',
+]
CHARTS = {
'cache': {
- 'options': [None, 'Cache Size', 'megabytes', 'cache', 'memcached.cache', 'stacked'],
+ 'options': [None, 'Cache Size', 'MiB', 'cache', 'memcached.cache', 'stacked'],
'lines': [
- ['avail', 'available', 'absolute', 1, 1048576],
- ['used', 'used', 'absolute', 1, 1048576]
+ ['avail', 'available', 'absolute', 1, 1 << 20],
+ ['used', 'used', 'absolute', 1, 1 << 20]
]
},
'net': {
'options': [None, 'Network', 'kilobits/s', 'network', 'memcached.net', 'area'],
'lines': [
- ['bytes_read', 'in', 'incremental', 8, 1024],
- ['bytes_written', 'out', 'incremental', -8, 1024]
+ ['bytes_read', 'in', 'incremental', 8, 1000],
+ ['bytes_written', 'out', 'incremental', -8, 1000],
]
},
'connections': {
@@ -127,13 +127,13 @@ CHARTS = {
class Service(SocketService):
def __init__(self, configuration=None, name=None):
SocketService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
self.request = 'stats\r\n'
self.host = 'localhost'
self.port = 11211
self._keep_alive = True
self.unix_socket = None
- self.order = ORDER
- self.definitions = CHARTS
def _get_data(self):
"""
diff --git a/collectors/python.d.plugin/memcached/memcached.conf b/collectors/python.d.plugin/memcached/memcached.conf
index 85c3daf65..3286b4623 100644
--- a/collectors/python.d.plugin/memcached/memcached.conf
+++ b/collectors/python.d.plugin/memcached/memcached.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,7 +56,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, memcached also supports the following:
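Every conf file touched by this commit replaces the removed `retries` option with `penalty`. The comment only states the contract: the back-off grows after every 5 consecutive failed updates and is capped at 10 minutes. A hypothetical illustration of that contract, not the framework's actual implementation:

```python
MAX_PENALTY = 600  # 10 minutes, per the comment above

def effective_interval(update_every, consecutive_failures):
    # Hypothetical: one back-off step per 5 failed updates in a row,
    # capped at MAX_PENALTY; a successful update resets the counter.
    steps = consecutive_failures // 5
    return update_every + min(steps * 5 * update_every, MAX_PENALTY)

print(effective_interval(1, 4))   # 1  - no penalty yet
print(effective_interval(1, 10))  # 11 - two back-off steps applied
```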
diff --git a/collectors/python.d.plugin/mongodb/README.md b/collectors/python.d.plugin/mongodb/README.md
index 8e5f652c5..ac8930dd2 100644
--- a/collectors/python.d.plugin/mongodb/README.md
+++ b/collectors/python.d.plugin/mongodb/README.md
@@ -121,6 +121,33 @@ Number of charts depends on mongodb version, storage engine and other features (
26. **Replication set member heartbeat latency**
* member (time when last heartbeat was received from replica set member)
+### prerequisite
+Create a read-only user for Netdata in the admin database.
+
+1. Authenticate as the admin user.
+
+```
+use admin
+db.auth("admin", "<MONGODB_ADMIN_PASSWORD>")
+```
+
+2. Create a user.
+
+```
+# MongoDB 2.x.
+db.addUser("netdata", "<UNIQUE_PASSWORD>", true)
+
+# MongoDB 3.x or higher.
+db.createUser({
+ "user":"netdata",
+ "pwd": "<UNIQUE_PASSWORD>",
+ "roles" : [
+ {role: 'read', db: 'admin' },
+ {role: 'clusterMonitor', db: 'admin'},
+ {role: 'read', db: 'local' }
+ ]
+})
+```
### configuration
@@ -139,3 +166,5 @@ local:
If no configuration is given, the module will attempt to connect to the mongodb daemon at `127.0.0.1:27017`.
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fmongodb%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
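A quick way to verify the read-only user from the new prerequisite section is a pymongo smoke test; the password placeholder stays as documented, and the address below is an assumption matching the module's stated default:

```python
# Hypothetical connectivity check for the 'netdata' user created above;
# assumes pymongo is installed and mongod listens on 127.0.0.1:27017.
from pymongo import MongoClient

client = MongoClient('mongodb://netdata:<UNIQUE_PASSWORD>@127.0.0.1:27017/admin')
print(client.admin.command('serverStatus')['uptime'])
```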
diff --git a/collectors/python.d.plugin/mongodb/mongodb.chart.py b/collectors/python.d.plugin/mongodb/mongodb.chart.py
index 10344342d..92740ff86 100644
--- a/collectors/python.d.plugin/mongodb/mongodb.chart.py
+++ b/collectors/python.d.plugin/mongodb/mongodb.chart.py
@@ -16,10 +16,6 @@ except ImportError:
from bases.FrameworkServices.SimpleService import SimpleService
-# default module values (can be overridden per job in `config`)
-# update_every = 2
-priority = 60000
-retries = 60
REPL_SET_STATES = [
('1', 'primary'),
@@ -209,21 +205,21 @@ CHARTS = {
]
},
'journaling_volume': {
- 'options': [None, 'Volume of data written to the journal', 'MB', 'database performance',
+ 'options': [None, 'Volume of data written to the journal', 'MiB', 'database performance',
'mongodb.journaling_volume', 'line'],
'lines': [
['journaledMB', 'volume', 'absolute', 1, 100]
]
},
'background_flush_average': {
- 'options': [None, 'Average time taken by flushes to execute', 'ms', 'database performance',
+ 'options': [None, 'Average time taken by flushes to execute', 'milliseconds', 'database performance',
'mongodb.background_flush_average', 'line'],
'lines': [
['average_ms', 'time', 'absolute', 1, 100]
]
},
'background_flush_last': {
- 'options': [None, 'Time taken by the last flush operation to execute', 'ms', 'database performance',
+ 'options': [None, 'Time taken by the last flush operation to execute', 'milliseconds', 'database performance',
'mongodb.background_flush_last', 'line'],
'lines': [
['last_ms', 'time', 'absolute', 1, 100]
@@ -269,7 +265,7 @@ CHARTS = {
]
},
'memory': {
- 'options': [None, 'Memory metrics', 'MB', 'resource utilization', 'mongodb.memory', 'stacked'],
+ 'options': [None, 'Memory metrics', 'MiB', 'resource utilization', 'mongodb.memory', 'stacked'],
'lines': [
['virtual', None, 'absolute', 1, 1],
['resident', None, 'absolute', 1, 1],
@@ -313,7 +309,7 @@ CHARTS = {
},
'wiredtiger_cache': {
'options': [None, 'The percentage of the wiredTiger cache that is in use and cache with dirty bytes',
- 'percent', 'resource utilization', 'mongodb.wiredtiger_cache', 'stacked'],
+ 'percentage', 'resource utilization', 'mongodb.wiredtiger_cache', 'stacked'],
'lines': [
['wiredTiger_percent_clean', 'inuse', 'absolute', 1, 1000],
['wiredTiger_percent_dirty', 'dirty', 'absolute', 1, 1000]
@@ -333,14 +329,14 @@ CHARTS = {
'lines': []
},
'tcmalloc_generic': {
- 'options': [None, 'Tcmalloc generic metrics', 'MB', 'tcmalloc', 'mongodb.tcmalloc_generic', 'stacked'],
+ 'options': [None, 'Tcmalloc generic metrics', 'MiB', 'tcmalloc', 'mongodb.tcmalloc_generic', 'stacked'],
'lines': [
- ['current_allocated_bytes', 'allocated', 'absolute', 1, 1048576],
- ['heap_size', 'heap_size', 'absolute', 1, 1048576]
+ ['current_allocated_bytes', 'allocated', 'absolute', 1, 1 << 20],
+ ['heap_size', 'heap_size', 'absolute', 1, 1 << 20]
]
},
'tcmalloc_metrics': {
- 'options': [None, 'Tcmalloc metrics', 'KB', 'tcmalloc', 'mongodb.tcmalloc_metrics', 'stacked'],
+ 'options': [None, 'Tcmalloc metrics', 'KiB', 'tcmalloc', 'mongodb.tcmalloc_metrics', 'stacked'],
'lines': [
['central_cache_free_bytes', 'central_cache_free', 'absolute', 1, 1024],
['current_total_thread_cache_bytes', 'current_total_thread_cache', 'absolute', 1, 1024],
diff --git a/collectors/python.d.plugin/mongodb/mongodb.conf b/collectors/python.d.plugin/mongodb/mongodb.conf
index 62faef68d..f69acac79 100644
--- a/collectors/python.d.plugin/mongodb/mongodb.conf
+++ b/collectors/python.d.plugin/mongodb/mongodb.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,7 +56,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, mongodb also supports the following:
diff --git a/collectors/python.d.plugin/monit/README.md b/collectors/python.d.plugin/monit/README.md
index 6d10240c9..0f69aff29 100644
--- a/collectors/python.d.plugin/monit/README.md
+++ b/collectors/python.d.plugin/monit/README.md
@@ -31,3 +31,5 @@ local:
If no configuration is given, the module will attempt to connect to monit at `http://localhost:2812`.
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fmonit%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/monit/monit.chart.py b/collectors/python.d.plugin/monit/monit.chart.py
index 51943c0e1..3ac0032c5 100644
--- a/collectors/python.d.plugin/monit/monit.chart.py
+++ b/collectors/python.d.plugin/monit/monit.chart.py
@@ -6,13 +6,20 @@
import xml.etree.ElementTree as ET
from bases.FrameworkServices.UrlService import UrlService
-# default module values (can be overridden per job in `config`)
-# update_every = 2
-priority = 60000
-retries = 60
# see enum State_Type from monit.h (https://bitbucket.org/tildeslash/monit/src/master/src/monit.h)
-MONIT_SERVICE_NAMES = ['Filesystem', 'Directory', 'File', 'Process', 'Host', 'System', 'Fifo', 'Program', 'Net']
+MONIT_SERVICE_NAMES = [
+ 'Filesystem',
+ 'Directory',
+ 'File',
+ 'Process',
+ 'Host',
+ 'System',
+ 'Fifo',
+ 'Program',
+ 'Net',
+]
+
DEFAULT_SERVICES_IDS = [0, 1, 2, 3, 4, 6, 7, 8]
# charts order (can be overridden if you want less charts, or different order)
@@ -90,10 +97,10 @@ CHARTS = {
class Service(UrlService):
def __init__(self, configuration=None, name=None):
UrlService.__init__(self, configuration=configuration, name=name)
- base_url = self.configuration.get('url', 'http://localhost:2812')
- self.url = '{0}/_status?format=xml&level=full'.format(base_url)
self.order = ORDER
self.definitions = CHARTS
+ base_url = self.configuration.get('url', 'http://localhost:2812')
+ self.url = '{0}/_status?format=xml&level=full'.format(base_url)
def parse(self, data):
try:
@@ -105,15 +112,19 @@ class Service(UrlService):
def check(self):
self._manager = self._build_manager()
+
raw_data = self._get_raw_data()
if not raw_data:
return None
+
return bool(self.parse(raw_data))
def _get_data(self):
raw_data = self._get_raw_data()
+
if not raw_data:
return None
+
xml = self.parse(raw_data)
if not xml:
return None
@@ -121,6 +132,7 @@ class Service(UrlService):
data = {}
for service_id in DEFAULT_SERVICES_IDS:
service_category = MONIT_SERVICE_NAMES[service_id].lower()
+
if service_category == 'system':
self.debug("Skipping service from 'System' category, because it's useless in graphs")
continue
diff --git a/collectors/python.d.plugin/monit/monit.conf b/collectors/python.d.plugin/monit/monit.conf
index f9c26dbc3..9a3fb6938 100644
--- a/collectors/python.d.plugin/monit/monit.conf
+++ b/collectors/python.d.plugin/monit/monit.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,7 +56,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, this plugin also supports the following:
diff --git a/collectors/python.d.plugin/mysql/README.md b/collectors/python.d.plugin/mysql/README.md
index e38098e7e..498493a3f 100644
--- a/collectors/python.d.plugin/mysql/README.md
+++ b/collectors/python.d.plugin/mysql/README.md
@@ -65,7 +65,6 @@ Here is an example for 3 servers:
```yaml
update_every : 10
priority : 90100
-retries : 5
local:
'my.cnf' : '/etc/mysql/my.cnf'
@@ -82,9 +81,10 @@ remote:
pass : 'bla'
host : 'example.org'
port : 9000
- retries : 20
```
If no configuration is given, the module will attempt to connect to the mysql server via unix socket at `/var/run/mysqld/mysqld.sock`, without a password and with username `root`.
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fmysql%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/mysql/mysql.chart.py b/collectors/python.d.plugin/mysql/mysql.chart.py
index c4d1e8b3a..20d32f81b 100644
--- a/collectors/python.d.plugin/mysql/mysql.chart.py
+++ b/collectors/python.d.plugin/mysql/mysql.chart.py
@@ -6,10 +6,6 @@
from bases.FrameworkServices.MySQLService import MySQLService
-# default module values (can be overridden per job in `config`)
-# update_every = 3
-priority = 60000
-retries = 60
# query executed on MySQL server
QUERY_GLOBAL = 'SHOW GLOBAL STATUS;'
@@ -172,16 +168,19 @@ ORDER = [
'binlog_cache',
'binlog_stmt_cache',
'threads',
+ 'threads_creation_rate',
'thread_cache_misses',
'innodb_io',
'innodb_io_ops',
'innodb_io_pending_ops',
'innodb_log',
'innodb_os_log',
+ 'innodb_os_log_fsync_writes',
'innodb_os_log_io',
'innodb_cur_row_lock',
'innodb_rows',
'innodb_buffer_pool_pages',
+ 'innodb_buffer_pool_flush_pages_requests',
'innodb_buffer_pool_bytes',
'innodb_buffer_pool_read_ahead',
'innodb_buffer_pool_reqs',
@@ -206,14 +205,14 @@ ORDER = [
CHARTS = {
'net': {
- 'options': [None, 'mysql Bandwidth', 'kilobits/s', 'bandwidth', 'mysql.net', 'area'],
+ 'options': [None, 'Bandwidth', 'kilobits/s', 'bandwidth', 'mysql.net', 'area'],
'lines': [
- ['Bytes_received', 'in', 'incremental', 8, 1024],
- ['Bytes_sent', 'out', 'incremental', -8, 1024]
+ ['Bytes_received', 'in', 'incremental', 8, 1000],
+ ['Bytes_sent', 'out', 'incremental', -8, 1000]
]
},
'queries': {
- 'options': [None, 'mysql Queries', 'queries/s', 'queries', 'mysql.queries', 'line'],
+ 'options': [None, 'Queries', 'queries/s', 'queries', 'mysql.queries', 'line'],
'lines': [
['Queries', 'queries', 'incremental'],
['Questions', 'questions', 'incremental'],
@@ -221,7 +220,7 @@ CHARTS = {
]
},
'queries_type': {
- 'options': [None, 'mysql Query type', 'queries/s', 'query_types', 'mysql.queries_type', 'stacked'],
+ 'options': [None, 'Query Type', 'queries/s', 'query_types', 'mysql.queries_type', 'stacked'],
'lines': [
['Com_select', 'select', 'incremental'],
['Com_delete', 'delete', 'incremental'],
@@ -232,7 +231,7 @@ CHARTS = {
]
},
'handlers': {
- 'options': [None, 'mysql Handlers', 'handlers/s', 'handlers', 'mysql.handlers', 'line'],
+ 'options': [None, 'Handlers', 'handlers/s', 'handlers', 'mysql.handlers', 'line'],
'lines': [
['Handler_commit', 'commit', 'incremental'],
['Handler_delete', 'delete', 'incremental'],
@@ -251,14 +250,14 @@ CHARTS = {
]
},
'table_locks': {
- 'options': [None, 'mysql Tables Locks', 'locks/s', 'locks', 'mysql.table_locks', 'line'],
+ 'options': [None, 'Tables Locks', 'locks/s', 'locks', 'mysql.table_locks', 'line'],
'lines': [
['Table_locks_immediate', 'immediate', 'incremental'],
['Table_locks_waited', 'waited', 'incremental', -1, 1]
]
},
'join_issues': {
- 'options': [None, 'mysql Select Join Issues', 'joins/s', 'issues', 'mysql.join_issues', 'line'],
+ 'options': [None, 'Select Join Issues', 'joins/s', 'issues', 'mysql.join_issues', 'line'],
'lines': [
['Select_full_join', 'full_join', 'incremental'],
['Select_full_range_join', 'full_range_join', 'incremental'],
@@ -268,7 +267,7 @@ CHARTS = {
]
},
'sort_issues': {
- 'options': [None, 'mysql Sort Issues', 'issues/s', 'issues', 'mysql.sort_issues', 'line'],
+ 'options': [None, 'Sort Issues', 'issues/s', 'issues', 'mysql.sort_issues', 'line'],
'lines': [
['Sort_merge_passes', 'merge_passes', 'incremental'],
['Sort_range', 'range', 'incremental'],
@@ -276,7 +275,7 @@ CHARTS = {
]
},
'tmp': {
- 'options': [None, 'mysql Tmp Operations', 'counter', 'temporaries', 'mysql.tmp', 'line'],
+ 'options': [None, 'Tmp Operations', 'counter', 'temporaries', 'mysql.tmp', 'line'],
'lines': [
['Created_tmp_disk_tables', 'disk_tables', 'incremental'],
['Created_tmp_files', 'files', 'incremental'],
@@ -284,14 +283,14 @@ CHARTS = {
]
},
'connections': {
- 'options': [None, 'mysql Connections', 'connections/s', 'connections', 'mysql.connections', 'line'],
+ 'options': [None, 'Connections', 'connections/s', 'connections', 'mysql.connections', 'line'],
'lines': [
['Connections', 'all', 'incremental'],
['Aborted_connects', 'aborted', 'incremental']
]
},
'connections_active': {
- 'options': [None, 'mysql Connections Active', 'connections', 'connections', 'mysql.connections_active', 'line'],
+ 'options': [None, 'Connections Active', 'connections', 'connections', 'mysql.connections_active', 'line'],
'lines': [
['Threads_connected', 'active', 'absolute'],
['max_connections', 'limit', 'absolute'],
@@ -299,21 +298,26 @@ CHARTS = {
]
},
'binlog_cache': {
- 'options': [None, 'mysql Binlog Cache', 'transactions/s', 'binlog', 'mysql.binlog_cache', 'line'],
+ 'options': [None, 'Binlog Cache', 'transactions/s', 'binlog', 'mysql.binlog_cache', 'line'],
'lines': [
['Binlog_cache_disk_use', 'disk', 'incremental'],
['Binlog_cache_use', 'all', 'incremental']
]
},
'threads': {
- 'options': [None, 'mysql Threads', 'threads', 'threads', 'mysql.threads', 'line'],
+ 'options': [None, 'Threads', 'threads', 'threads', 'mysql.threads', 'line'],
'lines': [
['Threads_connected', 'connected', 'absolute'],
- ['Threads_created', 'created', 'incremental'],
['Threads_cached', 'cached', 'absolute', -1, 1],
['Threads_running', 'running', 'absolute'],
]
},
+ 'threads_creation_rate': {
+ 'options': [None, 'Threads Creation Rate', 'threads/s', 'threads', 'mysql.threads_creation_rate', 'line'],
+ 'lines': [
+ ['Threads_created', 'created', 'incremental'],
+ ]
+ },
'thread_cache_misses': {
'options': [None, 'mysql Threads Cache Misses', 'misses', 'threads', 'mysql.thread_cache_misses', 'area'],
'lines': [
@@ -321,14 +325,14 @@ CHARTS = {
]
},
'innodb_io': {
- 'options': [None, 'mysql InnoDB I/O Bandwidth', 'kilobytes/s', 'innodb', 'mysql.innodb_io', 'area'],
+ 'options': [None, 'InnoDB I/O Bandwidth', 'KiB/s', 'innodb', 'mysql.innodb_io', 'area'],
'lines': [
['Innodb_data_read', 'read', 'incremental', 1, 1024],
['Innodb_data_written', 'write', 'incremental', -1, 1024]
]
},
'innodb_io_ops': {
- 'options': [None, 'mysql InnoDB I/O Operations', 'operations/s', 'innodb', 'mysql.innodb_io_ops', 'line'],
+ 'options': [None, 'InnoDB I/O Operations', 'operations/s', 'innodb', 'mysql.innodb_io_ops', 'line'],
'lines': [
['Innodb_data_reads', 'reads', 'incremental'],
['Innodb_data_writes', 'writes', 'incremental', -1, 1],
@@ -336,7 +340,7 @@ CHARTS = {
]
},
'innodb_io_pending_ops': {
- 'options': [None, 'mysql InnoDB Pending I/O Operations', 'operations', 'innodb',
+ 'options': [None, 'InnoDB Pending I/O Operations', 'operations', 'innodb',
'mysql.innodb_io_pending_ops', 'line'],
'lines': [
['Innodb_data_pending_reads', 'reads', 'absolute'],
@@ -345,7 +349,7 @@ CHARTS = {
]
},
'innodb_log': {
- 'options': [None, 'mysql InnoDB Log Operations', 'operations/s', 'innodb', 'mysql.innodb_log', 'line'],
+ 'options': [None, 'InnoDB Log Operations', 'operations/s', 'innodb', 'mysql.innodb_log', 'line'],
'lines': [
['Innodb_log_waits', 'waits', 'incremental'],
['Innodb_log_write_requests', 'write_requests', 'incremental', -1, 1],
@@ -353,28 +357,33 @@ CHARTS = {
]
},
'innodb_os_log': {
- 'options': [None, 'mysql InnoDB OS Log Operations', 'operations', 'innodb', 'mysql.innodb_os_log', 'line'],
+ 'options': [None, 'InnoDB OS Log Pending Operations', 'operations', 'innodb', 'mysql.innodb_os_log', 'line'],
+ 'lines': [
+ ['Innodb_os_log_pending_fsyncs', 'fsyncs', 'absolute'],
+ ['Innodb_os_log_pending_writes', 'writes', 'absolute', -1, 1],
+ ]
+ },
+ 'innodb_os_log_fsync_writes': {
+ 'options': [None, 'InnoDB OS Log Operations', 'operations/s', 'innodb', 'mysql.innodb_os_log_fsync_writes', 'line'],
'lines': [
['Innodb_os_log_fsyncs', 'fsyncs', 'incremental'],
- ['Innodb_os_log_pending_fsyncs', 'pending_fsyncs', 'absolute'],
- ['Innodb_os_log_pending_writes', 'pending_writes', 'absolute', -1, 1],
]
},
'innodb_os_log_io': {
- 'options': [None, 'mysql InnoDB OS Log Bandwidth', 'kilobytes/s', 'innodb', 'mysql.innodb_os_log_io', 'area'],
+ 'options': [None, 'InnoDB OS Log Bandwidth', 'KiB/s', 'innodb', 'mysql.innodb_os_log_io', 'area'],
'lines': [
['Innodb_os_log_written', 'write', 'incremental', -1, 1024],
]
},
'innodb_cur_row_lock': {
- 'options': [None, 'mysql InnoDB Current Row Locks', 'operations', 'innodb',
+ 'options': [None, 'InnoDB Current Row Locks', 'operations', 'innodb',
'mysql.innodb_cur_row_lock', 'area'],
'lines': [
['Innodb_row_lock_current_waits', 'current_waits', 'absolute']
]
},
'innodb_rows': {
- 'options': [None, 'mysql InnoDB Row Operations', 'operations/s', 'innodb', 'mysql.innodb_rows', 'area'],
+ 'options': [None, 'InnoDB Row Operations', 'operations/s', 'innodb', 'mysql.innodb_rows', 'area'],
'lines': [
['Innodb_rows_inserted', 'inserted', 'incremental'],
['Innodb_rows_read', 'read', 'incremental', 1, 1],
@@ -383,19 +392,25 @@ CHARTS = {
]
},
'innodb_buffer_pool_pages': {
- 'options': [None, 'mysql InnoDB Buffer Pool Pages', 'pages', 'innodb',
+ 'options': [None, 'InnoDB Buffer Pool Pages', 'pages', 'innodb',
'mysql.innodb_buffer_pool_pages', 'line'],
'lines': [
['Innodb_buffer_pool_pages_data', 'data', 'absolute'],
['Innodb_buffer_pool_pages_dirty', 'dirty', 'absolute', -1, 1],
['Innodb_buffer_pool_pages_free', 'free', 'absolute'],
- ['Innodb_buffer_pool_pages_flushed', 'flushed', 'incremental', -1, 1],
['Innodb_buffer_pool_pages_misc', 'misc', 'absolute', -1, 1],
['Innodb_buffer_pool_pages_total', 'total', 'absolute']
]
},
+ 'innodb_buffer_pool_flush_pages_requests': {
+ 'options': [None, 'InnoDB Buffer Pool Flush Pages Requests', 'requests/s', 'innodb',
+ 'mysql.innodb_buffer_pool_flush_pages_requests', 'line'],
+ 'lines': [
+ ['Innodb_buffer_pool_pages_flushed', 'flush pages', 'incremental'],
+ ]
+ },
'innodb_buffer_pool_bytes': {
- 'options': [None, 'mysql InnoDB Buffer Pool Bytes', 'MB', 'innodb', 'mysql.innodb_buffer_pool_bytes', 'area'],
+ 'options': [None, 'InnoDB Buffer Pool Bytes', 'MiB', 'innodb', 'mysql.innodb_buffer_pool_bytes', 'area'],
'lines': [
['Innodb_buffer_pool_bytes_data', 'data', 'absolute', 1, 1024 * 1024],
['Innodb_buffer_pool_bytes_dirty', 'dirty', 'absolute', -1, 1024 * 1024]
@@ -411,7 +426,7 @@ CHARTS = {
]
},
'innodb_buffer_pool_reqs': {
- 'options': [None, 'mysql InnoDB Buffer Pool Requests', 'requests/s', 'innodb',
+ 'options': [None, 'InnoDB Buffer Pool Requests', 'requests/s', 'innodb',
'mysql.innodb_buffer_pool_reqs', 'area'],
'lines': [
['Innodb_buffer_pool_read_requests', 'reads', 'incremental'],
@@ -419,7 +434,7 @@ CHARTS = {
]
},
'innodb_buffer_pool_ops': {
- 'options': [None, 'mysql InnoDB Buffer Pool Operations', 'operations/s', 'innodb',
+ 'options': [None, 'InnoDB Buffer Pool Operations', 'operations/s', 'innodb',
'mysql.innodb_buffer_pool_ops', 'area'],
'lines': [
['Innodb_buffer_pool_reads', 'disk reads', 'incremental'],
@@ -427,7 +442,7 @@ CHARTS = {
]
},
'qcache_ops': {
- 'options': [None, 'mysql QCache Operations', 'queries/s', 'qcache', 'mysql.qcache_ops', 'line'],
+ 'options': [None, 'QCache Operations', 'queries/s', 'qcache', 'mysql.qcache_ops', 'line'],
'lines': [
['Qcache_hits', 'hits', 'incremental'],
['Qcache_lowmem_prunes', 'lowmem prunes', 'incremental', -1, 1],
@@ -436,26 +451,26 @@ CHARTS = {
]
},
'qcache': {
- 'options': [None, 'mysql QCache Queries in Cache', 'queries', 'qcache', 'mysql.qcache', 'line'],
+ 'options': [None, 'QCache Queries in Cache', 'queries', 'qcache', 'mysql.qcache', 'line'],
'lines': [
['Qcache_queries_in_cache', 'queries', 'absolute']
]
},
'qcache_freemem': {
- 'options': [None, 'mysql QCache Free Memory', 'MB', 'qcache', 'mysql.qcache_freemem', 'area'],
+ 'options': [None, 'QCache Free Memory', 'MiB', 'qcache', 'mysql.qcache_freemem', 'area'],
'lines': [
['Qcache_free_memory', 'free', 'absolute', 1, 1024 * 1024]
]
},
'qcache_memblocks': {
- 'options': [None, 'mysql QCache Memory Blocks', 'blocks', 'qcache', 'mysql.qcache_memblocks', 'line'],
+ 'options': [None, 'QCache Memory Blocks', 'blocks', 'qcache', 'mysql.qcache_memblocks', 'line'],
'lines': [
['Qcache_free_blocks', 'free', 'absolute'],
['Qcache_total_blocks', 'total', 'absolute']
]
},
'key_blocks': {
- 'options': [None, 'mysql MyISAM Key Cache Blocks', 'blocks', 'myisam', 'mysql.key_blocks', 'line'],
+ 'options': [None, 'MyISAM Key Cache Blocks', 'blocks', 'myisam', 'mysql.key_blocks', 'line'],
'lines': [
['Key_blocks_unused', 'unused', 'absolute'],
['Key_blocks_used', 'used', 'absolute', -1, 1],
@@ -463,14 +478,14 @@ CHARTS = {
]
},
'key_requests': {
- 'options': [None, 'mysql MyISAM Key Cache Requests', 'requests/s', 'myisam', 'mysql.key_requests', 'area'],
+ 'options': [None, 'MyISAM Key Cache Requests', 'requests/s', 'myisam', 'mysql.key_requests', 'area'],
'lines': [
['Key_read_requests', 'reads', 'incremental'],
['Key_write_requests', 'writes', 'incremental', -1, 1]
]
},
'key_disk_ops': {
- 'options': [None, 'mysql MyISAM Key Cache Disk Operations', 'operations/s',
+ 'options': [None, 'MyISAM Key Cache Disk Operations', 'operations/s',
'myisam', 'mysql.key_disk_ops', 'area'],
'lines': [
['Key_reads', 'reads', 'incremental'],
@@ -478,19 +493,19 @@ CHARTS = {
]
},
'files': {
- 'options': [None, 'mysql Open Files', 'files', 'files', 'mysql.files', 'line'],
+ 'options': [None, 'Open Files', 'files', 'files', 'mysql.files', 'line'],
'lines': [
['Open_files', 'files', 'absolute']
]
},
'files_rate': {
- 'options': [None, 'mysql Opened Files Rate', 'files/s', 'files', 'mysql.files_rate', 'line'],
+ 'options': [None, 'Opened Files Rate', 'files/s', 'files', 'mysql.files_rate', 'line'],
'lines': [
['Opened_files', 'files', 'incremental']
]
},
'binlog_stmt_cache': {
- 'options': [None, 'mysql Binlog Statement Cache', 'statements/s', 'binlog',
+ 'options': [None, 'Binlog Statement Cache', 'statements/s', 'binlog',
'mysql.binlog_stmt_cache', 'line'],
'lines': [
['Binlog_stmt_cache_disk_use', 'disk', 'incremental'],
@@ -498,7 +513,7 @@ CHARTS = {
]
},
'connection_errors': {
- 'options': [None, 'mysql Connection Errors', 'connections/s', 'connections',
+ 'options': [None, 'Connection Errors', 'connections/s', 'connections',
'mysql.connection_errors', 'line'],
'lines': [
['Connection_errors_accept', 'accept', 'incremental'],
@@ -523,35 +538,35 @@ CHARTS = {
]
},
'galera_writesets': {
- 'options': [None, 'Replicated writesets', 'writesets/s', 'galera', 'mysql.galera_writesets', 'line'],
+ 'options': [None, 'Replicated Writesets', 'writesets/s', 'galera', 'mysql.galera_writesets', 'line'],
'lines': [
['wsrep_received', 'rx', 'incremental'],
['wsrep_replicated', 'tx', 'incremental', -1, 1],
]
},
'galera_bytes': {
- 'options': [None, 'Replicated bytes', 'KB/s', 'galera', 'mysql.galera_bytes', 'area'],
+ 'options': [None, 'Replicated Bytes', 'KiB/s', 'galera', 'mysql.galera_bytes', 'area'],
'lines': [
['wsrep_received_bytes', 'rx', 'incremental', 1, 1024],
['wsrep_replicated_bytes', 'tx', 'incremental', -1, 1024],
]
},
'galera_queue': {
- 'options': [None, 'Galera queue', 'writesets', 'galera', 'mysql.galera_queue', 'line'],
+ 'options': [None, 'Galera Queue', 'writesets', 'galera', 'mysql.galera_queue', 'line'],
'lines': [
['wsrep_local_recv_queue', 'rx', 'absolute'],
['wsrep_local_send_queue', 'tx', 'absolute', -1, 1],
]
},
'galera_conflicts': {
- 'options': [None, 'Replication conflicts', 'transactions', 'galera', 'mysql.galera_conflicts', 'area'],
+ 'options': [None, 'Replication Conflicts', 'transactions', 'galera', 'mysql.galera_conflicts', 'area'],
'lines': [
['wsrep_local_bf_aborts', 'bf_aborts', 'incremental'],
['wsrep_local_cert_failures', 'cert_fails', 'incremental', -1, 1],
]
},
'galera_flow_control': {
- 'options': [None, 'Flow control', 'millisec', 'galera', 'mysql.galera_flow_control', 'area'],
+ 'options': [None, 'Flow Control', 'milliseconds', 'galera', 'mysql.galera_flow_control', 'area'],
'lines': [
['wsrep_flow_control_paused_ns', 'paused', 'incremental', 1, 1000000],
]
@@ -564,7 +579,11 @@ class Service(MySQLService):
MySQLService.__init__(self, configuration=configuration, name=name)
self.order = ORDER
self.definitions = CHARTS
- self.queries = dict(global_status=QUERY_GLOBAL, slave_status=QUERY_SLAVE, variables=QUERY_VARIABLES)
+ self.queries = dict(
+ global_status=QUERY_GLOBAL,
+ slave_status=QUERY_SLAVE,
+ variables=QUERY_VARIABLES,
+ )
def _get_data(self):
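The structural change in this file is splitting monotonic counters out of mixed charts: `Threads_created`, `Innodb_os_log_fsyncs` and `Innodb_buffer_pool_pages_flushed` move to their own charts as `incremental` dimensions, so rates are no longer plotted on the same axis as gauges. A toy illustration of the two algorithms, not framework code:

```python
# Two consecutive SHOW GLOBAL STATUS samples, one second apart (made up):
prev = {'Threads_connected': 12, 'Threads_created': 4051}
curr = {'Threads_connected': 14, 'Threads_created': 4057}

# 'absolute' dimensions plot the sample itself; 'incremental' dimensions
# plot the per-interval delta of an ever-growing counter.
threads_now = curr['Threads_connected']                            # 14 threads
creation_rate = curr['Threads_created'] - prev['Threads_created']  # 6 threads/s
```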
diff --git a/collectors/python.d.plugin/mysql/mysql.conf b/collectors/python.d.plugin/mysql/mysql.conf
index b5956a2c6..ac9b505bc 100644
--- a/collectors/python.d.plugin/mysql/mysql.conf
+++ b/collectors/python.d.plugin/mysql/mysql.conf
@@ -27,11 +27,10 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,7 +57,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, mysql also supports the following:
diff --git a/collectors/python.d.plugin/nginx/README.md b/collectors/python.d.plugin/nginx/README.md
index 007f45c7c..7854105b7 100644
--- a/collectors/python.d.plugin/nginx/README.md
+++ b/collectors/python.d.plugin/nginx/README.md
@@ -37,9 +37,10 @@ priority : 90100
local:
url : 'http://localhost/stub_status'
- retries : 10
```
Without configuration, the module attempts to connect to `http://localhost/stub_status`.
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fnginx%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/nginx/nginx.chart.py b/collectors/python.d.plugin/nginx/nginx.chart.py
index 09c6bbd37..84a5985e4 100644
--- a/collectors/python.d.plugin/nginx/nginx.chart.py
+++ b/collectors/python.d.plugin/nginx/nginx.chart.py
@@ -5,38 +5,30 @@
from bases.FrameworkServices.UrlService import UrlService
-# default module values (can be overridden per job in `config`)
-# update_every = 2
-priority = 60000
-retries = 60
-# default job configuration (overridden by python.d.plugin)
-# config = {'local': {
-# 'update_every': update_every,
-# 'retries': retries,
-# 'priority': priority,
-# 'url': 'http://localhost/stub_status'
-# }}
-
-# charts order (can be overridden if you want less charts, or different order)
-ORDER = ['connections', 'requests', 'connection_status', 'connect_rate']
+ORDER = [
+ 'connections',
+ 'requests',
+ 'connection_status',
+ 'connect_rate',
+]
CHARTS = {
'connections': {
- 'options': [None, 'nginx Active Connections', 'connections', 'active connections',
+ 'options': [None, 'Active Connections', 'connections', 'active connections',
'nginx.connections', 'line'],
'lines': [
['active']
]
},
'requests': {
- 'options': [None, 'nginx Requests', 'requests/s', 'requests', 'nginx.requests', 'line'],
+ 'options': [None, 'Requests', 'requests/s', 'requests', 'nginx.requests', 'line'],
'lines': [
['requests', None, 'incremental']
]
},
'connection_status': {
- 'options': [None, 'nginx Active Connections by Status', 'connections', 'status',
+ 'options': [None, 'Active Connections by Status', 'connections', 'status',
'nginx.connection_status', 'line'],
'lines': [
['reading'],
@@ -45,7 +37,7 @@ CHARTS = {
]
},
'connect_rate': {
- 'options': [None, 'nginx Connections Rate', 'connections/s', 'connections rate',
+ 'options': [None, 'Connections Rate', 'connections/s', 'connections rate',
'nginx.connect_rate', 'line'],
'lines': [
['accepts', 'accepted', 'incremental'],
@@ -58,9 +50,9 @@ CHARTS = {
class Service(UrlService):
def __init__(self, configuration=None, name=None):
UrlService.__init__(self, configuration=configuration, name=name)
- self.url = self.configuration.get('url', 'http://localhost/stub_status')
self.order = ORDER
self.definitions = CHARTS
+ self.url = self.configuration.get('url', 'http://localhost/stub_status')
def _get_data(self):
"""
diff --git a/collectors/python.d.plugin/nginx/nginx.conf b/collectors/python.d.plugin/nginx/nginx.conf
index 71c521066..4001b4bbe 100644
--- a/collectors/python.d.plugin/nginx/nginx.conf
+++ b/collectors/python.d.plugin/nginx/nginx.conf
@@ -39,11 +39,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -70,7 +68,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, this plugin also supports the following:
diff --git a/collectors/python.d.plugin/nginx_plus/README.md b/collectors/python.d.plugin/nginx_plus/README.md
index 43ec867a3..c20ce30a0 100644
--- a/collectors/python.d.plugin/nginx_plus/README.md
+++ b/collectors/python.d.plugin/nginx_plus/README.md
@@ -123,3 +123,5 @@ local:
Without configuration, the module fails to start.
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fnginx_plus%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/nginx_plus/nginx_plus.chart.py b/collectors/python.d.plugin/nginx_plus/nginx_plus.chart.py
index 1392f5a56..3082fdbe7 100644
--- a/collectors/python.d.plugin/nginx_plus/nginx_plus.chart.py
+++ b/collectors/python.d.plugin/nginx_plus/nginx_plus.chart.py
@@ -16,12 +16,7 @@ except ImportError:
from bases.FrameworkServices.UrlService import UrlService
-# default module values (can be overridden per job in `config`)
-update_every = 1
-priority = 60000
-retries = 60
-# charts order (can be overridden if you want less charts, or different order)
ORDER = [
'requests_total',
'requests_current',
@@ -76,7 +71,7 @@ CHARTS = {
]
},
'ssl_memory_usage': {
- 'options': [None, 'Memory Usage', '%', 'ssl', 'nginx_plus.ssl_memory_usage', 'area'],
+ 'options': [None, 'Memory Usage', 'percentage', 'ssl', 'nginx_plus.ssl_memory_usage', 'area'],
'lines': [
['ssl_memory_usage', 'usage', 'absolute', 1, 100]
]
@@ -95,7 +90,7 @@ def cache_charts(cache):
charts = OrderedDict()
charts['{0}_traffic'.format(cache.name)] = {
- 'options': [None, 'Traffic', 'KB', family, 'nginx_plus.cache_traffic', 'stacked'],
+ 'options': [None, 'Traffic', 'KiB', family, 'nginx_plus.cache_traffic', 'stacked'],
'lines': [
['_'.join([cache.name, 'hit_bytes']), 'served', 'absolute', 1, 1024],
['_'.join([cache.name, 'miss_bytes_written']), 'written', 'absolute', 1, 1024],
@@ -103,7 +98,7 @@ def cache_charts(cache):
]
}
charts['{0}_memory_usage'.format(cache.name)] = {
- 'options': [None, 'Memory Usage', '%', family, 'nginx_plus.cache_memory_usage', 'area'],
+ 'options': [None, 'Memory Usage', 'percentage', family, 'nginx_plus.cache_memory_usage', 'area'],
'lines': [
['_'.join([cache.name, 'memory_usage']), 'usage', 'absolute', 1, 100],
]
@@ -200,7 +195,8 @@ def web_upstream_charts(wu):
'lines': dimensions('active')
}
charts['web_upstream_{name}_connections_usage'.format(name=wu.name)] = {
- 'options': [None, 'Peers Connections Usage', '%', family, 'nginx_plus.web_upstream_connections_usage', 'line'],
+ 'options': [None, 'Peers Connections Usage', 'percentage', family,
+ 'nginx_plus.web_upstream_connections_usage', 'line'],
'lines': dimensions('connections_usage', d=100)
}
# Traffic
@@ -223,7 +219,7 @@ def web_upstream_charts(wu):
# Response Time
for peer in wu:
charts['web_upstream_{0}_{1}_timings'.format(wu.name, peer.server)] = {
- 'options': [None, 'Peer "{0}" Timings'.format(peer.real_server), 'ms', family,
+ 'options': [None, 'Peer "{0}" Timings'.format(peer.real_server), 'milliseconds', family,
'nginx_plus.web_upstream_peer_timings', 'line'],
'lines': [
['_'.join([wu.name, peer.server, 'header_time']), 'header'],
@@ -232,7 +228,7 @@ def web_upstream_charts(wu):
}
# Memory Usage
charts['web_upstream_{name}_memory_usage'.format(name=wu.name)] = {
- 'options': [None, 'Memory Usage', '%', family, 'nginx_plus.web_upstream_memory_usage', 'area'],
+ 'options': [None, 'Memory Usage', 'percentage', family, 'nginx_plus.web_upstream_memory_usage', 'area'],
'lines': [
['_'.join([wu.name, 'memory_usage']), 'usage', 'absolute', 1, 100]
]
diff --git a/collectors/python.d.plugin/nginx_plus/nginx_plus.conf b/collectors/python.d.plugin/nginx_plus/nginx_plus.conf
index 7b5c8f43f..201eb0eb7 100644
--- a/collectors/python.d.plugin/nginx_plus/nginx_plus.conf
+++ b/collectors/python.d.plugin/nginx_plus/nginx_plus.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,7 +56,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, nginx_plus also supports the following:
diff --git a/collectors/python.d.plugin/nsd/README.md b/collectors/python.d.plugin/nsd/README.md
index 02c302f41..b118657d2 100644
--- a/collectors/python.d.plugin/nsd/README.md
+++ b/collectors/python.d.plugin/nsd/README.md
@@ -52,3 +52,5 @@ It produces:
Configuration is not needed.
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fnsd%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/nsd/nsd.chart.py b/collectors/python.d.plugin/nsd/nsd.chart.py
index d713f46bd..77b0d7bbf 100644
--- a/collectors/python.d.plugin/nsd/nsd.chart.py
+++ b/collectors/python.d.plugin/nsd/nsd.chart.py
@@ -7,13 +7,20 @@ import re
from bases.FrameworkServices.ExecutableService import ExecutableService
-# default module values (can be overridden per job in `config`)
-priority = 60000
-retries = 5
+
update_every = 30
-# charts order (can be overridden if you want less charts, or different order)
-ORDER = ['queries', 'zones', 'protocol', 'type', 'transfer', 'rcode']
+NSD_CONTROL_COMMAND = 'nsd-control stats_noreset'
+REGEX = re.compile(r'([A-Za-z0-9.]+)=(\d+)')
+
+ORDER = [
+ 'queries',
+ 'zones',
+ 'protocol',
+ 'type',
+ 'transfer',
+ 'rcode',
+]
CHARTS = {
'queries': {
@@ -79,22 +86,21 @@ CHARTS = {
class Service(ExecutableService):
def __init__(self, configuration=None, name=None):
- ExecutableService.__init__(
- self, configuration=configuration, name=name)
- self.command = 'nsd-control stats_noreset'
+ ExecutableService.__init__(self, configuration=configuration, name=name)
self.order = ORDER
self.definitions = CHARTS
- self.regex = re.compile(r'([A-Za-z0-9.]+)=(\d+)')
+ self.command = NSD_CONTROL_COMMAND
def _get_data(self):
lines = self._get_raw_data()
if not lines:
return None
- r = self.regex
- stats = dict((k.replace('.', '_'), int(v))
- for k, v in r.findall(''.join(lines)))
+ stats = dict(
+ (k.replace('.', '_'), int(v)) for k, v in REGEX.findall(''.join(lines))
+ )
stats.setdefault('num_opcode_NOTIFY', 0)
stats.setdefault('num_type_TYPE252', 0)
stats.setdefault('num_type_TYPE255', 0)
+
return stats
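Hoisting the regex to module level makes the extraction in `_get_data` easy to test in isolation; applied to sample `nsd-control stats_noreset` output (values made up):

```python
import re

REGEX = re.compile(r'([A-Za-z0-9.]+)=(\d+)')

sample = 'num.queries=100234\nnum.type.A=77518\nnum.opcode.QUERY=100234\n'
# Dots become underscores so the keys are valid chart dimension ids.
stats = dict((k.replace('.', '_'), int(v)) for k, v in REGEX.findall(sample))
assert stats['num_type_A'] == 77518
```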
diff --git a/collectors/python.d.plugin/nsd/nsd.conf b/collectors/python.d.plugin/nsd/nsd.conf
index 078e97216..77a8a3177 100644
--- a/collectors/python.d.plugin/nsd/nsd.conf
+++ b/collectors/python.d.plugin/nsd/nsd.conf
@@ -28,11 +28,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -59,7 +57,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, nsd also supports the following:
diff --git a/collectors/python.d.plugin/ntpd/README.md b/collectors/python.d.plugin/ntpd/README.md
index b0fa17fde..d33fd877a 100644
--- a/collectors/python.d.plugin/ntpd/README.md
+++ b/collectors/python.d.plugin/ntpd/README.md
@@ -69,3 +69,5 @@ otherhost:
If no configuration is given, the module will attempt to connect to `ntpd` on `::1:123` or `127.0.0.1:123` and show charts for the system variables. Use `show_peers: yes` to also show charts for configured peers. Local peers in the range `127.0.0.0/8` are hidden by default; use `peer_filter: ''` to show all peers.
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fntpd%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/ntpd/ntpd.chart.py b/collectors/python.d.plugin/ntpd/ntpd.chart.py
index 79d557c80..5a5477e63 100644
--- a/collectors/python.d.plugin/ntpd/ntpd.chart.py
+++ b/collectors/python.d.plugin/ntpd/ntpd.chart.py
@@ -9,10 +9,6 @@ import re
from bases.FrameworkServices.SocketService import SocketService
-# default module values
-update_every = 1
-priority = 60000
-retries = 60
# NTP Control Message Protocol constants
MODE = 6
@@ -54,13 +50,15 @@ ORDER = [
CHARTS = {
'sys_offset': {
- 'options': [None, 'Combined offset of server relative to this host', 'ms', 'system', 'ntpd.sys_offset', 'area'],
+ 'options': [None, 'Combined offset of server relative to this host', 'milliseconds',
+ 'system', 'ntpd.sys_offset', 'area'],
'lines': [
['offset', 'offset', 'absolute', 1, PRECISION]
]
},
'sys_jitter': {
- 'options': [None, 'Combined system jitter and clock jitter', 'ms', 'system', 'ntpd.sys_jitter', 'line'],
+ 'options': [None, 'Combined system jitter and clock jitter', 'milliseconds',
+ 'system', 'ntpd.sys_jitter', 'line'],
'lines': [
['sys_jitter', 'system', 'absolute', 1, PRECISION],
['clk_jitter', 'clock', 'absolute', 1, PRECISION]
@@ -79,14 +77,14 @@ CHARTS = {
]
},
'sys_rootdelay': {
- 'options': [None, 'Total roundtrip delay to the primary reference clock', 'ms', 'system',
+ 'options': [None, 'Total roundtrip delay to the primary reference clock', 'milliseconds', 'system',
'ntpd.sys_rootdelay', 'area'],
'lines': [
['rootdelay', 'delay', 'absolute', 1, PRECISION]
]
},
'sys_rootdisp': {
- 'options': [None, 'Total root dispersion to the primary reference clock', 'ms', 'system',
+ 'options': [None, 'Total root dispersion to the primary reference clock', 'milliseconds', 'system',
'ntpd.sys_rootdisp', 'area'],
'lines': [
['rootdisp', 'dispersion', 'absolute', 1, PRECISION]
@@ -115,27 +113,27 @@ CHARTS = {
PEER_CHARTS = {
'peer_offset': {
- 'options': [None, 'Filter offset', 'ms', 'peers', 'ntpd.peer_offset', 'line'],
+ 'options': [None, 'Filter offset', 'milliseconds', 'peers', 'ntpd.peer_offset', 'line'],
'lines': []
},
'peer_delay': {
- 'options': [None, 'Filter delay', 'ms', 'peers', 'ntpd.peer_delay', 'line'],
+ 'options': [None, 'Filter delay', 'milliseconds', 'peers', 'ntpd.peer_delay', 'line'],
'lines': []
},
'peer_dispersion': {
- 'options': [None, 'Filter dispersion', 'ms', 'peers', 'ntpd.peer_dispersion', 'line'],
+ 'options': [None, 'Filter dispersion', 'milliseconds', 'peers', 'ntpd.peer_dispersion', 'line'],
'lines': []
},
'peer_jitter': {
- 'options': [None, 'Filter jitter', 'ms', 'peers', 'ntpd.peer_jitter', 'line'],
+ 'options': [None, 'Filter jitter', 'milliseconds', 'peers', 'ntpd.peer_jitter', 'line'],
'lines': []
},
'peer_xleave': {
- 'options': [None, 'Interleave delay', 'ms', 'peers', 'ntpd.peer_xleave', 'line'],
+ 'options': [None, 'Interleave delay', 'milliseconds', 'peers', 'ntpd.peer_xleave', 'line'],
'lines': []
},
'peer_rootdelay': {
- 'options': [None, 'Total roundtrip delay to the primary reference clock', 'ms', 'peers',
+ 'options': [None, 'Total roundtrip delay to the primary reference clock', 'milliseconds', 'peers',
'ntpd.peer_rootdelay', 'line'],
'lines': []
},
@@ -235,7 +233,6 @@ class Service(SocketService):
SocketService.__init__(self, configuration=configuration, name=name)
self.order = list(ORDER)
self.definitions = dict(CHARTS)
-
self.port = 'ntp'
self.dgram_socket = True
self.system = System()
@@ -244,7 +241,6 @@ class Service(SocketService):
self.retries = 0
self.show_peers = self.configuration.get('show_peers', False)
self.peer_rescan = self.configuration.get('peer_rescan', 60)
-
if self.show_peers:
self.definitions.update(PEER_CHARTS)
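The `ms` → `milliseconds` unit renames keep the existing `PRECISION` divisor, which implements a fixed-point trick: netdata dimensions are integers, so float offsets are scaled up on collection and scaled back down by the chart divisor. A sketch, assuming the module's `PRECISION` is one million:

```python
PRECISION = 1000000  # assumed value; defined earlier in ntpd.chart.py

offset_ms = 0.123456                  # float offset reported by ntpd
stored = int(offset_ms * PRECISION)   # 123456, sent as an integer
displayed = stored / PRECISION        # 0.123456 milliseconds again
```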
diff --git a/collectors/python.d.plugin/ntpd/ntpd.conf b/collectors/python.d.plugin/ntpd/ntpd.conf
index 7adc4074b..80bd468d1 100644
--- a/collectors/python.d.plugin/ntpd/ntpd.conf
+++ b/collectors/python.d.plugin/ntpd/ntpd.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# ----------------------------------------------------------------------
# JOBS (data collection sources)
@@ -52,7 +50,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
#
# Additionally to the above, ntp also supports the following:
#
diff --git a/collectors/python.d.plugin/nvidia_smi/README.md b/collectors/python.d.plugin/nvidia_smi/README.md
index 06acfc297..48b611951 100644
--- a/collectors/python.d.plugin/nvidia_smi/README.md
+++ b/collectors/python.d.plugin/nvidia_smi/README.md
@@ -36,4 +36,5 @@ Sample:
```yaml
poll_seconds: 1
-``` \ No newline at end of file
+```
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fnvidia_smi%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/nvidia_smi/nvidia_smi.chart.py b/collectors/python.d.plugin/nvidia_smi/nvidia_smi.chart.py
index c3fff6219..7cb816c0d 100644
--- a/collectors/python.d.plugin/nvidia_smi/nvidia_smi.chart.py
+++ b/collectors/python.d.plugin/nvidia_smi/nvidia_smi.chart.py
@@ -15,6 +15,8 @@ disabled_by_default = True
NVIDIA_SMI = 'nvidia-smi'
+BAD_VALUE = 'N/A'
+
EMPTY_ROW = ''
EMPTY_ROW_LIMIT = 500
POLLER_BREAK_ROW = '</nvidia_smi_log>'
@@ -47,39 +49,39 @@ def gpu_charts(gpu):
charts = {
PCI_BANDWIDTH: {
- 'options': [None, 'PCI Express Bandwidth Utilization', 'KB/s', fam, 'nvidia_smi.pci_bandwidth', 'area'],
+ 'options': [None, 'PCI Express Bandwidth Utilization', 'KiB/s', fam, 'nvidia_smi.pci_bandwidth', 'area'],
'lines': [
['rx_util', 'rx', 'absolute', 1, 1],
['tx_util', 'tx', 'absolute', 1, -1],
]
},
FAN_SPEED: {
- 'options': [None, 'Fan Speed', '%', fam, 'nvidia_smi.fan_speed', 'line'],
+ 'options': [None, 'Fan Speed', 'percentage', fam, 'nvidia_smi.fan_speed', 'line'],
'lines': [
['fan_speed', 'speed'],
]
},
GPU_UTIL: {
- 'options': [None, 'GPU Utilization', '%', fam, 'nvidia_smi.gpu_utilization', 'line'],
+ 'options': [None, 'GPU Utilization', 'percentage', fam, 'nvidia_smi.gpu_utilization', 'line'],
'lines': [
['gpu_util', 'utilization'],
]
},
MEM_UTIL: {
- 'options': [None, 'Memory Bandwidth Utilization', '%', fam, 'nvidia_smi.mem_utilization', 'line'],
+ 'options': [None, 'Memory Bandwidth Utilization', 'percentage', fam, 'nvidia_smi.mem_utilization', 'line'],
'lines': [
['memory_util', 'utilization'],
]
},
ENCODER_UTIL: {
- 'options': [None, 'Encoder/Decoder Utilization', '%', fam, 'nvidia_smi.encoder_utilization', 'line'],
+ 'options': [None, 'Encoder/Decoder Utilization', 'percentage', fam, 'nvidia_smi.encoder_utilization', 'line'],
'lines': [
['encoder_util', 'encoder'],
['decoder_util', 'decoder'],
]
},
MEM_ALLOCATED: {
- 'options': [None, 'Memory Allocated', 'MB', fam, 'nvidia_smi.memory_allocated', 'line'],
+ 'options': [None, 'Memory Allocated', 'MiB', fam, 'nvidia_smi.memory_allocated', 'line'],
'lines': [
['fb_memory_usage', 'used'],
]
@@ -206,6 +208,15 @@ def handle_attr_error(method):
return on_call
+def handle_value_error(method):
+ def on_call(*args, **kwargs):
+ try:
+ return method(*args, **kwargs)
+ except ValueError:
+ return None
+ return on_call
+
+
class GPU:
def __init__(self, num, root):
self.num = num
@@ -272,6 +283,7 @@ class GPU:
def mem_clock(self):
return self.root.find('clocks').find('mem_clock').text.split()[0]
+ @handle_value_error
@handle_attr_error
def power_draw(self):
return float(self.root.find('power_readings').find('power_draw').text.split()[0]) * 100
@@ -294,7 +306,9 @@ class GPU:
'power_draw': self.power_draw(),
}
- return dict(('gpu{0}_{1}'.format(self.num, k), v) for k, v in data.items() if v is not None)
+ return dict(
+ ('gpu{0}_{1}'.format(self.num, k), v) for k, v in data.items() if v is not None and v != BAD_VALUE
+ )
class Service(SimpleService):
@@ -302,7 +316,6 @@ class Service(SimpleService):
super(Service, self).__init__(configuration=configuration, name=name)
self.order = list()
self.definitions = dict()
-
poll = int(configuration.get('poll_seconds', 1))
self.poller = NvidiaSMIPoller(poll)
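The new `handle_value_error` decorator works together with the `BAD_VALUE` filter: `nvidia-smi` reports `N/A` for unsupported sensors, and `float('N/A')` raises `ValueError`, which is now converted to `None` so the reading is dropped instead of crashing the collector. A self-contained sketch of the mechanism:

```python
def handle_value_error(method):
    def on_call(*args, **kwargs):
        try:
            return method(*args, **kwargs)
        except ValueError:
            return None
    return on_call

@handle_value_error
def power_draw(text='N/A'):
    # 'N/A' makes float() raise ValueError; the decorator returns None.
    return float(text.split()[0]) * 100

assert power_draw() is None   # unsupported sensor: silently dropped
print(power_draw('31.82 W'))  # ~3182.0 (scaled to keep two decimals)
```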
diff --git a/collectors/python.d.plugin/nvidia_smi/nvidia_smi.conf b/collectors/python.d.plugin/nvidia_smi/nvidia_smi.conf
index e1bcf3faf..53e544a5d 100644
--- a/collectors/python.d.plugin/nvidia_smi/nvidia_smi.conf
+++ b/collectors/python.d.plugin/nvidia_smi/nvidia_smi.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,7 +56,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, nvidia_smi also supports the following:
diff --git a/collectors/python.d.plugin/openldap/README.md b/collectors/python.d.plugin/openldap/README.md
index 938535bca..629cc1539 100644
--- a/collectors/python.d.plugin/openldap/README.md
+++ b/collectors/python.d.plugin/openldap/README.md
@@ -55,3 +55,5 @@ openldap:
```
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fopenldap%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/openldap/openldap.chart.py b/collectors/python.d.plugin/openldap/openldap.chart.py
index 6342d3863..768ed01e8 100644
--- a/collectors/python.d.plugin/openldap/openldap.chart.py
+++ b/collectors/python.d.plugin/openldap/openldap.chart.py
@@ -11,8 +11,6 @@ except ImportError:
from bases.FrameworkServices.SimpleService import SimpleService
-# default module values (can be overridden per job in `config`)
-priority = 60000
DEFAULT_SERVER = 'localhost'
DEFAULT_PORT = '389'
@@ -36,7 +34,7 @@ CHARTS = {
]
},
'bytes_sent': {
- 'options': [None, 'Traffic', 'KB/s', 'ldap', 'openldap.traffic_stats', 'line'],
+ 'options': [None, 'Traffic', 'KiB/s', 'ldap', 'openldap.traffic_stats', 'line'],
'lines': [
['bytes_sent', 'sent', 'incremental', 1, 1024]
]
@@ -136,13 +134,11 @@ class Service(SimpleService):
SimpleService.__init__(self, configuration=configuration, name=name)
self.order = ORDER
self.definitions = CHARTS
-
self.server = configuration.get('server', DEFAULT_SERVER)
self.port = configuration.get('port', DEFAULT_PORT)
self.username = configuration.get('username')
self.password = configuration.get('password')
self.timeout = configuration.get('timeout', DEFAULT_TIMEOUT)
-
self.alive = False
self.conn = None
diff --git a/collectors/python.d.plugin/openldap/openldap.conf b/collectors/python.d.plugin/openldap/openldap.conf
index 662cc58c4..6182b3ee2 100644
--- a/collectors/python.d.plugin/openldap/openldap.conf
+++ b/collectors/python.d.plugin/openldap/openldap.conf
@@ -28,11 +28,9 @@ update_every: 10
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -59,7 +57,7 @@ update_every: 10
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# ----------------------------------------------------------------------
diff --git a/collectors/python.d.plugin/ovpn_status_log/README.md b/collectors/python.d.plugin/ovpn_status_log/README.md
index be1ea279e..bcd1f00e3 100644
--- a/collectors/python.d.plugin/ovpn_status_log/README.md
+++ b/collectors/python.d.plugin/ovpn_status_log/README.md
@@ -30,3 +30,5 @@ default
```
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fovpn_status_log%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/ovpn_status_log/ovpn_status_log.chart.py b/collectors/python.d.plugin/ovpn_status_log/ovpn_status_log.chart.py
index 64d7062d9..dc7a6002e 100644
--- a/collectors/python.d.plugin/ovpn_status_log/ovpn_status_log.chart.py
+++ b/collectors/python.d.plugin/ovpn_status_log/ovpn_status_log.chart.py
@@ -3,15 +3,18 @@
# Author: l2isbad
# SPDX-License-Identifier: GPL-3.0-or-later
-from re import compile as r_compile
+import re
from bases.FrameworkServices.SimpleService import SimpleService
-priority = 60000
-retries = 60
+
update_every = 10
-ORDER = ['users', 'traffic']
+ORDER = [
+ 'users',
+ 'traffic',
+]
+
CHARTS = {
'users': {
'options': [None, 'OpenVPN Active Users', 'active users', 'users', 'openvpn_status.users', 'line'],
@@ -20,15 +23,20 @@ CHARTS = {
]
},
'traffic': {
- 'options': [None, 'OpenVPN Traffic', 'KB/s', 'traffic', 'openvpn_status.traffic', 'area'],
+ 'options': [None, 'OpenVPN Traffic', 'KiB/s', 'traffic', 'openvpn_status.traffic', 'area'],
'lines': [
- ['bytes_in', 'in', 'incremental', 1, 1 << 10], ['bytes_out', 'out', 'incremental', 1, -1 << 10]
+ ['bytes_in', 'in', 'incremental', 1, 1 << 10],
+ ['bytes_out', 'out', 'incremental', -1, 1 << 10]
]
}
}
-TLS_REGEX = r_compile(r'(?:[0-9a-f:]+|(?:\d{1,3}(?:\.\d{1,3}){3}(?::\d+)?)) (?P<bytes_in>\d+) (?P<bytes_out>\d+)')
-STATIC_KEY_REGEX = r_compile(r'TCP/[A-Z]+ (?P<direction>(?:read|write)) bytes,(?P<bytes>\d+)')
+TLS_REGEX = re.compile(
+ r'(?:[0-9a-f]+:[0-9a-f:]+|(?:\d{1,3}(?:\.\d{1,3}){3}(?::\d+)?)) (?P<bytes_in>\d+) (?P<bytes_out>\d+)'
+)
+STATIC_KEY_REGEX = re.compile(
+ r'TCP/[A-Z]+ (?P<direction>(?:read|write)) bytes,(?P<bytes>\d+)'
+)
class Service(SimpleService):
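The rewritten `TLS_REGEX` is stricter: the first alternative now demands at least one `:` (`[0-9a-f]+:[0-9a-f:]+`), so it matches IPv6 addresses without also matching arbitrary bare hex tokens, as the old `[0-9a-f:]+` did. The `bytes_out` change is cosmetic, since `value * 1 / (-1 << 10)` and `value * -1 / (1 << 10)` chart identically. A quick check against hypothetical, pre-normalized status entries:

```python
import re

TLS_REGEX = re.compile(
    r'(?:[0-9a-f]+:[0-9a-f:]+|(?:\d{1,3}(?:\.\d{1,3}){3}(?::\d+)?))'
    r' (?P<bytes_in>\d+) (?P<bytes_out>\d+)'
)

m4 = TLS_REGEX.search('203.0.113.7:52070 527914 1893725')  # IPv4:port entry
m6 = TLS_REGEX.search('2001:db8::5 1024 2048')             # IPv6 entry
assert m4 and m4.group('bytes_in') == '527914'
assert m6 and m6.group('bytes_out') == '2048'

# A bare hex token no longer matches (it did under '[0-9a-f:]+'):
assert TLS_REGEX.search('deadbeef 10 20') is None
```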
diff --git a/collectors/python.d.plugin/ovpn_status_log/ovpn_status_log.conf b/collectors/python.d.plugin/ovpn_status_log/ovpn_status_log.conf
index 6fb35a530..1d71f6b8e 100644
--- a/collectors/python.d.plugin/ovpn_status_log/ovpn_status_log.conf
+++ b/collectors/python.d.plugin/ovpn_status_log/ovpn_status_log.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,7 +56,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, openvpn status log also supports the following:
diff --git a/collectors/python.d.plugin/phpfpm/README.md b/collectors/python.d.plugin/phpfpm/README.md
index 66930463f..d3aa85a7c 100644
--- a/collectors/python.d.plugin/phpfpm/README.md
+++ b/collectors/python.d.plugin/phpfpm/README.md
@@ -32,9 +32,10 @@ priority : 90100
local:
url : 'http://localhost/status'
- retries : 10
```
Without configuration, module attempts to connect to `http://localhost/status`
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fphpfpm%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/phpfpm/phpfpm.chart.py b/collectors/python.d.plugin/phpfpm/phpfpm.chart.py
index a3f0963fc..70091e233 100644
--- a/collectors/python.d.plugin/phpfpm/phpfpm.chart.py
+++ b/collectors/python.d.plugin/phpfpm/phpfpm.chart.py
@@ -9,20 +9,8 @@ import re
from bases.FrameworkServices.UrlService import UrlService
-# default module values (can be overridden per job in `config`)
-# update_every = 2
-priority = 60000
-retries = 60
-# default job configuration (overridden by python.d.plugin)
-# config = {'local': {
-# 'update_every': update_every,
-# 'retries': retries,
-# 'priority': priority,
-# 'url': 'http://localhost/status?full&json'
-# }}
-
-# charts order (can be overridden if you want less charts, or different order)
+REGEX = re.compile(r'([a-z][a-z ]+): ([\d.]+)')
POOL_INFO = [
('active processes', 'active'),
@@ -50,7 +38,14 @@ CALC = [
('avg', average)
]
-ORDER = ['connections', 'requests', 'performance', 'request_duration', 'request_cpu', 'request_mem']
+ORDER = [
+ 'connections',
+ 'requests',
+ 'performance',
+ 'request_duration',
+ 'request_cpu',
+ 'request_mem',
+]
CHARTS = {
'connections': {
@@ -85,7 +80,7 @@ CHARTS = {
]
},
'request_cpu': {
- 'options': [None, 'PHP-FPM Request CPU', 'percent', 'request CPU', 'phpfpm.request_cpu', 'line'],
+ 'options': [None, 'PHP-FPM Request CPU', 'percentage', 'request CPU', 'phpfpm.request_cpu', 'line'],
'lines': [
['minReqCpu', 'min'],
['maxReqCpu', 'max'],
@@ -93,7 +88,7 @@ CHARTS = {
]
},
'request_mem': {
- 'options': [None, 'PHP-FPM Request Memory', 'kilobytes', 'request memory', 'phpfpm.request_mem', 'line'],
+ 'options': [None, 'PHP-FPM Request Memory', 'KB', 'request memory', 'phpfpm.request_mem', 'line'],
'lines': [
['minReqMem', 'min', 'absolute', 1, 1024],
['maxReqMem', 'max', 'absolute', 1, 1024],
@@ -106,14 +101,14 @@ CHARTS = {
class Service(UrlService):
def __init__(self, configuration=None, name=None):
UrlService.__init__(self, configuration=configuration, name=name)
- self.url = self.configuration.get('url', 'http://localhost/status?full&json')
self.order = ORDER
self.definitions = CHARTS
- self.regex = re.compile(r'([a-z][a-z ]+): ([\d.]+)')
+ self.url = self.configuration.get('url', 'http://localhost/status?full&json')
self.json = '&json' in self.url or '?json' in self.url
self.json_full = self.url.endswith(('?full&json', '?json&full'))
- self.if_all_processes_running = dict([(c_name + p_name, 0) for c_name, func in CALC
- for metric, p_name in PER_PROCESS_INFO])
+ self.if_all_processes_running = dict(
+ [(c_name + p_name, 0) for c_name, func in CALC for metric, p_name in PER_PROCESS_INFO]
+ )
def _get_data(self):
"""
@@ -124,7 +119,7 @@ class Service(UrlService):
if not raw:
return None
- raw_json = parse_raw_data_(is_json=self.json, regex=self.regex, raw_data=raw)
+ raw_json = parse_raw_data_(is_json=self.json, raw_data=raw)
# Per Pool info: active connections, requests and performance charts
to_netdata = fetch_data_(raw_data=raw_json, metrics_list=POOL_INFO)
@@ -160,7 +155,7 @@ def fetch_data_(raw_data, metrics_list, pid=''):
return result
-def parse_raw_data_(is_json, regex, raw_data):
+def parse_raw_data_(is_json, raw_data):
"""
:param is_json: bool
:param regex: compiled regular expr
@@ -174,4 +169,4 @@ def parse_raw_data_(is_json, regex, raw_data):
return dict()
else:
raw_data = ' '.join(raw_data.split())
- return dict(regex.findall(raw_data))
+ return dict(REGEX.findall(raw_data))
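Hoisting the pattern to a module-level `REGEX` lets `parse_raw_data_` drop its `regex` parameter (the unchanged docstring above still lists `:param regex:`, a leftover this patch does not touch). For the plain-text status format, the normalize-then-findall step behaves roughly as follows, using a made-up payload:

```python
import re

REGEX = re.compile(r'([a-z][a-z ]+): ([\d.]+)')

# Hypothetical text-format /status payload:
raw = """accepted conn:        1087
listen queue:         0
idle processes:       2
active processes:     1"""

raw = ' '.join(raw.split())  # collapse whitespace, as parse_raw_data_() does
print(dict(REGEX.findall(raw)))
# {'accepted conn': '1087', 'listen queue': '0',
#  'idle processes': '2', 'active processes': '1'}
```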
diff --git a/collectors/python.d.plugin/phpfpm/phpfpm.conf b/collectors/python.d.plugin/phpfpm/phpfpm.conf
index 571eb9156..d31853903 100644
--- a/collectors/python.d.plugin/phpfpm/phpfpm.conf
+++ b/collectors/python.d.plugin/phpfpm/phpfpm.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,7 +56,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, PHP-FPM also supports the following:
diff --git a/collectors/python.d.plugin/portcheck/README.md b/collectors/python.d.plugin/portcheck/README.md
index f1338d576..8f289c8de 100644
--- a/collectors/python.d.plugin/portcheck/README.md
+++ b/collectors/python.d.plugin/portcheck/README.md
@@ -33,3 +33,5 @@ server:
* Currently, the accuracy of the latency is low and should be used as reference only.
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fportcheck%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/portcheck/portcheck.chart.py b/collectors/python.d.plugin/portcheck/portcheck.chart.py
index e86f82544..8479e38e4 100644
--- a/collectors/python.d.plugin/portcheck/portcheck.chart.py
+++ b/collectors/python.d.plugin/portcheck/portcheck.chart.py
@@ -12,9 +12,6 @@ except ImportError:
from bases.FrameworkServices.SimpleService import SimpleService
-# default module values (can be overridden per job in `config`)
-priority = 60000
-retries = 60
PORT_LATENCY = 'connect'
@@ -26,7 +23,7 @@ ORDER = ['latency', 'status']
CHARTS = {
'latency': {
- 'options': [None, 'TCP connect latency', 'ms', 'latency', 'portcheck.latency', 'line'],
+ 'options': [None, 'TCP connect latency', 'milliseconds', 'latency', 'portcheck.latency', 'line'],
'lines': [
[PORT_LATENCY, 'connect', 'absolute', 100, 1000]
]
@@ -121,7 +118,7 @@ class Service(SimpleService):
:return: dict
"""
- af, _, proto, _, sa = socket_config
+ _, _, _, _, sa = socket_config
port = str(sa[1])
try:
self.debug('Connecting socket to "{address}", port {port}'.format(address=sa[0], port=port))
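Only the sockaddr element of each `getaddrinfo()` tuple is needed now, hence the `_, _, _, _, sa` unpacking. A self-contained sketch of the measurement behind the latency chart, assuming a single resolved address and a 1-second timeout (names here are illustrative, not the module's):

```python
import socket
import time


def connect_latency_ms(host, port, timeout=1.0):
    # Time a full TCP handshake; return None on failure or timeout.
    family, _, proto, _, sa = socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM)[0]
    sock = socket.socket(family, socket.SOCK_STREAM, proto)
    sock.settimeout(timeout)
    start = time.monotonic()
    try:
        sock.connect(sa)
    except OSError:
        return None
    finally:
        sock.close()
    return (time.monotonic() - start) * 1000
```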
diff --git a/collectors/python.d.plugin/portcheck/portcheck.conf b/collectors/python.d.plugin/portcheck/portcheck.conf
index b3dd8bd3f..df67824bd 100644
--- a/collectors/python.d.plugin/portcheck/portcheck.conf
+++ b/collectors/python.d.plugin/portcheck/portcheck.conf
@@ -27,6 +27,10 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
# chart_cleanup sets the default chart cleanup interval in iterations.
# A chart is marked as obsolete if it has not been updated
# 'chart_cleanup' iterations in a row.
@@ -60,7 +64,7 @@ chart_cleanup: 0
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # [optional] the JOB's data collection frequency
# priority: 60000 # [optional] the JOB's order on the dashboard
-# retries: 60 # [optional] the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# timeout: 1 # [optional] the socket timeout when connecting
# host: 'dns or ip' # [required] the remote host address in either IPv4, IPv6 or as DNS name.
# port: 22 # [required] the port number to check. Specify an integer, not service name.
diff --git a/collectors/python.d.plugin/postfix/README.md b/collectors/python.d.plugin/postfix/README.md
index 77c95ff44..e2147ac91 100644
--- a/collectors/python.d.plugin/postfix/README.md
+++ b/collectors/python.d.plugin/postfix/README.md
@@ -13,3 +13,5 @@ It produces only two charts:
Configuration is not needed.
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fpostfix%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/postfix/postfix.chart.py b/collectors/python.d.plugin/postfix/postfix.chart.py
index bdbd0feea..b650514ee 100644
--- a/collectors/python.d.plugin/postfix/postfix.chart.py
+++ b/collectors/python.d.plugin/postfix/postfix.chart.py
@@ -5,13 +5,12 @@
from bases.FrameworkServices.ExecutableService import ExecutableService
-# default module values (can be overridden per job in `config`)
-# update_every = 2
-priority = 60000
-retries = 60
+POSTQUEUE_COMMAND = 'postqueue -p'
-# charts order (can be overridden if you want less charts, or different order)
-ORDER = ['qemails', 'qsize']
+ORDER = [
+ 'qemails',
+ 'qsize',
+]
CHARTS = {
'qemails': {
@@ -21,7 +20,7 @@ CHARTS = {
]
},
'qsize': {
- 'options': [None, 'Postfix Queue Emails Size', 'emails size in KB', 'queue', 'postfix.qsize', 'area'],
+ 'options': [None, 'Postfix Queue Emails Size', 'KiB', 'queue', 'postfix.qsize', 'area'],
'lines': [
['size', None, 'absolute']
]
@@ -32,9 +31,9 @@ CHARTS = {
class Service(ExecutableService):
def __init__(self, configuration=None, name=None):
ExecutableService.__init__(self, configuration=configuration, name=name)
- self.command = 'postqueue -p'
self.order = ORDER
self.definitions = CHARTS
+ self.command = POSTQUEUE_COMMAND
def _get_data(self):
"""
diff --git a/collectors/python.d.plugin/postfix/postfix.conf b/collectors/python.d.plugin/postfix/postfix.conf
index e0d5a5f83..a4d2472ee 100644
--- a/collectors/python.d.plugin/postfix/postfix.conf
+++ b/collectors/python.d.plugin/postfix/postfix.conf
@@ -28,11 +28,9 @@ update_every: 10
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -59,7 +57,7 @@ update_every: 10
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, postfix also supports the following:
diff --git a/collectors/python.d.plugin/postgres/README.md b/collectors/python.d.plugin/postgres/README.md
index e7b108d36..9939a0c48 100644
--- a/collectors/python.d.plugin/postgres/README.md
+++ b/collectors/python.d.plugin/postgres/README.md
@@ -66,3 +66,5 @@ tcp:
When no configuration file is found, module tries to connect to TCP/IP socket: `localhost:5432`.
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fpostgres%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/postgres/postgres.chart.py b/collectors/python.d.plugin/postgres/postgres.chart.py
index 7f43877c3..e988eec36 100644
--- a/collectors/python.d.plugin/postgres/postgres.chart.py
+++ b/collectors/python.d.plugin/postgres/postgres.chart.py
@@ -16,13 +16,34 @@ except ImportError:
from bases.FrameworkServices.SimpleService import SimpleService
-# default module values
-update_every = 1
-priority = 60000
-retries = 60
+
+DEFAULT_PORT = 5432
+DEFAULT_USER = 'postgres'
+DEFAULT_CONNECT_TIMEOUT = 2 # seconds
+DEFAULT_STATEMENT_TIMEOUT = 5000 # ms
+
+
+WAL = 'WAL'
+ARCHIVE = 'ARCHIVE'
+BACKENDS = 'BACKENDS'
+TABLE_STATS = 'TABLE_STATS'
+INDEX_STATS = 'INDEX_STATS'
+DATABASE = 'DATABASE'
+BGWRITER = 'BGWRITER'
+LOCKS = 'LOCKS'
+DATABASES = 'DATABASES'
+STANDBY = 'STANDBY'
+REPLICATION_SLOT = 'REPLICATION_SLOT'
+STANDBY_DELTA = 'STANDBY_DELTA'
+REPSLOT_FILES = 'REPSLOT_FILES'
+IF_SUPERUSER = 'IF_SUPERUSER'
+SERVER_VERSION = 'SERVER_VERSION'
+AUTOVACUUM = 'AUTOVACUUM'
+DIFF_LSN = 'DIFF_LSN'
+WAL_WRITES = 'WAL_WRITES'
METRICS = {
- 'DATABASE': [
+ DATABASE: [
'connections',
'xact_commit',
'xact_rollback',
@@ -38,32 +59,32 @@ METRICS = {
'temp_bytes',
'size'
],
- 'BACKENDS': [
+ BACKENDS: [
'backends_active',
'backends_idle'
],
- 'INDEX_STATS': [
+ INDEX_STATS: [
'index_count',
'index_size'
],
- 'TABLE_STATS': [
+ TABLE_STATS: [
'table_size',
'table_count'
],
- 'WAL': [
+ WAL: [
'written_wal',
'recycled_wal',
'total_wal'
],
- 'WAL_WRITES': [
+ WAL_WRITES: [
'wal_writes'
],
- 'ARCHIVE': [
+ ARCHIVE: [
'ready_count',
'done_count',
'file_count'
],
- 'BGWRITER': [
+ BGWRITER: [
'checkpoint_scheduled',
'checkpoint_requested',
'buffers_checkpoint',
@@ -73,7 +94,7 @@ METRICS = {
'buffers_alloc',
'buffers_backend_fsync'
],
- 'LOCKS': [
+ LOCKS: [
'ExclusiveLock',
'RowShareLock',
'SIReadLock',
@@ -84,27 +105,61 @@ METRICS = {
'ShareLock',
'RowExclusiveLock'
],
- 'AUTOVACUUM': [
+ AUTOVACUUM: [
'analyze',
'vacuum_analyze',
'vacuum',
'vacuum_freeze',
'brin_summarize'
],
- 'STANDBY_DELTA': [
+ STANDBY_DELTA: [
'sent_delta',
'write_delta',
'flush_delta',
'replay_delta'
],
- 'REPSLOT_FILES': [
+ REPSLOT_FILES: [
'replslot_wal_keep',
'replslot_files'
]
}
-QUERIES = {
- 'WAL': """
+NO_VERSION = 0
+DEFAULT = 'DEFAULT'
+V96 = 'V96'
+V10 = 'V10'
+V11 = 'V11'
+
+
+QUERY_WAL = {
+ DEFAULT: """
+SELECT
+ count(*) as total_wal,
+ count(*) FILTER (WHERE type = 'recycled') AS recycled_wal,
+ count(*) FILTER (WHERE type = 'written') AS written_wal
+FROM
+ (SELECT
+ wal.name,
+ pg_walfile_name(
+ CASE pg_is_in_recovery()
+ WHEN true THEN NULL
+ ELSE pg_current_wal_lsn()
+ END ),
+ CASE
+ WHEN wal.name > pg_walfile_name(
+ CASE pg_is_in_recovery()
+ WHEN true THEN NULL
+ ELSE pg_current_wal_lsn()
+ END ) THEN 'recycled'
+ ELSE 'written'
+ END AS type
+ FROM pg_catalog.pg_ls_dir('pg_wal') AS wal(name)
+ WHERE name ~ '^[0-9A-F]{24}$'
+ ORDER BY
+ (pg_stat_file('pg_wal/'||name)).modification,
+ wal.name DESC) sub;
+""",
+ V96: """
SELECT
count(*) as total_wal,
count(*) FILTER (WHERE type = 'recycled') AS recycled_wal,
@@ -112,34 +167,49 @@ SELECT
FROM
(SELECT
wal.name,
- pg_{0}file_name(
+ pg_xlogfile_name(
CASE pg_is_in_recovery()
WHEN true THEN NULL
- ELSE pg_current_{0}_{1}()
+ ELSE pg_current_xlog_location()
END ),
CASE
- WHEN wal.name > pg_{0}file_name(
+ WHEN wal.name > pg_xlogfile_name(
CASE pg_is_in_recovery()
WHEN true THEN NULL
- ELSE pg_current_{0}_{1}()
+ ELSE pg_current_xlog_location()
END ) THEN 'recycled'
ELSE 'written'
END AS type
- FROM pg_catalog.pg_ls_dir('pg_{0}') AS wal(name)
- WHERE name ~ '^[0-9A-F]{{24}}$'
+ FROM pg_catalog.pg_ls_dir('pg_xlog') AS wal(name)
+ WHERE name ~ '^[0-9A-F]{24}$'
ORDER BY
- (pg_stat_file('pg_{0}/'||name)).modification,
+ (pg_stat_file('pg_xlog/'||name)).modification,
wal.name DESC) sub;
""",
- 'ARCHIVE': """
+}
+
+QUERY_ARCHIVE = {
+ DEFAULT: """
+SELECT
+ CAST(COUNT(*) AS INT) AS file_count,
+ CAST(COALESCE(SUM(CAST(archive_file ~ $r$\.ready$$r$ as INT)),0) AS INT) AS ready_count,
+ CAST(COALESCE(SUM(CAST(archive_file ~ $r$\.done$$r$ AS INT)),0) AS INT) AS done_count
+FROM
+ pg_catalog.pg_ls_dir('pg_wal/archive_status') AS archive_files (archive_file);
+""",
+ V96: """
SELECT
CAST(COUNT(*) AS INT) AS file_count,
CAST(COALESCE(SUM(CAST(archive_file ~ $r$\.ready$$r$ as INT)),0) AS INT) AS ready_count,
CAST(COALESCE(SUM(CAST(archive_file ~ $r$\.done$$r$ AS INT)),0) AS INT) AS done_count
FROM
- pg_catalog.pg_ls_dir('pg_{0}/archive_status') AS archive_files (archive_file);
+ pg_catalog.pg_ls_dir('pg_xlog/archive_status') AS archive_files (archive_file);
+
""",
- 'BACKENDS': """
+}
+
+QUERY_BACKEND = {
+ DEFAULT: """
SELECT
count(*) - (SELECT count(*)
FROM pg_stat_activity
@@ -151,21 +221,30 @@ SELECT
AS backends_idle
FROM pg_stat_activity;
""",
- 'TABLE_STATS': """
+}
+
+QUERY_TABLE_STATS = {
+ DEFAULT: """
SELECT
((sum(relpages) * 8) * 1024) AS table_size,
count(1) AS table_count
FROM pg_class
WHERE relkind IN ('r', 't');
""",
- 'INDEX_STATS': """
+}
+
+QUERY_INDEX_STATS = {
+ DEFAULT: """
SELECT
((sum(relpages) * 8) * 1024) AS index_size,
count(1) AS index_count
FROM pg_class
WHERE relkind = 'i';
""",
- 'DATABASE': """
+}
+
+QUERY_DATABASE = {
+ DEFAULT: """
SELECT
datname AS database_name,
numbackends AS connections,
@@ -185,7 +264,10 @@ SELECT
FROM pg_stat_database
WHERE datname IN %(databases)s ;
""",
- 'BGWRITER': """
+}
+
+QUERY_BGWRITER = {
+ DEFAULT: """
SELECT
checkpoints_timed AS checkpoint_scheduled,
checkpoints_req AS checkpoint_requested,
@@ -197,7 +279,10 @@ SELECT
buffers_backend_fsync
FROM pg_stat_bgwriter;
""",
- 'LOCKS': """
+}
+
+QUERY_LOCKS = {
+ DEFAULT: """
SELECT
pg_database.datname as database_name,
mode,
@@ -208,7 +293,10 @@ INNER JOIN pg_database
GROUP BY datname, mode
ORDER BY datname, mode;
""",
- 'FIND_DATABASES': """
+}
+
+QUERY_DATABASES = {
+ DEFAULT: """
SELECT
datname
FROM pg_stat_database
@@ -217,48 +305,129 @@ WHERE
(SELECT current_user), datname, 'connect')
AND NOT datname ~* '^template\d ';
""",
- 'FIND_STANDBY': """
+}
+
+QUERY_STANDBY = {
+ DEFAULT: """
SELECT
application_name
FROM pg_stat_replication
WHERE application_name IS NOT NULL
GROUP BY application_name;
""",
- 'FIND_REPLICATION_SLOT': """
+}
+
+QUERY_REPLICATION_SLOT = {
+ DEFAULT: """
SELECT slot_name
FROM pg_replication_slots;
+"""
+}
+
+QUERY_STANDBY_DELTA = {
+ DEFAULT: """
+SELECT
+ application_name,
+ pg_wal_lsn_diff(
+ CASE pg_is_in_recovery()
+ WHEN true THEN pg_last_wal_receive_lsn()
+ ELSE pg_current_wal_lsn()
+ END,
+ sent_lsn) AS sent_delta,
+ pg_wal_lsn_diff(
+ CASE pg_is_in_recovery()
+ WHEN true THEN pg_last_wal_receive_lsn()
+ ELSE pg_current_wal_lsn()
+ END,
+ write_lsn) AS write_delta,
+ pg_wal_lsn_diff(
+ CASE pg_is_in_recovery()
+ WHEN true THEN pg_last_wal_receive_lsn()
+ ELSE pg_current_wal_lsn()
+ END,
+ flush_lsn) AS flush_delta,
+ pg_wal_lsn_diff(
+ CASE pg_is_in_recovery()
+ WHEN true THEN pg_last_wal_receive_lsn()
+ ELSE pg_current_wal_lsn()
+ END,
+ replay_lsn) AS replay_delta
+FROM pg_stat_replication
+WHERE application_name IS NOT NULL;
""",
- 'STANDBY_DELTA': """
+ V96: """
SELECT
application_name,
- pg_{0}_{1}_diff(
+ pg_xlog_location_diff(
CASE pg_is_in_recovery()
- WHEN true THEN pg_last_{0}_receive_{1}()
- ELSE pg_current_{0}_{1}()
+ WHEN true THEN pg_last_xlog_receive_location()
+ ELSE pg_current_xlog_location()
END,
- sent_{1}) AS sent_delta,
- pg_{0}_{1}_diff(
+ sent_location) AS sent_delta,
+ pg_xlog_location_diff(
CASE pg_is_in_recovery()
- WHEN true THEN pg_last_{0}_receive_{1}()
- ELSE pg_current_{0}_{1}()
+ WHEN true THEN pg_last_xlog_receive_location()
+ ELSE pg_current_xlog_location()
END,
- write_{1}) AS write_delta,
- pg_{0}_{1}_diff(
+ write_location) AS write_delta,
+ pg_xlog_location_diff(
CASE pg_is_in_recovery()
- WHEN true THEN pg_last_{0}_receive_{1}()
- ELSE pg_current_{0}_{1}()
+ WHEN true THEN pg_last_xlog_receive_location()
+ ELSE pg_current_xlog_location()
END,
- flush_{1}) AS flush_delta,
- pg_{0}_{1}_diff(
+ flush_location) AS flush_delta,
+ pg_xlog_location_diff(
CASE pg_is_in_recovery()
- WHEN true THEN pg_last_{0}_receive_{1}()
- ELSE pg_current_{0}_{1}()
+ WHEN true THEN pg_last_xlog_receive_location()
+ ELSE pg_current_xlog_location()
END,
- replay_{1}) AS replay_delta
+ replay_location) AS replay_delta
FROM pg_stat_replication
WHERE application_name IS NOT NULL;
""",
- 'REPSLOT_FILES': """
+}
+
+QUERY_REPSLOT_FILES = {
+ DEFAULT: """
+WITH wal_size AS (
+ SELECT
+ setting::int AS val
+ FROM pg_settings
+ WHERE name = 'wal_segment_size'
+ )
+SELECT
+ slot_name,
+ slot_type,
+ replslot_wal_keep,
+ count(slot_file) AS replslot_files
+FROM
+ (SELECT
+ slot.slot_name,
+ CASE
+ WHEN slot_file <> 'state' THEN 1
+ END AS slot_file ,
+ slot_type,
+ COALESCE (
+ floor(
+ (pg_wal_lsn_diff(pg_current_wal_lsn (),slot.restart_lsn)
+ - (pg_walfile_name_offset (restart_lsn)).file_offset) / (s.val)
+ ),0) AS replslot_wal_keep
+ FROM pg_replication_slots slot
+ LEFT JOIN (
+ SELECT
+ slot2.slot_name,
+ pg_ls_dir('pg_replslot/' || slot2.slot_name) AS slot_file
+ FROM pg_replication_slots slot2
+ ) files (slot_name, slot_file)
+ ON slot.slot_name = files.slot_name
+ CROSS JOIN wal_size s
+ ) AS d
+GROUP BY
+ slot_name,
+ slot_type,
+ replslot_wal_keep;
+""",
+ V10: """
WITH wal_size AS (
SELECT
current_setting('wal_block_size')::INT * setting::INT AS val
@@ -297,13 +466,22 @@ GROUP BY
slot_type,
replslot_wal_keep;
""",
- 'IF_SUPERUSER': """
+}
+
+QUERY_SUPERUSER = {
+ DEFAULT: """
SELECT current_setting('is_superuser') = 'on' AS is_superuser;
""",
- 'DETECT_SERVER_VERSION': """
+}
+
+QUERY_SHOW_VERSION = {
+ DEFAULT: """
SHOW server_version_num;
""",
- 'AUTOVACUUM': """
+}
+
+QUERY_AUTOVACUUM = {
+ DEFAULT: """
SELECT
count(*) FILTER (WHERE query LIKE 'autovacuum: ANALYZE%%') AS analyze,
count(*) FILTER (WHERE query LIKE 'autovacuum: VACUUM ANALYZE%%') AS vacuum_analyze,
@@ -315,23 +493,78 @@ SELECT
FROM pg_stat_activity
WHERE query NOT LIKE '%%pg_stat_activity%%';
""",
- 'DIFF_LSN': """
+}
+
+QUERY_DIFF_LSN = {
+ DEFAULT: """
SELECT
- pg_{0}_{1}_diff(
+ pg_wal_lsn_diff(
CASE pg_is_in_recovery()
- WHEN true THEN pg_last_{0}_receive_{1}()
- ELSE pg_current_{0}_{1}()
+ WHEN true THEN pg_last_wal_receive_lsn()
+ ELSE pg_current_wal_lsn()
END,
'0/0') as wal_writes ;
-"""
+""",
+ V96: """
+SELECT
+ pg_xlog_location_diff(
+ CASE pg_is_in_recovery()
+ WHEN true THEN pg_last_xlog_receive_location()
+ ELSE pg_current_xlog_location()
+ END,
+ '0/0') as wal_writes ;
+""",
}
-QUERY_STATS = {
- QUERIES['DATABASE']: METRICS['DATABASE'],
- QUERIES['BACKENDS']: METRICS['BACKENDS'],
- QUERIES['LOCKS']: METRICS['LOCKS']
-}
+def query_factory(name, version=NO_VERSION):
+ if name == BACKENDS:
+ return QUERY_BACKEND[DEFAULT]
+ elif name == TABLE_STATS:
+ return QUERY_TABLE_STATS[DEFAULT]
+ elif name == INDEX_STATS:
+ return QUERY_INDEX_STATS[DEFAULT]
+ elif name == DATABASE:
+ return QUERY_DATABASE[DEFAULT]
+ elif name == BGWRITER:
+ return QUERY_BGWRITER[DEFAULT]
+ elif name == LOCKS:
+ return QUERY_LOCKS[DEFAULT]
+ elif name == DATABASES:
+ return QUERY_DATABASES[DEFAULT]
+ elif name == STANDBY:
+ return QUERY_STANDBY[DEFAULT]
+ elif name == REPLICATION_SLOT:
+ return QUERY_REPLICATION_SLOT[DEFAULT]
+ elif name == IF_SUPERUSER:
+ return QUERY_SUPERUSER[DEFAULT]
+ elif name == SERVER_VERSION:
+ return QUERY_SHOW_VERSION[DEFAULT]
+ elif name == AUTOVACUUM:
+ return QUERY_AUTOVACUUM[DEFAULT]
+ elif name == WAL:
+ if version < 100000:
+ return QUERY_WAL[V96]
+ return QUERY_WAL[DEFAULT]
+ elif name == ARCHIVE:
+ if version < 100000:
+ return QUERY_ARCHIVE[V96]
+ return QUERY_ARCHIVE[DEFAULT]
+ elif name == STANDBY_DELTA:
+ if version < 100000:
+ return QUERY_STANDBY_DELTA[V96]
+ return QUERY_STANDBY_DELTA[DEFAULT]
+ elif name == REPSLOT_FILES:
+ if version < 110000:
+ return QUERY_REPSLOT_FILES[V10]
+ return QUERY_REPSLOT_FILES[DEFAULT]
+ elif name == DIFF_LSN:
+ if version < 100000:
+ return QUERY_DIFF_LSN[V96]
+ return QUERY_DIFF_LSN[DEFAULT]
+
+ raise ValueError('unknown query')
+
ORDER = [
'db_stat_temp_files',
@@ -403,7 +636,7 @@ CHARTS = {
]
},
'db_stat_temp_bytes': {
- 'options': [None, 'Temp files written to disk', 'KB/s', 'db statistics', 'postgres.db_stat_temp_bytes',
+ 'options': [None, 'Temp files written to disk', 'KiB/s', 'db statistics', 'postgres.db_stat_temp_bytes',
'line'],
'lines': [
['temp_bytes', 'size', 'incremental', 1, 1024]
@@ -417,7 +650,7 @@ CHARTS = {
]
},
'database_size': {
- 'options': [None, 'Database size', 'MB', 'database size', 'postgres.db_size', 'stacked'],
+ 'options': [None, 'Database size', 'MiB', 'database size', 'postgres.db_size', 'stacked'],
'lines': [
]
},
@@ -436,7 +669,7 @@ CHARTS = {
]
},
'index_size': {
- 'options': [None, 'Indexes size', 'MB', 'indexes', 'postgres.index_size', 'line'],
+ 'options': [None, 'Indexes size', 'MiB', 'indexes', 'postgres.index_size', 'line'],
'lines': [
['index_size', 'size', 'absolute', 1, 1024 * 1024]
]
@@ -448,7 +681,7 @@ CHARTS = {
]
},
'table_size': {
- 'options': [None, 'Tables size', 'MB', 'tables', 'postgres.table_size', 'line'],
+ 'options': [None, 'Tables size', 'MiB', 'tables', 'postgres.table_size', 'line'],
'lines': [
['table_size', 'size', 'absolute', 1, 1024 * 1024]
]
@@ -462,7 +695,7 @@ CHARTS = {
]
},
'wal_writes': {
- 'options': [None, 'Write-Ahead Logs', 'kilobytes/s', 'wal_writes', 'postgres.wal_writes', 'line'],
+ 'options': [None, 'Write-Ahead Logs', 'KiB/s', 'wal_writes', 'postgres.wal_writes', 'line'],
'lines': [
['wal_writes', 'writes', 'incremental', 1, 1024]
]
@@ -483,20 +716,20 @@ CHARTS = {
]
},
'stat_bgwriter_alloc': {
- 'options': [None, 'Buffers allocated', 'kilobytes/s', 'bgwriter', 'postgres.stat_bgwriter_alloc', 'line'],
+ 'options': [None, 'Buffers allocated', 'KiB/s', 'bgwriter', 'postgres.stat_bgwriter_alloc', 'line'],
'lines': [
['buffers_alloc', 'alloc', 'incremental', 1, 1024]
]
},
'stat_bgwriter_checkpoint': {
- 'options': [None, 'Buffers written during checkpoints', 'kilobytes/s', 'bgwriter',
+ 'options': [None, 'Buffers written during checkpoints', 'KiB/s', 'bgwriter',
'postgres.stat_bgwriter_checkpoint', 'line'],
'lines': [
['buffers_checkpoint', 'checkpoint', 'incremental', 1, 1024]
]
},
'stat_bgwriter_backend': {
- 'options': [None, 'Buffers written directly by a backend', 'kilobytes/s', 'bgwriter',
+ 'options': [None, 'Buffers written directly by a backend', 'KiB/s', 'bgwriter',
'postgres.stat_bgwriter_backend', 'line'],
'lines': [
['buffers_backend', 'backend', 'incremental', 1, 1024]
@@ -509,7 +742,7 @@ CHARTS = {
]
},
'stat_bgwriter_bgwriter': {
- 'options': [None, 'Buffers written by the background writer', 'kilobytes/s', 'bgwriter',
+ 'options': [None, 'Buffers written by the background writer', 'KiB/s', 'bgwriter',
'postgres.bgwriter_bgwriter', 'line'],
'lines': [
['buffers_clean', 'clean', 'incremental', 1, 1024]
@@ -533,7 +766,7 @@ CHARTS = {
]
},
'standby_delta': {
- 'options': [None, 'Standby delta', 'kilobytes', 'replication delta', 'postgres.standby_delta', 'line'],
+ 'options': [None, 'Standby delta', 'KiB', 'replication delta', 'postgres.standby_delta', 'line'],
'lines': [
['sent_delta', 'sent delta', 'absolute', 1, 1024],
['write_delta', 'write delta', 'absolute', 1, 1024],
@@ -554,186 +787,218 @@ CHARTS = {
class Service(SimpleService):
def __init__(self, configuration=None, name=None):
SimpleService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER[:]
+ self.order = list(ORDER)
self.definitions = deepcopy(CHARTS)
- self.table_stats = configuration.pop('table_stats', False)
- self.index_stats = configuration.pop('index_stats', False)
- self.database_poll = configuration.pop('database_poll', None)
+ self.do_table_stats = configuration.pop('table_stats', False)
+ self.do_index_stats = configuration.pop('index_stats', False)
+ self.databases_to_poll = configuration.pop('database_poll', None)
+ self.statement_timeout = configuration.pop('statement_timeout', DEFAULT_STATEMENT_TIMEOUT)
self.configuration = configuration
- self.connection = False
+ self.conn = None
self.server_version = None
- self.data = dict()
- self.locks_zeroed = dict()
+ self.is_superuser = False
+ self.alive = False
self.databases = list()
self.secondaries = list()
self.replication_slots = list()
- self.queries = QUERY_STATS.copy()
-
- def _connect(self):
- params = dict(user='postgres',
- database=None,
- password=None,
- host=None,
- port=5432)
- params.update(self.configuration)
-
- if not self.connection:
- try:
- self.connection = psycopg2.connect(**params)
- self.connection.set_isolation_level(extensions.ISOLATION_LEVEL_AUTOCOMMIT)
- self.connection.set_session(readonly=True)
- except OperationalError as error:
- return False, str(error)
- return True, True
+ self.queries = dict()
+ self.data = dict()
+
+ def reconnect(self):
+ return self.connect()
+
+ def connect(self):
+ if self.conn:
+ self.conn.close()
+ self.conn = None
+
+ try:
+ params = dict(
+ host=None,
+ port=DEFAULT_PORT,
+ database=None,
+ user=DEFAULT_USER,
+ password=None,
+ connect_timeout=DEFAULT_CONNECT_TIMEOUT,
+ options='-c statement_timeout={0}'.format(self.statement_timeout),
+ )
+ params.update(self.configuration)
+
+ self.conn = psycopg2.connect(**params)
+ self.conn.set_isolation_level(extensions.ISOLATION_LEVEL_AUTOCOMMIT)
+ self.conn.set_session(readonly=True)
+ except OperationalError as error:
+ self.error(error)
+ self.alive = False
+ else:
+ self.alive = True
+
+ return self.alive
def check(self):
if not PSYCOPG2:
- self.error('\'python-psycopg2\' module is needed to use postgres.chart.py')
+ self.error("'python-psycopg2' package is needed to use postgres module")
return False
- result, error = self._connect()
- if not result:
- conf = dict((k, (lambda k, v: v if k != 'password' else '*****')(k, v))
- for k, v in self.configuration.items())
- self.error('Failed to connect to %s. Error: %s' % (str(conf), error))
+
+ if not self.connect():
+ self.error('failed to connect to {0}'.format(hide_password(self.configuration)))
return False
+
try:
- cursor = self.connection.cursor()
- self.databases = discover_databases_(cursor, QUERIES['FIND_DATABASES'])
- is_superuser = check_if_superuser_(cursor, QUERIES['IF_SUPERUSER'])
- self.secondaries = discover_secondaries_(cursor, QUERIES['FIND_STANDBY'])
- self.server_version = detect_server_version(cursor, QUERIES['DETECT_SERVER_VERSION'])
- if self.server_version >= 94000:
- self.replication_slots = discover_replication_slots_(cursor, QUERIES['FIND_REPLICATION_SLOT'])
- cursor.close()
-
- if self.database_poll and isinstance(self.database_poll, str):
- self.databases = [dbase for dbase in self.databases if dbase in self.database_poll.split()] \
- or self.databases
-
- self.locks_zeroed = populate_lock_types(self.databases)
- self.add_additional_queries_(is_superuser)
- self.create_dynamic_charts_()
- return True
+ self.check_queries()
except Exception as error:
- self.error(str(error))
+ self.error(error)
return False
- def add_additional_queries_(self, is_superuser):
+ self.populate_queries()
+ self.create_dynamic_charts()
- if self.server_version >= 100000:
- wal = 'wal'
- lsn = 'lsn'
- else:
- wal = 'xlog'
- lsn = 'location'
- self.queries[QUERIES['BGWRITER']] = METRICS['BGWRITER']
- self.queries[QUERIES['DIFF_LSN'].format(wal, lsn)] = METRICS['WAL_WRITES']
- self.queries[QUERIES['STANDBY_DELTA'].format(wal, lsn)] = METRICS['STANDBY_DELTA']
-
- if self.index_stats:
- self.queries[QUERIES['INDEX_STATS']] = METRICS['INDEX_STATS']
- if self.table_stats:
- self.queries[QUERIES['TABLE_STATS']] = METRICS['TABLE_STATS']
- if is_superuser:
- self.queries[QUERIES['ARCHIVE'].format(wal)] = METRICS['ARCHIVE']
- if self.server_version >= 90400:
- self.queries[QUERIES['WAL'].format(wal, lsn)] = METRICS['WAL']
- if self.server_version >= 100000:
- self.queries[QUERIES['REPSLOT_FILES']] = METRICS['REPSLOT_FILES']
- if self.server_version >= 90400:
- self.queries[QUERIES['AUTOVACUUM']] = METRICS['AUTOVACUUM']
+ return True
- def create_dynamic_charts_(self):
+ def get_data(self):
+ if not self.alive and not self.reconnect():
+ return None
- for database_name in self.databases[::-1]:
- self.definitions['database_size']['lines'].append(
- [database_name + '_size', database_name, 'absolute', 1, 1024 * 1024])
- for chart_name in [name for name in self.order if name.startswith('db_stat')]:
- add_database_stat_chart_(order=self.order, definitions=self.definitions,
- name=chart_name, database_name=database_name)
+ try:
+ cursor = self.conn.cursor(cursor_factory=DictCursor)
- add_database_lock_chart_(order=self.order, definitions=self.definitions, database_name=database_name)
+ self.data.update(zero_lock_types(self.databases))
- for application_name in self.secondaries[::-1]:
- add_replication_delta_chart_(
- order=self.order,
- definitions=self.definitions,
- name='standby_delta',
- application_name=application_name)
+ for query, metrics in self.queries.items():
+ self.query_stats(cursor, query, metrics)
- for slot_name in self.replication_slots[::-1]:
- add_replication_slot_chart_(
- order=self.order,
- definitions=self.definitions,
- name='replication_slot',
- slot_name=slot_name)
-
- def _get_data(self):
- result, _ = self._connect()
- if result:
- cursor = self.connection.cursor(cursor_factory=DictCursor)
- try:
- self.data.update(self.locks_zeroed)
- for query, metrics in self.queries.items():
- self.query_stats_(cursor, query, metrics)
-
- except OperationalError:
- self.connection = False
- cursor.close()
- return None
- else:
- cursor.close()
- return self.data
- else:
+ except OperationalError:
+ self.alive = False
return None
- def query_stats_(self, cursor, query, metrics):
+ cursor.close()
+
+ return self.data
+
+ def query_stats(self, cursor, query, metrics):
cursor.execute(query, dict(databases=tuple(self.databases)))
+
for row in cursor:
for metric in metrics:
+ # databases
if 'database_name' in row:
dimension_id = '_'.join([row['database_name'], metric])
+ # secondaries
elif 'application_name' in row:
dimension_id = '_'.join([row['application_name'], metric])
+ # replication slots
elif 'slot_name' in row:
dimension_id = '_'.join([row['slot_name'], metric])
+ # other
else:
dimension_id = metric
+
if metric in row:
if row[metric] is not None:
self.data[dimension_id] = int(row[metric])
elif 'locks_count' in row:
- self.data[dimension_id] = row['locks_count'] if metric == row['mode'] else 0
+ if metric == row['mode']:
+ self.data[dimension_id] = row['locks_count']
+ def check_queries(self):
+ cursor = self.conn.cursor()
-def discover_databases_(cursor, query):
- cursor.execute(query)
- result = list()
- for db in [database[0] for database in cursor]:
- if db not in result:
- result.append(db)
- return result
+ self.server_version = detect_server_version(cursor, query_factory(SERVER_VERSION))
+ self.debug('server version: {0}'.format(self.server_version))
+ self.is_superuser = check_if_superuser(cursor, query_factory(IF_SUPERUSER))
+ self.debug('superuser: {0}'.format(self.is_superuser))
-def discover_secondaries_(cursor, query):
- cursor.execute(query)
- result = list()
- for sc in [standby[0] for standby in cursor]:
- if sc not in result:
- result.append(sc)
- return result
+ self.databases = discover(cursor, query_factory(DATABASES))
+ self.debug('discovered databases {0}'.format(self.databases))
+ if self.databases_to_poll:
+ to_poll = self.databases_to_poll.split()
+ self.databases = [db for db in self.databases if db in to_poll] or self.databases
+
+ self.secondaries = discover(cursor, query_factory(STANDBY))
+ self.debug('discovered secondaries: {0}'.format(self.secondaries))
+
+ if self.server_version >= 94000:
+ self.replication_slots = discover(cursor, query_factory(REPLICATION_SLOT))
+ self.debug('discovered replication slots: {0}'.format(self.replication_slots))
+
+ cursor.close()
+
+ def populate_queries(self):
+ self.queries[query_factory(DATABASE)] = METRICS[DATABASE]
+ self.queries[query_factory(BACKENDS)] = METRICS[BACKENDS]
+ self.queries[query_factory(LOCKS)] = METRICS[LOCKS]
+ self.queries[query_factory(BGWRITER)] = METRICS[BGWRITER]
+ self.queries[query_factory(DIFF_LSN, self.server_version)] = METRICS[WAL_WRITES]
+ self.queries[query_factory(STANDBY_DELTA, self.server_version)] = METRICS[STANDBY_DELTA]
+
+ if self.do_index_stats:
+ self.queries[query_factory(INDEX_STATS)] = METRICS[INDEX_STATS]
+ if self.do_table_stats:
+ self.queries[query_factory(TABLE_STATS)] = METRICS[TABLE_STATS]
+
+ if self.is_superuser:
+ self.queries[query_factory(ARCHIVE, self.server_version)] = METRICS[ARCHIVE]
+
+ if self.server_version >= 90400:
+ self.queries[query_factory(WAL, self.server_version)] = METRICS[WAL]
+
+ if self.server_version >= 100000:
+ self.queries[query_factory(REPSLOT_FILES, self.server_version)] = METRICS[REPSLOT_FILES]
+
+ if self.server_version >= 90400:
+ self.queries[query_factory(AUTOVACUUM)] = METRICS[AUTOVACUUM]
+
+ def create_dynamic_charts(self):
+ for database_name in self.databases[::-1]:
+ dim = [
+ database_name + '_size',
+ database_name,
+ 'absolute',
+ 1,
+ 1024 * 1024,
+ ]
+ self.definitions['database_size']['lines'].append(dim)
+ for chart_name in [name for name in self.order if name.startswith('db_stat')]:
+ add_database_stat_chart(
+ order=self.order,
+ definitions=self.definitions,
+ name=chart_name,
+ database_name=database_name,
+ )
+ add_database_lock_chart(
+ order=self.order,
+ definitions=self.definitions,
+ database_name=database_name,
+ )
+
+ for application_name in self.secondaries[::-1]:
+ add_replication_delta_chart(
+ order=self.order,
+ definitions=self.definitions,
+ name='standby_delta',
+ application_name=application_name,
+ )
+
+ for slot_name in self.replication_slots[::-1]:
+ add_replication_slot_chart(
+ order=self.order,
+ definitions=self.definitions,
+ name='replication_slot',
+ slot_name=slot_name,
+ )
-def discover_replication_slots_(cursor, query):
+def discover(cursor, query):
cursor.execute(query)
result = list()
- for slot in [replication_slot[0] for replication_slot in cursor]:
- if slot not in result:
- result.append(slot)
+ for v in [value[0] for value in cursor]:
+ if v not in result:
+ result.append(v)
return result
-def check_if_superuser_(cursor, query):
+def check_if_superuser(cursor, query):
cursor.execute(query)
return cursor.fetchone()[0]
@@ -743,7 +1008,7 @@ def detect_server_version(cursor, query):
return int(cursor.fetchone()[0])
-def populate_lock_types(databases):
+def zero_lock_types(databases):
result = dict()
for database in databases:
for lock_type in METRICS['LOCKS']:
@@ -753,7 +1018,11 @@ def populate_lock_types(databases):
return result
-def add_database_lock_chart_(order, definitions, database_name):
+def hide_password(config):
+ return dict((k, v if k != 'password' else '*****') for k, v in config.items())
+
+
+def add_database_lock_chart(order, definitions, database_name):
def create_lines(database):
result = list()
for lock_type in METRICS['LOCKS']:
@@ -770,7 +1039,7 @@ def add_database_lock_chart_(order, definitions, database_name):
}
-def add_database_stat_chart_(order, definitions, name, database_name):
+def add_database_stat_chart(order, definitions, name, database_name):
def create_lines(database, lines):
result = list()
for line in lines:
@@ -787,7 +1056,7 @@ def add_database_stat_chart_(order, definitions, name, database_name):
'lines': create_lines(database_name, chart_template['lines'])}
-def add_replication_delta_chart_(order, definitions, name, application_name):
+def add_replication_delta_chart(order, definitions, name, application_name):
def create_lines(standby, lines):
result = list()
for line in lines:
@@ -799,13 +1068,13 @@ def add_replication_delta_chart_(order, definitions, name, application_name):
chart_name = '_'.join([application_name, name])
position = order.index('database_size')
order.insert(position, chart_name)
- name, title, units, family, context, chart_type = chart_template['options']
+ name, title, units, _, context, chart_type = chart_template['options']
definitions[chart_name] = {
'options': [name, title + ': ' + application_name, units, 'replication delta', context, chart_type],
'lines': create_lines(application_name, chart_template['lines'])}
-def add_replication_slot_chart_(order, definitions, name, slot_name):
+def add_replication_slot_chart(order, definitions, name, slot_name):
def create_lines(slot, lines):
result = list()
for line in lines:
@@ -817,7 +1086,7 @@ def add_replication_slot_chart_(order, definitions, name, slot_name):
chart_name = '_'.join([slot_name, name])
position = order.index('database_size')
order.insert(position, chart_name)
- name, title, units, family, context, chart_type = chart_template['options']
+ name, title, units, _, context, chart_type = chart_template['options']
definitions[chart_name] = {
'options': [name, title + ': ' + slot_name, units, 'replication slot files', context, chart_type],
'lines': create_lines(slot_name, chart_template['lines'])}
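The structural heart of this rewrite: instead of `str.format()`-templated SQL (`pg_{0}_{1}_diff` filled in with `wal`/`xlog` and `lsn`/`location`), each statement now exists as a complete per-version variant and `query_factory()` picks one using the integer from `SHOW server_version_num` (90610 for 9.6.10, 100004 for 10.4, and so on). A trimmed, self-contained model of that dispatch:

```python
NO_VERSION = 0
DEFAULT, V96, V10 = 'DEFAULT', 'V96', 'V10'

# Stand-ins for the module's full SQL tables:
QUERY_WAL = {DEFAULT: 'pg_wal variant', V96: 'pg_xlog variant'}
QUERY_REPSLOT_FILES = {DEFAULT: 'wal_segment_size variant', V10: 'wal_block_size variant'}


def query_factory(name, version=NO_VERSION):
    if name == 'WAL':
        return QUERY_WAL[V96] if version < 100000 else QUERY_WAL[DEFAULT]
    if name == 'REPSLOT_FILES':
        return QUERY_REPSLOT_FILES[V10] if version < 110000 else QUERY_REPSLOT_FILES[DEFAULT]
    raise ValueError('unknown query')


assert query_factory('WAL', 90610) == QUERY_WAL[V96]                       # 9.6.10
assert query_factory('WAL', 100004) == QUERY_WAL[DEFAULT]                  # 10.4
assert query_factory('REPSLOT_FILES', 100004) == QUERY_REPSLOT_FILES[V10]
assert query_factory('REPSLOT_FILES', 110000) == QUERY_REPSLOT_FILES[DEFAULT]
```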
diff --git a/collectors/python.d.plugin/postgres/postgres.conf b/collectors/python.d.plugin/postgres/postgres.conf
index b69ca3717..cde698f3c 100644
--- a/collectors/python.d.plugin/postgres/postgres.conf
+++ b/collectors/python.d.plugin/postgres/postgres.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,18 +56,20 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# A single connection is required in order to pull statistics.
#
# Connections can be configured with the following options:
#
-# database : 'example_db_name'
-# user : 'example_user'
-# password : 'example_pass'
-# host : 'localhost'
-# port : 5432
+# database : 'example_db_name'
+# user : 'example_user'
+# password : 'example_pass'
+# host : 'localhost'
+# port : 5432
+# connect_timeout : 2 # in seconds, default is 2
+# statement_timeout : 2000 # in ms, default is 2000
#
# Additionally, the following options allow selective disabling of charts
#
diff --git a/collectors/python.d.plugin/powerdns/README.md b/collectors/python.d.plugin/powerdns/README.md
index 3c4b145e0..61aa5f6b7 100644
--- a/collectors/python.d.plugin/powerdns/README.md
+++ b/collectors/python.d.plugin/powerdns/README.md
@@ -75,3 +75,5 @@ local:
```
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fpowerdns%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/powerdns/powerdns.chart.py b/collectors/python.d.plugin/powerdns/powerdns.chart.py
index 4264621b2..7ed1554f6 100644
--- a/collectors/python.d.plugin/powerdns/powerdns.chart.py
+++ b/collectors/python.d.plugin/powerdns/powerdns.chart.py
@@ -8,11 +8,14 @@ from json import loads
from bases.FrameworkServices.UrlService import UrlService
-priority = 60000
-retries = 60
-# update_every = 3
-ORDER = ['questions', 'cache_usage', 'cache_size', 'latency']
+ORDER = [
+ 'questions',
+ 'cache_usage',
+ 'cache_size',
+ 'latency',
+]
+
CHARTS = {
'questions': {
'options': [None, 'PowerDNS Queries and Answers', 'count', 'questions', 'powerdns.questions', 'line'],
diff --git a/collectors/python.d.plugin/powerdns/powerdns.conf b/collectors/python.d.plugin/powerdns/powerdns.conf
index ca6200df1..559bf175e 100644
--- a/collectors/python.d.plugin/powerdns/powerdns.conf
+++ b/collectors/python.d.plugin/powerdns/powerdns.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,7 +56,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, apache also supports the following:
diff --git a/collectors/python.d.plugin/proxysql/README.md b/collectors/python.d.plugin/proxysql/README.md
index 02388276e..6e5a2127f 100644
--- a/collectors/python.d.plugin/proxysql/README.md
+++ b/collectors/python.d.plugin/proxysql/README.md
@@ -60,3 +60,5 @@ tcpipv4:
If no configuration is given, module will fail to run.
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fproxysql%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/proxysql/proxysql.chart.py b/collectors/python.d.plugin/proxysql/proxysql.chart.py
index f7e3d49f9..c97147486 100644
--- a/collectors/python.d.plugin/proxysql/proxysql.chart.py
+++ b/collectors/python.d.plugin/proxysql/proxysql.chart.py
@@ -5,11 +5,6 @@
from bases.FrameworkServices.MySQLService import MySQLService
-# default module values (can be overridden per job in `config`)
-# update_every = 3
-priority = 60000
-retries = 60
-
def query(table, *params):
return 'SELECT {params} FROM {table}'.format(table=table, params=', '.join(params))
@@ -133,8 +128,8 @@ CHARTS = {
'options': [None, 'ProxySQL Backend Overall Bandwidth', 'kilobits/s', 'overall_bandwidth',
'proxysql.pool_overall_net', 'area'],
'lines': [
- ['bytes_data_recv', 'in', 'incremental', 8, 1024],
- ['bytes_data_sent', 'out', 'incremental', -8, 1024]
+ ['bytes_data_recv', 'in', 'incremental', 8, 1000],
+ ['bytes_data_sent', 'out', 'incremental', -8, 1000]
]
},
'questions': {
@@ -156,7 +151,7 @@ CHARTS = {
]
},
'pool_latency': {
- 'options': [None, 'ProxySQL Backend Latency', 'ms', 'latency', 'proxysql.latency', 'line'],
+ 'options': [None, 'ProxySQL Backend Latency', 'milliseconds', 'latency', 'proxysql.latency', 'line'],
'lines': []
},
'connections': {
@@ -194,7 +189,7 @@ CHARTS = {
'lines': []
},
'commands_duration': {
- 'options': [None, 'ProxySQL Commands Duration', 'ms', 'commands', 'proxysql.commands_duration', 'line'],
+ 'options': [None, 'ProxySQL Commands Duration', 'milliseconds', 'commands', 'proxysql.commands_duration', 'line'],
'lines': []
}
}
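The bandwidth fix swaps the divisor from 1024 to 1000: converting bytes/s to the chart's `kilobits/s` needs a factor of 8 and a decimal kilo, since bit rates use SI prefixes. For a hypothetical backend moving 125000 bytes/s (exactly 1 Mbit/s):

```python
rate = 125000  # bytes/s, a hypothetical counter delta

old = rate * 8 / 1024  # 976.5625 -> kibibits mislabelled as kilobits
new = rate * 8 / 1000  # 1000.0   -> true kilobits/s, matching the unit
```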
diff --git a/collectors/python.d.plugin/proxysql/proxysql.conf b/collectors/python.d.plugin/proxysql/proxysql.conf
index d29c2e5be..3c503a895 100644
--- a/collectors/python.d.plugin/proxysql/proxysql.conf
+++ b/collectors/python.d.plugin/proxysql/proxysql.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,7 +56,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, proxysql also supports the following:
diff --git a/collectors/python.d.plugin/puppet/README.md b/collectors/python.d.plugin/puppet/README.md
index 8304c831e..b97eb70c5 100644
--- a/collectors/python.d.plugin/puppet/README.md
+++ b/collectors/python.d.plugin/puppet/README.md
@@ -26,16 +26,13 @@ puppetdb:
tls_cert_file: /path/to/client.crt
tls_key_file: /path/to/client.key
autodetection_retry: 1
- retries: 3600
puppetserver:
url: 'https://fqdn.example.com:8140'
autodetection_retry: 1
- retries: 3600
```
-When no configuration is given then `https://fqdn.example.com:8140` is
-tried without any retries.
+When no configuration is given, module uses `https://fqdn.example.com:8140`.
### notes
@@ -46,3 +43,5 @@ tried without any retries.
to default PuppetDB configuration though.
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fpuppet%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/puppet/puppet.chart.py b/collectors/python.d.plugin/puppet/puppet.chart.py
index 5c8e48bd9..30e219da4 100644
--- a/collectors/python.d.plugin/puppet/puppet.chart.py
+++ b/collectors/python.d.plugin/puppet/puppet.chart.py
@@ -11,29 +11,31 @@
# and tls_cert_file options then.
#
-from bases.FrameworkServices.UrlService import UrlService
-from json import loads
import socket
+from json import loads
+
+from bases.FrameworkServices.UrlService import UrlService
+
update_every = 5
-priority = 60000
-# very long clojure-based service startup time
-retries = 180
-MB = 1048576
+
+MiB = 1 << 20
CPU_SCALE = 1000
+
ORDER = [
'jvm_heap',
'jvm_nonheap',
'cpu',
'fd_open',
]
+
CHARTS = {
'jvm_heap': {
- 'options': [None, 'JVM Heap', 'MB', 'resources', 'puppet.jvm', 'area'],
+ 'options': [None, 'JVM Heap', 'MiB', 'resources', 'puppet.jvm', 'area'],
'lines': [
- ['jvm_heap_committed', 'committed', 'absolute', 1, MB],
- ['jvm_heap_used', 'used', 'absolute', 1, MB],
+ ['jvm_heap_committed', 'committed', 'absolute', 1, MiB],
+ ['jvm_heap_used', 'used', 'absolute', 1, MiB],
],
'variables': [
['jvm_heap_max'],
@@ -41,10 +43,10 @@ CHARTS = {
],
},
'jvm_nonheap': {
- 'options': [None, 'JVM Non-Heap', 'MB', 'resources', 'puppet.jvm', 'area'],
+ 'options': [None, 'JVM Non-Heap', 'MiB', 'resources', 'puppet.jvm', 'area'],
'lines': [
- ['jvm_nonheap_committed', 'committed', 'absolute', 1, MB],
- ['jvm_nonheap_used', 'used', 'absolute', 1, MB],
+ ['jvm_nonheap_committed', 'committed', 'absolute', 1, MiB],
+ ['jvm_nonheap_used', 'used', 'absolute', 1, MiB],
],
'variables': [
['jvm_nonheap_max'],
@@ -73,9 +75,9 @@ CHARTS = {
class Service(UrlService):
def __init__(self, configuration=None, name=None):
UrlService.__init__(self, configuration=configuration, name=name)
- self.url = 'https://{0}:8140'.format(socket.getfqdn())
self.order = ORDER
self.definitions = CHARTS
+ self.url = 'https://{0}:8140'.format(socket.getfqdn())
def _get_data(self):
# NOTE: there are several ways to retrieve data
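`MB = 1048576` becomes `MiB = 1 << 20` to agree with the relabelled chart units; the numeric divisor is identical. In a netdata dimension, `[id, name, 'absolute', multiplier, divisor]` charts `value * multiplier / divisor`, so for a hypothetical 512 MiB heap sample:

```python
MiB = 1 << 20  # 1048576 bytes

jvm_heap_used = 536870912        # hypothetical value from the metrics API
print(jvm_heap_used * 1 // MiB)  # 512 -> plotted as 512 on the MiB chart
```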
diff --git a/collectors/python.d.plugin/puppet/puppet.conf b/collectors/python.d.plugin/puppet/puppet.conf
index 991bfabed..ff5c3d020 100644
--- a/collectors/python.d.plugin/puppet/puppet.conf
+++ b/collectors/python.d.plugin/puppet/puppet.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,7 +56,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# These configuration comes from UrlService base:
@@ -89,10 +87,8 @@
# tls_cert_file: /path/to/client.crt
# tls_key_file: /path/to/client.key
# autodetection_retry: 1
-# retries: 3600
#
# puppetserver:
# url: 'https://fqdn.example.com:8140'
# autodetection_retry: 1
-# retries: 3600
#
diff --git a/collectors/python.d.plugin/python.d.conf b/collectors/python.d.plugin/python.d.conf
index 40c8c033f..72236209b 100644
--- a/collectors/python.d.plugin/python.d.conf
+++ b/collectors/python.d.plugin/python.d.conf
@@ -28,6 +28,7 @@ gc_interval: 300
# apache: yes
# apache_cache has been replaced by web_log
+# adaptec_raid: yes
apache_cache: no
# beanstalk: yes
# bind_rndc: yes
@@ -39,6 +40,7 @@ chrony: no
# cpuidle: yes
# dns_query_time: yes
# dnsdist: yes
+# dockerd: yes
# dovecot: yes
# elasticsearch: yes
@@ -54,6 +56,7 @@ go_expvar: no
gunicorn_log: no
# haproxy: yes
# hddtemp: yes
+# httpcheck: yes
# icecast: yes
# ipfs: yes
# isc_dhcpd: yes
@@ -61,6 +64,7 @@ gunicorn_log: no
# litespeed: yes
logind: no
# mdstat: yes
+# megacli: yes
# memcached: yes
# mongodb: yes
# monit: yes
@@ -76,6 +80,7 @@ nginx_log: no
# openldap: yes
# ovpn_status_log: yes
# phpfpm: yes
+# portcheck: yes
# postfix: yes
# postgres: yes
# powerdns: yes
@@ -91,6 +96,7 @@ nginx_log: no
# spigotmc: yes
# springboot: yes
# squid: yes
+# traefik: yes
# tomcat: yes
# tor: yes
unbound: no
diff --git a/collectors/python.d.plugin/python.d.plugin b/collectors/python.d.plugin/python.d.plugin
deleted file mode 100644
index efff22734..000000000
--- a/collectors/python.d.plugin/python.d.plugin
+++ /dev/null
@@ -1,427 +0,0 @@
-#!/usr/bin/env bash
-'''':; exec "$(command -v python || command -v python3 || command -v python2 ||
-echo "ERROR python IS NOT AVAILABLE IN THIS SYSTEM")" "$0" "$@" # '''
-
-# -*- coding: utf-8 -*-
-# Description:
-# Author: Pawel Krupa (paulfantom)
-# Author: Ilya Mashchenko (l2isbad)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-import gc
-import os
-import sys
-import threading
-
-from re import sub
-from sys import version_info, argv
-from time import sleep
-
-GC_RUN = True
-GC_COLLECT_EVERY = 300
-
-PY_VERSION = version_info[:2]
-
-USER_CONFIG_DIR = os.getenv('NETDATA_USER_CONFIG_DIR', '/usr/local/etc/netdata')
-STOCK_CONFIG_DIR = os.getenv('NETDATA_STOCK_CONFIG_DIR', '/usr/local/lib/netdata/conf.d')
-
-PLUGINS_USER_CONFIG_DIR = os.path.join(USER_CONFIG_DIR, 'python.d')
-PLUGINS_STOCK_CONFIG_DIR = os.path.join(STOCK_CONFIG_DIR, 'python.d')
-
-
-PLUGINS_DIR = os.path.abspath(os.getenv(
- 'NETDATA_PLUGINS_DIR',
- os.path.dirname(__file__)) + '/../python.d')
-
-
-PYTHON_MODULES_DIR = os.path.join(PLUGINS_DIR, 'python_modules')
-
-sys.path.append(PYTHON_MODULES_DIR)
-
-from bases.loaders import ModuleAndConfigLoader # noqa: E402
-from bases.loggers import PythonDLogger # noqa: E402
-from bases.collection import setdefault_values, run_and_exit # noqa: E402
-
-try:
- from collections import OrderedDict
-except ImportError:
- from third_party.ordereddict import OrderedDict
-
-BASE_CONFIG = {'update_every': os.getenv('NETDATA_UPDATE_EVERY', 1),
- 'retries': 60,
- 'priority': 60000,
- 'autodetection_retry': 0,
- 'chart_cleanup': 10,
- 'name': str()}
-
-
-MODULE_EXTENSION = '.chart.py'
-OBSOLETE_MODULES = ['apache_cache', 'gunicorn_log', 'nginx_log', 'cpufreq']
-
-
-def module_ok(m):
- return m.endswith(MODULE_EXTENSION) and m[:-len(MODULE_EXTENSION)] not in OBSOLETE_MODULES
-
-
-ALL_MODULES = [m for m in sorted(os.listdir(PLUGINS_DIR)) if module_ok(m)]
-
-
-def parse_cmd():
- debug = 'debug' in argv[1:]
- trace = 'trace' in argv[1:]
- override_update_every = next((arg for arg in argv[1:] if arg.isdigit() and int(arg) > 1), False)
- modules = [''.join([m, MODULE_EXTENSION]) for m in argv[1:] if ''.join([m, MODULE_EXTENSION]) in ALL_MODULES]
- return debug, trace, override_update_every, modules or ALL_MODULES
-
-
-def multi_job_check(config):
- return next((True for key in config if isinstance(config[key], dict)), False)
-
-
-class RawModule:
- def __init__(self, name, path, explicitly_enabled=True):
- self.name = name
- self.path = path
- self.explicitly_enabled = explicitly_enabled
-
-
-class Job(object):
- def __init__(self, initialized_job, job_id):
- """
- :param initialized_job: instance of <Class Service>
- :param job_id: <str>
- """
- self.job = initialized_job
- self.id = job_id # key in Modules.jobs()
- self.module_name = self.job.__module__ # used in Plugin.delete_job()
- self.recheck_every = self.job.configuration.pop('autodetection_retry')
- self.checked = False # used in Plugin.check_job()
- self.created = False # used in Plugin.create_job_charts()
- if self.job.update_every < int(OVERRIDE_UPDATE_EVERY):
- self.job.update_every = int(OVERRIDE_UPDATE_EVERY)
-
- def __getattr__(self, item):
- return getattr(self.job, item)
-
- def __repr__(self):
- return self.job.__repr__()
-
- def is_dead(self):
- return bool(self.ident) and not self.is_alive()
-
- def not_launched(self):
- return not bool(self.ident)
-
- def is_autodetect(self):
- return self.recheck_every
-
-
-class Module(object):
- def __init__(self, service, config):
- """
- :param service: <Module>
- :param config: <dict>
- """
- self.service = service
- self.name = service.__name__
- self.config = self.jobs_configurations_builder(config)
- self.jobs = OrderedDict()
- self.counter = 1
-
- self.initialize_jobs()
-
- def __repr__(self):
- return "<Class Module '{name}'>".format(name=self.name)
-
- def __iter__(self):
- return iter(OrderedDict(self.jobs).values())
-
- def __getitem__(self, item):
- return self.jobs[item]
-
- def __delitem__(self, key):
- del self.jobs[key]
-
- def __len__(self):
- return len(self.jobs)
-
- def __bool__(self):
- return bool(self.jobs)
-
- def __nonzero__(self):
- return self.__bool__()
-
- def jobs_configurations_builder(self, config):
- """
- :param config: <dict>
- :return:
- """
- counter = 0
- job_base_config = dict()
-
- for attr in BASE_CONFIG:
- job_base_config[attr] = config.pop(attr, getattr(self.service, attr, BASE_CONFIG[attr]))
-
- if not config:
- config = {str(): dict()}
- elif not multi_job_check(config):
- config = {str(): config}
-
- for job_name in config:
- if not isinstance(config[job_name], dict):
- continue
-
- job_config = setdefault_values(config[job_name], base_dict=job_base_config)
- job_name = sub(r'\s+', '_', job_name)
- config[job_name]['name'] = sub(r'\s+', '_', config[job_name]['name'])
- counter += 1
- job_id = 'job' + str(counter).zfill(3)
-
- yield job_id, job_name, job_config
-
- def initialize_jobs(self):
- """
- :return:
- """
- for job_id, job_name, job_config in self.config:
- job_config['job_name'] = job_name
- job_config['override_name'] = job_config.pop('name')
-
- try:
- initialized_job = self.service.Service(configuration=job_config)
- except Exception as error:
- Logger.error("job initialization: '{module_name} {job_name}' "
- "=> ['FAILED'] ({error})".format(module_name=self.name,
- job_name=job_name,
- error=error))
- continue
- else:
- Logger.debug("job initialization: '{module_name} {job_name}' "
- "=> ['OK']".format(module_name=self.name,
- job_name=job_name or self.name))
- self.jobs[job_id] = Job(initialized_job=initialized_job,
- job_id=job_id)
- del self.config
- del self.service
-
-
-class Plugin(object):
- def __init__(self):
- self.loader = ModuleAndConfigLoader()
- self.modules = OrderedDict()
- self.sleep_time = 1
- self.runs_counter = 0
-
- user_config = os.path.join(USER_CONFIG_DIR, 'python.d.conf')
- stock_config = os.path.join(STOCK_CONFIG_DIR, 'python.d.conf')
-
- Logger.debug("loading '{0}'".format(user_config))
- self.config, error = self.loader.load_config_from_file(user_config)
-
- if error:
- Logger.error("cannot load '{0}': {1}. Will try stock version.".format(user_config, error))
- Logger.debug("loading '{0}'".format(stock_config))
- self.config, error = self.loader.load_config_from_file(stock_config)
- if error:
- Logger.error("cannot load '{0}': {1}".format(stock_config, error))
-
- self.do_gc = self.config.get("gc_run", GC_RUN)
- self.gc_interval = self.config.get("gc_interval", GC_COLLECT_EVERY)
-
- if not self.config.get('enabled', True):
- run_and_exit(Logger.info)('DISABLED in configuration file.')
-
- self.load_and_initialize_modules()
- if not self.modules:
- run_and_exit(Logger.info)('No modules to run. Exit...')
-
- def __iter__(self):
- return iter(OrderedDict(self.modules).values())
-
- @property
- def jobs(self):
- return (job for mod in self for job in mod)
-
- @property
- def dead_jobs(self):
- return (job for job in self.jobs if job.is_dead())
-
- @property
- def autodetect_jobs(self):
- return [job for job in self.jobs if job.not_launched()]
-
- def enabled_modules(self):
- for mod in MODULES_TO_RUN:
- mod_name = mod[:-len(MODULE_EXTENSION)]
- mod_path = os.path.join(PLUGINS_DIR, mod)
- if any(
- [
- self.config.get('default_run', True) and self.config.get(mod_name, True),
- (not self.config.get('default_run')) and self.config.get(mod_name),
- ]
- ):
- yield RawModule(
- name=mod_name,
- path=mod_path,
- explicitly_enabled=self.config.get(mod_name),
- )
-
- def load_and_initialize_modules(self):
- for mod in self.enabled_modules():
-
- # Load module from file ------------------------------------------------------------
- loaded_module, error = self.loader.load_module_from_file(mod.name, mod.path)
- log = Logger.error if error else Logger.debug
- log("module load source: '{module_name}' => [{status}]".format(status='FAILED' if error else 'OK',
- module_name=mod.name))
- if error:
- Logger.error("load source error : {0}".format(error))
- continue
-
- # Load module config from file ------------------------------------------------------
- user_config = os.path.join(PLUGINS_USER_CONFIG_DIR, mod.name + '.conf')
- stock_config = os.path.join(PLUGINS_STOCK_CONFIG_DIR, mod.name + '.conf')
-
- Logger.debug("loading '{0}'".format(user_config))
- loaded_config, error = self.loader.load_config_from_file(user_config)
- if error:
- Logger.error("cannot load '{0}' : {1}. Will try stock version.".format(user_config, error))
- Logger.debug("loading '{0}'".format(stock_config))
- loaded_config, error = self.loader.load_config_from_file(stock_config)
-
- if error:
- Logger.error("cannot load '{0}': {1}".format(stock_config, error))
-
- # Skip disabled modules
- if getattr(loaded_module, 'disabled_by_default', False) and not mod.explicitly_enabled:
- Logger.info("module '{0}' disabled by default".format(loaded_module.__name__))
- continue
-
- # Module initialization ---------------------------------------------------
-
- initialized_module = Module(service=loaded_module, config=loaded_config)
- Logger.debug("module status: '{module_name}' => [{status}] "
- "(jobs: {jobs_number})".format(status='OK' if initialized_module else 'FAILED',
- module_name=initialized_module.name,
- jobs_number=len(initialized_module)))
- if initialized_module:
- self.modules[initialized_module.name] = initialized_module
-
- @staticmethod
- def check_job(job):
- """
- :param job: <Job>
- :return:
- """
- try:
- check_ok = bool(job.check())
- except Exception as error:
- job.error('check() unhandled exception: {error}'.format(error=error))
- return None
- else:
- return check_ok
-
- @staticmethod
- def create_job_charts(job):
- """
- :param job: <Job>
- :return:
- """
- try:
- create_ok = job.create()
- except Exception as error:
- job.error('create() unhandled exception: {error}'.format(error=error))
- return False
- else:
- return create_ok
-
- def delete_job(self, job):
- """
- :param job: <Job>
- :return:
- """
- del self.modules[job.module_name][job.id]
-
- def run_check(self):
- checked = list()
- for job in self.jobs:
- if job.name in checked:
- job.info('check() => [DROPPED] (already served by another job)')
- self.delete_job(job)
- continue
- ok = self.check_job(job)
- if ok:
- job.info('check() => [OK]')
- checked.append(job.name)
- job.checked = True
- continue
- if not job.is_autodetect() or ok is None:
- job.info('check() => [FAILED]')
- self.delete_job(job)
- else:
- job.info('check() => [RECHECK] (autodetection_retry: {0})'.format(job.recheck_every))
-
- def run_create(self):
- for job in self.jobs:
- if not job.checked:
- # skip autodetection_retry jobs
- continue
- ok = self.create_job_charts(job)
- if ok:
- job.debug('create() => [OK] (charts: {0})'.format(len(job.charts)))
- job.created = True
- continue
- job.error('create() => [FAILED] (charts: {0})'.format(len(job.charts)))
- self.delete_job(job)
-
- def start(self):
- self.run_check()
- self.run_create()
- for job in self.jobs:
- if job.created:
- job.start()
-
- while True:
- if threading.active_count() <= 1 and not self.autodetect_jobs:
- run_and_exit(Logger.info)('FINISHED')
-
- sleep(self.sleep_time)
- self.cleanup()
- self.autodetect_retry()
-
- # FIXME: https://github.com/netdata/netdata/issues/3817
- if self.do_gc and self.runs_counter % self.gc_interval == 0:
- v = gc.collect()
- Logger.debug("GC full collection run result: {0}".format(v))
-
- def cleanup(self):
- for job in self.dead_jobs:
- self.delete_job(job)
- for mod in self:
- if not mod:
- del self.modules[mod.name]
-
- def autodetect_retry(self):
- self.runs_counter += self.sleep_time
- for job in self.autodetect_jobs:
- if self.runs_counter % job.recheck_every == 0:
- checked = self.check_job(job)
- if checked:
- created = self.create_job_charts(job)
- if not created:
- self.delete_job(job)
- continue
- job.start()
-
-
-if __name__ == '__main__':
- DEBUG, TRACE, OVERRIDE_UPDATE_EVERY, MODULES_TO_RUN = parse_cmd()
- Logger = PythonDLogger()
- if DEBUG:
- Logger.logger.severity = 'DEBUG'
- if TRACE:
- Logger.log_traceback = True
- Logger.info('Using python {version}'.format(version=PY_VERSION[0]))
-
- plugin = Plugin()
- plugin.start()
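
Note: the deleted launcher opened with a bash/Python polyglot header. bash quote-removes the four quotes, leaving the no-op `:` followed by an exec that replaces the shell with whichever python binary is found first, re-running this same file; Python parses the same two lines as an inert triple-quoted string. A minimal reproduction of the trick (interpreter fallbacks shortened, message invented):

    #!/usr/bin/env bash
    '''':; exec "$(command -v python || command -v python3)" "$0" "$@" # '''
    import sys
    print('re-executed under', sys.executable)
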
diff --git a/collectors/python.d.plugin/python.d.plugin.in b/collectors/python.d.plugin/python.d.plugin.in
index 8b55ad41b..6521fed94 100755..100644
--- a/collectors/python.d.plugin/python.d.plugin.in
+++ b/collectors/python.d.plugin/python.d.plugin.in
@@ -48,15 +48,15 @@ except ImportError:
from third_party.ordereddict import OrderedDict
BASE_CONFIG = {'update_every': os.getenv('NETDATA_UPDATE_EVERY', 1),
- 'retries': 60,
'priority': 60000,
'autodetection_retry': 0,
'chart_cleanup': 10,
+ 'penalty': True,
'name': str()}
MODULE_EXTENSION = '.chart.py'
-OBSOLETE_MODULES = ['apache_cache', 'gunicorn_log', 'nginx_log', 'cpufreq']
+OBSOLETE_MODULES = ['apache_cache', 'gunicorn_log', 'nginx_log', 'cpufreq', 'cpuidle', 'mdstat', 'linux_power_supply']
def module_ok(m):
diff --git a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/MySQLService.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/MySQLService.py
index 53807e2c4..9a694aa82 100644
--- a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/MySQLService.py
+++ b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/MySQLService.py
@@ -131,20 +131,22 @@ class MySQLService(SimpleService):
raw_data = dict()
queries = dict(self.queries)
try:
- with self.__connection as cursor:
- for name, query in queries.items():
- try:
- cursor.execute(query)
- except (MySQLdb.ProgrammingError, MySQLdb.OperationalError) as error:
- if self.__is_error_critical(err_class=exc_info()[0], err_text=str(error)):
- raise RuntimeError
- self.error('Removed query: {name}[{query}]. Error: error'.format(name=name,
- query=query,
- error=error))
- self.queries.pop(name)
- continue
- else:
- raw_data[name] = (cursor.fetchall(), cursor.description) if description else cursor.fetchall()
+ cursor = self.__connection.cursor()
+ for name, query in queries.items():
+ try:
+ cursor.execute(query)
+ except (MySQLdb.ProgrammingError, MySQLdb.OperationalError) as error:
+ if self.__is_error_critical(err_class=exc_info()[0], err_text=str(error)):
+ cursor.close()
+ raise RuntimeError
+ self.error('Removed query: {name}[{query}]. Error: {error}'.format(name=name,
+ query=query,
+ error=error))
+ self.queries.pop(name)
+ continue
+ else:
+ raw_data[name] = (cursor.fetchall(), cursor.description) if description else cursor.fetchall()
+ cursor.close()
self.__connection.commit()
except (MySQLdb.MySQLError, RuntimeError, TypeError, AttributeError):
self.__connection.close()
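
Note: the hunk above drops the `with self.__connection as cursor` form, which relied on MySQLdb's connection `__enter__` historically returning a cursor; newer mysqlclient releases changed that behaviour, a likely motivation for managing the cursor explicitly and closing it on both the success and the critical-error paths. A minimal sketch of the pattern, assuming an already-open DB-API connection:

    def run_query(connection, query):
        # explicit cursor lifecycle, mirroring the new code path
        cursor = connection.cursor()
        try:
            cursor.execute(query)
            return cursor.fetchall()
        finally:
            cursor.close()
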
diff --git a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SimpleService.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SimpleService.py
index dd53fbc14..c7ab7f244 100644
--- a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SimpleService.py
+++ b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SimpleService.py
@@ -5,7 +5,7 @@
# SPDX-License-Identifier: GPL-3.0-or-later
from threading import Thread
-from time import sleep
+from time import sleep, time
from third_party.monotonic import monotonic
@@ -17,25 +17,42 @@ RUNTIME_CHART_UPDATE = 'BEGIN netdata.runtime_{job_name} {since_last}\n' \
'SET run_time = {elapsed}\n' \
'END\n'
+PENALTY_EVERY = 5
+MAX_PENALTY = 10 * 60 # 10 minutes
+
class RuntimeCounters:
def __init__(self, configuration):
"""
:param configuration: <dict>
"""
- self.FREQ = int(configuration.pop('update_every'))
- self.START_RUN = 0
- self.NEXT_RUN = 0
- self.PREV_UPDATE = 0
- self.SINCE_UPDATE = 0
- self.ELAPSED = 0
- self.RETRIES = 0
- self.RETRIES_MAX = configuration.pop('retries')
- self.PENALTY = 0
- self.RUNS = 1
+ self.update_every = int(configuration.pop('update_every'))
+ self.do_penalty = configuration.pop('penalty')
+
+ self.start_mono = 0
+ self.start_real = 0
+ self.retries = 0
+ self.penalty = 0
+ self.elapsed = 0
+ self.prev_update = 0
+
+ self.runs = 1
- def is_sleep_time(self):
- return self.START_RUN < self.NEXT_RUN
+ def calc_next(self):
+ self.start_mono = monotonic()
+ return self.start_mono - (self.start_mono % self.update_every) + self.update_every + self.penalty
+
+ def sleep_until_next(self):
+ next_time = self.calc_next()
+ while self.start_mono < next_time:
+ sleep(next_time - self.start_mono)
+ self.start_mono = monotonic()
+ self.start_real = time()
+
+ def handle_retries(self):
+ self.retries += 1
+ if self.do_penalty and self.retries % PENALTY_EVERY == 0:
+ self.penalty = round(min(self.retries * self.update_every / 2, MAX_PENALTY))
class SimpleService(Thread, PythonDLimitedLogger, OldVersionCompatibility, object):
@@ -83,11 +100,11 @@ class SimpleService(Thread, PythonDLimitedLogger, OldVersionCompatibility, objec
@property
def runs_counter(self):
- return self._runtime_counters.RUNS
+ return self._runtime_counters.runs
@property
def update_every(self):
- return self._runtime_counters.FREQ
+ return self._runtime_counters.update_every
@update_every.setter
def update_every(self, value):
@@ -95,7 +112,7 @@ class SimpleService(Thread, PythonDLimitedLogger, OldVersionCompatibility, objec
:param value: <int>
:return:
"""
- self._runtime_counters.FREQ = value
+ self._runtime_counters.update_every = value
def get_update_every(self):
return self.update_every
@@ -163,41 +180,36 @@ class SimpleService(Thread, PythonDLimitedLogger, OldVersionCompatibility, objec
:return: None
"""
job = self._runtime_counters
- self.debug('started, update frequency: {freq}, '
- 'retries: {retries}'.format(freq=job.FREQ, retries=job.RETRIES_MAX - job.RETRIES))
+ self.debug('started, update frequency: {freq}'.format(freq=job.update_every))
while True:
- job.START_RUN = monotonic()
-
- job.NEXT_RUN = job.START_RUN - (job.START_RUN % job.FREQ) + job.FREQ + job.PENALTY
+ job.sleep_until_next()
- self.sleep_until_next_run()
-
- if job.PREV_UPDATE:
- job.SINCE_UPDATE = int((job.START_RUN - job.PREV_UPDATE) * 1e6)
+ since = 0
+ if job.prev_update:
+ since = int((job.start_real - job.prev_update) * 1e6)
try:
- updated = self.update(interval=job.SINCE_UPDATE)
+ updated = self.update(interval=since)
except Exception as error:
self.error('update() unhandled exception: {error}'.format(error=error))
updated = False
- job.RUNS += 1
+ job.runs += 1
if not updated:
- if not self.manage_retries():
- return
+ job.handle_retries()
else:
- job.ELAPSED = int((monotonic() - job.START_RUN) * 1e3)
- job.PREV_UPDATE = job.START_RUN
- job.RETRIES, job.PENALTY = 0, 0
+ job.elapsed = int((monotonic() - job.start_mono) * 1e3)
+ job.prev_update = job.start_real
+ job.retries, job.penalty = 0, 0
safe_print(RUNTIME_CHART_UPDATE.format(job_name=self.name,
- since_last=job.SINCE_UPDATE,
- elapsed=job.ELAPSED))
- self.debug('update => [{status}] (elapsed time: {elapsed}, '
- 'retries left: {retries})'.format(status='OK' if updated else 'FAILED',
- elapsed=job.ELAPSED if updated else '-',
- retries=job.RETRIES_MAX - job.RETRIES))
+ since_last=since,
+ elapsed=job.elapsed))
+ self.debug('update => [{status}] (elapsed time: {elapsed}, failed retries in a row: {retries})'.format(
+ status='OK' if updated else 'FAILED',
+ elapsed=job.elapsed if updated else '-',
+ retries=job.retries))
def update(self, interval):
"""
@@ -233,27 +245,6 @@ class SimpleService(Thread, PythonDLimitedLogger, OldVersionCompatibility, objec
return updated
- def manage_retries(self):
- rc = self._runtime_counters
- rc.RETRIES += 1
- if rc.RETRIES % 5 == 0:
- rc.PENALTY = int(rc.RETRIES * self.update_every / 2)
- if rc.RETRIES >= rc.RETRIES_MAX:
- self.error('stopped after {0} data collection failures in a row'.format(rc.RETRIES_MAX))
- return False
- return True
-
- def sleep_until_next_run(self):
- job = self._runtime_counters
-
- # sleep() is interruptable
- while job.is_sleep_time():
- sleep_time = job.NEXT_RUN - job.START_RUN
- self.debug('sleeping for {sleep_time} to reach frequency of {freq} sec'.format(sleep_time=sleep_time,
- freq=job.FREQ + job.PENALTY))
- sleep(sleep_time)
- job.START_RUN = monotonic()
-
def get_data(self):
return self._get_data()
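
Note: with these counters a persistently failing job now backs off instead of being killed after a fixed retry budget. Every PENALTY_EVERY (5) consecutive failures, handle_retries() recomputes the penalty as round(min(retries * update_every / 2, 600)) seconds, and calc_next() adds it on top of the update_every-aligned schedule. A standalone sketch of the back-off curve for a hypothetical job with update_every = 5:

    PENALTY_EVERY = 5
    MAX_PENALTY = 10 * 60  # seconds
    update_every = 5       # hypothetical job frequency
    for retries in range(1, 31):
        if retries % PENALTY_EVERY == 0:
            penalty = round(min(retries * update_every / 2, MAX_PENALTY))
            print(retries, penalty)  # 5 -> 12, 10 -> 25, 15 -> 38, ..., 30 -> 75
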
diff --git a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SocketService.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SocketService.py
index e85455307..f5e6380b8 100644
--- a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SocketService.py
+++ b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SocketService.py
@@ -75,9 +75,11 @@ class SocketService(SimpleService):
keyfile=self.key,
certfile=self.cert,
server_side=False,
- cert_reqs=ssl.CERT_NONE)
+ cert_reqs=ssl.CERT_NONE,
+ ssl_version=ssl.PROTOCOL_TLS,
+ )
except (socket.error, ssl.SSLError) as error:
- self.error('Failed to wrap socket.')
+ self.error('failed to wrap socket : {0}'.format(error))
self._disconnect()
self.__socket_config = None
return False
@@ -169,8 +171,8 @@ class SocketService(SimpleService):
self.debug('closing socket')
self._sock.shutdown(2) # 0 - read, 1 - write, 2 - all
self._sock.close()
- except Exception:
- pass
+ except Exception as error:
+ self.error(error)
self._sock = None
def _send(self, request=None):
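
Note: passing ssl_version=ssl.PROTOCOL_TLS asks the library to negotiate the highest TLS version both peers support, rather than whatever default the bare wrap_socket() call happened to use, and the error message now carries the actual exception. A minimal sketch of an equivalent client-side wrap (the host is illustrative; it uses SSLContext, the form modern Python documentation favours):

    import socket
    import ssl

    sock = socket.create_connection(('example.com', 443), timeout=3)
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)  # negotiate highest mutual version
    ctx.check_hostname = False
    ctx.verify_mode = ssl.CERT_NONE         # mirrors cert_reqs=ssl.CERT_NONE above
    tls = ctx.wrap_socket(sock, server_hostname='example.com')
    print(tls.version())                    # e.g. 'TLSv1.3'
    tls.close()
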
diff --git a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/UrlService.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/UrlService.py
index 856f38851..011efff9e 100644
--- a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/UrlService.py
+++ b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/UrlService.py
@@ -26,6 +26,7 @@ class UrlService(SimpleService):
self.method = self.configuration.get('method', 'GET')
self.header = self.configuration.get('header')
self.request_timeout = self.configuration.get('timeout', 1)
+ self.respect_retry_after_header = self.configuration.get('respect_retry_after_header')
self.tls_verify = self.configuration.get('tls_verify')
self.tls_ca_file = self.configuration.get('tls_ca_file')
self.tls_key_file = self.configuration.get('tls_key_file')
@@ -111,12 +112,18 @@ class UrlService(SimpleService):
"""
url = url or self.url
manager = manager or self._manager
- response = manager.request(method=self.method,
- url=url,
- timeout=self.request_timeout,
- retries=retries,
- headers=manager.headers,
- redirect=redirect)
+ retry = urllib3.Retry(retries)
+ if hasattr(retry, 'respect_retry_after_header'):
+ retry.respect_retry_after_header = bool(self.respect_retry_after_header)
+
+ response = manager.request(
+ method=self.method,
+ url=url,
+ timeout=self.request_timeout,
+ retries=retry,
+ headers=manager.headers,
+ redirect=redirect,
+ )
if isinstance(response.data, str):
return response.status, response.data
return response.status, response.data.decode()
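
Note: wrapping the retry count in urllib3.Retry lets the collector control Retry-After handling; the hasattr() guard matters because respect_retry_after_header only exists on newer urllib3 releases. A minimal usage sketch (the URL is a hypothetical local endpoint, not taken from the module):

    import urllib3

    retry = urllib3.Retry(1)
    if hasattr(retry, 'respect_retry_after_header'):
        retry.respect_retry_after_header = False  # don't sleep on Retry-After

    http = urllib3.PoolManager()
    r = http.request('GET', 'http://127.0.0.1:19999/api/v1/info',
                     retries=retry, timeout=1, redirect=False)
    print(r.status)
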
diff --git a/collectors/python.d.plugin/python_modules/bases/charts.py b/collectors/python.d.plugin/python_modules/bases/charts.py
index 2963739ec..0a0719056 100644
--- a/collectors/python.d.plugin/python_modules/bases/charts.py
+++ b/collectors/python.d.plugin/python_modules/bases/charts.py
@@ -45,7 +45,7 @@ def create_runtime_chart(func):
ok = func(*args, **kwargs)
if ok:
safe_print(RUNTIME_CHART_CREATE.format(job_name=self.name,
- update_every=self._runtime_counters.FREQ))
+ update_every=self._runtime_counters.update_every))
return ok
return wrapper
diff --git a/collectors/python.d.plugin/python_modules/bases/loggers.py b/collectors/python.d.plugin/python_modules/bases/loggers.py
index 39be77a79..098294d3e 100644
--- a/collectors/python.d.plugin/python_modules/bases/loggers.py
+++ b/collectors/python.d.plugin/python_modules/bases/loggers.py
@@ -34,7 +34,7 @@ def limiter(log_max_count=30, allowed_in_seconds=60):
def on_decorator(func):
def on_call(*args):
- current_time = args[0]._runtime_counters.START_RUN
+ current_time = args[0]._runtime_counters.start_mono
lc = args[0]._logger_counters
if lc.logged and lc.logged % log_max_count == 0:
diff --git a/collectors/python.d.plugin/rabbitmq/README.md b/collectors/python.d.plugin/rabbitmq/README.md
index 22d367c4d..4ac606057 100644
--- a/collectors/python.d.plugin/rabbitmq/README.md
+++ b/collectors/python.d.plugin/rabbitmq/README.md
@@ -54,3 +54,5 @@ socket:
When no configuration file is found, the module tries to connect to `localhost:15672`.
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Frabbitmq%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/rabbitmq/rabbitmq.chart.py b/collectors/python.d.plugin/rabbitmq/rabbitmq.chart.py
index 8298b4032..a8f72592f 100644
--- a/collectors/python.d.plugin/rabbitmq/rabbitmq.chart.py
+++ b/collectors/python.d.plugin/rabbitmq/rabbitmq.chart.py
@@ -3,23 +3,12 @@
# Author: l2isbad
# SPDX-License-Identifier: GPL-3.0-or-later
-from collections import namedtuple
from json import loads
-from socket import gethostbyname, gaierror
-from threading import Thread
-try:
- from queue import Queue
-except ImportError:
- from Queue import Queue
from bases.FrameworkServices.UrlService import UrlService
-# default module values (can be overridden per job in `config`)
-update_every = 1
-priority = 60000
-retries = 60
-
-METHODS = namedtuple('METHODS', ['get_data', 'url', 'stats'])
+API_NODE = 'api/nodes'
+API_OVERVIEW = 'api/overview'
NODE_STATS = [
'fd_used',
@@ -64,15 +53,15 @@ CHARTS = {
]
},
'memory': {
- 'options': [None, 'Memory', 'MB', 'overview', 'rabbitmq.memory', 'line'],
+ 'options': [None, 'Memory', 'MiB', 'overview', 'rabbitmq.memory', 'area'],
'lines': [
- ['mem_used', 'used', 'absolute', 1, 1024 << 10]
+ ['mem_used', 'used', 'absolute', 1, 1 << 20]
]
},
'disk_space': {
- 'options': [None, 'Disk Space', 'GB', 'overview', 'rabbitmq.disk_space', 'line'],
+ 'options': [None, 'Disk Space', 'GiB', 'overview', 'rabbitmq.disk_space', 'area'],
'lines': [
- ['disk_free', 'free', 'absolute', 1, 1024 ** 3]
+ ['disk_free', 'free', 'absolute', 1, 1 << 30]
]
},
'socket_descriptors': {
@@ -111,7 +100,7 @@ CHARTS = {
]
},
'message_rates': {
- 'options': [None, 'Message Rates', 'messages/s', 'overview', 'rabbitmq.message_rates', 'stacked'],
+ 'options': [None, 'Message Rates', 'messages/s', 'overview', 'rabbitmq.message_rates', 'line'],
'lines': [
['message_stats_ack', 'ack', 'incremental'],
['message_stats_redeliver', 'redeliver', 'incremental'],
@@ -127,74 +116,62 @@ class Service(UrlService):
UrlService.__init__(self, configuration=configuration, name=name)
self.order = ORDER
self.definitions = CHARTS
- self.host = self.configuration.get('host', '127.0.0.1')
- self.port = self.configuration.get('port', 15672)
- self.scheme = self.configuration.get('scheme', 'http')
+ self.url = '{0}://{1}:{2}'.format(
+ configuration.get('scheme', 'http'),
+ configuration.get('host', '127.0.0.1'),
+ configuration.get('port', 15672),
+ )
+ self.node_name = str()
- def check(self):
- # We can't start if <host> AND <port> not specified
- if not (self.host and self.port):
- self.error('Host is not defined in the module configuration file')
- return False
+ def _get_data(self):
+ data = dict()
- # Hostname -> ip address
- try:
- self.host = gethostbyname(self.host)
- except gaierror as error:
- self.error(str(error))
- return False
-
- # Add handlers (auth, self signed cert accept)
- self.url = '{scheme}://{host}:{port}/api'.format(scheme=self.scheme,
- host=self.host,
- port=self.port)
- # Add methods
- api_node = self.url + '/nodes'
- api_overview = self.url + '/overview'
- self.methods = [METHODS(get_data=self._get_overview_stats,
- url=api_node,
- stats=NODE_STATS),
- METHODS(get_data=self._get_overview_stats,
- url=api_overview,
- stats=OVERVIEW_STATS)]
- return UrlService.check(self)
+ stats = self.get_overview_stats()
- def _get_data(self):
- threads = list()
- queue = Queue()
- result = dict()
+ if not stats:
+ return None
+
+ data.update(stats)
+
+ stats = self.get_nodes_stats()
+
+ if not stats:
+ return None
+
+ data.update(stats)
- for method in self.methods:
- th = Thread(target=method.get_data,
- args=(queue, method.url, method.stats))
- th.start()
- threads.append(th)
+ return data or None
- for thread in threads:
- thread.join()
- result.update(queue.get())
+ def get_overview_stats(self):
+ url = '{0}/{1}'.format(self.url, API_OVERVIEW)
- return result or None
+ raw = self._get_raw_data(url)
- def _get_overview_stats(self, queue, url, stats):
- """
- Format data received from http request
- :return: dict
- """
+ if not raw:
+ return None
- raw_data = self._get_raw_data(url)
+ data = loads(raw)
- if not raw_data:
- return queue.put(dict())
- data = loads(raw_data)
- data = data[0] if isinstance(data, list) else data
+ self.node_name = data['node']
- to_netdata = fetch_data(raw_data=data, metrics=stats)
- return queue.put(to_netdata)
+ return fetch_data(raw_data=data, metrics=OVERVIEW_STATS)
+
+ def get_nodes_stats(self):
+ url = '{0}/{1}/{2}'.format(self.url, API_NODE, self.node_name)
+
+ raw = self._get_raw_data(url)
+
+ if not raw:
+ return None
+
+ data = loads(raw)
+
+ return fetch_data(raw_data=data, metrics=NODE_STATS)
def fetch_data(raw_data, metrics):
data = dict()
+
for metric in metrics:
value = raw_data
metrics_list = metric.split('.')
@@ -204,4 +181,5 @@ def fetch_data(raw_data, metrics):
except KeyError:
continue
data['_'.join(metrics_list)] = value
+
return data
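
Note: the rewrite drops the thread-and-queue fan-out in favour of two sequential requests: the overview endpoint is fetched first (which also yields the node name), then api/nodes/<node_name>. Both responses funnel through fetch_data(), which walks dot-separated metric paths into the nested JSON and flattens them with underscores. A standalone illustration with a made-up payload:

    def fetch_data(raw_data, metrics):
        data = dict()
        for metric in metrics:
            value = raw_data
            parts = metric.split('.')
            try:
                for p in parts:       # descend one key per path segment
                    value = value[p]
            except KeyError:
                continue              # silently skip metrics the broker omits
            data['_'.join(parts)] = value
        return data

    sample = {'mem_used': 123, 'message_stats': {'ack': 7}}  # made-up payload
    print(fetch_data(sample, ['mem_used', 'message_stats.ack', 'missing.key']))
    # {'mem_used': 123, 'message_stats_ack': 7}
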
diff --git a/collectors/python.d.plugin/rabbitmq/rabbitmq.conf b/collectors/python.d.plugin/rabbitmq/rabbitmq.conf
index 3f90da8a2..ae0dbdb75 100644
--- a/collectors/python.d.plugin/rabbitmq/rabbitmq.conf
+++ b/collectors/python.d.plugin/rabbitmq/rabbitmq.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,7 +56,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# In addition to the above, the rabbitmq plugin also supports the following:
diff --git a/collectors/python.d.plugin/redis/README.md b/collectors/python.d.plugin/redis/README.md
index 8d21df0ca..0bea0376e 100644
--- a/collectors/python.d.plugin/redis/README.md
+++ b/collectors/python.d.plugin/redis/README.md
@@ -40,3 +40,5 @@ localhost:
When no configuration file is found, the module tries to connect to the TCP/IP socket `localhost:6379`.
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fredis%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/redis/redis.chart.py b/collectors/python.d.plugin/redis/redis.chart.py
index 37d55ebfe..9dbb2c164 100644
--- a/collectors/python.d.plugin/redis/redis.chart.py
+++ b/collectors/python.d.plugin/redis/redis.chart.py
@@ -47,13 +47,13 @@ CHARTS = {
]
},
'hit_rate': {
- 'options': [None, 'Hit rate', 'percent', 'hits', 'redis.hit_rate', 'line'],
+ 'options': [None, 'Hit rate', 'percentage', 'hits', 'redis.hit_rate', 'line'],
'lines': [
['hit_rate', 'rate', 'absolute']
]
},
'memory': {
- 'options': [None, 'Memory utilization', 'kilobytes', 'memory', 'redis.memory', 'line'],
+ 'options': [None, 'Memory utilization', 'KiB', 'memory', 'redis.memory', 'line'],
'lines': [
['used_memory', 'total', 'absolute', 1, 1024],
['used_memory_lua', 'lua', 'absolute', 1, 1024]
@@ -62,8 +62,8 @@ CHARTS = {
'net': {
'options': [None, 'Bandwidth', 'kilobits/s', 'network', 'redis.net', 'area'],
'lines': [
- ['total_net_input_bytes', 'in', 'incremental', 8, 1024],
- ['total_net_output_bytes', 'out', 'incremental', -8, 1024]
+ ['total_net_input_bytes', 'in', 'incremental', 8, 1000],
+ ['total_net_output_bytes', 'out', 'incremental', -8, 1000]
]
},
'keys_redis': {
@@ -146,16 +146,13 @@ RE = re.compile(r'\n([a-z_0-9 ]+):(?:keys=)?([^,\r]+)')
class Service(SocketService):
def __init__(self, configuration=None, name=None):
SocketService.__init__(self, configuration=configuration, name=name)
- self._keep_alive = True
-
self.order = list()
self.definitions = dict()
-
+ self._keep_alive = True
self.host = self.configuration.get('host', 'localhost')
self.port = self.configuration.get('port', 6379)
self.unix_socket = self.configuration.get('socket')
p = self.configuration.get('pass')
-
self.auth_request = 'AUTH {0} \r\n'.format(p).encode() if p else None
self.request = 'INFO\r\n'.encode()
self.bgsave_time = 0
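
Note: the bandwidth divisor change from 1024 to 1000 makes the arithmetic match the chart's 'kilobits/s' label: multiplying the byte counter by 8 gives bits, and dividing by 1000 gives SI kilobits, whereas 1024 produced kibibits and understated the rate. A quick check with a hypothetical counter delta:

    bytes_per_sec = 125000           # hypothetical delta of total_net_input_bytes
    print(bytes_per_sec * 8 / 1000)  # 1000.0 kilobit/s -- matches the label
    print(bytes_per_sec * 8 / 1024)  # 976.5625 -- what the old divisor charted
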
diff --git a/collectors/python.d.plugin/redis/redis.conf b/collectors/python.d.plugin/redis/redis.conf
index 6363f6da7..b456d75d3 100644
--- a/collectors/python.d.plugin/redis/redis.conf
+++ b/collectors/python.d.plugin/redis/redis.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,7 +56,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# In addition to the above, redis also supports the following:
diff --git a/collectors/python.d.plugin/rethinkdbs/README.md b/collectors/python.d.plugin/rethinkdbs/README.md
index 5d357fa49..183c7f733 100644
--- a/collectors/python.d.plugin/rethinkdbs/README.md
+++ b/collectors/python.d.plugin/rethinkdbs/README.md
@@ -32,3 +32,5 @@ localhost:
When no configuration file is found, the module tries to connect to `127.0.0.1:28015`.
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Frethinkdbs%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/rethinkdbs/rethinkdbs.chart.py b/collectors/python.d.plugin/rethinkdbs/rethinkdbs.chart.py
index 127e9ad4b..da2f26f4a 100644
--- a/collectors/python.d.plugin/rethinkdbs/rethinkdbs.chart.py
+++ b/collectors/python.d.plugin/rethinkdbs/rethinkdbs.chart.py
@@ -136,13 +136,11 @@ class Service(SimpleService):
SimpleService.__init__(self, configuration=configuration, name=name)
self.order = list(ORDER)
self.definitions = cluster_charts()
-
self.host = self.configuration.get('host', '127.0.0.1')
self.port = self.configuration.get('port', 28015)
self.user = self.configuration.get('user', 'admin')
self.password = self.configuration.get('password')
self.timeout = self.configuration.get('timeout', 2)
-
self.conn = None
self.alive = True
diff --git a/collectors/python.d.plugin/rethinkdbs/rethinkdbs.conf b/collectors/python.d.plugin/rethinkdbs/rethinkdbs.conf
index 73544fc2e..d671acbb0 100644
--- a/collectors/python.d.plugin/rethinkdbs/rethinkdbs.conf
+++ b/collectors/python.d.plugin/rethinkdbs/rethinkdbs.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,7 +56,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# In addition to the above, rethinkdb also supports the following:
diff --git a/collectors/python.d.plugin/retroshare/README.md b/collectors/python.d.plugin/retroshare/README.md
index e95095c65..a8a58880e 100644
--- a/collectors/python.d.plugin/retroshare/README.md
+++ b/collectors/python.d.plugin/retroshare/README.md
@@ -1 +1,3 @@
# retroshare
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fretroshare%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/retroshare/retroshare.chart.py b/collectors/python.d.plugin/retroshare/retroshare.chart.py
index 1d8e35050..feb871fbd 100644
--- a/collectors/python.d.plugin/retroshare/retroshare.chart.py
+++ b/collectors/python.d.plugin/retroshare/retroshare.chart.py
@@ -7,26 +7,25 @@ import json
from bases.FrameworkServices.UrlService import UrlService
-# default module values (can be overridden per job in `config`)
-# update_every = 2
-priority = 60000
-retries = 60
-# charts order (can be overridden if you want less charts, or different order)
-ORDER = ['bandwidth', 'peers', 'dht']
+ORDER = [
+ 'bandwidth',
+ 'peers',
+ 'dht',
+]
CHARTS = {
'bandwidth': {
- 'options': [None, 'RetroShare Bandwidth', 'kB/s', 'RetroShare', 'retroshare.bandwidth', 'area'],
+ 'options': [None, 'RetroShare Bandwidth', 'kilobits/s', 'RetroShare', 'retroshare.bandwidth', 'area'],
'lines': [
- ['bandwidth_up_kb', 'Upload'],
+ ['bandwidth_up_kb', 'Upload'],
['bandwidth_down_kb', 'Download']
]
},
'peers': {
'options': [None, 'RetroShare Peers', 'peers', 'RetroShare', 'retroshare.peers', 'line'],
'lines': [
- ['peers_all', 'All friends'],
+ ['peers_all', 'All friends'],
['peers_connected', 'Connected friends']
]
},
@@ -34,7 +33,7 @@ CHARTS = {
'options': [None, 'Retroshare DHT', 'peers', 'RetroShare', 'retroshare.dht', 'line'],
'lines': [
['dht_size_all', 'DHT nodes estimated'],
- ['dht_size_rs', 'RS nodes estimated']
+ ['dht_size_rs', 'RS nodes estimated']
]
}
}
@@ -43,9 +42,9 @@ CHARTS = {
class Service(UrlService):
def __init__(self, configuration=None, name=None):
UrlService.__init__(self, configuration=configuration, name=name)
- self.baseurl = self.configuration.get('url', 'http://localhost:9090')
self.order = ORDER
self.definitions = CHARTS
+ self.baseurl = self.configuration.get('url', 'http://localhost:9090')
def _get_stats(self):
"""
diff --git a/collectors/python.d.plugin/retroshare/retroshare.conf b/collectors/python.d.plugin/retroshare/retroshare.conf
index 9c92583f7..3d0af538d 100644
--- a/collectors/python.d.plugin/retroshare/retroshare.conf
+++ b/collectors/python.d.plugin/retroshare/retroshare.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,7 +56,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# In addition to the above, RetroShare also supports the following:
diff --git a/collectors/python.d.plugin/samba/README.md b/collectors/python.d.plugin/samba/README.md
index 44610d373..97f2e3d33 100644
--- a/collectors/python.d.plugin/samba/README.md
+++ b/collectors/python.d.plugin/samba/README.md
@@ -65,3 +65,5 @@ samba: yes
```
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fsamba%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/samba/samba.chart.py b/collectors/python.d.plugin/samba/samba.chart.py
index b2278de9e..ac89c29b0 100644
--- a/collectors/python.d.plugin/samba/samba.chart.py
+++ b/collectors/python.d.plugin/samba/samba.chart.py
@@ -24,10 +24,7 @@ from bases.FrameworkServices.ExecutableService import ExecutableService
disabled_by_default = True
-# default module values (can be overridden per job in `config`)
update_every = 5
-priority = 60000
-retries = 60
ORDER = [
'syscall_rw',
@@ -41,14 +38,14 @@ ORDER = [
CHARTS = {
'syscall_rw': {
- 'options': [None, 'R/Ws', 'kilobytes/s', 'syscall', 'syscall.rw', 'area'],
+ 'options': [None, 'R/Ws', 'KiB/s', 'syscall', 'syscall.rw', 'area'],
'lines': [
['syscall_sendfile_bytes', 'sendfile', 'incremental', 1, 1024],
['syscall_recvfile_bytes', 'recvfile', 'incremental', -1, 1024]
]
},
'smb2_rw': {
- 'options': [None, 'R/Ws', 'kilobytes/s', 'smb2', 'smb2.rw', 'area'],
+ 'options': [None, 'R/Ws', 'KiB/s', 'smb2', 'smb2.rw', 'area'],
'lines': [
['smb2_read_outbytes', 'readout', 'incremental', 1, 1024],
['smb2_write_inbytes', 'writein', 'incremental', -1, 1024],
diff --git a/collectors/python.d.plugin/samba/samba.conf b/collectors/python.d.plugin/samba/samba.conf
index ee513c60f..db15d4e9e 100644
--- a/collectors/python.d.plugin/samba/samba.conf
+++ b/collectors/python.d.plugin/samba/samba.conf
@@ -27,11 +27,9 @@ update_every: 5
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,5 +56,5 @@ update_every: 5
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
\ No newline at end of file
diff --git a/collectors/python.d.plugin/sensors/README.md b/collectors/python.d.plugin/sensors/README.md
index eb1642d90..e3f956f11 100644
--- a/collectors/python.d.plugin/sensors/README.md
+++ b/collectors/python.d.plugin/sensors/README.md
@@ -15,3 +15,5 @@ We are tracking such cases in issue [#827](https://github.com/netdata/netdata/is
Please join this discussion for help.
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fsensors%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/sensors/sensors.chart.py b/collectors/python.d.plugin/sensors/sensors.chart.py
index d70af3b05..e622eb8e6 100644
--- a/collectors/python.d.plugin/sensors/sensors.chart.py
+++ b/collectors/python.d.plugin/sensors/sensors.chart.py
@@ -7,8 +7,6 @@ from third_party import lm_sensors as sensors
from bases.FrameworkServices.SimpleService import SimpleService
-# default module values (can be overridden per job in `config`)
-# update_every = 2
ORDER = [
'temperature',
@@ -139,7 +137,7 @@ class Service(SimpleService):
except sensors.SensorsError as error:
self.error('{0}: {1}'.format(sf.name, error))
continue
- if not vals or vals[0] == 0:
+ if not vals or (vals[0] == 0 and feature.type != 1):
continue
if TYPE_MAP[feature.type] == sensor:
# create chart
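
Note: the relaxed guard keeps zero first readings for fan features only; in the lm-sensors feature-type enumeration the fan type is 1, and 0 RPM is a legitimate reading for a stopped fan, whereas a 0 from other sensor types usually means the input is absent. A standalone sketch of the predicate, assuming that enum value:

    SENSORS_FEATURE_FAN = 1  # assumed lm-sensors feature-type value

    def keep(vals, feature_type):
        # mirrors the new condition: skip empty readings, and skip zero
        # first readings unless the feature is a fan
        return bool(vals) and (vals[0] != 0 or feature_type == SENSORS_FEATURE_FAN)

    print(keep([0.0], SENSORS_FEATURE_FAN))  # True  -> fan chart still created
    print(keep([0.0], 2))                    # False -> zero non-fan reading skipped
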
diff --git a/collectors/python.d.plugin/sensors/sensors.conf b/collectors/python.d.plugin/sensors/sensors.conf
index 83bbffd7d..d3369ba66 100644
--- a/collectors/python.d.plugin/sensors/sensors.conf
+++ b/collectors/python.d.plugin/sensors/sensors.conf
@@ -19,11 +19,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
diff --git a/collectors/python.d.plugin/smartd_log/README.md b/collectors/python.d.plugin/smartd_log/README.md
index a31ad0c7a..3b0816fb8 100644
--- a/collectors/python.d.plugin/smartd_log/README.md
+++ b/collectors/python.d.plugin/smartd_log/README.md
@@ -99,3 +99,5 @@ local:
If no configuration is given, the module will attempt to read log files in the `/var/log/smartd/` directory.
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fsmartd_log%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/smartd_log/smartd_log.chart.py b/collectors/python.d.plugin/smartd_log/smartd_log.chart.py
index 13762fabe..871025a47 100644
--- a/collectors/python.d.plugin/smartd_log/smartd_log.chart.py
+++ b/collectors/python.d.plugin/smartd_log/smartd_log.chart.py
@@ -268,7 +268,7 @@ CHARTS = {
'algo': INCREMENTAL,
},
'reserved_block_count': {
- 'options': [None, 'Reserved Block Count', '%', 'wear', 'smartd_log.reserved_block_count', 'line'],
+ 'options': [None, 'Reserved Block Count', 'percentage', 'wear', 'smartd_log.reserved_block_count', 'line'],
'lines': [],
'attrs': [ATTR170],
'algo': ABSOLUTE,
@@ -321,7 +321,7 @@ CHARTS = {
},
'percent_lifetime_used': {
- 'options': [None, 'Percent Lifetime Used', '%', 'wear', 'smartd_log.percent_lifetime_used', 'line'],
+ 'options': [None, 'Percent Lifetime Used', 'percentage', 'wear', 'smartd_log.percent_lifetime_used', 'line'],
'lines': [],
'attrs': [ATTR202],
'algo': ABSOLUTE,
@@ -453,6 +453,11 @@ class Ata190(BaseAtaSmartAttribute):
return 100 - int(self.normalized_value)
+class Ata194(BaseAtaSmartAttribute):
+ def value(self):
+ return min(int(self.normalized_value), int(self.raw_value))
+
+
class BaseSCSISmartAttribute:
def __init__(self, name, raw_value):
self.name = name
@@ -474,10 +479,11 @@ def ata_attribute_factory(value):
return Ata9(*value)
elif name == ATTR190:
return Ata190(*value)
+ elif name == ATTR194:
+ return Ata194(*value)
elif name in [
ATTR1,
ATTR7,
- ATTR194,
ATTR202,
ATTR206,
]:
@@ -580,11 +586,9 @@ class Service(SimpleService):
SimpleService.__init__(self, configuration=configuration, name=name)
self.order = ORDER
self.definitions = deepcopy(CHARTS)
-
self.log_path = configuration.get('log_path', DEF_PATH)
self.age = configuration.get('age', DEF_AGE)
self.exclude = configuration.get('exclude_disks', str()).split()
-
self.disks = list()
self.runs = 0
@@ -646,6 +650,10 @@ class Service(SimpleService):
return len(self.disks)
def create_disk_from_file(self, full_name, current_time):
+ if not full_name.endswith(CSV):
+ self.debug('skipping {0}: not a csv file'.format(full_name))
+ return None
+
name = os.path.basename(full_name).split('.')[-3]
path = os.path.join(self.log_path, full_name)
@@ -655,10 +663,6 @@ class Service(SimpleService):
if [p for p in self.exclude if p in name]:
return None
- if not full_name.endswith(CSV):
- self.debug('skipping {0}: not a csv file'.format(full_name))
- return None
-
if not os.access(path, os.R_OK):
self.debug('skipping {0}: not readable'.format(full_name))
return None
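
Note: SMART attribute 194 is the drive temperature, but vendors disagree on where the Celsius figure lives: some report it in the raw value, others in the normalized value (with the other field often vendor-scaled far above any plausible temperature). Taking the smaller of the two is a heuristic for the real reading. A self-contained sketch of the new handler, with the base-class plumbing omitted:

    class Ata194:
        def __init__(self, normalized_value, raw_value):
            self.normalized_value = normalized_value
            self.raw_value = raw_value

        def value(self):
            # pick the smaller field: the plausible Celsius reading
            return min(int(self.normalized_value), int(self.raw_value))

    print(Ata194('100', '38').value())  # 38, for a hypothetical drive report
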
diff --git a/collectors/python.d.plugin/smartd_log/smartd_log.conf b/collectors/python.d.plugin/smartd_log/smartd_log.conf
index ab7f45b0f..4f138d17a 100644
--- a/collectors/python.d.plugin/smartd_log/smartd_log.conf
+++ b/collectors/python.d.plugin/smartd_log/smartd_log.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,7 +56,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# In addition to the above, smartd_log also supports the following:
diff --git a/collectors/python.d.plugin/spigotmc/README.md b/collectors/python.d.plugin/spigotmc/README.md
index ae5602587..c38930558 100644
--- a/collectors/python.d.plugin/spigotmc/README.md
+++ b/collectors/python.d.plugin/spigotmc/README.md
@@ -20,3 +20,5 @@ password: pass
By default, a connection to port 25575 on the local system is attempted with an empty password.
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fspigotmc%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/spigotmc/spigotmc.chart.py b/collectors/python.d.plugin/spigotmc/spigotmc.chart.py
index a5e5ee0ee..09674f5c9 100644
--- a/collectors/python.d.plugin/spigotmc/spigotmc.chart.py
+++ b/collectors/python.d.plugin/spigotmc/spigotmc.chart.py
@@ -16,7 +16,10 @@ update_every = 5
PRECISION = 100
-ORDER = ['tps', 'users']
+ORDER = [
+ 'tps',
+ 'users',
+]
CHARTS = {
'tps': {
diff --git a/collectors/python.d.plugin/spigotmc/spigotmc.conf b/collectors/python.d.plugin/spigotmc/spigotmc.conf
index 3ba492def..ccb5e2636 100644
--- a/collectors/python.d.plugin/spigotmc/spigotmc.conf
+++ b/collectors/python.d.plugin/spigotmc/spigotmc.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,7 +56,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# In addition to the above, spigotmc supports the following:
diff --git a/collectors/python.d.plugin/springboot/README.md b/collectors/python.d.plugin/springboot/README.md
index a1817cc2b..b5b776dd0 100644
--- a/collectors/python.d.plugin/springboot/README.md
+++ b/collectors/python.d.plugin/springboot/README.md
@@ -120,3 +120,5 @@ You can disable the default charts by setting `defaults.<chart-id>: false`.
The dimension names of extra charts should use `_` in place of `.`.
Please check [springboot.conf](springboot.conf) for more examples.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fspringboot%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/springboot/springboot.chart.py b/collectors/python.d.plugin/springboot/springboot.chart.py
index 7df37e1d0..eec870ebf 100644
--- a/collectors/python.d.plugin/springboot/springboot.chart.py
+++ b/collectors/python.d.plugin/springboot/springboot.chart.py
@@ -6,13 +6,14 @@
import json
from bases.FrameworkServices.UrlService import UrlService
-# default module values (can be overridden per job in `config`)
-# update_every = 2
-priority = 60000
-retries = 60
-
-DEFAULT_ORDER = ['response_code', 'threads', 'gc_time', 'gc_ope', 'heap']
+DEFAULT_ORDER = [
+ 'response_code',
+ 'threads',
+ 'gc_time',
+ 'gc_ope',
+ 'heap',
+]
DEFAULT_CHARTS = {
'response_code': {
@@ -60,7 +61,7 @@ DEFAULT_CHARTS = {
]
},
'heap': {
- 'options': [None, "Heap Memory Usage", "KB", "heap memory", "springboot.heap", "area"],
+ 'options': [None, "Heap Memory Usage", "KiB", "heap memory", "springboot.heap", "area"],
'lines': [
["heap_committed", 'committed', "absolute"],
["heap_used", 'used', "absolute"],
diff --git a/collectors/python.d.plugin/springboot/springboot.conf b/collectors/python.d.plugin/springboot/springboot.conf
index 40b5fb437..13a398955 100644
--- a/collectors/python.d.plugin/springboot/springboot.conf
+++ b/collectors/python.d.plugin/springboot/springboot.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply a penalty to update_every in case of failures.
+# The penalty will increase every 5 failed updates in a row. The maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -53,7 +51,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# In addition to the above, this plugin also supports the following:
diff --git a/collectors/python.d.plugin/squid/README.md b/collectors/python.d.plugin/squid/README.md
index 9c9b62f27..b278f4191 100644
--- a/collectors/python.d.plugin/squid/README.md
+++ b/collectors/python.d.plugin/squid/README.md
@@ -36,3 +36,5 @@ local:
Without any configuration, the module will try to autodetect where squid presents its `counters` data.
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fsquid%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/squid/squid.chart.py b/collectors/python.d.plugin/squid/squid.chart.py
index fd54168f0..c00556b56 100644
--- a/collectors/python.d.plugin/squid/squid.chart.py
+++ b/collectors/python.d.plugin/squid/squid.chart.py
@@ -6,13 +6,12 @@
from bases.FrameworkServices.SocketService import SocketService
-# default module values (can be overridden per job in `config`)
-# update_every = 2
-priority = 60000
-retries = 60
-
-# charts order (can be overridden if you want less charts, or different order)
-ORDER = ['clients_net', 'clients_requests', 'servers_net', 'servers_requests']
+ORDER = [
+ 'clients_net',
+ 'clients_requests',
+ 'servers_net',
+ 'servers_requests',
+]
CHARTS = {
'clients_net': {
diff --git a/collectors/python.d.plugin/squid/squid.conf b/collectors/python.d.plugin/squid/squid.conf
index 564187f00..b90a52c0c 100644
--- a/collectors/python.d.plugin/squid/squid.conf
+++ b/collectors/python.d.plugin/squid/squid.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply a penalty to update_every in case of failures.
+# The penalty will increase every 5 failed updates in a row. The maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,7 +56,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# In addition to the above, squid also supports the following:
diff --git a/collectors/python.d.plugin/tomcat/README.md b/collectors/python.d.plugin/tomcat/README.md
index e548bd338..21e3896a3 100644
--- a/collectors/python.d.plugin/tomcat/README.md
+++ b/collectors/python.d.plugin/tomcat/README.md
@@ -31,3 +31,5 @@ Without configuration, module attempts to connect to `http://localhost:8080/mana
So it will probably fail.
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Ftomcat%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/tomcat/tomcat.chart.py b/collectors/python.d.plugin/tomcat/tomcat.chart.py
index 3c2d0ed40..01578c56e 100644
--- a/collectors/python.d.plugin/tomcat/tomcat.chart.py
+++ b/collectors/python.d.plugin/tomcat/tomcat.chart.py
@@ -8,13 +8,18 @@ import xml.etree.ElementTree as ET
from bases.FrameworkServices.UrlService import UrlService
-# default module values (can be overridden per job in `config`)
-# update_every = 2
-priority = 60000
-retries = 60
+MiB = 1 << 20
-# charts order (can be overridden if you want less charts, or different order)
-ORDER = ['accesses', 'bandwidth', 'processing_time', 'threads', 'jvm', 'jvm_eden', 'jvm_survivor', 'jvm_tenured']
+ORDER = [
+ 'accesses',
+ 'bandwidth',
+ 'processing_time',
+ 'threads',
+ 'jvm',
+ 'jvm_eden',
+ 'jvm_survivor',
+ 'jvm_tenured',
+]
CHARTS = {
'accesses': {
@@ -25,7 +30,7 @@ CHARTS = {
]
},
'bandwidth': {
- 'options': [None, 'Bandwidth', 'KB/s', 'statistics', 'tomcat.bandwidth', 'area'],
+ 'options': [None, 'Bandwidth', 'KiB/s', 'statistics', 'tomcat.bandwidth', 'area'],
'lines': [
['bytesSent', 'sent', 'incremental', 1, 1024],
['bytesReceived', 'received', 'incremental', 1, 1024],
@@ -45,39 +50,39 @@ CHARTS = {
]
},
'jvm': {
- 'options': [None, 'JVM Memory Pool Usage', 'MB', 'memory', 'tomcat.jvm', 'stacked'],
+ 'options': [None, 'JVM Memory Pool Usage', 'MiB', 'memory', 'tomcat.jvm', 'stacked'],
'lines': [
- ['free', 'free', 'absolute', 1, 1048576],
- ['eden_used', 'eden', 'absolute', 1, 1048576],
- ['survivor_used', 'survivor', 'absolute', 1, 1048576],
- ['tenured_used', 'tenured', 'absolute', 1, 1048576],
- ['code_cache_used', 'code cache', 'absolute', 1, 1048576],
- ['compressed_used', 'compressed', 'absolute', 1, 1048576],
- ['metaspace_used', 'metaspace', 'absolute', 1, 1048576],
+ ['free', 'free', 'absolute', 1, MiB],
+ ['eden_used', 'eden', 'absolute', 1, MiB],
+ ['survivor_used', 'survivor', 'absolute', 1, MiB],
+ ['tenured_used', 'tenured', 'absolute', 1, MiB],
+ ['code_cache_used', 'code cache', 'absolute', 1, MiB],
+ ['compressed_used', 'compressed', 'absolute', 1, MiB],
+ ['metaspace_used', 'metaspace', 'absolute', 1, MiB],
]
},
'jvm_eden': {
- 'options': [None, 'Eden Memory Usage', 'MB', 'memory', 'tomcat.jvm_eden', 'area'],
+ 'options': [None, 'Eden Memory Usage', 'MiB', 'memory', 'tomcat.jvm_eden', 'area'],
'lines': [
- ['eden_used', 'used', 'absolute', 1, 1048576],
- ['eden_committed', 'committed', 'absolute', 1, 1048576],
- ['eden_max', 'max', 'absolute', 1, 1048576]
+ ['eden_used', 'used', 'absolute', 1, MiB],
+ ['eden_committed', 'committed', 'absolute', 1, MiB],
+ ['eden_max', 'max', 'absolute', 1, MiB]
]
},
'jvm_survivor': {
- 'options': [None, 'Survivor Memory Usage', 'MB', 'memory', 'tomcat.jvm_survivor', 'area'],
+ 'options': [None, 'Survivor Memory Usage', 'MiB', 'memory', 'tomcat.jvm_survivor', 'area'],
'lines': [
- ['survivor_used', 'used', 'absolute', 1, 1048576],
- ['survivor_committed', 'committed', 'absolute', 1, 1048576],
- ['survivor_max', 'max', 'absolute', 1, 1048576]
+ ['survivor_used', 'used', 'absolute', 1, MiB],
+ ['survivor_committed', 'committed', 'absolute', 1, MiB],
+ ['survivor_max', 'max', 'absolute', 1, MiB],
]
},
'jvm_tenured': {
- 'options': [None, 'Tenured Memory Usage', 'MB', 'memory', 'tomcat.jvm_tenured', 'area'],
+ 'options': [None, 'Tenured Memory Usage', 'MiB', 'memory', 'tomcat.jvm_tenured', 'area'],
'lines': [
- ['tenured_used', 'used', 'absolute', 1, 1048576],
- ['tenured_committed', 'committed', 'absolute', 1, 1048576],
- ['tenured_max', 'max', 'absolute', 1, 1048576]
+ ['tenured_used', 'used', 'absolute', 1, MiB],
+ ['tenured_committed', 'committed', 'absolute', 1, MiB],
+ ['tenured_max', 'max', 'absolute', 1, MiB]
]
}
}
@@ -86,10 +91,10 @@ CHARTS = {
class Service(UrlService):
def __init__(self, configuration=None, name=None):
UrlService.__init__(self, configuration=configuration, name=name)
- self.url = self.configuration.get('url', 'http://127.0.0.1:8080/manager/status?XML=true')
- self.connector_name = self.configuration.get('connector_name', None)
self.order = ORDER
self.definitions = CHARTS
+ self.url = self.configuration.get('url', 'http://127.0.0.1:8080/manager/status?XML=true')
+ self.connector_name = self.configuration.get('connector_name', None)
def _get_data(self):
"""
diff --git a/collectors/python.d.plugin/tomcat/tomcat.conf b/collectors/python.d.plugin/tomcat/tomcat.conf
index c63f06cfa..009591bdf 100644
--- a/collectors/python.d.plugin/tomcat/tomcat.conf
+++ b/collectors/python.d.plugin/tomcat/tomcat.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply a penalty to update_every in case of failures.
+# The penalty will increase every 5 failed updates in a row. The maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,7 +56,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# In addition to the above, tomcat also supports the following:
diff --git a/collectors/python.d.plugin/tor/README.md b/collectors/python.d.plugin/tor/README.md
index 4a8833730..2ce0f25f3 100644
--- a/collectors/python.d.plugin/tor/README.md
+++ b/collectors/python.d.plugin/tor/README.md
@@ -44,3 +44,5 @@ For more options please read the manual.
Without configuration, the module attempts to connect to `127.0.0.1:9051`.
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Ftor%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/tor/tor.chart.py b/collectors/python.d.plugin/tor/tor.chart.py
index b77632bd4..dd61e6e9e 100644
--- a/collectors/python.d.plugin/tor/tor.chart.py
+++ b/collectors/python.d.plugin/tor/tor.chart.py
@@ -24,7 +24,7 @@ ORDER = [
CHARTS = {
'traffic': {
- 'options': [None, 'Tor Traffic', 'KB/s', 'traffic', 'tor.traffic', 'area'],
+ 'options': [None, 'Tor Traffic', 'KiB/s', 'traffic', 'tor.traffic', 'area'],
'lines': [
['read', 'read', 'incremental', 1, 1024],
['write', 'write', 'incremental', 1, -1024],
@@ -39,10 +39,8 @@ class Service(SimpleService):
super(Service, self).__init__(configuration=configuration, name=name)
self.order = ORDER
self.definitions = CHARTS
-
self.port = self.configuration.get('control_port', DEF_PORT)
self.password = self.configuration.get('password')
-
self.use_socket = isinstance(self.port, str) and self.port != DEF_PORT and not self.port.isdigit()
self.conn = None
self.alive = False
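Note the negative divisor on the `write` line above: dividing by `-1024` both converts bytes to KiB and mirrors the written traffic below the x-axis, the usual netdata idiom for read/write area charts. Illustratively:

```python
divisor = -1024                # from ['write', 'write', 'incremental', 1, -1024]
written_bytes_per_sec = 20480
print(written_bytes_per_sec / divisor)  # -> -20.0, plotted below zero
```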
diff --git a/collectors/python.d.plugin/tor/tor.conf b/collectors/python.d.plugin/tor/tor.conf
index 8245414fb..91b517a62 100644
--- a/collectors/python.d.plugin/tor/tor.conf
+++ b/collectors/python.d.plugin/tor/tor.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply a penalty to update_every in case of failures.
+# The penalty will increase every 5 failed updates in a row. The maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,7 +56,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 10 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# In addition to the above, the tor plugin also supports the following:
diff --git a/collectors/python.d.plugin/traefik/README.md b/collectors/python.d.plugin/traefik/README.md
index 9b4a18208..61e0fdb72 100644
--- a/collectors/python.d.plugin/traefik/README.md
+++ b/collectors/python.d.plugin/traefik/README.md
@@ -46,9 +46,10 @@ priority : 60000
local:
url : 'http://localhost:8080/health'
- retries : 10
```
Without configuration, the module attempts to connect to `http://localhost:8080/health`.
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Ftraefik%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/traefik/traefik.chart.py b/collectors/python.d.plugin/traefik/traefik.chart.py
index dc8933220..570339d0a 100644
--- a/collectors/python.d.plugin/traefik/traefik.chart.py
+++ b/collectors/python.d.plugin/traefik/traefik.chart.py
@@ -3,16 +3,13 @@
# Author: Alexandre Menezes (@ale_menezes)
# SPDX-License-Identifier: GPL-3.0-or-later
-from json import loads
from collections import defaultdict
+
+from json import loads
+
from bases.FrameworkServices.UrlService import UrlService
-# default module values (can be overridden per job in `config`)
-update_every = 1
-priority = 60000
-retries = 10
-# charts order (can be overridden if you want less charts, or different order)
ORDER = [
'response_statuses',
'response_codes',
@@ -99,14 +96,22 @@ class Service(UrlService):
self.url = self.configuration.get('url', 'http://localhost:8080/health')
self.order = ORDER
self.definitions = CHARTS
- self.data = {
- 'successful_requests': 0, 'redirects': 0, 'bad_requests': 0,
- 'server_errors': 0, 'other_requests': 0, '1xx': 0, '2xx': 0,
- '3xx': 0, '4xx': 0, '5xx': 0, 'other': 0,
- 'average_response_time_per_iteration_sec': 0
- }
self.last_total_response_time = 0
self.last_total_count = 0
+ self.data = {
+ 'successful_requests': 0,
+ 'redirects': 0,
+ 'bad_requests': 0,
+ 'server_errors': 0,
+ 'other_requests': 0,
+ '1xx': 0,
+ '2xx': 0,
+ '3xx': 0,
+ '4xx': 0,
+ '5xx': 0,
+ 'other': 0,
+ 'average_response_time_per_iteration_sec': 0,
+ }
def _get_data(self):
data = self._get_raw_data()
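The counters initialised above are filled by bucketing HTTP status codes reported by the Traefik health endpoint. A hedged sketch of one plausible classification (the helper name `bucket_status` is hypothetical, and the module's actual grouping rules may differ):

```python
def bucket_status(data, code):
    """Increment the per-class counters for one HTTP status code (sketch)."""
    if not 100 <= code < 600:
        data['other'] += 1
        data['other_requests'] += 1
        return
    data['{0}xx'.format(code // 100)] += 1   # '1xx' .. '5xx'
    if code < 300:
        data['successful_requests'] += 1
    elif code < 400:
        data['redirects'] += 1
    elif code < 500:
        data['bad_requests'] += 1
    else:
        data['server_errors'] += 1
```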
diff --git a/collectors/python.d.plugin/traefik/traefik.conf b/collectors/python.d.plugin/traefik/traefik.conf
index 909b9e549..e3f182d32 100644
--- a/collectors/python.d.plugin/traefik/traefik.conf
+++ b/collectors/python.d.plugin/traefik/traefik.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply a penalty to update_every in case of failures.
+# The penalty will increase every 5 failed updates in a row. The maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,7 +56,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 10 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# In addition to the above, the traefik plugin also supports the following:
diff --git a/collectors/python.d.plugin/unbound/README.md b/collectors/python.d.plugin/unbound/README.md
index 3b4fa16fd..e213683ca 100644
--- a/collectors/python.d.plugin/unbound/README.md
+++ b/collectors/python.d.plugin/unbound/README.md
@@ -74,3 +74,5 @@ While it's a bit more complicated to set up correctly, it is recommended
that you use a UNIX socket as it provides far better performance.
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Funbound%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/unbound/unbound.chart.py b/collectors/python.d.plugin/unbound/unbound.chart.py
index 52fcbf7e2..adb58d417 100644
--- a/collectors/python.d.plugin/unbound/unbound.chart.py
+++ b/collectors/python.d.plugin/unbound/unbound.chart.py
@@ -13,7 +13,11 @@ from bases.loaders import YamlOrderedLoader
PRECISION = 1000
-ORDER = ['queries', 'recursion', 'reqlist']
+ORDER = [
+ 'queries',
+ 'recursion',
+ 'reqlist',
+]
CHARTS = {
'queries': {
diff --git a/collectors/python.d.plugin/unbound/unbound.conf b/collectors/python.d.plugin/unbound/unbound.conf
index 46c4b097f..68561366b 100644
--- a/collectors/python.d.plugin/unbound/unbound.conf
+++ b/collectors/python.d.plugin/unbound/unbound.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply a penalty to update_every in case of failures.
+# The penalty will increase every 5 failed updates in a row. The maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,7 +56,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# In addition to the above, unbound also supports the following:
diff --git a/collectors/python.d.plugin/uwsgi/README.md b/collectors/python.d.plugin/uwsgi/README.md
index a062710df..9d455cfca 100644
--- a/collectors/python.d.plugin/uwsgi/README.md
+++ b/collectors/python.d.plugin/uwsgi/README.md
@@ -35,3 +35,5 @@ localhost:
```
When no configuration file is found, the module tries to connect to the TCP/IP socket `localhost:1717`.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fuwsgi%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/uwsgi/uwsgi.chart.py b/collectors/python.d.plugin/uwsgi/uwsgi.chart.py
index 5ebcfb55b..511b770cf 100644
--- a/collectors/python.d.plugin/uwsgi/uwsgi.chart.py
+++ b/collectors/python.d.plugin/uwsgi/uwsgi.chart.py
@@ -7,10 +7,6 @@ import json
from copy import deepcopy
from bases.FrameworkServices.SocketService import SocketService
-# default module values (can be overridden per job in `config`)
-# update_every = 2
-priority = 60000
-retries = 60
ORDER = [
'requests',
@@ -40,27 +36,27 @@ CHARTS = {
]
},
'tx': {
- 'options': [None, 'Transmitted data', 'KB/s', 'requests', 'uwsgi.tx', 'stacked'],
+ 'options': [None, 'Transmitted data', 'KiB/s', 'requests', 'uwsgi.tx', 'stacked'],
'lines': [
['tx', 'tx', 'incremental']
]
},
'avg_rt': {
- 'options': [None, 'Average request time', 'ms', 'requests', 'uwsgi.avg_rt', 'line'],
+ 'options': [None, 'Average request time', 'milliseconds', 'requests', 'uwsgi.avg_rt', 'line'],
'lines': [
['avg_rt', 'avg_rt', 'absolute']
]
},
'memory_rss': {
- 'options': [None, 'RSS (Resident Set Size)', 'MB', 'memory', 'uwsgi.memory_rss', 'stacked'],
+ 'options': [None, 'RSS (Resident Set Size)', 'MiB', 'memory', 'uwsgi.memory_rss', 'stacked'],
'lines': [
- ['memory_rss', 'memory_rss', 'absolute', 1, 1024 * 1024]
+ ['memory_rss', 'memory_rss', 'absolute', 1, 1 << 20]
]
},
'memory_vsz': {
- 'options': [None, 'VSZ (Virtual Memory Size)', 'MB', 'memory', 'uwsgi.memory_vsz', 'stacked'],
+ 'options': [None, 'VSZ (Virtual Memory Size)', 'MiB', 'memory', 'uwsgi.memory_vsz', 'stacked'],
'lines': [
- ['memory_vsz', 'memory_vsz', 'absolute', 1, 1024 * 1024]
+ ['memory_vsz', 'memory_vsz', 'absolute', 1, 1 << 20]
]
},
'exceptions': {
@@ -87,15 +83,13 @@ CHARTS = {
class Service(SocketService):
def __init__(self, configuration=None, name=None):
super(Service, self).__init__(configuration=configuration, name=name)
- self.url = self.configuration.get('host', 'localhost')
- self.port = self.configuration.get('port', 1717)
self.order = ORDER
self.definitions = deepcopy(CHARTS)
-
+ self.url = self.configuration.get('host', 'localhost')
+ self.port = self.configuration.get('port', 1717)
# Clear dynamic dimensions; these are added during `_get_data()` to allow adding workers at run-time
for chart in DYNAMIC_CHARTS:
self.definitions[chart]['lines'] = []
-
self.last_result = {}
self.workers = []
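Emptying the `lines` of every chart in `DYNAMIC_CHARTS` up front lets the collector register one dimension per uWSGI worker the first time that worker appears in `_get_data()`. A minimal sketch of the pattern (the helper name `add_worker_dimension` is hypothetical, not taken from the module):

```python
def add_worker_dimension(definitions, chart, worker_id):
    """Register a per-worker dimension the first time a worker is seen (sketch)."""
    dim_id = 'worker_{0}_{1}'.format(worker_id, chart)
    lines = definitions[chart]['lines']
    if not any(line[0] == dim_id for line in lines):
        lines.append([dim_id, 'worker {0}'.format(worker_id), 'absolute'])
    return dim_id
```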
diff --git a/collectors/python.d.plugin/uwsgi/uwsgi.conf b/collectors/python.d.plugin/uwsgi/uwsgi.conf
index be1c2ada3..7d09e7330 100644
--- a/collectors/python.d.plugin/uwsgi/uwsgi.conf
+++ b/collectors/python.d.plugin/uwsgi/uwsgi.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply a penalty to update_every in case of failures.
+# The penalty will increase every 5 failed updates in a row. The maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,7 +56,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# In addition to the above, uwsgi also supports the following:
diff --git a/collectors/python.d.plugin/varnish/README.md b/collectors/python.d.plugin/varnish/README.md
index 96c7cafaa..44d64efe1 100644
--- a/collectors/python.d.plugin/varnish/README.md
+++ b/collectors/python.d.plugin/varnish/README.md
@@ -64,6 +64,14 @@ It produces:
### configuration
-No configuration is needed.
+Only one parameter is supported:
+
+```yaml
+instance_name: 'name'
+```
+
+The name of the varnishd instance to get logs from. If not specified, the host name is used.
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fvarnish%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/varnish/varnish.chart.py b/collectors/python.d.plugin/varnish/varnish.chart.py
index d889c2b33..da6781576 100644
--- a/collectors/python.d.plugin/varnish/varnish.chart.py
+++ b/collectors/python.d.plugin/varnish/varnish.chart.py
@@ -8,10 +8,6 @@ import re
from bases.collection import find_binary
from bases.FrameworkServices.ExecutableService import ExecutableService
-# default module values (can be overridden per job in `config`)
-# update_every = 2
-priority = 60000
-retries = 60
ORDER = [
'session_connections',
@@ -47,7 +43,7 @@ CHARTS = {
]
},
'all_time_hit_rate': {
- 'options': [None, 'All History Hit Rate Ratio', 'percent', 'cache performance',
+ 'options': [None, 'All History Hit Rate Ratio', 'percentage', 'cache performance',
'varnish.all_time_hit_rate', 'stacked'],
'lines': [
['cache_hit', 'hit', 'percentage-of-absolute-row'],
@@ -55,7 +51,7 @@ CHARTS = {
['cache_hitpass', 'hitpass', 'percentage-of-absolute-row']]
},
'current_poll_hit_rate': {
- 'options': [None, 'Current Poll Hit Rate Ratio', 'percent', 'cache performance',
+ 'options': [None, 'Current Poll Hit Rate Ratio', 'percentage', 'cache performance',
'varnish.current_poll_hit_rate', 'stacked'],
'lines': [
['cache_hit', 'hit', 'percentage-of-incremental-row'],
@@ -127,7 +123,7 @@ CHARTS = {
]
},
'memory_usage': {
- 'options': [None, 'Memory Usage', 'MB', 'memory usage', 'varnish.memory_usage', 'stacked'],
+ 'options': [None, 'Memory Usage', 'MiB', 'memory usage', 'varnish.memory_usage', 'stacked'],
'lines': [
['memory_free', 'free', 'absolute', 1, 1 << 20],
['memory_allocated', 'allocated', 'absolute', 1, 1 << 20]]
@@ -140,6 +136,8 @@ CHARTS = {
}
}
+VARNISHSTAT = 'varnishstat'
+
class Parser:
_backend_new = re.compile(r'VBE.([\d\w_.]+)\(.*?\).(beresp[\w_]+)\s+(\d+)')
@@ -176,19 +174,31 @@ class Service(ExecutableService):
ExecutableService.__init__(self, configuration=configuration, name=name)
self.order = ORDER
self.definitions = CHARTS
- varnishstat = find_binary('varnishstat')
- self.command = [varnishstat, '-1'] if varnishstat else None
+ self.instance_name = configuration.get('instance_name')
self.parser = Parser()
+ self.command = None
+
+ def create_command(self):
+ varnishstat = find_binary(VARNISHSTAT)
+
+ if not varnishstat:
+ self.error("can't locate '{0}' binary or binary is not executable by user netdata".format(VARNISHSTAT))
+ return False
+
+ if self.instance_name:
+ self.command = [varnishstat, '-1', '-n', self.instance_name, '-t', '1']
+ else:
+ self.command = [varnishstat, '-1', '-t', '1']
+ return True
def check(self):
- if not self.command:
- self.error("Can't locate 'varnishstat' binary or binary is not executable by user netdata")
+ if not self.create_command():
return False
# STDOUT is not empty
reply = self._get_raw_data()
if not reply:
- self.error("No output from 'varnishstat'. Not enough privileges?")
+ self.error("No output from 'varnishstat'. Is it running? Not enough privileges?")
return False
self.parser.init(reply)
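Deferring the binary lookup into `create_command()` means a missing `varnishstat` is reported at `check()` time instead of leaving a silent `None` command behind from the constructor. The argv it builds can be mirrored in a few lines (sketch only; the binary path shown is an example):

```python
def build_command(varnishstat, instance_name=None):
    """Mirror of create_command()'s argv construction (illustrative)."""
    cmd = [varnishstat, '-1']
    if instance_name:
        cmd += ['-n', instance_name]   # select a named varnishd instance
    cmd += ['-t', '1']                 # bound the stat-read timeout to 1s
    return cmd

print(build_command('/usr/bin/varnishstat', 'myinstance'))
# -> ['/usr/bin/varnishstat', '-1', '-n', 'myinstance', '-t', '1']
```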
diff --git a/collectors/python.d.plugin/varnish/varnish.conf b/collectors/python.d.plugin/varnish/varnish.conf
index 4b069d514..54bfe4dee 100644
--- a/collectors/python.d.plugin/varnish/varnish.conf
+++ b/collectors/python.d.plugin/varnish/varnish.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply a penalty to update_every in case of failures.
+# The penalty will increase every 5 failed updates in a row. The maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,7 +56,11 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
+# In addition to the above, varnish also supports the following:
+#
+# instance_name: 'name' # the name of the varnishd instance to get logs from. If not specified, the host name is used.
+#
# ----------------------------------------------------------------------
diff --git a/collectors/python.d.plugin/w1sensor/README.md b/collectors/python.d.plugin/w1sensor/README.md
index b18f08351..94717c812 100644
--- a/collectors/python.d.plugin/w1sensor/README.md
+++ b/collectors/python.d.plugin/w1sensor/README.md
@@ -11,3 +11,5 @@ Charts are created dynamically based on the number of detected sensors.
For detailed configuration information, please read the [`w1sensor.conf`](w1sensor.conf) file.
---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fw1sensor%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/w1sensor/w1sensor.chart.py b/collectors/python.d.plugin/w1sensor/w1sensor.chart.py
index 493c4a135..e50312fc5 100644
--- a/collectors/python.d.plugin/w1sensor/w1sensor.chart.py
+++ b/collectors/python.d.plugin/w1sensor/w1sensor.chart.py
@@ -16,7 +16,9 @@ W1_DIR = '/sys/bus/w1/devices/'
# Lines matching the following regular expression contain a temperature value
RE_TEMP = re.compile(r' t=(\d+)')
-ORDER = ['temp']
+ORDER = [
+ 'temp',
+]
CHARTS = {
'temp': {
diff --git a/collectors/python.d.plugin/w1sensor/w1sensor.conf b/collectors/python.d.plugin/w1sensor/w1sensor.conf
index a4aed8dd7..17271001b 100644
--- a/collectors/python.d.plugin/w1sensor/w1sensor.conf
+++ b/collectors/python.d.plugin/w1sensor/w1sensor.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply a penalty to update_every in case of failures.
+# The penalty will increase every 5 failed updates in a row. The maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -58,7 +56,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 5 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# In addition to the above, w1sensor also supports the following:
diff --git a/collectors/python.d.plugin/web_log/README.md b/collectors/python.d.plugin/web_log/README.md
index e25a03fb3..176551cf4 100644
--- a/collectors/python.d.plugin/web_log/README.md
+++ b/collectors/python.d.plugin/web_log/README.md
@@ -21,7 +21,7 @@ netdata turns this "useless" log file, into a powerful performance and health mo
If netdata is installed on a system running a web server, it will detect it and automatically present a series of charts with information obtained from the web server API, like these (*these do not come from the web server log file*):
![image](https://cloud.githubusercontent.com/assets/2662304/22900686/e283f636-f237-11e6-93d2-cbdf63de150c.png)
-*[**netdata**](https://my-netdata.io/) charts based on metrics collected by querying the `nginx` API (i.e. `/stab_status`).*
+*[**netdata**](https://my-netdata.io/) charts based on metrics collected by querying the `nginx` API (i.e. `/stub_status`).*
> [**netdata**](https://my-netdata.io/) supports `apache`, `nginx`, `lighttpd` and `tomcat`. To obtain real-time information from a web server API, the web server needs to expose it. For directions on configuring your web server, check the config files for each web server. There is a directory with a config file for each web server under [`/etc/netdata/python.d/`](../).
@@ -199,3 +199,5 @@ The column `minimum requests` states the minimum number of requests required for
[**netdata**](https://my-netdata.io/) alarms are user-configurable. Sample config files can be found under the `health/health.d` directory of the netdata GitHub repository. So even [`web_log` alarms can be adapted to your needs](../../../health/health.d/web_log.conf).
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fweb_log%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/web_log/web_log.chart.py b/collectors/python.d.plugin/web_log/web_log.chart.py
index 20e15f4cb..992790462 100644
--- a/collectors/python.d.plugin/web_log/web_log.chart.py
+++ b/collectors/python.d.plugin/web_log/web_log.chart.py
@@ -25,7 +25,9 @@ from bases.collection import read_last_line
from bases.FrameworkServices.LogService import LogService
-ORDER_APACHE_CACHE = ['apache_cache']
+ORDER_APACHE_CACHE = [
+ 'apache_cache',
+]
ORDER_WEB = [
'response_statuses',
@@ -182,7 +184,7 @@ CHARTS_WEB = {
CHARTS_APACHE_CACHE = {
'apache_cache': {
- 'options': [None, 'Apache Cached Responses', 'percent cached', 'cached', 'web_log.apache_cache_cache',
+ 'options': [None, 'Apache Cached Responses', 'percentage', 'cached', 'web_log.apache_cache_cache',
'stacked'],
'lines': [
['hit', 'cache', 'percentage-of-absolute-row'],
diff --git a/collectors/python.d.plugin/web_log/web_log.conf b/collectors/python.d.plugin/web_log/web_log.conf
index a67957aef..0ac17f665 100644
--- a/collectors/python.d.plugin/web_log/web_log.conf
+++ b/collectors/python.d.plugin/web_log/web_log.conf
@@ -27,11 +27,9 @@
# If unset, the default for python.d.plugin is used.
# priority: 60000
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 60
+# penalty indicates whether to apply a penalty to update_every in case of failures.
+# The penalty will increase every 5 failed updates in a row. The maximum penalty is 10 minutes.
+# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
@@ -61,7 +59,7 @@
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
-# retries: 60 # the JOB's number of restoration attempts
+# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# In addition to the above, web_log also supports the following: